Hunan Daily (HNRB) e-paper PDF downloader with automatic merging
Input date format: 2024-04/16
AI
Example edition index page: https://hnrb.voc.com.cn/hnrb_epaper/html/2024-04/16/node_201.htm
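The edition index URL simply embeds the date as a YYYY-MM/DD path segment. A minimal sketch of that mapping (node_201.htm is the index page the script below scrapes):

    # Build the edition index URL from a 'YYYY-MM/DD' date string
    date_segment = "2024-04/16"
    index_url = f"https://hnrb.voc.com.cn/hnrb_epaper/html/{date_segment}/node_201.htm"
    print(index_url)  # matches the example URL above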
Link: https://pan.baidu.com/s/1D_7Jh2FI1uM0KfeIH8qb4Q?pwd=52pj
Extraction code: 52pj
import os
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin
from PyPDF2 import PdfMerger, PdfReader
from datetime import datetime


def fetch_pdf_links(url):
    """Fetch the edition index page and collect absolute URLs of all linked PDFs."""
    session = requests.Session()
    response = session.get(url)
    if response.status_code != 200:
        print(f"Request failed, status code: {response.status_code}")
        return []
    soup = BeautifulSoup(response.text, 'html.parser')
    # Collect every <a href="...pdf"> link and resolve it against the page URL
    pdf_links = [urljoin(url, link['href'])
                 for link in soup.find_all('a', href=True)
                 if link['href'].endswith('.pdf')]
    return pdf_links


def save_and_count_pages(pdf_url, target_folder):
    """Download one PDF, report its page count, and return (path, page_count)."""
    # Strip characters that are not allowed in file names
    safe_filename = os.path.basename(pdf_url).replace('?', '').replace(':', '').replace('*', '')
    target_path = os.path.join(target_folder, safe_filename)
    if os.path.exists(target_path):
        print(f"File already exists: {target_path}")
        return None
    pdf_response = requests.get(pdf_url)
    if pdf_response.status_code == 200:
        with open(target_path, 'wb') as f:
            f.write(pdf_response.content)
        with open(target_path, 'rb') as pdf_file:
            pdf_reader = PdfReader(pdf_file)
            num_pages = len(pdf_reader.pages)
        print(f"PDF file '{safe_filename}' contains {num_pages} pages")
        return (target_path, num_pages)
    else:
        print(f"Failed to download PDF, status code: {pdf_response.status_code}")
        return None


def merge_downloaded_pdfs(pdf_files_info, target_folder, output_filename):
    """Merge the downloaded PDFs, in download order, into a single output file."""
    merger = PdfMerger()
    for file_path, _ in pdf_files_info:
        # Passing the path lets PdfMerger open and close the file itself
        merger.append(file_path)
    merged_pdf_path = os.path.join(target_folder, output_filename)
    with open(merged_pdf_path, 'wb') as output_stream:
        merger.write(output_stream)
    merger.close()
    print(f"Merged all PDF files in order into: {merged_pdf_path}")


def format_and_validate_date(input_date):
    """Normalize 'YYYY-MM/DD' input into the zero-padded path segment used by the site."""
    try:
        year, month_day = input_date.split('-')
        month, day = month_day.split('/')
        formatted_date = f"{year}-{month.zfill(2)}/{day.zfill(2)}"
        return formatted_date
    except ValueError:
        print("Invalid date format, please enter it as 'YYYY-MM/DD'")
        return None


def main():
    input_date = input("Enter the date (format YYYY-MM/DD): ")
    formatted_date = format_and_validate_date(input_date)
    if formatted_date is not None:
        base_url = f"https://hnrb.voc.com.cn/hnrb_epaper/html/{formatted_date}/node_201.htm"
        # Save everything into an HNRB folder on the desktop
        target_folder = os.path.join(os.path.expanduser("~"), 'Desktop', 'HNRB')
        os.makedirs(target_folder, exist_ok=True)
        pdf_links = fetch_pdf_links(base_url)
        downloaded_pdfs_info = [save_and_count_pages(link, target_folder) for link in pdf_links]
        # Drop records for downloads that failed or were skipped
        downloaded_pdfs_info = [info for info in downloaded_pdfs_info if info is not None]
        now = datetime.now()
        output_filename = f"HNRB_{now.strftime('%Y%m%d%H%M%S')}.pdf"
        merge_downloaded_pdfs(downloaded_pdfs_info, target_folder, output_filename)


if __name__ == "__main__":
    main()
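A usage sketch, assuming the script is saved as hnrb_merge.py (the filename is arbitrary) and the third-party packages it imports are installed:

    pip install requests beautifulsoup4 PyPDF2
    python hnrb_merge.py
    # Enter a date such as 2024-04/16 at the prompt; the individual page PDFs
    # and the merged HNRB_<timestamp>.pdf end up in the HNRB folder on the desktop.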