Scrape images from a WeChat Official Account (公众号) album and save them, with error handling added
import requests
import re
from PIL import Image
from io import BytesIO
import os

# Ask for the topic (album) link
topic_url = input("Enter the topic link: ")

# Extract the __biz and album_id parameters (kept from the original; not used further below)
biz_match = re.search(r'__biz=(.*?)&', topic_url)
biz = biz_match.group(1) if biz_match is not None else None
album_match = re.search(r'album_id=(.*?)&', topic_url)
album_id = album_match.group(1) if album_match is not None else None

# Build the request headers
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
}

# Fetch the page source
response = requests.get(topic_url, headers=headers)

# Pull the image links and titles out of the HTML with regular expressions
img_links = re.findall('data-src="(.*?)"', response.text)
titles = re.findall('data-title="(.*?)"', response.text)

# Make sure the default output folder (Desktop/TH) exists
os.makedirs(os.path.join(os.path.expanduser("~"), "Desktop/TH"), exist_ok=True)

# Download and save every image
print("Downloading images...")
for i, img_link in enumerate(img_links):
    try:
        response = requests.get(img_link)
        if response.status_code == 200:
            img = Image.open(BytesIO(response.content))
            ext = img.format.lower()
            if ext in ["png", "gif", "jpg", "jpeg"]:
                file_path = os.path.join(os.path.expanduser("~"), "Desktop/TH", f"{i+1}.{ext}")
                with open(file_path, "wb") as f:
                    f.write(response.content)
                print(f"{i+1}.{ext} downloaded!")
            else:
                print("Unsupported image format")
        else:
            print(f"Request failed, status code: {response.status_code}")
    except Exception as e:
        print(f"Error downloading image {i+1}: {e}")
        continue
print("All images downloaded!")
By default this script saves the downloaded images to the TH folder on the desktop. To change the save location, edit the path in the line os.path.join(os.path.expanduser("~"), "Desktop/TH", f"{i+1}.{ext}"). The posts I had seen before did not handle image formats or errors at all, so I added some extra code so the script can keep running through problems. # This thing is really handy for grabbing research reports
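If you would rather not hunt for that line inside the download loop, a minimal sketch of an alternative is to define the output folder once near the top and create it if it is missing (SAVE_DIR is an illustrative name of my own, not something the script above defines):

import os

# SAVE_DIR is a name introduced here for illustration; point it at any folder you like
SAVE_DIR = os.path.join(os.path.expanduser("~"), "Desktop", "TH")
os.makedirs(SAVE_DIR, exist_ok=True)  # create the folder (and any parents) if it does not exist yet

# then, inside the download loop, the save path becomes:
# file_path = os.path.join(SAVE_DIR, f"{i+1}.{ext}")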
import requests
import re
from PIL import Image
from io import BytesIO
import os
import random
import string

# Generate a random three-letter folder name
letters = string.ascii_lowercase
folder_name = ''.join(random.choice(letters) for _ in range(3))

# Join the desktop path and the folder name
desktop_path = os.path.join(os.path.expanduser('~'), 'Desktop')
folder_path = os.path.join(desktop_path, folder_name)

# Create the folder
if not os.path.exists(folder_path):
    os.mkdir(folder_path)
    print(f"Created folder '{folder_name}' on the desktop")
else:
    print(f"Folder '{folder_name}' already exists on the desktop")

# Ask for the topic (album) link
topic_url = input("Enter the topic link: ")

# Extract the __biz and album_id parameters (kept from the original; not used further below)
biz_match = re.search(r'__biz=(.*?)&', topic_url)
biz = biz_match.group(1) if biz_match is not None else None
album_match = re.search(r'album_id=(.*?)&', topic_url)
album_id = album_match.group(1) if album_match is not None else None

# Build the request headers
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
}

# Fetch the page source
response = requests.get(topic_url, headers=headers)

# Pull the image links and titles out of the HTML with regular expressions
img_links = re.findall('data-src="(.*?)"', response.text)
titles = re.findall('data-title="(.*?)"', response.text)

# Download and save every image
print("Downloading images...")
for i, img_link in enumerate(img_links):
    try:
        response = requests.get(img_link)
        if response.status_code == 200:
            img = Image.open(BytesIO(response.content))
            ext = img.format.lower()
            if ext in ["png", "gif", "jpg", "jpeg"]:
                # Save into the randomly named folder created above
                file_path = os.path.join(folder_path, f"{i+1}.{ext}")
                with open(file_path, "wb") as f:
                    f.write(response.content)
                print(f"{i+1}.{ext} downloaded!")
            else:
                print("Unsupported image format")
        else:
            print(f"Request failed, status code: {response.status_code}")
    except Exception as e:
        print(f"Error downloading image {i+1}: {e}")
        continue
print("All images downloaded!")
from PIL import Image
from fpdf import FPDF
import os

# Folder holding the downloaded images (reuses folder_name from above) and its file list
folder_path = os.path.join(os.path.expanduser("~"), "Desktop", folder_name)
file_list = os.listdir(folder_path)
# Sort by the numeric file name so the PDF pages follow the download order
file_list.sort(key=lambda n: int(os.path.splitext(n)[0]) if os.path.splitext(n)[0].isdigit() else float("inf"))

# Create the PDF object
pdf = FPDF()

# Add each image to the PDF as a full page
for file in file_list:
    if file.endswith(".jpg") or file.endswith(".png") or file.endswith(".jpeg") or file.endswith(".gif"):
        image_path = os.path.join(folder_path, file)
        img = Image.open(image_path)
        width, height = img.size  # image size, available if you want to preserve the aspect ratio
        pdf.add_page()
        pdf.image(image_path, 0, 0, pdf.w, pdf.h)  # stretch the image to fill the page

# Save the PDF into the same folder
pdf_file_path = os.path.join(folder_path, "images.pdf")
pdf.output(pdf_file_path, "F")
print("PDF saved")
#https://mp.weixin.qq.com/s/A2hLMDrF2qPAImCLZriaRA
Further revision: the script now creates an arbitrarily named folder at the chosen location (a random name on the desktop in this version), saves the Official Account images into it, and then stitches the images into a PDF inside that folder.

Boss, could you share a finished build?
How is this used, exactly?
Learned something, thanks!
Learned something. Where is this mainly used?
Thanks for sharing.
Borrowing this for reference.
Boss, asking for a finished build.
The webp format is missing; images in Official Account articles sometimes come in that format.
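Following up on the webp comment: a minimal sketch of how the format check could be widened, assuming your Pillow build includes WebP support (ALLOWED_FORMATS and detect_ext are illustrative names of my own, not part of the scripts above):

from io import BytesIO
from PIL import Image

# Pillow reports WebP images as "WEBP" when WebP support is compiled in
ALLOWED_FORMATS = ["png", "gif", "jpg", "jpeg", "webp"]

def detect_ext(image_bytes):
    # Return the lower-cased format Pillow detects, e.g. "jpeg", "png" or "webp"
    return Image.open(BytesIO(image_bytes)).format.lower()

# inside the download loop the format check would then read:
#     ext = detect_ext(response.content)
#     if ext in ALLOWED_FORMATS:
#         ...

Note that the PDF step above only collects files ending in .jpg/.png/.jpeg/.gif, so any saved .webp images would also need that filter extended, or a conversion to PNG with Pillow, before they show up in the PDF.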