Python scraper for the manga Biaoren (《镖人》) on Dmzj (动漫之家)
Sharing some Python code I wrote a long time ago that scrapes the manga Biaoren (《镖人》) from Dmzj (动漫之家). With minor changes it can, in theory, scrape the whole site. It doesn't use multithreading, so downloads may be a bit slow; if there's demand, I can rework it when I have time (a sketch of a threaded variant follows the code below).

```
import os
import re
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
#github: https://github.com/nerkeler/Dmzj_spider
# Fetch the HTML of the comic's index page
def get_url(url):
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36"}
    res = requests.get(url, headers=headers)
    res.encoding = res.apparent_encoding
    if res.status_code == 200:
        return res.text
    else:
        return None
# Scrape every chapter title and link
def get_page(html):
    title_list, href_list = [], []
    soups = BeautifulSoup(html, "lxml")
    soups = soups.find(attrs={"class": "list_con_li autoHeight"})
    for soup in soups.select("li a"):
        title_list.insert(0, soup['title'])
        href_list.insert(0, soup['href'])
    return title_list, href_list  # return both lists
# Check whether a string is numeric
def is_number(s):
    try:
        float(s)
        return True
    except ValueError:
        pass
# Download every image in a chapter
def main_download(name, url):
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36",
        "Referer": url}  # browser identification
    res = requests.get(url, headers=headers)
    html = res.text  # dynamic page, so the response is JS code
    link = re.findall(r"function\(p,a,c,k,e,d\).*?split", html)[0]  # match the packed image-list snippet
    # Decode the JS and build the real image links
    first_number = link.split("'.split")[0]
    first_number = first_number.split("||")
    links = []
    first, second = "", ""
    for i in first_number:
        number = i.split("|")
        for num in number:
            try:
                if is_number(num) and len(num) == 4:
                    first = num  # four-digit string that starts the link
                elif is_number(num) and (len(num) == 5 or len(num) == 6):
                    second = num  # digit string in the middle of the link
                elif is_number(num) and len(num) >= 7:
                    links.append(num)  # digit strings that end the link
            except:
                pass
    links = sorted(links)  # ascending order
    # Start downloading the images
    for i in links:
        imgs_link = f'https://images.dmzj.com/img/chapterpic/{first}/{second}/{i}.jpg'  # build the real link
        response = requests.get(url=imgs_link, headers=headers)
        try:
            with open(f"镖人/{name}/{i}.jpg", 'wb') as f:
                f.write(response.content)
        except:
            pass
    print(f"{name}: download finished")
# Main loop
def main():
    if not os.path.exists("镖人"):  # create the top-level folder
        os.mkdir("镖人")
    url = "https://www.dmzj.com/info/biaoren.html"
    html = get_url(url)
    title_list, href_list = get_page(html)
    # processing = tqdm(range(0, len(title_list)))
    for name, url in zip(title_list, href_list):
        if not os.path.exists(f"镖人/{name}"):
            os.mkdir(f"镖人/{name}")
        main_download(name, url)

if __name__ == '__main__':
    main()
```
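Since the script doesn't use multithreading, here is what a threaded version might look like. This is a minimal sketch only, built on ThreadPoolExecutor from the standard library; it assumes the get_url, get_page, and main_download functions above work as written, and the wrapper name download_all and the worker count are illustrative choices of mine, not part of the original code.

```
import os
from concurrent.futures import ThreadPoolExecutor, as_completed

def download_all(info_url, workers=4):
    # Fetch the chapter list once, then hand each chapter to a worker.
    # Assumes get_url/get_page/main_download from the code above.
    html = get_url(info_url)
    title_list, href_list = get_page(html)
    os.makedirs("镖人", exist_ok=True)
    with ThreadPoolExecutor(max_workers=workers) as pool:
        futures = []
        for name, href in zip(title_list, href_list):
            os.makedirs(f"镖人/{name}", exist_ok=True)
            futures.append(pool.submit(main_download, name, href))
        for fut in as_completed(futures):
            fut.result()  # surfaces any exception raised in a worker

# Example: download_all("https://www.dmzj.com/info/biaoren.html", workers=4)
```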
Is Biaoren finished? I'd read about half of it before... my reply feels a bit awkward here, though.

Here is a lightly optimized version of the code:

```
import os
import re
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
def get_url(url):
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36"
    }
    res = requests.get(url, headers=headers)
    res.encoding = res.apparent_encoding
    if res.status_code == 200:
        return res.text
    else:
        return None
def get_page(html):
    title_list, href_list = [], []
    soups = BeautifulSoup(html, "lxml")
    soups = soups.find(attrs={"class": "list_con_li autoHeight"})
    for soup in soups.select("li a"):
        title_list.insert(0, soup['title'])
        href_list.insert(0, soup['href'])
    return title_list, href_list
def is_number(s):
    try:
        float(s)
        return True
    except ValueError:
        return False
def main_download(name, url):
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36",
        "Referer": url
    }
    res = requests.get(url, headers=headers)
    html = res.text
    link = re.findall(r"function\(p,a,c,k,e,d\).*?split", html)[0]
    first_number = link.split("'.split")[0]
    first_number = first_number.split("||")
    links = []
    first, second = "", ""
    for i in first_number:
        number = i.split("|")
        for num in number:
            try:
                if is_number(num) and len(num) == 4:
                    first = num
                elif is_number(num) and (len(num) == 5 or len(num) == 6):
                    second = num
                elif is_number(num) and len(num) >= 7:
                    links.append(num)
            except:
                pass
    links = sorted(links)
    for i in links:
        imgs_link = f'https://images.dmzj.com/img/chapterpic/{first}/{second}/{i}.jpg'
        response = requests.get(url=imgs_link, headers=headers)
        try:
            os.makedirs(f"镖人/{name}", exist_ok=True)
            with open(f"镖人/{name}/{i}.jpg", 'wb') as f:
                f.write(response.content)
        except:
            pass
    print(f"{name}: download finished")
def main():
    os.makedirs("镖人", exist_ok=True)
    url = "https://www.dmzj.com/info/biaoren.html"
    html = get_url(url)
    title_list, href_list = get_page(html)
    for name, url in zip(title_list, href_list):
        os.makedirs(f"镖人/{name}", exist_ok=True)
        main_download(name, url)

if __name__ == '__main__':
    main()
```
Optimizations:
- Used os.makedirs with the exist_ok parameter, which creates nested directories in one call, with no need to check for and create parent directories by hand.
- Renamed some variables for readability.
- Used try-except to catch exceptions so errors don't abort the program.
- In is_number, the except block now returns False, so the function no longer falls through without a return value on error.
- In main_download, the chapter directory is created with os.makedirs before images are written, avoiding a missing-directory error when saving files.
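A side note on the decoding step: the function(p,a,c,k,e,d) signature that main_download matches is the trademark of Dean Edwards' JS packer, whose payload ends in a quoted, pipe-delimited token dictionary of the form 'tok1|tok2|...'.split('|'). Below is a minimal sketch that pulls that dictionary out directly and classifies the numeric tokens the same way the loop above does; the regex and the helper name extract_image_parts are my own, and assume the page still serves that packer format.

```
import re

def extract_image_parts(html):
    # The packer payload ends with ...,'tok1|tok2|...'.split('|'),...),
    # so grab the quoted dictionary in one step instead of slicing up
    # the whole matched function body.
    m = re.search(r"'([^']*)'\.split\('\|'\)", html)
    if not m:
        return "", "", []
    nums = [t for t in m.group(1).split("|") if t.isdigit()]
    first = next((t for t in nums if len(t) == 4), "")        # 4-digit prefix
    second = next((t for t in nums if len(t) in (5, 6)), "")  # middle digits
    pages = sorted(t for t in nums if len(t) >= 7)            # page file names
    return first, second, pages
```

As in the original, sorted() compares the page tokens as strings, which preserves numeric order only while the tokens share the same length.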
Thanks for sharing, OP.
Taking a look to learn from this, thanks for sharing.
Thanks for sharing, learned something.
Clicked through to the URL and found the site is down. Did it get scraped to death? lol
Thanks, I'll download it and give it a try!
Awesome.
Haven't written a scraper in two years. It's rough.
Thanks for sharing, I'll study it.
Thanks, will give it a try.