A novel scraper written for a fellow member in the bounty section
## I saw the request a fellow member posted in the bounty section and, having nothing much to do, put this together
### The code is fairly rough; I didn't spend much time polishing or encapsulating it
### I tested it myself and found no problems
### Environment: Python 3.9 on Windows 10
### The compiled exe is in that member's thread
### The source code is below
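### Dependencies: the script only needs `requests` and `lxml` (`pip install requests lxml`)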
```
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@File name : main.py
@Time : 2022-03-14 10:42
@Author : lc
@Version : 1.0
"""
# here put the import lib
import re
import os
import requests
import traceback
import lxml.etree
from lxml import etree
from multiprocessing.dummy import Pool  # a thread pool, despite the module name
class ZhenHun(object):
    def __init__(self):
        self._root_url = 'https://www.zhenhunxiaoshuo.com'
        self._headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36 Edg/99.0.1150.39',
        }
        self._chunai_path = './纯爱小说类'    # "pure-love novels"
        self._yanqing_path = './言情小说类'   # "romance novels"
        self._priest_path = './priest小说集'  # "priest novel collection"
        # directory the current category saves into; updated by each category method
        self._save_path = self._chunai_path
    def get_pagination(self, url) -> int:
        '''
        Get how many pages the current category has
        :param url: url of the category's first page
        :return: the page count, or 0 if there is only one page
        '''
        response = requests.get(url=url, headers=self._headers)
        response.close()
        tree = etree.HTML(response.text)
        try:
            th_list = tree.xpath('//div[@class="content"]/article/table//tr/th')
            if th_list:
                pagination = len(th_list) - 2
                return pagination
            else:
                return 0
        except Exception:
            return 0
    def parse_outline(self, url: str) -> list:
        '''
        Parse the tr tags of the outline page
        :param url: page url
        :return: list of tr elements, tr_list
        '''
        response = requests.get(url=url, headers=self._headers)
        response.close()
        tree = etree.HTML(response.text)
        tr_list = tree.xpath('//article[@class="article-content"]/table//tr')
        # keep only rows that contain a novel link, dropping the pagination row
        tr_list = [tr for tr in tr_list if tr.xpath('./td/a')]
        # return the list of tr tags
        return tr_list
    def get_novel_data(self, tr: lxml.etree._Element) -> tuple:
        '''
        Extract the novel's url, name and author from its tr tag
        :param tr: tr element
        :return: (path_novel, novel_name, novel_url)
        '''
        try:
            novel_name = tr.xpath('./td/a/strong/text()')[0]
            novel_author = tr.xpath('./td/text()')[0]
            novel_url = self._root_url + tr.xpath('./td/a/@href')[0]
            path_novel = f'{self._save_path}/{novel_name}---{novel_author}'
            if not os.path.exists(path_novel):
                os.mkdir(path_novel)
            return path_novel, novel_name, novel_url
        except Exception:
            pass
    def get_section_data(self, novel_data: tuple) -> None:
        '''
        Fetch the content of every section (chapter) via novel_data
        :param novel_data: (novel_path, novel_name, novel_url)
        :return: None
        '''
        if not novel_data:  # get_novel_data returns None on a parse failure
            return
        novel_path, novel_name, novel_url = novel_data
        response = requests.get(url=novel_url, headers=self._headers)
        response.close()
        tree = etree.HTML(response.text)
        article_list = tree.xpath('//div[@class="excerpts"]/article')
        for article in article_list:
            section_name = article.xpath('./a/text()')[0]
            section_url = article.xpath('./a/@href')[0]
            try:
                # drop characters that are unsafe in file names
                r = re.findall('[¥#%*&@]', section_name)
                for i in r:
                    section_name = section_name.replace(i, '')
            except Exception:
                pass
            path = novel_path + f'/{section_name}.txt'
            name = novel_name + f'---{section_name}'
            response = requests.get(url=section_url, headers=self._headers)
            response.close()
            tree = etree.HTML(response.text)
            content = tree.xpath('//article[@class="article-content"]//text()')
            content = ''.join(content)
            # save
            self.save_data(path=path, name=name, content=content)
        return
    def save_data(self, path: str, name: str, content: str) -> None:
        '''
        Save the data
        :param path: path to save to
        :param name: file name used in the progress message
        :param content: data to save
        :return: None
        '''
        if not os.path.exists(path):
            with open(file=path, mode='w', encoding='utf-8') as f:
                f.write(content)
            print(f'Downloaded---{name}')
        else:
            print(f'Already downloaded---{name}')
        return

    def spider(self, url: str) -> None:
        '''
        Start crawling one outline page
        :param url: page url
        :return: None
        '''
        pool = Pool(15)
        # parse the tr tags of every novel on this outline page
        tr_list = self.parse_outline(url=url)
        # extract each novel's url, name and author from its tr tag
        novel_data_list = pool.map(self.get_novel_data, tr_list)
        # fetch the novel contents and save them
        pool.map(self.get_section_data, novel_data_list)
        return
    def chunai(self):
        if not os.path.exists(self._chunai_path):
            os.mkdir(self._chunai_path)
        self._save_path = self._chunai_path
        url_outline = self._root_url + '/chunai'
        # get the page count
        pagination = self.get_pagination(url=url_outline)
        if pagination == 0:
            print('Pure-love novels---1 page in total')
            self.spider(url=url_outline)
        else:
            print(f'Pure-love novels---{pagination} pages in total')
            # crawl the first page
            self.spider(url=url_outline)
            # crawl the following pages
            for i in range(2, pagination + 1):
                page_url = self._root_url + f'/chunai{i}'
                self.spider(url=page_url)

    def yanqing(self):
        if not os.path.exists(self._yanqing_path):
            os.mkdir(self._yanqing_path)
        self._save_path = self._yanqing_path
        url_outline = self._root_url + '/yanqing'
        # get the page count
        pagination = self.get_pagination(url=url_outline)
        if pagination == 0:
            print('Romance novels---1 page in total')
            self.spider(url=url_outline)
        else:
            print(f'Romance novels---{pagination} pages in total')
            # crawl the first page
            self.spider(url=url_outline)
            # crawl the following pages
            for i in range(2, pagination + 1):
                page_url = self._root_url + f'/yanqing{i}'
                self.spider(url=page_url)

    def priest(self):
        if not os.path.exists(self._priest_path):
            os.mkdir(self._priest_path)
        self._save_path = self._priest_path
        url_outline = self._root_url + '/priest'
        # get the page count
        pagination = self.get_pagination(url=url_outline)
        if pagination == 0:
            print('priest collection---1 page in total')
            self.spider(url=url_outline)
        else:
            print(f'priest collection---{pagination} pages in total')
            # crawl the first page
            self.spider(url=url_outline)
            # crawl the following pages
            for i in range(2, pagination + 1):
                page_url = self._root_url + f'/priest{i}'
                self.spider(url=page_url)
if __name__ == '__main__':
    try:
        zh = ZhenHun()
        zh.chunai()
        zh.yanqing()
        zh.priest()
        input('All novels fetched. Press Enter to exit')
    except Exception:
        print(traceback.format_exc())
        input('An error occurred. Please save the error message and report it to the admin\nPress Enter to exit')
```

There was a small problem (the chapter-name cleanup did not strip `/` or spaces, which broke file paths), so here is the revised source:

```
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@File name : main.py
@Time : 2022-03-14 10:42
@Author : lc
@Version : 1.0
"""
# here put the import lib
import re
import os
import requests
import traceback
import lxml.etree
from lxml import etree
from multiprocessing.dummy import Pool  # a thread pool, despite the module name
class ZhenHun(object):
    def __init__(self):
        self._root_url = 'https://www.zhenhunxiaoshuo.com'
        self._headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36 Edg/99.0.1150.39',
        }
        self._chunai_path = './纯爱小说类'    # "pure-love novels"
        self._yanqing_path = './言情小说类'   # "romance novels"
        self._priest_path = './priest小说集'  # "priest novel collection"
        # directory the current category saves into; updated by each category method
        self._save_path = self._chunai_path
    def get_pagination(self, url) -> int:
        '''
        Get how many pages the current category has
        :param url: url of the category's first page
        :return: the page count, or 0 if there is only one page
        '''
        response = requests.get(url=url, headers=self._headers)
        response.close()
        tree = etree.HTML(response.text)
        try:
            th_list = tree.xpath('//div[@class="content"]/article/table//tr/th')
            if th_list:
                pagination = len(th_list) - 2
                return pagination
            else:
                return 0
        except Exception:
            return 0
    def parse_outline(self, url: str) -> list:
        '''
        Parse the tr tags of the outline page
        :param url: page url
        :return: list of tr elements, tr_list
        '''
        response = requests.get(url=url, headers=self._headers)
        response.close()
        tree = etree.HTML(response.text)
        tr_list = tree.xpath('//article[@class="article-content"]/table//tr')
        # keep only rows that contain a novel link, dropping the pagination row
        tr_list = [tr for tr in tr_list if tr.xpath('./td/a')]
        # return the list of tr tags
        return tr_list
    def get_novel_data(self, tr: lxml.etree._Element) -> tuple:
        '''
        Extract the novel's url, name and author from its tr tag
        :param tr: tr element
        :return: (path_novel, novel_name, novel_url)
        '''
        try:
            novel_name = tr.xpath('./td/a/strong/text()')[0]
            novel_author = tr.xpath('./td/text()')[0]
            novel_url = self._root_url + tr.xpath('./td/a/@href')[0]
            path_novel = f'{self._save_path}/{novel_name}---{novel_author}'
            if not os.path.exists(path_novel):
                os.mkdir(path_novel)
            return path_novel, novel_name, novel_url
        except Exception:
            pass
    def get_section_data(self, novel_data: tuple) -> None:
        '''
        Fetch the content of every section (chapter) via novel_data
        :param novel_data: (novel_path, novel_name, novel_url)
        :return: None
        '''
        if not novel_data:  # get_novel_data returns None on a parse failure
            return
        novel_path, novel_name, novel_url = novel_data
        response = requests.get(url=novel_url, headers=self._headers)
        response.close()
        tree = etree.HTML(response.text)
        article_list = tree.xpath('//div[@class="excerpts"]/article')
        for article in article_list:
            section_name = article.xpath('./a/text()')[0]
            section_url = article.xpath('./a/@href')[0]
            try:
                # revised: now also strips '/' and spaces from chapter names
                r = re.findall('[¥#%\\* /&@]', section_name)
                for i in r:
                    section_name = section_name.replace(i, '')
            except Exception:
                pass
            path = novel_path + f'/{section_name}.txt'
            name = novel_name + f'---{section_name}'
            response = requests.get(url=section_url, headers=self._headers)
            response.close()
            tree = etree.HTML(response.text)
            content = tree.xpath('//article[@class="article-content"]//text()')
            content = ''.join(content)
            # save
            self.save_data(path=path, name=name, content=content)
        return
    def save_data(self, path: str, name: str, content: str) -> None:
        '''
        Save the data
        :param path: path to save to
        :param name: file name used in the progress message
        :param content: data to save
        :return: None
        '''
        if not os.path.exists(path):
            with open(file=path, mode='w', encoding='utf-8') as f:
                f.write(content)
            print(f'Downloaded---{name}')
        else:
            print(f'Already downloaded---{name}')
        return

    def spider(self, url: str) -> None:
        '''
        Start crawling one outline page
        :param url: page url
        :return: None
        '''
        pool = Pool(15)
        # parse the tr tags of every novel on this outline page
        tr_list = self.parse_outline(url=url)
        # extract each novel's url, name and author from its tr tag
        novel_data_list = pool.map(self.get_novel_data, tr_list)
        # fetch the novel contents and save them
        pool.map(self.get_section_data, novel_data_list)
        return
    def chunai(self):
        if not os.path.exists(self._chunai_path):
            os.mkdir(self._chunai_path)
        self._save_path = self._chunai_path
        url_outline = self._root_url + '/chunai'
        # get the page count
        pagination = self.get_pagination(url=url_outline)
        if pagination == 0:
            print('Pure-love novels---1 page in total')
            self.spider(url=url_outline)
        else:
            print(f'Pure-love novels---{pagination} pages in total')
            # crawl the first page
            self.spider(url=url_outline)
            # crawl the following pages
            for i in range(2, pagination + 1):
                page_url = self._root_url + f'/chunai{i}'
                self.spider(url=page_url)

    def yanqing(self):
        if not os.path.exists(self._yanqing_path):
            os.mkdir(self._yanqing_path)
        self._save_path = self._yanqing_path
        url_outline = self._root_url + '/yanqing'
        # get the page count
        pagination = self.get_pagination(url=url_outline)
        if pagination == 0:
            print('Romance novels---1 page in total')
            self.spider(url=url_outline)
        else:
            print(f'Romance novels---{pagination} pages in total')
            # crawl the first page
            self.spider(url=url_outline)
            # crawl the following pages
            for i in range(2, pagination + 1):
                page_url = self._root_url + f'/yanqing{i}'
                self.spider(url=page_url)

    def priest(self):
        if not os.path.exists(self._priest_path):
            os.mkdir(self._priest_path)
        self._save_path = self._priest_path
        url_outline = self._root_url + '/priest'
        # get the page count
        pagination = self.get_pagination(url=url_outline)
        if pagination == 0:
            print('priest collection---1 page in total')
            self.spider(url=url_outline)
        else:
            print(f'priest collection---{pagination} pages in total')
            # crawl the first page
            self.spider(url=url_outline)
            # crawl the following pages
            for i in range(2, pagination + 1):
                page_url = self._root_url + f'/priest{i}'
                self.spider(url=page_url)
if __name__ == '__main__':
    try:
        zh = ZhenHun()
        zh.chunai()
        zh.yanqing()
        zh.priest()
        input('All novels fetched. Press Enter to exit')
    except Exception:
        print(traceback.format_exc())
        input('An error occurred. Please save the error message and report it to the admin\nPress Enter to exit')
```
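A note on the pool used above: despite the module name, `multiprocessing.dummy.Pool` is a thread pool with the `multiprocessing` API, which suits I/O-bound work like these HTTP requests. A minimal sketch of the same pattern (the URL list is just a placeholder):

```
from multiprocessing.dummy import Pool  # thread pool with the multiprocessing API
import requests

def fetch(url: str) -> int:
    # fetch one page and return its HTTP status code
    return requests.get(url).status_code

if __name__ == '__main__':
    urls = ['https://www.zhenhunxiaoshuo.com/chunai'] * 3  # placeholder work items
    with Pool(15) as pool:
        # map blocks until every url has been fetched
        print(pool.map(fetch, urls))
```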
Thanks for sharing. I hope you can describe the features in a bit more detail.

Not bad, worth a try. Could you post the link to the original thread?

> ouyang9r posted on 2022-3-14 23:54: Thanks for sharing. I hope you can describe the features in a bit more detail.

It just scrapes the novels from a novel site.

> flycity posted on 2022-3-15 07:02: Could you explain how to get around anti-scraping measures?
This site has no anti-scraping measures at all.
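For a site that did throttle requests, a minimal courtesy pattern would be to reuse one session and pace the calls; this sketch is purely illustrative and not part of the original script:

```
import time
import requests

session = requests.Session()  # reuse one connection across requests
session.headers['user-agent'] = 'Mozilla/5.0'  # same user-agent trick the script uses

def polite_get(url: str, delay: float = 0.5) -> requests.Response:
    # pause between requests so the crawler does not hammer the server
    time.sleep(delay)
    return session.get(url, timeout=10)
```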
> jinxin6670 posted on 2022-3-15 07:21: Not bad, worth a try. Could you post the link to the original thread?
https://www.52pojie.cn/forum.php?mod=viewthread&tid=1602436&page=1

Thanks for sharing.

wxs513 posted (last edited 2022-3-15 11:39):
```
Traceback (most recent call last):
  File "main.py", line 205, in <module>
  File "main.py", line 159, in chunai
  File "main.py", line 144, in spider
  File "multiprocessing\pool.py", line 364, in map
  File "multiprocessing\pool.py", line 771, in get
  File "multiprocessing\pool.py", line 125, in worker
  File "multiprocessing\pool.py", line 48, in mapstar
  File "main.py", line 113, in get_section_data
  File "main.py", line 125, in save_data
OSError: Invalid argument: './纯爱小说类/鬼服兵团---颜凉雨/第175章巨舰贱?起航上.txt'
```
It would be better to write in, all at once, every character a file name is not allowed to contain ('?', '、', '╲', '/', '*', '“', '”', '<', '>', '|'), covering both the half-width and full-width forms; who knows how this site comes up with its chapter titles. Or just skip the failures, list them all at the end, and handle those by hand; there shouldn't be many. Sorry for the trouble.
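A sketch of that fuller cleanup (the helper name and the exact character set are illustrative, not from the original script):

```
import re

# characters Windows forbids in file names, plus the full-width variants
# mentioned above; extend the set as new offenders appear
_BAD_CHARS = re.compile(r'[\\/:*?"<>|?、╲“”]')

def sanitize_filename(name: str) -> str:
    # strip every forbidden character instead of replacing them one by one
    return _BAD_CHARS.sub('', name)

# e.g. sanitize_filename('第175章巨舰贱?起航上') returns '第175章巨舰贱起航上'
```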
Thanks for sharing, I'll give it a try!