First steps with a Python crawler: downloading web novels
I'm a complete Python beginner and the code isn't pretty; if anything is badly written, please don't hold back your advice!
The script downloads novels from a certain "biquge" site, saving the chapter text as book_name.txt and the chapter list as log.txt.
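For each book the script creates its own folder under Path, so a finished download ends up laid out like this (the book-title folder is named after whatever you searched for; the path shown is the default from the script):

/Users/apple/Desktop/小说/
└── <book title>/
    ├── <book title>.txt   # the chapter text
    └── log.txt            # titles of chapters already saved, used for resuming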
import requests
from bs4 import BeautifulSoup
import os
import time
import sys

# BeautifulSoup needs the html5lib parser installed (pip install html5lib lxml)
# Change Path to wherever the novels should be saved
Path = "/Users/apple/Desktop/小说/"
soup = BeautifulSoup  # alias, so the class can be called as soup(markup, parser)
url = "http://www.xbiquge.la/modules/article/waps.php"  # search endpoint
host = "http://www.xbiquge.la"
# Keywords used to filter chapter titles: "章" (chapter), "结局" (ending),
# "番" (side story), plus digits for numbered chapters
keys = {
    "key1": "章",
    "key2": "结局",
    "key3": "番",
    "key4": "1",
    "key5": "2",
    "key6": "3",
    "key7": "4",
    "key8": "5",
    "key9": "6",
    "key10": "7",
    "key11": "8",
    "key12": "9"
}
# Lists shared across the functions below
book_names = []
book_urls = []
book_authors = []
book_counts = []
chapter_hrefs = []
chapter_names = []
# Request headers for the search POST. Content-Length is left out on purpose:
# requests computes it from the encoded body, so hard-coding a fixed value
# is wrong for any other search-term length.
headers = {
    "Content-Type": "application/x-www-form-urlencoded",
    "Origin": "http://www.xbiquge.la",
    "Host": "www.xbiquge.la",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "Connection": "keep-alive",
    "Accept-Encoding": "gzip, deflate",
    "Cookie": "_abcde_qweasd=0; _abcde_qweasd=0; bdshare_firstime=1615377374914",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko) "
                  "Version/11.1.2 Safari/605.1.15",
    "Referer": "http://www.xbiquge.la/",
    "Accept-Language": "zh-cn"
}
def name_():
    # Ask for a book title and build the search form data
    global data
    global book_name
    print("Enter a book title:")
    book_name = input()
    data = {"searchkey": book_name}
def choose_book(search_result_):
    # Start from a clean slate so re-entering on bad input does not duplicate rows
    book_names.clear()
    book_urls.clear()
    book_authors.clear()
    bs_search_result = soup(search_result_.content, "lxml")
    book_infos = bs_search_result.find_all(name="tr")
    # Collect the search results into lists so they can be printed
    for book_info in book_infos:
        link = book_info.find("a")
        if link is None:
            continue  # skip the table header row
        book_names.append(link.string)
        book_urls.append(link.get("href"))
        cells = book_info.find_all("td")
        # The author sits in the second table cell of each result row
        book_authors.append(cells[1].string if len(cells) > 1 else "?")
    # Bail out if the search returned nothing
    if not book_names:
        print("——————Nothing found!——————")
        return
    print("——————Search results:——————\n")
    for count, name in enumerate(book_names):
        book_counts.append(count)
        print(str(count) + "  Title: " + str(name) + "  Author: " + str(book_authors[count]))
    print("\n——————Enter a number to choose a book:——————")
    choose_count = input()
    print("——————Loading...——————")
    # Pick the chosen book, or prompt again on bad input
    if choose_count.isdigit() and int(choose_count) < len(book_names):
        make_dir(int(choose_count))
        book_web_page(book_urls[int(choose_count)])
    else:
        print("——————Error, please check your input——————")
        choose_book(search_result_)
def book_web_page(book_choose_url_):
    # Fetch the book's index page, which lists every chapter
    web_page = requests.get(book_choose_url_)
    web_page_soup = soup(web_page.content, "lxml")
    read_(web_page_soup)
def read_(page_soup):
    global start_time
    # Collect every chapter link and title from the index page
    chapter_lists = page_soup.find_all(name="dd")
    for chapter_list in chapter_lists:
        link = chapter_list.find("a")
        chapter_hrefs.append(host + link.get("href"))
        chapter_names.append(link.string)
    start_time = time.time()
    print("——————Searching for the chapter to resume from...——————")
    # log.txt lists the chapters already saved; resume right after the last one
    log_path = Path + name__ + "/" + "log.txt"
    if not os.path.exists(log_path):
        open(log_path, "w").close()
        start_num = 0
    else:
        with open(log_path, "r", encoding="utf-8") as f4:
            lines = f4.readlines()
        start_num = chapter_names.index(lines[-1]) + 1 if lines else 0
    print("——————Download started——————")
    down_novel(start_num)
def down_novel(chapter_now_):
    global now_
    now_ = chapter_now_
    try:
        # Walk the chapter list, starting from the resume position
        for now__ in range(chapter_now_, len(chapter_hrefs)):
            now_ = now__
            novel_content = requests.get(chapter_hrefs[now__])
            content_soup = soup(novel_content.content, "html5lib")
            title = content_soup.find("h1").string
            # Crawling too fast makes the site answer 503; back off and retry
            if title is None or "503 Service Temporarily Unavailable" in title:
                time.sleep(5)
                down_novel(now__)
                return
            # Skip chapters whose title contains none of the keywords
            for value in keys.values():
                if value in title:
                    # Extract the body text and strip the <br/> tags
                    content = content_soup.find_all(name="div", id="content")
                    content2 = str(content).replace("<br/>", "")
                    # Append the chapter to <book_name>.txt ...
                    with open(Path + name__ + "/" + name__ + ".txt", "a", encoding="utf-8") as f:
                        f.write(chapter_names[now__] + "\n")
                        f.write(content2 + "\n\n\n\n")
                    # ... and record its title in log.txt for resuming later
                    with open(Path + name__ + "/" + "log.txt", "a", encoding="utf-8") as f1:
                        f1.write("\n" + chapter_names[now__])
                    print("Downloaded: " + chapter_names[now__])
                    break
        time_ = time.time() - start_time  # end minus start, not the reverse
        print("——————" + name__ + " finished downloading——————")
        print("Elapsed: " + str(time_) + "s")
        sys.exit(0)
    except (ConnectionResetError, requests.exceptions.ConnectionError):
        # Dropped connection: retry from the chapter that just failed
        down_novel(now_)
def make_dir(count):
    # Create the per-book output directory on the first download
    global name__
    name__ = book_names[count]
    if not os.path.exists(Path + name__ + "/"):
        print("——————Creating directory...——————")
        os.makedirs(Path + name__ + "/")
def run():
    name_()
    search_result = requests.post(url=url, headers=headers, data=data)
    choose_book(search_result)


if __name__ == "__main__":
    run()
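If you want to poke at the search endpoint by itself before running the whole script, here is a minimal sketch (the search term is a placeholder; the URL and headers follow the ones used above):

import requests

resp = requests.post(
    "http://www.xbiquge.la/modules/article/waps.php",
    headers={"User-Agent": "Mozilla/5.0"},
    data={"searchkey": "placeholder"},  # put a real book title here
)
print(resp.status_code, len(resp.text))  # 200 plus an HTML results table means it worked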
Run results (screenshots in the original post): the first download, and resuming after an interruption.
—————————————————————————————
3.16 1:02
Fixed two bugs:
1. Improved the check for whether the search actually found the book, so empty results are handled properly
2. Improved the chapter-title filtering so the same chapter is no longer downloaded several times (a sketch of an alternative filter follows below)
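As an aside, the keyword dict works, but single digits such as "1" match almost any title. A sketch of a filter built on one regular expression instead (my own illustration, not part of the script; the pattern is an assumption about this site's title format):

import re

# Assumed chapter-title shapes: "第12章 ...", "第一百章 ...", "番外 ...", "大结局"
CHAPTER_RE = re.compile(r"第[0-9零一二三四五六七八九十百千]+章|番外|结局")

def is_chapter(title):
    # True when the title looks like a real chapter heading
    return bool(title) and CHAPTER_RE.search(title) is not None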
———————————————————————
3.16 13:32
Fixed two more bugs:
1. The download time was computed backwards (start minus end instead of end minus start)
2. After downloading for a long time, a "Previous line repeated * more times" error appeared; a try/except now skips it and keeps downloading (see the sketch below)
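For context, "Previous line repeated * more times" is how Python prints a RecursionError traceback: down_novel() calls itself on every 503 and on every dropped connection, so a long run eventually exhausts the call stack. A sketch of a loop-based retry that avoids the recursion entirely (illustrative only; fetch_chapter is a hypothetical helper, not part of the script):

import time
import requests

def fetch_chapter(url, retries=5, delay=5):
    # Retry in a loop rather than by recursion, so a long download
    # cannot blow Python's call stack
    for _ in range(retries):
        try:
            resp = requests.get(url, timeout=10)
            if resp.status_code == 200:
                return resp
        except (ConnectionResetError, requests.exceptions.ConnectionError):
            pass  # dropped connection: fall through and retry
        time.sleep(delay)  # back off before the next attempt
    raise RuntimeError("giving up on " + url)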
To save time I didn't fully test how the code ran at first, which caused all these problems; from now on I'll test changes carefully before uploading.

xiangxiaoyi posted on 2021-3-15 23:08:
"Can anyone recommend good Python video tutorials?"
Reply: The PDF edition of 明日科技's zero-basics Python book comes with video lessons; each chapter opens with a QR code you can scan in WeChat, so you can watch the video while you read.

fengfei1230 posted on 2021-3-15 22:45:
"Keep it up! I myself started at the start and stopped at the start."
Reply: Thanks for the encouragement!

Other replies: "Just finished wrangling front-end code, and this pile of Python makes my head hurt. Pretty slick, keep it up." / "Another one into the Python rabbit hole, keep going." / "Keep studying, the code is quite clear." / "Watching and learning." / "A fellow beginner here to learn, you have my support."