Scraping a novel site with multiple threads
from bs4 import BeautifulSoup
import os, requests, re, threading, time, json

url_list = []
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.87 Safari/537.36"
}
directory = "txt"  # relative path: the txt directory is created under the current working directory
if not os.path.exists(directory):
    os.makedirs(directory)
def get_list(bookid):  # fetch the chapter list and fill url_list
    data = {"bookId": bookid}
    r = requests.post("https://bookapi.zongheng.com/api/chapter/getChapterList", data=data, headers=headers)
    response_data = json.loads(r.text)
    chapter_list = response_data["result"]["chapterList"]
    for chapter in chapter_list:
        for chapte in chapter["chapterViewList"]:
            chapterId = chapte["chapterId"]
            url_list.append(f"https://read.zongheng.com/chapter/{bookid}/{chapterId}.html")
    return True
def get_text(url, Lock: threading.Lock):  # download the chapter bodies for one sublist of URLs
    for ur in url:
        # Lock.acquire()  # lock
        r = requests.get(ur, headers=headers)
        # Lock.release()
        soup = BeautifulSoup(r.text, 'html.parser')
        name = soup.find(class_="title_txtbox").text  # chapter title
        contents = soup.find('div', class_="content")  # chapter body
        content = contents.find_all("p")
        p_text = ""  # reset per chapter, otherwise each file would also contain all previous chapters
        for conten in content:
            p_text += conten.text + "\n\n"
        name = re.sub('[?|&]', "", name.strip())  # strip characters that are not allowed in file names
        # write the title and content to disk
        file_name = os.path.join("txt", name + ".txt")
        sava_file(file_name, p_text)
        time.sleep(1)
        print(name)
def sava_file(name, text):
    with open(name, "w", encoding="utf8") as f:
        f.write(text)
Chapter = get_list("1249806")  # fetch the chapter list
Lock = threading.Lock()  # thread lock
print("长度:" + str(len(url_list)))
if Chapter:
    num = int(input("输入线程数:"))  # number of threads
    Length = len(url_list) // num  # length of each sublist
    urls = [url_list[i:i + Length] for i in range(0, len(url_list), Length)]  # slice url_list into one sublist per thread
    for url in urls:
        threading.Thread(target=get_text, args=(url, Lock)).start()
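A small tweak to the final loop above: keeping the Thread objects and joining them lets the main thread wait until every worker has finished, so a completion message or a total time could be printed afterwards. A minimal sketch, reusing get_text, Lock and urls exactly as defined above:

threads = []
for url in urls:
    t = threading.Thread(target=get_text, args=(url, Lock))
    t.start()
    threads.append(t)
for t in threads:
    t.join()  # block until this worker has downloaded its sublist
print("all chapters finished")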
One thing I don't quite understand: in my tests, the thread-pool version maxes out the CPU, but the plain threading version does not.
Another issue is that the saved files don't sort well. With a thread lock they do come out in order, but the other threads block and it ends up about as slow as a single thread. From my tests, around 30 threads is fastest on my machine; test the thread count for yourself.
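One way around the ordering problem without a lock is to carry each chapter's index from url_list into the file name, so the files sort by chapter no matter which thread finishes first. A rough sketch only: get_text_indexed is a hypothetical variant of get_text above, and sava_file, headers and num are the ones already defined.

def get_text_indexed(indexed_urls):  # each item is (chapter_index, url)
    for index, ur in indexed_urls:
        r = requests.get(ur, headers=headers)
        soup = BeautifulSoup(r.text, "html.parser")
        name = soup.find(class_="title_txtbox").text
        name = re.sub("[?|&]", "", name.strip())
        p_text = ""
        for p in soup.find("div", class_="content").find_all("p"):
            p_text += p.text + "\n\n"
        # the zero-padded index keeps the files in chapter order when sorted by name
        sava_file(os.path.join("txt", f"{index:04d}_{name}.txt"), p_text)

# hand each thread a chunk of (index, url) pairs instead of bare URLs
indexed = list(enumerate(url_list))
Length = max(1, len(indexed) // num)
for i in range(0, len(indexed), Length):
    threading.Thread(target=get_text_indexed, args=(indexed[i:i + Length],)).start()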
★maxWorkers:30 time:10.26秒
# -*- coding: utf-8 -*-
import concurrent.futures
import json
import os
import re
import time
import requests
from bs4 import BeautifulSoup
url_list = []
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.87 Safari/537.36"
}
directory = "txt"  # relative path: the txt directory is created under the current working directory
if not os.path.exists(directory):
    os.makedirs(directory)
def time_it(func):  # decorator that reports the wall-clock time of a call
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        print(f"time:{end_time - start_time:.2f}秒")
        return result
    return wrapper
def get_list(bookid):  # fetch the chapter list and fill url_list
    data = {"bookId": bookid}
    r = requests.post(
        "https://bookapi.zongheng.com/api/chapter/getChapterList",
        data=data,
        headers=headers,
    )
    response_data = json.loads(r.text)
    chapter_list = response_data["result"]["chapterList"]
    for chapter in chapter_list:
        for chapte in chapter["chapterViewList"]:
            chapterId = chapte["chapterId"]
            url_list.append(
                f"https://read.zongheng.com/chapter/{bookid}/{chapterId}.html"
            )
    return True
def get_text(url):  # download and save a single chapter
    p_text = ""
    r = requests.get(url, headers=headers)
    soup = BeautifulSoup(r.text, "html.parser")
    name = soup.find(class_="title_txtbox").text  # chapter title
    contents = soup.find("div", class_="content")  # chapter body
    content = contents.find_all("p")
    for conten in content:
        p_text += conten.text + "\n\n"
    name = re.sub("[?|&]", "", name.strip())  # strip characters that are not allowed in file names
    file_name = os.path.join("txt", name + ".txt")
    sava_file(file_name, p_text)
    # print(name)
def sava_file(name, text):
    with open(name, "w", encoding="utf8") as f:
        f.write(text)
@time_it
def main(maxWorkers):
    print(f"★maxWorkers:{maxWorkers}", end="\t ")
    Chapter = get_list("1249806")  # fetch the chapter list
    # print("长度:" + str(len(url_list)))
    if Chapter:
        with concurrent.futures.ThreadPoolExecutor(maxWorkers) as executor:
            executor.map(get_text, url_list)

if __name__ == "__main__":
    main(30)  # number of worker threads
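Since the best worker count depends on the machine and the network, the @time_it output can be compared across a few values in one run. A minimal sketch; url_list is module-level and get_list() appends to it, so it has to be cleared between passes (and note each pass re-downloads everything):

if __name__ == "__main__":
    for workers in (10, 20, 30, 50):  # candidate pool sizes, adjust as needed
        url_list.clear()  # get_list() appends to the module-level list, so reset it first
        main(workers)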
Replies:

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.87 Safari/537.36"
}
If I switch this to a proxy, what should I fill in?

Judging by the post time, did OP get up early to study?

When I use threading for multithreading, the CPU does get maxed out; I've never used a thread pool. About num = int(input("输入线程数:")) — could it be that the thread count just isn't high enough? If the site doesn't ban the IP, I don't really limit the number of threads.

Studying the code, thanks for sharing!

Learned something.

Thanks for sharing, learned a lot.

Thanks for sharing.

I gave it a try and the site already won't let me download any more.
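On the proxy question: with requests, a proxy goes into a separate proxies mapping passed next to headers, not into the headers dict itself. A minimal sketch; the proxy address below is only a placeholder:

proxies = {
    "http": "http://127.0.0.1:8888",   # placeholder proxy address, replace with a real one
    "https": "http://127.0.0.1:8888",
}
r = requests.get(url, headers=headers, proxies=proxies, timeout=10)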