This post builds on the code from the previous article and adds multithreading to speed up the crawl.
If you have not read the single-threaded version yet, start there:
https://www.52pojie.cn/thread-1257875-1-1.html
1. Add a list-splitting helper to the original code
[Python]
import math

# ls   : the list to split
# each : how many sub-lists to split it into
def split_list(ls, each):
    groups = []
    eachExact = float(each)
    groupCount = int(len(ls) // each)                  # floor division, e.g. 3 // 2 gives 1
    groupCountExact = math.ceil(len(ls) / eachExact)   # round up, e.g. 3 / 2 gives 2
    start = 0
    for i in range(each):
        if i == each - 1 and groupCount < groupCountExact:
            # if there is a remainder, put all the remaining elements into the last group
            groups.append(ls[start:len(ls)])  # slice from start to the end of ls
        else:
            groups.append(ls[start:start + groupCount])
            start = start + groupCount
    # the return value is like a Java List[]: it holds `each` sub-lists
    return groups
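To see how the split behaves when the chapter count is not evenly divisible, here is a quick check (the sample data is invented purely for illustration):
[Python]
chapters = ['ch1', 'ch2', 'ch3', 'ch4', 'ch5', 'ch6', 'ch7']
print(split_list(chapters, 3))
# floor(7 / 3) == 2 and ceil(7 / 3) == 3, so the remainder ends up in the last group:
# [['ch1', 'ch2'], ['ch3', 'ch4'], ['ch5', 'ch6', 'ch7']]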
2. Once the list is split, the code needs refactoring. Each thread has to work on its own non-overlapping chunk, so the save-to-txt logic is pulled out into its own function.
[Python]
def download_book(split_dds, bookName):
    file_handle = open('D:/SanMu/' + bookName + '.txt', mode='w', encoding='utf-8')
    for dd in split_dds:
        beautiful_soup = BeautifulSoup(download_page(dd.find('a')['href']), 'html.parser')
        name = beautiful_soup.find_all('h1')[0].text
        file_handle.write(name)
        file_handle.write('\r\n')
        # grab the chapter body, then strip the wrapper tags so only plain text is left
        catalogue_html = str(beautiful_soup.find('div', attrs={'id': 'content'}))
        html_replace = catalogue_html.replace("<div id=\"content\">", "")
        replace = html_replace.replace("\n", "").replace(
            "</div>", "").replace("<p>", "")
        split = replace.split("</p>")
        for p_ in split:
            file_handle.write(p_)
            file_handle.write('\r\n')
    file_handle.close()
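The chain of replace() calls is just tag stripping. A rough illustration of what it does to a chapter's content div (the sample HTML below is made up for the example):
[Python]
sample = '<div id="content"><p>First line.</p><p>Second line.</p></div>'
cleaned = sample.replace('<div id="content">', '').replace('\n', '') \
                .replace('</div>', '').replace('<p>', '')
print(cleaned.split('</p>'))
# ['First line.', 'Second line.', ''] -- each element is then written on its own line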
3. Next, create the thread class
[Python]
import threading

class myThread(threading.Thread):
    def __init__(self, split_dds, name):
        threading.Thread.__init__(self)
        self.name = name
        self.split_dds = split_dds

    def run(self):
        print("Thread started: " + self.name)
        # run() is what the thread executes once start() is called
        download_book(self.split_dds, self.name)
        print("Thread finished: " + self.name)
4. Then start the threads
[Python]
try:
    # download into three separate files and merge them afterwards; each thread
    # works on its own chunk, so the three threads never touch the same data
    thread1 = myThread(split_dds[0], movie_name_list[count] + "1")
    thread2 = myThread(split_dds[1], movie_name_list[count] + "2")
    thread3 = myThread(split_dds[2], movie_name_list[count] + "3")
    thread1.start()
    thread2.start()
    thread3.start()
    thread1.join()
    thread2.join()
    thread3.join()
except:
    pass
while 1:
    # join() already waits for the threads; this loop just keeps the script alive
    pass
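The three hard-coded thread variables can also be written as a loop, which makes it easy to change the number of chunks later. A sketch using the same variable names as above:
[Python]
threads = []
for idx, chunk in enumerate(split_dds):
    t = myThread(chunk, movie_name_list[count] + str(idx + 1))
    threads.append(t)
    t.start()
for t in threads:
    t.join()  # wait until every part file has been written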
5. Finally, merge the three files and the job is basically done
[Python]
import os

def combineText(bookName, count):
    combine = 'D:/SanMu/' + bookName + '.txt'
    t = open(combine, mode='w', encoding='utf-8')
    for i in range(count):
        txt = 'D:/SanMu/' + bookName + str(i + 1) + '.txt'
        with open(txt, mode='r', encoding='utf-8') as f:  # open the part file
            data = f.read()                               # read its contents
            t.write(data)
        if os.path.exists(txt):
            # the part file has been merged, so delete it
            os.remove(txt)
    t.close()
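Called as below, it merges BookName1.txt, BookName2.txt and BookName3.txt into BookName.txt and deletes the three part files ('BookName' is just a placeholder title):
[Python]
combineText('BookName', 3)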
6. The code above basically works, but there is still a big problem: the threads hit biquge so frequently that the site treats it as an attack, and after a while some pages stop responding, so what gets crawled is a 503 page instead of the chapter. The workaround is: when a 503 comes back, let the thread sleep for 3 seconds and then request the page again.
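That single retry lives in download_page() in the full listing below. As an aside, here is a minimal sketch of how it could be generalised into a retry loop; download_page_with_retry and its attempts/delay parameters are my own additions, not part of the original code:
[Python]
import time
import requests

def download_page_with_retry(url, attempts=3, delay=3):
    # hypothetical helper: retry a few times, sleeping between attempts,
    # instead of the single retry used in the original download_page()
    data = b''
    for _ in range(attempts):
        try:
            data = requests.get(url).content
            if b'503 Service Temporarily Unavailable' not in data:
                return data
            print("got 503, sleeping", delay, "seconds")
        except requests.RequestException:
            pass
        time.sleep(delay)
    return data  # may still be the 503 page if every attempt failed

The complete, combined code follows.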
[Python]
import math
import threading
import time
import os

import requests
from bs4 import BeautifulSoup
def combineText(bookName, count):
    combine = 'D:/SanMu/' + bookName + '.txt'
    t = open(combine, mode='w', encoding='utf-8')
    for i in range(count):
        txt = 'D:/SanMu/' + bookName + str(i + 1) + '.txt'
        with open(txt, mode='r', encoding='utf-8') as f:  # open the part file
            data = f.read()                               # read its contents
            t.write(data)
        if os.path.exists(txt):
            # the part file has been merged, so delete it
            os.remove(txt)
    t.close()
def split_list(ls, each):
    groups = []
    eachExact = float(each)
    groupCount = int(len(ls) // each)
    groupCountExact = math.ceil(len(ls) / eachExact)
    start = 0
    for i in range(each):
        if i == each - 1 and groupCount < groupCountExact:
            # if there is a remainder, put all the remaining elements into the last group
            groups.append(ls[start:len(ls)])
        else:
            groups.append(ls[start:start + groupCount])
            start = start + groupCount
    return groups
def download_page(url):
    try:
        data = requests.get(url).content
        if b'503 Service Temporarily Unavailable' in data:
            print("got 503, sleeping 3 seconds before retrying")
            time.sleep(3)
            data = requests.get(url).content
    except:
        time.sleep(3)
        data = requests.get(url).content
    return data
class myThread(threading.Thread):
    def __init__(self, split_dds, name):
        threading.Thread.__init__(self)
        self.name = name
        self.split_dds = split_dds

    def run(self):
        print("Thread started: " + self.name)
        download_book(self.split_dds, self.name)
        print("Thread finished: " + self.name)
def download_book(split_dds, bookName):
    file_handle = open('D:/SanMu/' + bookName + '.txt', mode='w', encoding='utf-8')
    for dd in split_dds:
        beautiful_soup = BeautifulSoup(download_page(dd.find('a')['href']), 'html.parser')
        name = beautiful_soup.find_all('h1')[0].text
        file_handle.write(name)
        file_handle.write('\r\n')
        # strip the wrapper tags from the chapter body so only plain text is written
        catalogue_html = str(beautiful_soup.find('div', attrs={'id': 'content'}))
        html_replace = catalogue_html.replace("<div id=\"content\">", "")
        replace = html_replace.replace("\n", "").replace(
            "</div>", "").replace("<p>", "")
        split = replace.split("</p>")
        for p_ in split:
            try:
                file_handle.write(p_)
                file_handle.write('\r\n')
            except:
                pass  # skip any paragraph that cannot be written
    file_handle.close()
def parse_html(html):
    soup = BeautifulSoup(html, 'html.parser')
    movie_list_soup = soup.find('table')
    movie_list = []
    movie_name_list = []
    if movie_list_soup is not None:
        i = 1
        for movie_li in movie_list_soup.find_all('tr'):
            if movie_li.find_all('th'):  # skip the header row
                continue
            a_ = movie_li.find_all('td', attrs={'class': 'odd'})[0].find('a')
            print(i, '.', a_.text)
            movie_list.append(a_['href'])
            movie_name_list.append(a_.text)
            i = i + 1
        count = int(input('Enter the book number: ')) - 1
        page = BeautifulSoup(download_page(movie_list[count]), 'html.parser')
        dds = page.find_all('dd')
        print('This download has ' + str(len(dds)) + ' chapters')
        split_dds = split_list(dds, 3)
        try:
            book_name = movie_name_list[count]
            thread1 = myThread(split_dds[0], book_name + "1")
            thread2 = myThread(split_dds[1], book_name + "2")
            thread3 = myThread(split_dds[2], book_name + "3")
            thread1.start()
            thread2.start()
            thread3.start()
            thread1.join()
            thread2.join()
            thread3.join()
            combineText(book_name, 3)
        except:
            pass
        while 1:
            # join() already waits for the threads; this loop just keeps the script alive
            pass


def main():
    parse_html(download_page("https://www.biquge5200.com/modules/article/search.php?searchkey=" + input("Search: ")))


main()
That is the whole multithreaded novel crawler. It still is not perfect: even with the 3-second sleep, 503 responses occasionally come back, and a longer sleep hurts throughput, so for now there is no ideal fix. If you do not care about speed, you can simply increase the sleep time.
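If speed really does not matter, another simple mitigation (not in the original code) is to throttle every request with a small fixed pause, so the site is hit less aggressively in the first place; polite_get and REQUEST_DELAY below are hypothetical names for illustration:
[Python]
import time
import requests

REQUEST_DELAY = 1.0  # per-request pause in seconds; increase it if 503s keep appearing

def polite_get(url):
    # wait before every request instead of only after a 503
    time.sleep(REQUEST_DELAY)
    return requests.get(url).content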