python下载小说遮天
本帖最后由 luoshiyong123 于 2020-5-2 00:20 编辑

V1.0
---抓取的笔趣阁网站上的---用的xpath解析html
---后边的章节目录为啥不见了尴尬
import requests
import unicodedata
from lxml import etree
# --- V1.0: download chapters by guessing sequential chapter-page URLs ---
# NOTE(review): chapter ids on this site are NOT spaced by a fixed +3
# (the thread itself reports 404s after a few chapters); V1.1 below reads
# the real hrefs from the index page instead. Kept here as the first cut.

HEADERS_V1 = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:75.0) Gecko/20100101 Firefox/75.0',
}
BASE_URL_V1 = 'https://www.bqg5.cc/1_1273/'
FIRST_CHAPTER_ID = 669621
SUFFIX_V1 = '.html'
OUT_PATH_V1 = 'C:/Users/lsy/Desktop/1.txt'


def chapter_url_v1(index, base=BASE_URL_V1, start=FIRST_CHAPTER_ID, step=3, suffix=SUFFIX_V1):
    """Build the guessed URL of chapter *index* (0-based).

    The original hard-coded ``start + 3*i``; the spacing is exposed as
    ``step`` so other spacings can be tried without editing the loop.
    """
    return base + str(start + step * index) + suffix


def download_v1(num=10, out_path=OUT_PATH_V1):
    """Download ``num`` chapters and write them all into one text file.

    Fixes vs. the original: the output file is opened ONCE in write mode
    (the original reopened it for every chapter), the file is written as
    UTF-8 explicitly (the platform default could fail on CJK text), and a
    non-200 response is skipped instead of being parsed as a chapter.
    """
    with open(out_path, 'w', encoding='utf-8') as fp:
        for i in range(num):
            url = chapter_url_v1(i)
            res = requests.get(url=url, headers=HEADERS_V1)
            if res.status_code != 200:
                # Guessed id missed a real chapter page; skip it.
                print('请求失败!' + url)
                continue
            print('请求成功!')
            res.encoding = res.apparent_encoding  # avoid mojibake from mis-detected charset
            html = etree.HTML(res.text)
            data = html.xpath('/html/body/div[@id="wrapper"]/div[@class="content_read"]/div[@class="box_con"]/div[@id="content"]/p/text()')
            mystr = '\n'.join(data)
            print(mystr)
            fp.write(mystr)


if __name__ == '__main__':
    download_v1()
V1.1
经过大神们的回复,目前修复了章节不存在的bug,但是还是存在下载速度不够快和下到了500多章后程序无响应这种异常情况
import requests
import unicodedata
from lxml import etree
# --- V1.1: read the real chapter hrefs from the index page, then fetch each ---

HEADERS_V2 = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:75.0) Gecko/20100101 Firefox/75.0',
}
MULU_URL = 'https://www.bqg5.cc/5_5157/'
MAIN_URL = 'https://www.bqg5.cc'
OUT_PATH_V2 = 'C:/Users/lsy/Desktop/蛊真人1.txt'
# The original loop started at i=9: the first 9 <dd> links on the index
# page are presumably a "latest chapters" list, not chapter 1 — TODO confirm.
SKIP_ENTRIES = 9


def full_chapter_url(href, base=MAIN_URL):
    """Join a site-relative chapter href onto the site root."""
    return base + href


def download_v2(out_path=OUT_PATH_V2):
    """Fetch the chapter list, then download every chapter into one file.

    Bug fixed vs. the original: inside the loop it concatenated the whole
    href *list* (``main_url + mulu_array``) instead of the current element
    (``mulu_array[i]``), which raises TypeError on the first iteration.
    Also opens the output file once (UTF-8) instead of once per chapter.
    """
    rep = requests.get(url=MULU_URL, headers=HEADERS_V2)
    if rep.status_code != 200:
        print('请求失败!' + MULU_URL)
        return
    print('请求目录页成功!')
    mulu_html = etree.HTML(rep.text)
    mulu_array = mulu_html.xpath('//div[@class="box_con"]//dl/dd/a/@href')
    with open(out_path, 'w', encoding='utf-8') as fp:
        for n, href in enumerate(mulu_array[SKIP_ENTRIES:], start=1):
            xiazai_url = full_chapter_url(href)  # was: main_url + mulu_array (TypeError)
            res = requests.get(url=xiazai_url, headers=HEADERS_V2)
            if res.status_code != 200:
                print('请求失败!' + xiazai_url)
                continue
            print('请求成功!')
            print('正在下载', n)
            res.encoding = res.apparent_encoding  # avoid mojibake from mis-detected charset
            html = etree.HTML(res.text)
            data = html.xpath('/html/body/div[@id="wrapper"]/div[@class="content_read"]/div[@class="box_con"]/div[@id="content"]/p/text()')
            fp.write('\n'.join(data))


if __name__ == '__main__':
    download_v2()

# Forum note: this post was last edited by wkfy on 2020-04-30 08:42.
用BS4也很好使,代码如下:
import requests as CC
from bs4 import BeautifulSoup as BS
def get_bs4_date(url):
    """Fetch *url* and return it parsed as a BeautifulSoup document.

    The response encoding is set from ``apparent_encoding`` so CJK pages
    decode correctly before parsing.
    """
    resp = CC.get(url)
    resp.encoding = resp.apparent_encoding
    return BS(resp.text, 'lxml')
def get_txt(url):
    """Return the plain text of the chapter body at *url*.

    Bug fixed vs. the original: ``find_all`` returns a ResultSet, which
    has no ``.text`` attribute (it raises AttributeError); ``find``
    returns the single matching tag, whose ``.text`` is the chapter text.
    """
    codes = get_bs4_date(url)
    date = codes.find('div', id='content')
    return date.text
# Crawl the whole book: pull every <dd> chapter link from the index page
# and print each chapter's title followed by its text.
Url = 'https://www.bqg5.cc/1_1273/'
index_page = get_bs4_date(Url)
chapter_entries = index_page.find_all('dd')
for entry in chapter_entries:
    chapter_text = get_txt('https://www.bqg5.cc' + entry.a['href'])
    print(entry.a.text, '\n', '*' * 50, '\n', chapter_text)
fanvalen 发表于 2020-4-30 00:32
我去查看了小说的页面发现它章节的链接不是连续的,所以不能用+1来获取下一章,你下载返回一个404直接跳了
我看你做的+3递进,但几次之后就不一样了。

小说页面的url要在 https://www.bqg5.cc/1_1273/ 这个页面使用xpath获取,自己拼接的地址不对。

我去查看了小说的页面,发现它章节的链接不是连续的,所以不能用+1来获取下一章,你下载返回一个404直接跳了。你应该是把章节url全部获取然后请求。

刚学习,看不懂。

python不太懂,易语言的话:
文本_取出中间文本 (目录内容, “<dd><a href="/”, “">”),或者自己用正则匹配呗,也不难吧。

学习下,感谢楼主。

感谢分享,希望完善后能分享成品源码。
页:
[1]
2