A scraper for a novel website
To download a novel, open the page of its first chapter in a browser, copy that page's URL, and paste it in when the script prompts for input.
import requests
import re
from bs4 import BeautifulSoup
start_url = "https://www.aabook.xyz/read.php?id=68339"
start_url = input("请输入网址:")
headers = {
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36"
}
res = requests.get(start_url, headers=headers)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'lxml')
# page_num = re.search(r'\d+', soup.find_all('li')[-1]('a')[0].text).group()
# Build the full list of chapter URLs from the table-of-contents <ul class="section_list">
sumUrl = ["https://www.aabook.xyz/" + i.find('a').get('href')
          for i in soup.find('ul', 'section_list')('li')]
# The novel's title: the last link in the <p class="nav pngFix"> breadcrumb
name = soup.find('p', ["nav", "pngFix"])('a')[-1].text
# &v=a52bT%2FFbmkRqe7W8hwFugqPbNG5OpnWcepYftiB1SNuFddrUcJKZ
def get_text(url):
    """Fetch one chapter page, recover the hidden token, and return its paragraphs."""
    r = requests.get(url, headers=headers)
    # The chapter page embeds a one-time "&v=..." token; capture it up to the closing quote
    v = re.search(r"(&v=.*)\"", r.text).group(1)
    # The chapter id is the value after "=" in the chapter URL
    chapid = url.split("=")[1]
    # The real chapter text is served by _getcontent.php, keyed by id and token
    true_url = "https://www.aabook.xyz/_getcontent.php?id=" + chapid + v
    res2 = requests.get(true_url, headers=headers)
    res2.encoding = "utf-8"
    soup2 = BeautifulSoup(res2.text, "lxml")
    textList = [i.text for i in soup2('p')]
    return textList
if __name__ == "__main__":
    for chap_url in sumUrl:
        text = get_text(chap_url)
        print(text)  # progress output: the paragraphs just fetched
        # Append each chapter to a file named after the novel; "with" closes it automatically
        with open(name + ".txt", 'a', encoding='utf-8') as f:
            f.write('\n'.join(text) + '\n')  # keep paragraph breaks between lines
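
For reference, the key trick above is recovering the hidden "&v=..." token that each chapter page embeds in its HTML before calling _getcontent.php. Below is a minimal sketch of that extraction in isolation; the snippet string and its token value are made-up stand-ins for whatever a real page contains:

import re

# Hypothetical fragment of a chapter page's HTML (the real token differs per chapter)
snippet = '_getcontent.php?id=68339&v=a52bT%2FFbmkRqe7W8hwFugqPbNG5OpnWcepYftiB1SNuFddrUcJKZ"'

# Same pattern as in get_text(): capture from "&v=" up to the closing double quote.
# Note that ".*" is greedy, so this assumes only one double quote follows the token.
m = re.search(r"(&v=.*)\"", snippet)
print(m.group(1))  # &v=a52bT%2FFbmkRqe7W8hwFugqPbNG5OpnWcepYftiB1SNuFddrUcJKZ

If the site starts refusing rapid-fire requests, one possible refinement (a sketch under assumed values, not part of the original script) is to reuse a single requests.Session that carries the User-Agent for every request and to pause briefly between chapters:

import time
import requests

session = requests.Session()
session.headers.update({
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36"
})

def fetch(url):
    # One shared session keeps cookies and headers across requests;
    # the 10-second timeout and 1-second delay are assumptions, not from the original post.
    r = session.get(url, timeout=10)
    r.encoding = "utf-8"
    time.sleep(1)
    return r.text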