Newbie scraping audiobook novels, part two
It's the weekend again. After the rather crude scraping code I posted last time, I dug through more material online and found a fairly complete version in a CSDN blog post: https://blog.csdn.net/u013951943/article/details/79568517. The site it targeted no longer seems to allow downloads, though, so it was time to borrow again. The main idea is this: first get the search page, which works just like a Baidu search, by appending the name of the novel you want to the link "http://www.ting89.com/search.asp?searchword="
The Chinese name has to be converted to 'gb2312' encoding first. One problem remains: the character '屌' cannot be encoded and raises an error, perhaps because it is just too 屌.
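A minimal sketch of how the search URL could be built; using errors='replace' in urllib.parse.quote is just one possible way around the encoding error mentioned above (the full code below keeps the default strict behaviour):

import urllib.parse

search_url = "http://www.ting89.com/search.asp?searchword="
name = '世界未解之谜'
# gb2312 cannot represent every character; errors='replace' avoids the crash
# at the cost of mangling such characters (gb18030 would be another option)
keyword = urllib.parse.quote(name, encoding='gb2312', errors='replace')
print(search_url + keyword)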
Next, regular expressions are used to match the links step by step:
1. Search page: use a regex to pull the search results and each book's home page out of the page, and let the user pick the title to download from the list.
2. Go to the chosen book's home page and scrape the chapter names and the links to their download pages.
3. Go to the download page, work out the format of the download links, and download the files into a folder named after the book, created under the program's current directory; the download part of the code is still borrowed from a fellow in the previous thread. (A small sketch of what the download-link regex extracts follows this list.)
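To make step 3 concrete, here is a tiny sketch of what the down_pattern regex used in the code below pulls out of a download page; the sample string is made up, the real page markup may differ:

import re

down_pattern = r"""url=(.*)/(.+?)\.mp3"""
# made-up fragment of a player/download page; the real markup may differ
sample = "player.swf?url=http://mp3.example.com/yousheng/shijie/001.mp3&autoplay=1"
print(re.findall(down_pattern, sample))
# -> [('http://mp3.example.com/yousheng/shijie', '001')]

Group 1 is the base address shared by the whole book and group 2 is the file name, which is why the first version of the code only fetches one chapter page and then reconstructs every other link from it.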
The code uses the third-party library requests, which has to be installed (pip install requests) before it can be imported.
The full code is as follows:
import requests
import urllib.parse
import re
import os
import time
class YsSpider:
    def __init__(self, name):
        self.search_name = name
        self.search_url = "http://www.ting89.com/search.asp?searchword="
        self.home_url = "http://www.ting89.com/books/"
        self.index_pattern = r"""<a href="/books/(\d+).html" title="(.+?)" target='_blank'>"""
        self.chapter_pattern = r"""<a href='(/down/\?[^-]+-\d+.html)' target="_blank">(.+?)</a>"""
        self.down_pattern = r"""url=(.*)/(.+?)\.mp3"""
        self.book_id = ''
        self.book_name = ''
        self.Chapter_list = []
    # Return the id of the book the user picks from the search results
    def searchbook(self):
        file = requests.get(self.search_url + urllib.parse.quote(self.search_name, encoding='gb2312'))
        data = file.content.decode('gbk')
        result = re.findall(self.index_pattern, data)
        if len(result):
            for index, i in enumerate(result):
                print('%d.%s' % (index + 1, i[1]))  # i = (book_id, book_name)
            num = input("Enter the number of the book you want to download: ")
            self.book_name = result[int(num) - 1][1]
            self.book_id = result[int(num) - 1][0]
            return self.book_id
        else:
            print('******* No matching book found, please change the keyword and run again *******')
            exit()
    def get_chapter_list(self):  # get every chapter's name and its download-page url
        data = requests.get(self.home_url + self.searchbook() + '.html').content.decode('gbk')
        result = re.findall(self.chapter_pattern, data)
        return result
    def _getAllUrl(self):  # build the download address of every chapter
        chapter_list = self.get_chapter_list()
        chapter = [x[0] for x in chapter_list]             # download-page links
        self.Chapter_list = [x[1] for x in chapter_list]   # chapter names
        _list = [x[1] for x in chapter_list]
        # fetch only the first chapter's page to work out how the mp3 files are named
        data = requests.get("http://www.ting89.com" + chapter[0]).content.decode('gbk')
        result = re.findall(self.down_pattern, data)
        # result[0] = (base_url, file_name); a name like '001' means the files are numbered sequentially
        return self.sub_get_url(result[0][0], _list, re.search("^0.*1$", result[0][1]))
    def sub_get_url(self, down_url, _list, down_url_flag):
        url = []
        if down_url_flag:
            # files are named with zero-padded sequence numbers, e.g. 01.mp3, 02.mp3 ...
            xulie = list(range(len(_list)))
            weishu = len(str(xulie[-1]))
            for i in xulie:
                i1 = i + 1
                tmp_url = down_url + '/' + str(i1).zfill(weishu) + '.mp3'
                url.append(urllib.parse.quote(tmp_url, safe='/:?='))
        else:
            # files are named after the chapter titles
            for item in _list:
                tmp_url = down_url + '/' + item + ".mp3"
                url.append(urllib.parse.quote(tmp_url, safe='/:?='))
        return url
    # Save the file at the given url to path
    def save_a_file(self, url, path, chapter):
        try:
            print('Trying to download', chapter)
            if not os.path.exists(path):
                response = requests.get(url)
                with open(path, 'wb') as f:
                    f.write(response.content)
                print(chapter, 'saved')
                response.close()
                time.sleep(1)
            else:
                print('File already exists')
        except:
            print('Download failed at', chapter, ', retrying')
            self.save_a_file(url, path, chapter)
    def download_files(self):
        result = self._getAllUrl()  # the download address of every chapter
        root = os.path.join(os.getcwd(), self.book_name)
        if not os.path.exists(root):
            os.mkdir(root)
        for index, i in enumerate(result):
            path = os.path.join(root, self.Chapter_list[index]) + '.mp3'
            self.save_a_file(i, path, self.Chapter_list[index])
aa = YsSpider('世界未解之谜')
print(aa.download_files())
Because every download link on this site is static, there was nothing difficult about it. Some anti-scraping countermeasures could still be added, such as a rotating User-Agent and rotating proxy IPs; anyone interested is welcome to improve it together, I was too lazy to add them.
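For anyone who wants to try, a rough sketch of what a rotating User-Agent plus a proxy could look like with requests; the UA strings and the proxy address below are placeholders, not a tested setup:

import random
import requests

user_agents = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Safari/605.1.15',
]
proxies = {'http': 'http://127.0.0.1:8080'}  # placeholder proxy, replace with your own

def fetch(url):
    headers = {'User-Agent': random.choice(user_agents)}
    return requests.get(url, headers=headers, proxies=proxies, timeout=5)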
I haven't used classes much, so the whole thing still has a procedural flavour. The code is fairly rough; experts please go easy on it, and suggestions are welcome so we can all improve.
Static pages are the best.
Learning from this.
This would only really be useful if you could scrape paid Ximalaya audiobooks; the simple features are easy, the complex ones are not.
There is a problem in this code, I'll fix it tomorrow. I assumed the download links were only named in two ways, but it turns out there are even more complicated naming schemes, so the only option is to go into each chapter's download page and scrape the link there.
I'll sit down and study this when I have time.
Here is the revised version: instead of batch-generating the download addresses, it now scrapes them from each chapter's page, so there is no need to work out how the download addresses are named.
import requests
import urllib
import re
import os
import time
import random
class YsSpider:
    def __init__(self, name):
        self.search_name = name
        self.search_url = "http://www.ting89.com/search.asp?searchword="
        self.home_url = "http://www.ting89.com/books/"
        self.index_pattern = r"""<a href="/books/(\d+).html" title="(.+?)" target='_blank'>"""
        self.chapter_pattern = r"""<a href='(/down/\?[^-]+-\d+.html)' target="_blank">(.+?)</a>"""
        self.down_pattern = r"""url=(.*)/(.+?)\.mp3"""
        self.book_id = ''
        self.book_name = ''
        self.Chapter_list = []
        self.headers_list = ['Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
                             'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
                             'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)']
    # Return the id of the book the user picks from the search results
    def searchbook(self):
        file = requests.get(self.search_url + urllib.parse.quote(self.search_name, encoding='gb2312'))
        data = file.content.decode('gbk')
        result = re.findall(self.index_pattern, data)
        if len(result):
            for index, i in enumerate(result):
                print('%d.%s' % (index + 1, i[1]))  # i = (book_id, book_name)
            num = input("Enter the number of the book you want to download: ")
            self.book_name = result[int(num) - 1][1]
            self.book_id = result[int(num) - 1][0]
            return self.book_id
        else:
            print('******* No matching book found, please change the keyword and run again *******')
            exit()
    def get_chapter_list(self):  # get every chapter's name and its download-page url
        data = requests.get(self.home_url + self.searchbook() + '.html').content.decode('gbk')
        result = re.findall(self.chapter_pattern, data)
        return result
    def _get_down_url(self, item):  # scrape the real mp3 address from a chapter's download page
        try:
            tmp_url = []
            randomHeaderUserAgent = random.choice(self.headers_list)  # pick a random User-Agent
            headers = dict([('User-Agent', randomHeaderUserAgent)])
            response = requests.get("http://www.ting89.com" + item, headers=headers, timeout=1)
            data = response.content.decode('gbk')
            result = re.findall(self.down_pattern, data)
            if result:
                tmp_url = result[0][0] + '/' + result[0][1] + ".mp3"
            response.close()
            return tmp_url
        except:
            print('Timed out while fetching the download address, retrying')
            return self._get_down_url(item)
    # Save the file at the given url to path
    def save_a_file(self, url, path, chapter):
        try:
            print('Trying to download', self.book_name, chapter)
            if not os.path.exists(path):
                response = requests.get(url, timeout=5)
                with open(path, 'wb') as f:
                    f.write(response.content)
                print(self.book_name, chapter, 'saved')
                response.close()
                time.sleep(1)
            else:
                print('File already exists')
        except:
            print('Download failed at', chapter, ', retrying')
            self.save_a_file(url, path, chapter)
    def download_files(self):
        chapter_list = self.get_chapter_list()
        chapter = [x[0] for x in chapter_list]             # download-page links
        self.Chapter_list = [x[1] for x in chapter_list]   # chapter names
        root = os.path.join(os.getcwd(), self.book_name)
        if not os.path.exists(root):
            os.mkdir(root)
        for index, item in enumerate(chapter):
            result = self._get_down_url(item)
            if result:
                path = os.path.join(root, self.Chapter_list[index]) + '.mp3'
                self.save_a_file(result, path, self.Chapter_list[index])
aa = YsSpider('凡人修仙传')
print(aa.download_files())