import urllib.request as ub
import urllib.parse as parse
import time
import datetime
import re
import random
import sys
import os
import shutil
class My_urlEC(object):
"""
对url进行自己指定方式编码和解码
"""
def __init__(self,ec="utf-8"):
"""
初始化解码器
:param EC: 解码器的编码格式
"""
self.encode=ec
    def Ec(self, value):
        """
        Percent-encode a string with the configured charset.
        :param value: the string to encode
        :return: the percent-encoded string
        """
        value = str(value).encode(self.encode)
        return parse.quote(value)
    def unEc(self, value):
        """
        Decode a percent-encoded URL value with the configured charset.
        :param value: the percent-encoded string
        :return: the decoded string
        """
        return parse.unquote(str(value), encoding=self.encode)
    def dict_urlEc(self, params):
        """
        Encode each value of a dict with the configured charset and join the
        pairs into a GET/POST-style query string, like parse.urlencode().
        :param params: the dict to convert
        :return: a query string such as "key1=v1&key2=v2"
        """
        pairs = ["{}={}".format(key, self.Ec(value)) for key, value in params.items()]
        return "&".join(pairs)
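
# A minimal usage sketch (the dict value below is illustrative): with a GBK
# codec, dict_urlEc yields the same query string as
# parse.urlencode(params, encoding="gbk"):
#   codec = My_urlEC("gbk")
#   codec.dict_urlEc({"searchkey": "圣墟"})  # -> a GBK percent-encoded query string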
class biquSpider(object):
"""
笔趣阁爬虫
"""
def __init__(self,bookname,Ec):
self.book = bookname#爬取的书名
self.MyEc = My_urlEC(Ec)#转码器
self.schUrl="http://www.biquge.com.tw/modules/article/soshu.php?"#搜索的url
self.pgBaseUrl="http://www.biquge.com.tw"
self.pageUrl=[]#章节目录(一个列表里面存的是一些元组)
self.timeoutPage=[]#超时章节
self.timeout=[2,0]
    def get_time(self):
        """
        Return a Unix timestamp (in seconds) for one minute ago.
        :return:
        """
        mytime = datetime.datetime.now() + datetime.timedelta(seconds=-60)
        return int(time.mktime(mytime.timetuple()))
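    # Equivalence sketch: the two lines above amount to int(time.time()) - 60,
    # i.e. "one minute ago", which mk_request() below feeds into the
    # jieqiVisitTime cookie.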
    def mk_request(self, url=None):
        """
        Build a Request; what it fetches depends on the url argument.
        :param url: if omitted, a search request for self.book is returned;
                    if given, the request targets that chapter URL
        :return:
        """
        if url is None:
            self.data = {"searchkey": self.book}
            url = self.schUrl + self.MyEc.dict_urlEc(self.data)
        cookie = "__cdnuid=e14ba5b39182134958641bb40ee2e716; jieqiVisitTime=jieqiArticlesearchTime%3D" + str(self.get_time())
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36",
            "Referer": "http://www.biquge.com.tw/16_16508/",
            "Cookie": cookie,
        }
        return ub.Request(url, headers=headers)
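    # Usage sketch (the chapter URL is illustrative, taken from the catalogue
    # format shown in parse_sch below):
    #   spider.mk_request()                    # search request for self.book
    #   spider.mk_request("http://www.biquge.com.tw/16_16508/9562507.html")
    # The jieqiArticlesearchTime cookie field carries the timestamp from
    # get_time(), presumably to satisfy the site's search-interval check.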
    def search_bk(self):
        """
        Search for the book and parse the result page.
        :return: 0 on success, -11 if the connection kept failing,
                 -1 if the response kept timing out
        """
        print("Searching for book: {} ------------------".format(self.book))
        self.bookpath = os.path.join(os.getcwd(), self.book)
        if not os.path.exists(self.bookpath):
            os.mkdir(self.bookpath)
        request = self.mk_request()
        for i in range(0, 10):
            try:
                response = ub.urlopen(request, timeout=self.timeout[0])
            except Exception:
                self.timeout[0] = self.timeout[0] * 2
                if i == 9:
                    return -11
                print("Request timed out, reconnecting (attempt {})".format(i + 1))
                continue
            isTimeout = self.parse_sch(response)
            if isTimeout == -1:
                self.timeout[0] = self.timeout[0] * 2
                if i == 9:
                    return -1
                print("Read timed out, reconnecting (attempt {})".format(i + 1))
                continue
            else:
                print("Book search finished ------------------")
                return 0
    def search_bkPage(self, url_temp, bg_page=None, end_page=None):
        """
        Fetch the book's chapters and save each one.
        :param url_temp: a list of (url, title) tuples is fetched entry by
                         entry; a plain string URL is fetched once
        :return:
        """
        self.timeoutPage = []
        if bg_page and end_page:
            url_temp = url_temp[bg_page - 1:end_page]
        if isinstance(url_temp, list):
            for url, title in url_temp:
                print("Fetching {} -----------------".format(title))
                print(self.pgBaseUrl + url)
                request = self.mk_request(self.pgBaseUrl + url)
                try:
                    response = ub.urlopen(request, timeout=self.timeout[0])
                except Exception:
                    for i in range(0, 10):
                        try:
                            response = ub.urlopen(request, timeout=self.timeout[0])
                        except Exception:
                            self.timeout[0] = self.timeout[0] * 2
                            if i == 9:
                                print("Connection to the server timed out")
                                sys.exit()
                            continue
                        else:
                            break
                isTimeout = self.parse_artc(response, title)  # parse and save the chapter
                if isTimeout == -1:
                    self.timeoutPage.append((url, title))
        elif isinstance(url_temp, str):
            print("Fetching a single chapter -----------------")
            request = self.mk_request(url_temp)
            response = ub.urlopen(request)
            self.parse_artc(response, "chapter")  # parse and save the chapter
        if not self.timeoutPage:
            print("Chapter download finished -----------------")
            return
        else:
            if self.timeout[1] >= 10:
                return
            self.timeout[1] += 1
            self.timeout[0] = self.timeout[0] * 2
            print("Chapters that timed out: {}".format(self.timeoutPage), "- retrying automatically")
            self.search_bkPage(self.timeoutPage)
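    # Backoff sketch: self.timeout[0] starts at 2 s and doubles after every
    # failure, while self.timeout[1] caps whole-list retries at 10 rounds.
    # The same idea as a standalone helper (hypothetical, not used above):
    #   def fetch_with_backoff(request, timeout=2, retries=10):
    #       for _ in range(retries):
    #           try:
    #               return ub.urlopen(request, timeout=timeout)
    #           except Exception:
    #               timeout *= 2
    #       raise TimeoutError("all retries exhausted")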
# def get_pageUrl(self,page_url):
#
# for page in page_url:
# page[0] = self.pgBaseUrl+page[0]
# return page_url
    def parse_sch(self, response):
        """
        Parse the book-search response and collect the chapter catalogue.
        :param response: the response returned by the server
        :return:
        """
        try:
            bookhtml = response.read().decode(self.MyEc.encode)
        except Exception:
            print("Book search timed out!")
            return -1
        pattern2 = re.compile(r'抱歉,搜索没有结果')  # the site's "sorry, no results" notice
        m2 = pattern2.search(bookhtml)
        if m2:
            print(m2.group())
            shutil.rmtree(self.bookpath)
            sys.exit()
        else:
            with open(os.path.join(self.bookpath, self.book + ".html"), "wb") as file:
                file.write(bookhtml.encode("utf-8"))
            pattern = re.compile(r'<dd><a href="(.+?)">(.*?)</a></dd>')
            # findall returns (href, title) tuples, e.g.
            # ('/16_16508/9562507.html', '第二千一百三十一章 我不入地狱,谁入地狱')
            m = pattern.findall(bookhtml)
            self.pageUrl = m
        return 0
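    # Regex sketch for the catalogue (sample line modelled on the markup the
    # pattern above expects):
    #   sample = '<dd><a href="/16_16508/9562507.html">第一章</a></dd>'
    #   re.findall(r'<dd><a href="(.+?)">(.*?)</a></dd>', sample)
    #   # -> [('/16_16508/9562507.html', '第一章')]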
    def parse_artc(self, response, title):
        """
        Parse a chapter page and save its text to a file.
        :param response: the response returned by the server
        :param title: the chapter title, used in the output file name
        :return:
        """
        try:
            article = response.read().decode(self.MyEc.encode)
        except Exception as e:
            error = str(e)
            if error == "timed out":
                print("Fetching chapter {} timed out".format(title))
                return -1
            else:
                print(error)
                return -2
        # Chapter paragraphs are assumed to be indented with &nbsp; entities
        # and separated by <br /> tags in the page source.
        pattern = re.compile(r'&nbsp;&nbsp;&nbsp;&nbsp;(.+?)<br />')
        m = pattern.findall(article)
        text = "".join(line + "\n" for line in m)
        with open(os.path.join(self.bookpath, self.book + title + ".txt"), "w", encoding="utf-8") as file:
            file.write(text)
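    # Body-extraction sketch (sample modelled on the markup the pattern above
    # assumes):
    #   body = '&nbsp;&nbsp;&nbsp;&nbsp;第一段<br />&nbsp;&nbsp;&nbsp;&nbsp;第二段<br />'
    #   re.findall(r'&nbsp;&nbsp;&nbsp;&nbsp;(.+?)<br />', body)
    #   # -> ['第一段', '第二段']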
if __name__ == '__main__':
    bookname = input("Enter the book title: ")
    while 1:
        try:
            bg_page = int(input("Enter the first chapter to fetch: "))
            end_page = int(input("Enter the last chapter to fetch: "))
        except ValueError:
            print("Please enter numbers")
            continue  # without this, bg_page/end_page could be unbound below
        if bg_page > 0 and end_page >= bg_page:
            break
        print("Invalid chapter range")
    spider = biquSpider(bookname, "gbk")
    isTimeout = spider.search_bk()
    if not isTimeout:
        spider.search_bkPage(spider.pageUrl, bg_page, end_page)
    else:
        print("Connection failed, please restart the program")