I've been learning web scraping and wanted to practice on something real, so I thought of scraping Yiban's activity ranking (http://www.yiban.cn/yforumprovince/schoolrank/puid/15083998/type/2). I've always thought Yiban was pretty bad, so I figured its anti-scraping measures wouldn't be anything serious either.
The first attempt came back with a 521 status code. I looked at the response content and, after asking my teacher, learned that what comes back is actually JavaScript. Some searching showed that Python's execjs can run that JS: you execute it, pull the cookie out of the result, add it to the request headers, and the second request succeeds. Turns out my school is actually ranked first, hahaha.
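Before the full script, here is a minimal sketch of how PyExecJS evaluates a piece of JavaScript from Python. The JS function below is a made-up placeholder just to show the compile/call API; the real challenge code is whatever the site sends back in the 521 response body.

import execjs

# Hypothetical JS snippet, only to demonstrate the execjs API
js_source = """
function makeToken(seed) {
    return "token_" + seed;
}
"""

ctx = execjs.compile(js_source)     # compile the JS into a reusable context
print(ctx.call("makeToken", "42"))  # runs makeToken("42") and prints "token_42"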
Enough talk, here's the full script:
import re
import requests
import execjs
from bs4 import BeautifulSoup
import os

# Browser-like request headers
base_headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
    "Accept-Encoding": "gzip, deflate",
    "Accept-Language": "zh-CN,zh;q=0.8",
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 UBrowser/6.2.4094.1 Safari/537..*",
}
# Fetch a page and return the response
def page_html(url):
    response = requests.get(url=url, headers=base_headers)
    return response

# Handle the response according to its status code (200 means success)
def parse(response):
    if response.status_code == 521:
        # Anti-bot challenge: run the returned JS, grab the cookie, then request again
        parse_js(response)
        response = page_html(response.url)
        parse_html(response)
    elif response.status_code == 200:
        parse_html(response)
# The site answers 521 with JavaScript, so use execjs to run the returned JS
def parse_js(response):
    # Pull the JS source out of the response
    jsstr = re.search('(function.*)</script>', response.text).group(1)
    # Patch the JS so it returns the decoded string instead of eval'ing it
    jsstr = jsstr.replace('eval("qo=eval;qo(po);")', "return po")
    # Extract the function name and its argument from the setTimeout call
    func, para = re.search(r'setTimeout\("(.*?)\((.*?)\)",', response.text).group(1, 2)
    # Run the JS
    parsejs = execjs.compile(jsstr).call(func, para)
    # Extract the cookie from the result
    cookie = re.search(r"cookie='(.*?);", parsejs).group(1)
    # Add the cookie to the headers
    base_headers["Cookie"] = cookie
# Parse the ranking page and collect (school name, rank) pairs
def parse_html(response):
    list_1 = []
    sum = 0
    soup = BeautifulSoup(response.text, "html.parser")
    soup_main = soup.main
    soup_main_a = soup_main.find_all(class_="name")
    for a in soup_main_a:
        sum = sum + 1
        list_1.append([a.string, sum])
    printlist(list_1, sum)
# Print the ranking (the commented-out lines would also save it to a text file)
def printlist(list_1, sum):
    root = 'C:/Users/l1768/Desktop/'
    path = root + '内容.txt'
    tplt = '{0:{2}<10}\t{1:>10}'
    # chr(12288) is a full-width space, used as fill so the Chinese header lines up
    print(tplt.format("学校名称:", "排名:", chr(12288)))  # "school name:" / "rank:"
    # if not os.path.exists(root):
    #     os.mkdir(path)
    # f = open(path, 'wb')
    for i in range(sum):
        a = '{0:<20}\t{1:^10}'.format(list_1[i][0], list_1[i][1])
        # f.write(a.encode('utf-8'))
        # f.write('\n'.encode('utf-8'))
        print(a)
    # f.close()
    # print("文件保存成功")
if __name__ == '__main__':
    response = page_html("https://www.yiban.cn/yforumprovince/schoolrank/puid/15083998/type/2")
    parse(response)
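The commented-out lines in printlist suggest the ranking was also meant to be written to a text file on the desktop. A minimal sketch of that variant (the path and file name are just the ones from the script above; adjust them for your machine) could look like this:

import os

def save_list(list_1, root='C:/Users/l1768/Desktop/'):
    # Create the folder if needed, then write one "name<TAB>rank" line per entry
    if not os.path.exists(root):
        os.makedirs(root)
    path = os.path.join(root, '内容.txt')
    with open(path, 'w', encoding='utf-8') as f:
        for name, rank in list_1:
            f.write('{0:<20}\t{1:^10}\n'.format(name, rank))
    print("文件保存成功")  # file saved

It could be called from parse_html right after printlist(list_1, sum), e.g. save_list(list_1).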
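One more note on the cookie handling: instead of writing the cookie into base_headers by hand, a requests.Session can carry the headers and cookie on every later request. This is just an alternative sketch that reuses the base_headers and parse_js defined above, not how the original script works:

import requests

session = requests.Session()
session.headers.update(base_headers)  # reuse the same browser-like headers

def fetch(url):
    resp = session.get(url)
    if resp.status_code == 521:
        parse_js(resp)  # puts the clearance cookie into base_headers["Cookie"]
        session.headers["Cookie"] = base_headers["Cookie"]
        resp = session.get(url)  # retry with the cookie attached
    return resp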