为什么直接打印 None,而不是打印标签内容?
本帖最后由 一只凉不了 于 2020-9-16 23:26 编辑

import requests
from lxml import etree
class Tieba(object):
    """Crawler for a Baidu Tieba forum.

    Walks the forum's thread-list pages one by one, printing each thread's
    title and link, until no "next page" link is found.
    """

    def __init__(self, name):
        # First-page URL for the forum called *name*.
        self.url = "https://tieba.baidu.com/f?ie=utf-8&kw={}".format(name)
        # Browser-like UA so the site serves the normal desktop page.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36"
        }

    def get_data(self, url):
        """Fetch *url* and return the raw response body as bytes."""
        response = requests.get(url, headers=self.headers)
        return response.content

    def parse_data(self, data):
        """Parse one thread-list page.

        Returns a tuple ``(data_list, next_url)`` where ``data_list`` is a
        list of ``{'title': ..., 'link': ...}`` dicts and ``next_url`` is the
        absolute URL of the next page, or ``None`` on the last page.
        """
        # Tieba wraps much of the page body in HTML comments; strip the
        # comment markers so the real markup becomes parseable.
        data = data.decode().replace("<!--", "").replace("-->", "")
        html = etree.HTML(data)
        el_list = html.xpath('//*[@id="thread_list"]/li[@class=" j_thread_list clearfix"]/div/div/div/div/a')
        data_list = []
        for el in el_list:
            temp = {}
            temp['title'] = el.xpath('./text()')[0]
            temp['link'] = 'https://tieba.baidu.com' + el.xpath('./@href')[0]
            data_list.append(temp)
        # BUG FIX: the original '//*/@href' matched the FIRST href attribute
        # anywhere in the document (typically a stylesheet or nav link), not
        # the pager — so next_url was wrong or None from the start. Target
        # the pager's "next page" anchor explicitly instead.
        try:
            next_url = 'https:' + html.xpath(
                '//a[contains(@class, "next") and contains(@class, "pagination-item")]/@href'
            )[0]
        except IndexError:
            # No "next page" anchor on the last page: signal the caller to stop.
            next_url = None
        return data_list, next_url

    def save_data(self, data_list):
        """Print every extracted thread record."""
        for data in data_list:
            print(data)

    def run(self):
        """Crawl page after page until there is no next-page link."""
        next_url = self.url
        while next_url is not None:
            # Fetch the page.
            data = self.get_data(next_url)
            # Extract the thread records and the next-page URL.
            data_list, next_url = self.parse_data(data)
            self.save_data(data_list)
            print(next_url)
if __name__ == '__main__':
    # Entry point: crawl the "西京" forum and print every thread found.
    spider = Tieba("西京")
    spider.run()
我怎么爬不了呀?是不是爬多了,IP 被封了? 有可能是网站探测到爬虫,直接禁止访问了。 IP 问题? 先抓一些免费的代理,用代理爬虫。爬的时候请求之间要适当地 sleep 一下,爬得太快容易封 IP。
页:
[1]