A total beginner's first crawl with the Scrapy framework. It looks simple when you watch the videos, but the moment I wrote it myself I hit all kinds of errors. Luckily it still runs in the end. Leaving it here...
import scrapy
from copy import deepcopy


class ZxSpider(scrapy.Spider):
    name = 'zx'
    allowed_domains = ['zxcs.info']
    start_urls = ['http://zxcs.info/']

    def parse(self, response):
        # group by top-level category in the nav bar
        li_list = response.xpath('.//div[@id="nav"]/ul/li')
        for li in li_list:
            item = {}
            item["b_cate"] = li.xpath('./a//text()').extract_first()
            item["b_href"] = li.xpath('./a/@href').extract_first()
            # group by sub-category under each top-level category
            li_2_list = li.xpath('./ul/li')
            for li_2 in li_2_list:
                item["s_cate"] = li_2.xpath('./a/text()').extract_first()
                item["s_href"] = li_2.xpath('./a/@href').extract_first()
                if item["s_href"] is not None:
                    yield scrapy.Request(item["s_href"],
                                         callback=self.parse_book_list,
                                         meta={"item": deepcopy(item)})
                else:
                    yield scrapy.Request(item["b_href"],
                                         callback=self.parse_book_list,
                                         meta={"item": deepcopy(item)})
    def parse_book_list(self, response):
        item = response.meta["item"]
        dl_list = response.xpath('//div[@class="wrap"]/div//dl')
        # one <dl> per book on the list page
        for dl in dl_list:
            item["book_name"] = dl.xpath('./dt/a/text()').extract_first()
            item["book_detail"] = dl.xpath('./dd/text()').extract_first()
            item["book_href"] = dl.xpath('./dt/a/@href').extract_first()
            yield scrapy.Request(
                item["book_href"],
                callback=self.parse_book_detail,
                meta={"item": deepcopy(item)}
            )
        # pagination: read the last page number from the page navigation,
        # then request list pages 2..last_page of this category
        a_pagenavi = response.xpath('//div[@id="pagenavi"]/a/@href').extract()
        if a_pagenavi:
            last_page = int(a_pagenavi[-1].rstrip('/').split('/')[-1])
            # fall back to the top-level category URL when there is no sub-category
            base_url = item["s_href"] if item["s_href"] is not None else item["b_href"]
            for page in range(2, last_page + 1):
                next_url = base_url + "/page/" + str(page)
                yield scrapy.Request(next_url,
                                     callback=self.parse_book_list,
                                     meta={"item": item})
    def parse_book_detail(self, response):
        item = response.meta["item"]
        item["book_img"] = response.xpath('.//div[@id="content"]/div//a/@href').extract_first()
        item["book_img"] = "http://zxcs.info" + item["book_img"]
        item["book_down_url"] = response.xpath('//*[@id="content"]/div/div/div/p/a/@href').extract_first()
        item["book_down_url"] = "http://zxcs.info" + item["book_down_url"]
        print(item)

For the books on the Zhixuan (zxcs.info) site I only go as far as grabbing the download link.
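The spider above only print()s each finished item. As a sketch of one way to persist them instead, here is a minimal Scrapy item pipeline that writes every item as one line of JSON; the class name, output file name, and settings entry are my own assumptions and not part of the original post, and parse_book_detail would need to yield item rather than just printing it for any pipeline to receive data.

# pipelines.py -- minimal sketch, not from the original post.
# Enable it in settings.py, e.g.:
#   ITEM_PIPELINES = {"<your_project>.pipelines.ZxJsonPipeline": 300}
import json

class ZxJsonPipeline:
    def open_spider(self, spider):
        # hypothetical output file name
        self.file = open("zx_books.jsonl", "w", encoding="utf-8")

    def process_item(self, item, spider):
        # the spider yields plain dicts, so dict(item) is effectively a copy here
        self.file.write(json.dumps(dict(item), ensure_ascii=False) + "\n")
        return item

    def close_spider(self, spider):
        self.file.close()

Alternatively, once the spider yields its items, Scrapy's built-in feed export can dump them without any pipeline, e.g. scrapy crawl zx -o books.json.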
lihu5841314 posted on 2021-5-29 19:08:
For the books on the Zhixuan site I only go as far as grabbing the download link.
Keep it up; I'll keep following and come to you for advice.
As long as one of the two, the program or the person, can run, that's good enough.
Getting your first program to run is already not bad. Keep at it.
Nice, learned something here, thanks for sharing.
Getting it to run is already good. It would be even better if the OP wrote up the overall approach.
Learned something, thanks for sharing.
Scrapy is a great tool, and so is XPath; practice more and you'll get the hang of them.
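For anyone who wants to practice the XPath expressions from the spider without running a full crawl, they can be tried interactively in scrapy shell, or against a saved page with parsel, the selector library Scrapy uses internally. A minimal sketch (the file name nav_page.html is hypothetical):

# Practice the spider's XPath expressions outside of Scrapy, using parsel.
# "nav_page.html" is a hypothetical locally saved copy of http://zxcs.info/.
from parsel import Selector

with open("nav_page.html", encoding="utf-8") as f:
    sel = Selector(text=f.read())

# Same expression as in parse(): one selector per top-level nav category.
for li in sel.xpath('//div[@id="nav"]/ul/li'):
    print(li.xpath('./a//text()').get(), li.xpath('./a/@href').get())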