# Stumbled upon pyppeteer — this crawler is quite fast; exactly my type.
# Install: pip3 install pyppeteer
# Configure a pip mirror first, otherwise the download is very slow.
import asyncio
from pyppeteer import launch
# Example: scrape the China university rankings (2020)
# --- Disabled example: 2020 "Best Chinese Universities" ranking scraper ---
# The whole example is wrapped in a triple-quoted string so it never runs.
# It loads the ranking page, walks every tbody/tr via XPath, and writes
# rank / school / province / type / score into an openpyxl sheet
# (the `Workbook` import is omitted while the example is disabled).
'''
# 设置表格
wb = Workbook()
sheet = wb.active
sheet.title = '中国大学排名'
sheet['A1'].value = '序号'
sheet['B1'].value = '学校名称'
sheet['C1'].value = '省市'
sheet['D1'].value = '学校类型'
sheet['E1'].value = '总分'
async def main():
id = 1
browser = await launch({'headless': True, # 无头模式
'args': ['--disable-infobars',
'--window-size=1024,800'
]
})
page = await browser.newPage()
await page.setViewport({'width': 1024, 'height': 800})
await page.goto('http://www.zuihaodaxue.cn/zuihaodaxuepaiming2020.html')
# xpath 获取表格位置
tbody = await page.xpath('//tbody/tr')
for i in tbody:
# 获取文本:方法一,通过getProperty方法获取
# title_str1 = await (await item.getProperty('textContent')).jsonValue()
# 获取文本:方法二,通过evaluate方法获取
# title_str2 = await page.evaluate('item => item.textContent', item)
# 获取链接:通过getProperty方法获取
# title_link = await (await item.getProperty('href')).jsonValue()
title_str1 = await (await i.getProperty('textContent')).jsonValue()
srt = title_str1.splitlines()
id += 1
sheet['A%s' % (id)].value = id - 1
sheet['B%s' % (id)].value = srt[2]
sheet['C%s' % (id)].value = srt[3]
sheet['D%s' % (id)].value = srt[4]
sheet['E%s' % (id)].value = srt[5]
await browser.close()
asyncio.get_event_loop().run_until_complete(main())
wb.save('爬虫1-中国大学排名.xlsx')
'''
# Example: Douban Movie Top 250
# --- Disabled example: Douban Movie Top 250 scraper ---
# Wrapped in a triple-quoted string so it never runs. It pages through the
# Top-250 list (25 entries per page), extracts title / rating / link /
# poster for each <li> via XPath, and stores them in an openpyxl sheet
# (the `Workbook` import is omitted while the example is disabled).
'''
#表格设置
wd=Workbook()
sheet=wd.active
sheet.title='豆瓣电影 Top 250'
sheet['A1'].value = '序号'
sheet['B1'].value = '电影名称'
sheet['C1'].value = '电影评分'
sheet['D1'].value = '电影链接'
sheet['E1'].value = '电影图片'
ws = wd[wd.sheetnames[0]]
ws.column_dimensions['B'].width = 23.0 # 调整列B宽
ws.column_dimensions['D'].width = 45.0 # 调整列D宽
ws.column_dimensions['E'].width = 80.0 # 调整列E宽
async def main():
browser=await launch()
page=await browser.newPage()
id = 1
for start in range(0,250,25):
await page.goto('https://movie.douban.com/top250?start=%s&filter='%(start))
ol=await page.Jx('//ol/li')
for i in ol:
title_str1 = await (await i.getProperty('textContent')).jsonValue()
img=await i.Jx('.//img')
span=await i.Jx('.//div[@class="star"]/span')
a=await i.Jx('.//div[@class="pic"]/a')
id += 1
sheet['A%s' % (id)].value = id - 1
sheet['B%s' % (id)].value = await (await img[0].getProperty('alt')).jsonValue()
sheet['C%s' % (id)].value = await (await span[1].getProperty('textContent')).jsonValue()
sheet['D%s' % (id)].value = await (await a[0].getProperty('href')).jsonValue()
sheet['E%s' % (id)].value = await (await img[0].getProperty('src')).jsonValue()
await browser.close()
asyncio.get_event_loop().run_until_complete(main())
wd.save('爬虫2-豆瓣电影 Top 250.xlsx')
'''
# Example: scrape Autohome (autohome.com.cn) news
# --- Disabled example: Autohome news scraper ---
# Wrapped in a triple-quoted string so it never runs. For every item under
# ul.article it prints the link, the headline and the summary paragraph;
# the `if a:` guard skips placeholder <li> items without an anchor.
'''
async def main():
browser = await launch()
page = await browser.newPage()
await page.goto('https://www.autohome.com.cn/news/')
li = await page.Jx('//ul[@class="article"]/li')
for i in li:
a = await i.Jx('.//a')
h3 = await i.Jx('.//h3')
p = await i.Jx('.//p')
if a:
print(await (await a[0].getProperty('href')).jsonValue())
print(await (await h3[0].getProperty('textContent')).jsonValue())
print(await (await p[0].getProperty('textContent')).jsonValue())
await browser.close()
asyncio.get_event_loop().run_until_complete(main())
'''
# Example: doutula.com meme images
# --- Disabled example: doutula.com meme-image scraper ---
# Wrapped in a triple-quoted string so it never runs. Opens a visible
# (non-headless) browser, scrolls the page in small steps so lazy-loaded
# images materialise, then prints the src of every <img> under
# .page-content.
'''
async def main():
browser = await launch({'headless': False})
page = await browser.newPage()
await page.goto('https://www.doutula.com/photo/list?page=0')
for i in range(0,3600,50):
# 缓慢滚动页面显示图片
await page.evaluate('window.scrollBy(0, %s)'%i)
await page.waitFor(3 * 100)
div=await page.J('.page-content')
a=await div.Jx('.//img')
for i in a:
print(await (await i.getProperty('src')).jsonValue())
await browser.close()
asyncio.get_event_loop().run_until_complete(main())
'''
# 梨视频
#
# array=[]
# async def sort_data1(data):
# for i in data:
# dic={}
# div=await i.J('.vervideo-name')
#
# dic['title']=await (await div.getProperty('textContent')).jsonValue()
# dic['url']=await (await i.getProperty('href')).jsonValue()
# array.append(dic)
#
# async def sort_data2(data):
# for i in data:
# dic={}
# div=await i.J('.vervideo-title')
# dic['title']=await (await div.getProperty('textContent')).jsonValue()
# dic['url']=await (await i.getProperty('href')).jsonValue()
# array.append(dic)
# async def main():
# browser = await launch({'headless': False,'executablePath':r'C:\Users\haohao\AppData\Local\Google\Chrome\Application\chrome.exe'})
# page = await browser.newPage()
# await page.goto('https://www.pearvideo.com/')
# a=await page.JJ('.vervideo-tbd .vervideo-lilink')
# aa=await page.JJ('.vervideo-bd .vervideo-lilink')
# await sort_data1(a)
# await sort_data2(aa)
# print(len(array))
# for i in array:
# await page.goto(i['url'])
# await page.click('.play-icon')
# await page.waitFor(4* 1000)
# try:
# video=await page.J('video')
# mp4=await (await video.getProperty('src')).jsonValue()
# i['mp4']=mp4
# print(mp4)
# print(i['url'])
#
# except:
# pass
#
# print(array)
# await browser.close()
# asyncio.get_event_loop().run_until_complete(main())
# NOTE: scraping with a local Chrome build (see executablePath above) runs without errors.
# County-level-and-above administrative division codes of the PRC (May 2020)
async def main():
    """Scrape the Ministry of Civil Affairs page and print region codes.

    Loads the official division-code table, selects the data rows (they
    carry height="19", which header/footer rows do not), and prints the
    code (column 1) and name (column 2) of each row.
    """
    browser = await launch()
    try:
        page = await browser.newPage()
        await page.goto('http://www.mca.gov.cn///article/sj/xzqh/2020/2020/2020072805001.html')
        rows = await page.Jx('//tbody/tr[@height="19"]')
        for row in rows:
            cells = await row.Jx('.//td')
            code = await (await cells[1].getProperty('textContent')).jsonValue()
            name = await (await cells[2].getProperty('textContent')).jsonValue()
            print(code, name)
    finally:
        # Always release the Chromium instance — the original never called
        # browser.close(), leaking the browser process on every run and on
        # any scraping error.
        await browser.close()

asyncio.get_event_loop().run_until_complete(main())