Scraping Mzitu Images with Python 3 and Multithreading
Here's my own crawler; it scrapes images from the Mzitu site. Make sure you're on Python 3 before using it.
1. Install the dependencies
pip install requests
pip install lxml
pip install feedparser
2. Create a folder and put this script in it
3. Run the script
python mzitu.py
A note: I took a lazy shortcut here. The site exposes an RSS feed, so I simply pull the post links out of it. The feed happens to contain exactly 20 links, which is why get_url() loops over range(20). There's no exception handling, so going above 20 raises an error; you can use fewer, but not more. If you want every post link instead, modify the get_url() function yourself (see the sketch after the next note).
Heads-up: as long as your environment and dependencies are in order, you can copy the code below and run it as-is; it works on both Windows 10 and Linux.
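For reference, a minimal sketch of that get_url() modification, assuming you want every link the feed actually returns rather than a hardcoded 20 (same feedparser API as below):

import feedparser

def get_url():
    feeds = feedparser.parse('https://www.mzitu.com/feed/')
    # Take however many entries the feed returns instead of assuming 20.
    return [entry.link for entry in feeds.entries]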
# -*- coding: UTF-8 -*-
import feedparser
import requests
from lxml import etree
import threading
import os

def get_url():
    # The RSS feed carries exactly 20 entries; collect each post link.
    rss_url = 'https://www.mzitu.com/feed/'
    feeds = feedparser.parse(rss_url)
    page_url = []
    for i in range(20):
        page_url.append(feeds.entries[i]['link'])
    return page_url
def download(dirname, imgurl):
    # The site checks the Referer header, so send browser-like headers.
    headers = {
        'referer': 'https://www.mzitu.com/',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'
    }
    filename = imgurl.split('/')[-1]
    r = requests.get(imgurl, headers=headers, stream=True)
    # exist_ok avoids a race when several threads create the same folder.
    os.makedirs(dirname, exist_ok=True)
    with open(dirname + '/' + filename, 'wb') as f:
        for chunk in r.iter_content(chunk_size=32):
            f.write(chunk)
        print('Downloading %s' % filename)
def get_img(url):
    r = requests.get(url)
    page = etree.HTML(r.text)
    # The pagination span holds the page count; the h2 holds the gallery title.
    span = page.xpath('/html/body/div/div/div/a/span')
    hs = page.xpath('//h2[@class="main-title"]')
    title = ''
    for h in hs:
        title = h.text
    pages = '0'
    for a in span:
        pages = a.text
    try:
        # Page 1 is the gallery URL itself; later pages append /2, /3, ...
        for i in range(1, int(pages) + 1):
            imgpage = url if i == 1 else url + '/' + str(i)
            r1 = requests.get(imgpage)
            page1 = etree.HTML(r1.text)
            x_href = page1.xpath('/html/body/div/div/div/p/a/img')
            for href in x_href:
                imgurl = href.get('src')
                download(title, imgurl)
    except KeyboardInterrupt:
        pass
    except Exception:
        # Swallow per-gallery errors so one bad page doesn't kill the thread.
        pass
def main():
    urls = get_url()
    threads = []
    for i in range(len(urls)):
        # Hand each thread its own URL; args=(urls,) would pass the
        # whole list to every thread.
        t = threading.Thread(target=get_img, args=(urls[i],))
        threads.append(t)
    for i in threads:
        i.start()
    for i in threads:
        i.join()

if __name__ == '__main__':
    main()
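One thread per URL is fine for 20 links, but if you extend get_url() the thread count grows with the list. A sketch of an alternative main() using the standard library's thread pool to cap concurrency (max_workers=5 is just a value I picked; tune it yourself):

from concurrent.futures import ThreadPoolExecutor

def main():
    urls = get_url()
    # At most 5 galleries download concurrently, however long urls gets.
    with ThreadPoolExecutor(max_workers=5) as pool:
        pool.map(get_img, urls)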
Update: the get_url2() below skips the RSS feed and instead collects gallery URLs from the site's archive page.
def get_url2():
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36'
    }
    rss_url = 'https://www.mzitu.com/all/'
    r = requests.get(rss_url, headers=headers)
    page = etree.HTML(r.text)
    # Every gallery link on the archive page sits in an <a> under the list items.
    result = page.xpath('/html/body/div/div/div/ul/li/p/a')
    print('Found %d galleries' % len(result))
    page_url = []
    for x in result:
        page_url.append(x.get('href'))
    return page_url
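Note that /all/ is the full archive, so len(result) will likely run into the thousands, while the RSS route only ever sees the newest 20 posts.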
This collects all of the gallery URLs from the daily-updates archive.
And the modified main() function:
def main():
    # get_img("https://www.mzitu.com/177416")  # test a single gallery
    newUrls = get_url2()
    urls = []
    flag = int(input('Enter the number of galleries to download: '))
    # Take the first `flag` gallery URLs from the archive list.
    for x in range(flag):
        urls.append(newUrls[x])
    threads = []
    for i in range(len(urls)):
        # As above: each thread gets its own URL, not the whole list.
        t = threading.Thread(target=get_img, args=(urls[i],))
        threads.append(t)
    for i in threads:
        i.start()
    for i in threads:
        i.join()
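A session then looks roughly like this (the gallery count and filenames here are illustrative, not real output):

$ python mzitu.py
Found 4000 galleries
Enter the number of galleries to download: 3
Downloading 01.jpg
Downloading 02.jpg
...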
C:\Users\Administrator\AppData\Local\Programs\Python\Python35-32\python.exe H:/python-leason/12.py
Traceback (most recent call last):
File "H:/python-leason/12.py", line 3, in <module>
from lxml import etree
ImportError: cannot import name 'etree'
Process finished with exit code 1
What's causing this error?
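For what it's worth, that ImportError usually means lxml isn't actually installed into the interpreter being run (here a 32-bit Python 3.5), or a stray local file or folder named lxml is shadowing the real package. A common fix is to reinstall with that interpreter's own pip:

C:\Users\Administrator\AppData\Local\Programs\Python\Python35-32\python.exe -m pip install lxml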