Scraping Pretty Girl Photos
While browsing the forum recently, I came across this thread: https://www.52pojie.cn/thread-1404328-1-1.html
That post scrapes images from https://www.vmgirls.com/.
I've been learning web scraping lately, so I figured this site would make good practice.
https://www.vmgirls.com/archives.html is an entry point that links to every article, so that's where I started.
Source code: there are two versions; the second one uses async coroutines, so it downloads a bit faster.
import os
import re
import time

import requests

# Request headers
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/99.0.4844.74 Safari/537.36 Edg/99.0.1150.52 '
}


# Collect every article link from the archive page
def get_url(url):
    resp = requests.get(url, headers=headers)
    # Oddly, the source shown in F12 differs from what resp.text returns,
    # so this pattern targets the raw HTML the server sends, not the rendered DOM
    pattern = '<a target=_blank style="color:.*?" href=(.*?)>'
    # arch_url_lst holds the relative links to all articles
    arch_url_lst = re.findall(pattern, resp.text)
    arch_urls_new = []
    url_head = 'https://www.vmgirls.com/'
    # Turn the relative links into absolute URLs
    for url in arch_url_lst:
        arch_urls_new.append(url_head + url)
    resp.close()
    return arch_urls_new


# Parse an article page: extract the title and the image URLs
def parse_html(arch_url):
    resp = requests.get(arch_url, headers=headers)
    print(arch_url + ' ' + str(resp.status_code))
    html = resp.text
    resp.close()
    title_pattern = '<h1 class=.*?>(.*)</h1>'
    img_pattern = '<a href="(.*?)" alt=.*?</a>'
    # re.findall returns a list, so keep only the first match as the title
    title = re.findall(title_pattern, html)[0]
    img_url_lst = re.findall(img_pattern, html)
    url_head = 'https:'
    img_url_lst_new = []
    # The image links are protocol-relative; prepend the scheme
    for url in img_url_lst:
        img_url_lst_new.append(url_head + url)
    return title, img_url_lst_new


# Download a single image into a folder named after its article
def download_img(img_url, title):
    resp = requests.get(img_url, headers=headers)
    content = resp.content
    resp.close()
    dir_name = title
    img_name = img_url.split('/')[-1]
    # makedirs with exist_ok=True also creates any missing parent directories
    os.makedirs(f'../data/meimei/{dir_name}', exist_ok=True)
    print(dir_name + ' --> ' + img_name + ' downloading...')
    with open(f'../data/meimei/{dir_name}/{img_name}', 'wb') as f:
        f.write(content)
    print(dir_name + ' --> ' + img_name + ' done!')


# Main entry point
def main():
    url = 'https://www.vmgirls.com/archives.html'
    arch_urls = get_url(url)
    num = int(input('How many articles to download images from: '))
    for arch_url in arch_urls[:num]:
        arch_title, img_urls = parse_html(arch_url)
        for img_url in img_urls:
            download_img(img_url, arch_title)


if __name__ == '__main__':
    start = time.time()
    main()
    end = time.time()
    print(f'All done, elapsed: {end - start}s')
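About the odd comment in get_url: what F12 shows is the DOM after the site's JavaScript has run, while resp.text is the raw HTML the server sent, so the two can legitimately differ. When writing the regex, it helps to dump resp.text to a file and inspect that instead of the Elements panel. A minimal sketch of the idea (the output filename archives_raw.html is my own choice, not part of the script above):

import requests

headers = {'User-Agent': 'Mozilla/5.0'}  # any desktop UA string will do here

# Save the raw server response so it can be compared with the F12 Elements view;
# any links that appear only in F12 are being injected client-side by JavaScript.
resp = requests.get('https://www.vmgirls.com/archives.html', headers=headers)
with open('archives_raw.html', 'w', encoding='utf-8') as f:
    f.write(resp.text)
print(resp.status_code, len(resp.text))

Next is the async-coroutine version: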
import asyncio
import os
import random
import re
import time

import aiofile
import aiohttp
import requests

# Request headers
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/99.0.4844.74 Safari/537.36 Edg/99.0.1150.52 '
}


# Collect every article link from the archive page (same as the sync version)
def get_url(url):
    resp = requests.get(url, headers=headers)
    # As before, the F12 source differs from resp.text; match the raw HTML
    pattern = '<a target=_blank style="color:.*?" href=(.*?)>'
    # arch_urls holds the relative links to all articles
    arch_urls = re.findall(pattern, resp.text)
    arch_urls_new = []
    url_head = 'https://www.vmgirls.com/'
    # Turn the relative links into absolute URLs
    for url in arch_urls:
        arch_urls_new.append(url_head + url)
    resp.close()
    return arch_urls_new


# Parse one article and download all of its images
async def download_img(arch_url):
    # Pause a random 1-2 seconds; asyncio.sleep yields to other coroutines,
    # whereas time.sleep here would block the entire event loop
    await asyncio.sleep(random.randint(1, 2))
    async with aiohttp.ClientSession() as session:
        async with session.get(arch_url, headers=headers) as resp:
            html = await resp.text()
        title_pattern = '<h1 class=.*?>(.*)</h1>'
        img_pattern = '<a href="(.*?)" alt=.*?</a>'
        # re.findall returns a list, so keep only the first match as the title
        arch_title = re.findall(title_pattern, html)[0]
        img_url_lst = re.findall(img_pattern, html)
        url_head = 'https:'
        # The image links are protocol-relative; prepend the scheme
        for one_url in img_url_lst:
            new_url = url_head + one_url
            async with session.get(new_url, headers=headers) as resp_2:
                content = await resp_2.content.read()
            dir_name = arch_title
            img_name = new_url.split('/')[-1]
            os.makedirs(f'../data/meimei/{dir_name}', exist_ok=True)
            print(dir_name + ' --> ' + img_name + ' downloading...')
            async with aiofile.async_open(f'../data/meimei/{dir_name}/{img_name}', 'wb') as f:
                await f.write(content)
            print(dir_name + ' --> ' + img_name + ' done!')


# Main coroutine: one task per article
async def main():
    url = 'https://www.vmgirls.com/archives.html'
    arch_urls = get_url(url)
    num = int(input('How many articles to download images from: '))
    tasks = [asyncio.ensure_future(download_img(arch_url)) for arch_url in arch_urls[:num]]
    await asyncio.wait(tasks)


if __name__ == '__main__':
    start = time.time()
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
    end = time.time()
    print(f'All done, elapsed: {end - start}s')
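The random pause at the top of download_img spaces the requests out a little, but every selected article still runs concurrently. If the site ever starts refusing connections, a common pattern is to cap the number of in-flight tasks with asyncio.Semaphore. A minimal sketch of that idea (limited_download and the limit of 3 are my own illustration, not part of the script above):

import asyncio

# Allow at most 3 article downloads in flight at once (the limit is arbitrary)
sem = asyncio.Semaphore(3)

async def limited_download(arch_url):
    # The semaphore is acquired here and released automatically on exit
    async with sem:
        await download_img(arch_url)  # the coroutine defined in the script above

# main() would then build its task list as:
# tasks = [asyncio.ensure_future(limited_download(u)) for u in arch_urls[:num]]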
Speed comparison (both runs downloaded the images from 9 articles).
Hswyc posted on 2022-3-29 08:47: you can detach the developer tools into their own window first, and then open the site and use them normally.

That doesn't work for me; even after picking that option and reopening the site, the page still closes itself automatically.
Also, just yesterday someone mentioned that the site has a JS script that detects F12 (keyword: devtool) and that blocking it would be enough, but on my end I can't find any such script, neither in the browser's Network panel nor in Fiddler.

If I've made any mistakes, corrections are welcome.

Python is so powerful; life is short, I use Python. I just started learning Python recently and am about 20 lectures in, up to string operations. There's a Bilibili course that covers this part quite well, recommended to anyone who wants to learn: https://www.bilibili.com/video/BV1fa411i7q4?p=25&spm_id_from=333.1007.top_right_bar_window_history.content.click

This site really did close the page on me the moment I opened F12!