使用python的urllib库和正则表达式爬取
网站图片,支持批量下载。
学习地址(自行base64解密):aHR0cDovL3BpYy5uZXRiaWFuLmNvbQ==
(本文仅供学习交流,请勿商用,侵删)
1.可选择图片类型、下载指定几页的内容。
https://img-blog.csdnimg.cn/7a35bf3840e8430ca63cc6a7e04dff75.png
https://img-blog.csdnimg.cn/2019051913241197.png
2.源代码
"""
功能:批量下载网站图片
时间:2022-6-18 16:14:01
作者:倚窗听雨
"""
import urllib.request
import re
import os
# Browser-like headers so the site does not reject the scraper outright.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.106 Safari/537.36",
}
# Base site URL assembled from its dotted components.
tempList = ["pic", "netbian", "com"]
ur = "http://" + ".".join(tempList)
# Filled in by picture(): one {'href': ..., 'title': ...} dict per category.
url_list = []
#获取各类型图片url
def picture(url):
    """Scrape the picture-category menu from *url* into the global url_list.

    Each appended entry is a dict {'href': absolute category URL,
    'title': category label}. The site serves GBK-encoded HTML.
    """
    req = urllib.request.Request(url, data=None, headers=headers)
    html = urllib.request.urlopen(req).read().decode('gbk')
    # The category menu lives in the "classify clearfix" div.
    classify = re.findall(r'<div class="classify clearfix.+</div>', html)
    text = classify[0]
    # One iteration per <a ...> tag in the menu; each pass consumes the
    # leading anchor and truncates the working text past it.
    # (Removed the unused `start = 0` local from the original.)
    for _ in re.findall(r'<a ', classify[0]):
        a = re.search(r'<a ', text)
        end_a = re.search(r'</a>', text)
        anchor = text[a.start():end_a.end()]
        # In <a href="/xxx/" title="yyy">, splitting on '"' puts the href
        # at index 1 and the title at index 3.
        href = url + anchor.split('"')[1]
        title = anchor.split('"')[3]
        url_list.append({'href': href, 'title': title})
        text = text[end_a.end():]
#获取图片链接
def get_pic(url):
    """Walk the list pages of one category and download every picture on the
    pages the user selects. Prints a running report and a final count.
    """
    request = urllib.request.Request(url, headers=headers)
    listing = urllib.request.urlopen(request).read().decode('gbk')
    # The last page number sits right after the class="slh" ellipsis marker.
    pager = re.search(r'class="slh".+[\s].+', listing).group()
    page = pager.split('</a')[0].split('>')[-1]
    print('共', page, '页')
    start_page = int(input('下载起始页:'))
    end_page = int(input('下载到哪页:'))
    total = 0
    for p in range(start_page, end_page + 1):
        # Page 1 is index.html; subsequent pages are index_<n>.html.
        suffix = 'index.html' if p == 1 else 'index_' + str(p) + '.html'
        url2 = url + suffix
        print('\n', url2)
        page_req = urllib.request.Request(url2, headers=headers)
        page_html = urllib.request.urlopen(page_req).read().decode('gbk')
        block = re.findall(r'<ul class="clearfix".+[\s]*.+', page_html)[0]
        print('准备下载第', p, '页图片......')
        # Consume one <li>...</li> entry per iteration of the thumbnail list.
        for _ in re.findall(r'<li>', block):
            li_open = re.search(r'<li>', block)
            li_close = re.search(r'</li>', block)
            item = block[li_open.start():li_close.end()]
            href = ur + item.split('"')[1]
            name = item.split('b>')[-2][:-2]
            total += download(href, name)
            block = block[li_close.end():]
    print('\n下载完成,共下载{}张图片'.format(total))
#下载图片
def download(url,name):
    """Download one picture's full-size image into the local img/ directory.

    url  -- detail-page URL of a single picture
    name -- base filename (without extension) to save under
    Returns 1 on a successful download, 0 when skipped (fetch error or the
    file already exists on disk).
    """
    req = urllib.request.Request(url, headers=headers)
    html = urllib.request.urlopen(req).read().decode('gbk')
    # The full-size image URL is in the anchor tagged id="img"; the 6th
    # quote-delimited field of that line is the site-relative src.
    down_url = re.search(r'<a href="" id="img">.+', html).group()
    down_url = ur + re.split('"', down_url)[5]
    try:
        data = urllib.request.urlopen(down_url, timeout=10).read()
    except Exception as e:
        # Best-effort: report and skip this picture rather than abort the run.
        print(e, '跳过此图片:' + name + '.jpg')
        return 0
    # Create the img directory on first use.
    if not os.path.exists('img'):
        os.makedirs('img')
    # os.path.join instead of the original hard-coded 'img\\' so the path
    # works on every OS, not only Windows (identical result on Windows).
    path = os.path.join('img', name + '.jpg')
    # Only write when the file does not exist yet.
    if not os.path.exists(path):
        with open(path, 'wb') as f:
            f.write(data)
        print(name + '.jpg' + "\t\t下载成功")
        return 1
    print(name + '.jpg' + "\t\t已存在")
    return 0
#主程序
def main():
    """Entry point: scrape the category list, prompt the user for one
    category by number, then hand off to get_pic for downloading.
    """
    picture(ur)
    print('4k图片网欢迎您'.center(20,'*'))
    for i, v in enumerate(url_list):
        print("{}、{}".format(i + 1, v['title']))
    while True:
        try:
            t = int(input('要下载的图片类型(序号):'))
            # Validate against however many categories were actually scraped,
            # instead of the previously hard-coded 1..12 range.
            if not 1 <= t <= len(url_list):
                raise IndexError
            break
        except (ValueError, IndexError):
            # Non-numeric input or an out-of-range number: re-prompt.
            print('请输入正确的序号!')
    get_pic(url_list[t - 1]['href'])
if __name__ == '__main__':
    main()
存储位置可自行修改,现在存在当前目录的img目录下
声明:初次学习爬虫,还有诸多不足,大神勿喷。