Simple scraper share: downloading images from 站长素材 (sc.chinaz.com)
Updated: the script now fetches the full-resolution images. These images are only meant to be used as test data.
This scraper is a beginner practice exercise, shared here as a simple example.
"""
* @codding : utf-8
* @Time: 2022/1/5 13:19:01
* @system: Windows 11
* @SOFTWARE : PyCharm
* @AuThor :
"""
# Goal: download 10 pages of images from 站长素材
# Page 1: https://sc.chinaz.com/tupian/siwameinvtupian.html
# Page 2: https://sc.chinaz.com/tupian/siwameinvtupian_2.html
# Page 3: https://sc.chinaz.com/tupian/siwameinvtupian_3.html
import os
import random
import time
import urllib.request

from lxml import etree
# 1. Build the request object (the first page has no "_N" suffix in its URL)
def create_request(i):
    if i == 1:
        url = 'https://sc.chinaz.com/tupian/siwameinvtupian.html'
    else:
        url = 'https://sc.chinaz.com/tupian/siwameinvtupian_{}.html'.format(i)
    headers = {
        'Cookie': 'Hm_lvt_398913ed58c9e7dfe9695953fb7b6799=1641359989; Hm_lpvt_398913ed58c9e7dfe9695953fb7b6799=1641360418',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:95.0) Gecko/20100101 Firefox/95.0',
    }
    request = urllib.request.Request(url=url, headers=headers)
    return request
# 2. Fetch the page source
def get_content(request):
    response = urllib.request.urlopen(request)
    content = response.read().decode('utf-8')
    return content
# 3. Parse the page and download every image on it
def down_load(content):
    # Parse the HTML
    tree = etree.HTML(content)
    # Image addresses: the page lazy-loads its thumbnails, so the real URL sits in @src2, not @src
    img_address = tree.xpath('//*[@id="container"]/div/div/a/img/@src2')
    # Image names
    img_name = tree.xpath('//*[@id="container"]/div/div/a/img/@alt')
    # Make sure the output directory exists before urlretrieve writes into it
    os.makedirs('./14_站长素材下载', exist_ok=True)
    # urllib.request.urlretrieve(image URL, file name)
    for i in range(len(img_address)):
        address = img_address[i]
        # Drop the "_s" suffix to get the full-resolution image instead of the thumbnail
        address = address.replace('_s', '')
        name = img_name[i]
        img_url = 'https:' + address
        urllib.request.urlretrieve(url=img_url, filename='./14_站长素材下载/' + name + '.jpg')
if __name__ == "__main__":
start_page = int(input("请输入开始页码(从第1页开始):") or '1')
end_page = int(input("请输入结束页码(最大为8):"))
for i in range(start_page, end_page + 1):
request = create_requset(i)
content = get_content_type(request)
down_load(content)
y = random.randint(5, 10)# 随机生成 5~10的数字
time.sleep(y)
print('---成功下载第:%s 页内容---' % (i))
print('生成:{} ,休眠了{}秒'.format(y, y))
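For clarity, the "get the high-resolution image" trick mentioned above is just a string rewrite: the page lazy-loads protocol-relative thumbnail URLs that end in "_s", and removing that suffix points at the full-size file. A minimal sketch, using a made-up path purely to illustrate the transformation done inside down_load():

# Hypothetical thumbnail path, only to show the "_s" rewrite; not a real file on the site
thumb = '//scpic.chinaz.net/files/pic/pic9/202201/example123_s.jpg'
full_url = 'https:' + thumb.replace('_s', '')
print(full_url)  # https://scpic.chinaz.net/files/pic/pic9/202201/example123.jpg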