发现个不错的壁纸网站
本帖最后由 话痨司机啊 于 2022-7-5 19:27 编辑。先看下效果图吧:
都下载了,30分钟换一张壁纸,都是4K 2K的壁纸
下载源码:
import requests
from pathlib import Path
from lxml import etree
from rich import print
from loguru import logger
from requests.adapters import HTTPAdapter
# Log to img.log next to this script so download failures are kept on disk.
logpath = Path(__file__).parent / 'img.log'
logger.add(str(logpath))
def get_res(url):
    """GET *url* with a desktop-browser User-Agent and up to 5 retries.

    Returns the ``requests.Response``.  Network errors propagate to the
    caller once the retries are exhausted.
    """
    headers = {
        "user-agent":
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36"
    }
    session = requests.Session()
    # Original mounted the retry adapter only for https://; mount both
    # schemes so http URLs are retried too.
    adapter = HTTPAdapter(max_retries=5)
    session.mount('https://', adapter)
    session.mount('http://', adapter)
    return session.get(url, headers=headers, timeout=30)
def parse_src(res):
    """Extract full-size image URLs from a listing-page response.

    Thumbnail ``data-src`` values look like ``.../name-WIDTHxHEIGHT.ext``;
    the original image lives at ``.../name.ext``, so the trailing size
    suffix is stripped.  Returns a list of URLs — empty on parse failure
    (the original returned ``None`` here, which crashed download_img,
    see the traceback at the end of the post).
    """
    try:
        et = etree.HTML(res.text)
        masonry = et.xpath("//div[@class='masonry']")[-1]
        src = masonry.xpath("//article//a[@class='entry-thumbnail']/img/@data-src")
        img_url_list = []
        for s in src:
            # Drop the final "-WIDTHxHEIGHT" segment, keep the extension.
            # (The pasted original chained .split() on a list — a TypeError.)
            stem = "-".join(s.split('-')[:-1])
            ext = s.rsplit('.', 1)[-1]
            img_url_list.append(f"{stem}.{ext}")
        return img_url_list
    except Exception:
        logger.error(f"此页{res.url}访问失败,请重试!")
        return []
def download_img(img_url_list):
    """Download every image URL in *img_url_list* into ./images.

    Already-downloaded files are skipped.  Non-list input (e.g. ``None``
    from a failed parse) is ignored, so one bad page does not abort the
    whole run.  Returns ``None``.
    """
    # The pasted original lost the brackets of the file-name comprehension
    # (see the traceback: `file_name = [...replace("?","") ...]`) and then
    # used the list as a single path; reconstructed per-URL below.
    if isinstance(img_url_list, list):
        path = Path(__file__).parent.joinpath('images')
        path.mkdir(parents=True, exist_ok=True)
        todo = []
        for imgurl in img_url_list:
            # URL tail minus any "?" becomes the local file name.
            name = imgurl.split('/')[-1].replace("?", "")
            if path.joinpath(name).exists():
                print(f"文件{name}已下载不能重复下载")
            else:
                todo.append((name, imgurl))
        for name, imgurl in todo:
            res = get_res(imgurl)
            if res:  # Response truthiness: status code < 400
                with open(str(path.joinpath(name)), 'wb') as f:
                    f.write(res.content)
                print(f'已经成功下载{name},保存在{str(path)}')
def main(startnum=1, endnum=20):
    """Crawl listing pages *startnum*..*endnum* and download their images.

    Pipeline per page: build URL -> fetch (get_res) -> extract image
    links (parse_src) -> download (download_img).
    """
    url = lambda num: f"https://bz.qinggongju.com/page/{num}/"
    # The pasted original lost the bracketed expressions on these lines;
    # reconstructed from the surrounding helpers and the traceback.
    urls = [url(num) for num in range(startnum, endnum + 1)]
    list(map(download_img, [parse_src(get_res(u)) for u in urls]))
if __name__ == "__main__":
    startnum = input('共20页热门图片,请输入开始页面数字:')
    endnum = input('请输入结束页面数字,不能超过20:')
    # int() raised an unhandled ValueError on non-numeric input in the
    # original; route that to the same error message instead of crashing.
    try:
        start, end = int(startnum), int(endnum)
    except ValueError:
        start = end = None
    if start is not None and start >= 1 and end <= 20:
        main(start, end)
    else:
        print(' Error:请重新启动程序输入数字!')
如果质疑缩略图,请看文件属性(家里电脑又下了一遍~):
import requests
from bs4 import BeautifulSoup
def get_pic_real_url_list():
    """Fetch page 1 of the 二次元 category and return detail-page links.

    NOTE(review): the original ``soup = ...`` line was lost in the forum
    paste; reconstructed as collecting the thumbnail anchors' hrefs
    (matching script 1's ``article//a[@class='entry-thumbnail']`` xpath)
    — confirm the selector against the live page.
    """
    headers = {
        "cookie": "Hm_lvt_618c9e04ccc77a6b8c744b5199bd3c3b=1656994115; Hm_lpvt_618c9e04ccc77a6b8c744b5199bd3c3b=1656994762",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.5060.66 Safari/537.36 Edg/103.0.1264.44"
    }
    url = 'https://bz.qinggongju.com/category/%e4%ba%8c%e6%ac%a1%e5%85%83/page/1/'
    html = requests.get(url, headers=headers).content.decode()
    soup = [a.get('href') for a in BeautifulSoup(html, 'lxml').select('article a.entry-thumbnail')]
    return soup
def Download_pic(soup):
    """Print the real download URL behind each detail-page link in *soup*."""
    headers = {
        "cookie": "Hm_lvt_618c9e04ccc77a6b8c744b5199bd3c3b=1656994115; Hm_lpvt_618c9e04ccc77a6b8c744b5199bd3c3b=1656994762",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.5060.66 Safari/537.36 Edg/103.0.1264.44"
    }
    for link in soup:
        detail_html = requests.get(link, headers=headers).content.decode()
        # select() returns a list (no .get); select_one() yields the
        # single #xiazai download anchor.
        real_url = BeautifulSoup(detail_html, 'lxml').select_one('#xiazai').get('href')
        print(real_url)
if __name__ == '__main__':
    # Gather the detail-page links, then print each real download URL.
    s = get_pic_real_url_list()
    Download_pic(s)
"""
日期:2022年 07月 06日10:46
"""
import os
import time
import requests
from bs4 import BeautifulSoup
def get_pic_real_url_list():
    """Fetch page 1 of the 二次元 category and return detail-page links.

    Relies on the module-level ``headers`` defined in the __main__ block.
    NOTE(review): the original ``soup = ...`` line was lost in the forum
    paste; reconstructed as collecting the thumbnail anchors' hrefs —
    confirm the selector against the live page.
    """
    url = 'https://bz.qinggongju.com/category/%e4%ba%8c%e6%ac%a1%e5%85%83/page/1/'
    html = requests.get(url, headers=headers).content.decode()
    soup = [a.get('href') for a in BeautifulSoup(html, 'lxml').select('article a.entry-thumbnail')]
    return soup
def download_pic(soup):
    """Resolve and return the real download URL for each link in *soup*.

    Also prints each resolved URL.  Relies on the module-level ``headers``
    defined in the __main__ block.
    """
    urls = []
    for link in soup:
        detail_html = requests.get(link, headers=headers).content.decode()
        # select() returns a list (no .get); select_one() yields the
        # single #xiazai download anchor.
        real_url = BeautifulSoup(detail_html, 'lxml').select_one('#xiazai').get('href')
        print(real_url)
        urls.append(real_url)
    return urls
# 执行请求图片地址,保存图片的函数
def down_save_img(url_pic):
    """Request one image URL and hand the bytes to save().

    The 4-second sleep throttles requests to dodge the site's anti-crawl.
    Relies on the module-level ``headers`` defined in the __main__ block.
    """
    # os.path.split returns a (head, tail) tuple; basename gives just the
    # file name (the original printed the whole tuple).
    img_name = os.path.basename(url_pic)
    time.sleep(4)
    resp = requests.get(url_pic, headers=headers)
    if resp.status_code == 200:
        print(f'请求{img_name}链接成功')
        save(resp.content, url_pic)
    else:
        print(f'请求出错:代码{resp.status_code},可能反爬了...')
def save(date, url_pic):
    """Write image bytes to D:/新建文件夹/<file name>, skipping existing files.

    ``date`` is the raw image content (the original author's name for
    ``data`` — kept for call compatibility).
    """
    # os.path.split returns a (head, tail) tuple; interpolating it built an
    # invalid Windows path — use basename for just the file name.
    name = os.path.basename(url_pic)
    target = f'D:/新建文件夹/{name}'
    if not os.path.exists(target):
        with open(target, 'wb') as f:
            f.write(date)
        print(f'图片{name}保存成功')
    else:
        print(f'图片{name}已经存在......')
if __name__ == '__main__':
    # Shared request headers: session cookie plus a desktop-browser UA to
    # get past the site's basic anti-crawl checks. Module-level on purpose —
    # the functions above read this global.
    headers = {
        "cookie": "Hm_lvt_618c9e04ccc77a6b8c744b5199bd3c3b=1656994115; "
        "Hm_lpvt_618c9e04ccc77a6b8c744b5199bd3c3b=1656994762",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/103.0.5060.66 Safari/537.36 Edg/103.0.1264.44"
    }
    # Listing page -> detail-page links -> real download URLs -> save each.
    s = get_pic_real_url_list()
    da = download_pic(s)
    for ur in da:
down_save_img(ur) 不错,电脑桌面刚好可以换一换了 壁纸都很精美,爱了爱了,感谢分享。 不错不错 Traceback (most recent call last):
File "C:\Users\Ritsu_Namine\Desktop\pic.py", line 70, in <module>
main(int(startnum),int(endnum))
File "C:\Users\Ritsu_Namine\Desktop\pic.py", line 63, in main
list(map(download_img, )]))
File "C:\Users\Ritsu_Namine\Desktop\pic.py", line 44, in download_img
file_name = .replace("?","") for imgurl in img_url_list]
TypeError: 'NoneType' object is not iterable 学习学习 感觉没有拍摄的清晰啊。不过免费的是这样啦 谢谢分享,这个网站这不错我也来试一下 盘他,谢谢楼主分享PY源码。 感谢大佬的分享