利用 GitHub Actions,每天自动爬取 Bing 每日壁纸并上传到百度网盘
# Standard library imports. NOTE: the original line fused two statements
# ("import requestsimport base64"), which is a SyntaxError — split them.
import base64
import os
import time
from datetime import datetime
from datetime import timedelta
from datetime import timezone

# Third-party imports.
import ddddocr
import requests
from bs4 import BeautifulSoup

# Baidu Netdisk cookie, injected by the GitHub Actions secret BAIDU_COOKIE.
# Raises KeyError immediately if the secret is not configured.
baidu_cookie = os.environ["BAIDU_COOKIE"]

# Fixed UTC+8 (Shanghai) timezone used to date-stamp the target folder.
SHA_TZ = timezone(
    timedelta(hours=8),
    name='Asia/Shanghai',
)
utc_now = datetime.utcnow().replace(tzinfo=timezone.utc)
# Current Beijing time; beijing_now.date() names the daily netdisk folder.
beijing_now = utc_now.astimezone(SHA_TZ)

# Headers for fetching the Bing homepage.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',
}
# Headers for pan.baidu.com; the cookie must carry BAIDUID, BDUSS and STOKEN.
baidu_headers = {
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/"
                  "97.0.4692.99 Safari/537.36 Edg/97.0.1072.69",
    "Referer": "https://pan.baidu.com/disk/home?from=newversion&stayAtHome=true",
    "cookie": baidu_cookie,
    'Connection': 'close'
}
# Fetch today's Bing wallpaper URL.
def get_bing_pic():
    """Scrape the Bing China homepage for today's wallpaper.

    Returns:
        (img_url, img_name): full image URL (4K "UHD" variant when the
        HD marker is present) and the file name taken from its ``id=``
        query parameter.
    """
    url = 'https://cn.bing.com/'
    r = requests.get(url, headers=headers)
    soup = BeautifulSoup(r.text, 'html.parser')
    # The <link id="preloadBg"> element preloads the background image.
    link = soup.find('link', id='preloadBg')
    img_url = link['href']
    # Keep only the part before the first '&' (path plus the id= parameter).
    # The original kept the whole split list, so the checks below never
    # matched and the string concatenation raised TypeError.
    img_url = img_url.split('&')[0]
    # '1920x1080' marks the HD asset; 'UHD' selects the 4K variant.
    if '1920x1080' in img_url:
        img_url = img_url.replace('1920x1080', 'UHD')
    img_url = "https://s.cn.bing.net" + img_url
    # File name is the value of the id= query parameter.
    img_name = img_url.split('=')[-1]
    return img_url, img_name
# Check that the stored cookie still authenticates against Baidu Netdisk.
def is_login():
    """Return True when the netdisk home page confirms a logged-in session."""
    home = 'https://pan.baidu.com/disk/home?from=newversion&stayAtHome=true#/all?path=%2F&vmode=list'
    resp = requests.get(home, headers=baidu_headers)
    # A logged-in session renders the "all files" page title.
    logged_in = '百度网盘-全部文件' in resp.text
    if logged_in:
        print('网盘登录成功')
    else:
        print("更换COOKIE")
    return logged_in
# Obtain the logid and bdstoken needed by the pan.baidu.com APIs.
def get_logid_bdstoken():
    """Fetch the API bdstoken and derive dp-logid from the BAIDUID cookie.

    Returns:
        (logid, bdstoken): logid is the base64 of the first cookie pair
        with its "BAIDUID=" prefix characters stripped; bdstoken comes
        from the gettemplatevariable endpoint.
    """
    get_bdstoken_url = "http://pan.baidu.com/api/gettemplatevariable?clienttype=0&app_id=250528&web=1&fields=[%22bdstoken%22]"
    # One request serves both purposes (the original issued two).
    r = requests.get(get_bdstoken_url, headers=baidu_headers)
    bdstoken = r.json().get('result').get('bdstoken')
    # The original called .strip() on the *list* returned by split(';'),
    # which raises AttributeError — index the first cookie pair first.
    cookie_sent = r.request.headers.get('cookie')
    baiduid = cookie_sent.split(';')[0].strip("BAIDUID=")
    # decode() is the clean form of the original str()/replace("b'") hack.
    logid = base64.b64encode(baiduid.encode('utf-8')).decode('ascii')
    return logid, bdstoken
# Ensure the dated target folder (/图片/<date>) exists on the netdisk.
def is_dir_exist(logid, bdstoken, _created=False):
    """Return True once /图片/<today> exists, creating it when missing.

    Args:
        _created: internal recursion guard — True on the verification pass
            after a create call, preventing infinite recursion if the
            create API keeps failing. Callers omit it (backward compatible).
    """
    url = f"https://pan.baidu.com/api/list?clienttype=0&app_id=250528&web=1&dp-logid={logid}&order=time&desc=1&dir=/图片&num=100&page=1"
    url1 = f"https://pan.baidu.com/api/create?a=commit&channel=chunlei&web=1&app_id=250528&bdstoken={bdstoken}&logid={logid}&clienttype=0"
    data = {
        "path": "/图片/" + str(beijing_now.date()),
        "isdir": 1,
        'block_list': []
    }
    r = requests.get(url, headers=baidu_headers)
    # Scan the WHOLE listing before deciding. The original put the create
    # call in an else attached to the if, so the first non-matching entry
    # triggered creation and recursion; an empty listing returned None.
    for entry in r.json().get('list') or []:
        if str(beijing_now.date()) in entry.get('server_filename'):
            return True
    if _created:
        # Create was already attempted and the folder still isn't listed.
        return False
    requests.post(url1, headers=baidu_headers, data=data)
    return is_dir_exist(logid, bdstoken, _created=True)
def is_file_exist(logid, img_name):
    """Return True when today's wallpaper still needs to be downloaded.

    Lists /图片/<today>: True if the folder is empty or img_name is absent,
    False if a file whose path contains img_name is already there.
    """
    url = f"https://pan.baidu.com/api/list?clienttype=0&app_id=250528&web=1&dp-logid={logid}&order=time&desc=1&dir=/图片/{str(beijing_now.date())}&num=100&page=1"
    # 发送请求
    r = requests.get(url=url, headers=baidu_headers)
    entries = r.json().get("list")
    # Empty folder: nothing downloaded yet today.
    if not entries:
        print("今日未下载")
        return True
    for i in entries:
        if img_name in i.get('path'):
            print('今天图片下载完成')
            return False
    # Folder has other files but not today's image. The original fell off
    # the end here, returning None (falsy) and wrongly skipping the download.
    return True
# Queue the image as a netdisk offline-download task.
def download_offline(logid, bdstoken, img_url):
    """Submit img_url to the cloud_dl add_task API, saving into /图片/<today>.

    Retries on an invalid-source-url error and hands off to
    download_offline_captcha when the API demands a captcha.
    """
    download_offline_url = f"https://pan.baidu.com/rest/2.0/services/cloud_dl?channel=chunlei&web=1&app_id=250528&bdstoken={bdstoken}&logid={logid}&clienttype=0"
    data = {
        "method": "add_task",
        "app_id": "250528",
        "source_url": img_url,
        "save_path": f"/图片/{str(beijing_now.date())}"  # netdisk target folder
    }
    r = requests.post(download_offline_url, headers=baidu_headers, data=data)
    resp = r.json()
    # NOTE(review): the original wrote `"Invalid source url" in r.json()`,
    # i.e. membership among the dict *keys*, which can never match an error
    # message. Checking the serialized body matches the stated intent —
    # TODO confirm the exact cloud_dl error payload shape.
    if "Invalid source url" in str(resp):
        print("重新下载 下载地址无效,请核对")
        download_offline(logid, bdstoken, img_url)
    # A 'vcode' key in the response means a captcha challenge was issued.
    if 'vcode' in resp:
        print("需要验证码调用")
        download_offline_captcha(logid, bdstoken, img_url, r)
# Offline download variant for when the API demands a captcha.
def download_offline_captcha(logid, bdstoken, img_url, request):
    """Resubmit the offline-download task with an OCR-solved captcha.

    `request` is the prior response whose JSON carries the captcha image
    URL ('img') and challenge token ('vcode'). Recurses after a short
    pause while the API keeps rejecting the answer.
    """
    task_url = (
        "https://pan.baidu.com/rest/2.0/services/cloud_dl?channel=chunlei"
        f"&web=1&app_id=250528&bdstoken={bdstoken}&logid={logid}&clienttype=0"
    )
    # Solve the captcha image with ddddocr.
    solver = ddddocr.DdddOcr()
    challenge = request.json()
    image_bytes = requests.get(challenge['img'], headers=baidu_headers).content
    answer = solver.classification(image_bytes)
    print(answer)
    payload = {
        "method": "add_task",
        "app_id": "250528",
        "source_url": img_url,
        "save_path": f"/图片/{str(beijing_now.date())}",  # netdisk target folder
        "input": answer,
        "vcode": challenge['vcode'],
    }
    reply = requests.post(task_url, headers=baidu_headers, data=payload)
    print(reply.json())
    if 'vcode' in reply.json():
        # Wrong guess — wait briefly, then retry with a fresh OCR pass.
        time.sleep(3)
        print('重新验证')
        download_offline_captcha(logid, bdstoken, img_url, request)
# Poll the dated folder until today's download shows up.
def is_download_success(logid, img_name):
    """Poll /图片/<today> until the offline-download task produces a file.

    Returns True once a 'th.jpg' entry appears (the raw downloaded file,
    still to be renamed), or False if a file matching img_name already
    exists (nothing left to rename).
    NOTE(review): recurses with no retry cap, so a permanently stalled task
    would hit Python's recursion limit — confirm acceptable for CI use.
    """
    time.sleep(2)
    # Listing URL for today's folder.
    url = f"https://pan.baidu.com/api/list?clienttype=0&app_id=250528&web=1&dp-logid={logid}&order=time&desc=1&dir=/图片/{str(beijing_now.date())}&num=100&page=1"
    # Send the request.
    r = requests.get(url, headers=baidu_headers)
    # Empty folder: the task has produced no file yet — wait and poll again.
    if len(r.json().get("list")) == 0:
        print("未下载完成")
        time.sleep(2)
        return is_download_success(logid, img_name)
    # A file already named img_name means rename was done previously.
    for ii in r.json().get('list'):
        if img_name in ii.get('path'):
            print('今天图片下载完成')
            return False
    # 'th.jpg' is the file name the offline download produces before rename.
    for i in r.json().get('list'):
        if "th.jpg" in i.get('path'):
            print('下载完成')
            return True
        else:
            # NOTE(review): this else fires on the FIRST non-th.jpg entry and
            # immediately recurses, so later entries are never inspected.
            print(i)
            print("有其他图片,继续下载")
            return is_download_success(logid, img_name)
def rechristen(logid, bdstoken, img_name):
    """Rename the freshly downloaded th.jpg in today's folder to img_name."""
    name_url = (
        "https://pan.baidu.com/api/filemanager?opera=rename&async=2&onnest=fail"
        f"&channel=chunlei&web=1&app_id=250528&bdstoken={bdstoken}&logid={logid}&clienttype=0"
    )
    # The filemanager API takes a JSON-encoded filelist string.
    folder = f"/图片/{str(beijing_now.date())}"
    payload = {
        "filelist": '[{"path":"' + folder + '/th.jpg","newname":"' + img_name + '"}]'
    }
    requests.post(name_url, payload, headers=baidu_headers)
    print("改名完成")
if __name__ == '__main__':
    # Daily pipeline: resolve today's wallpaper, then (if logged in and not
    # yet fetched) queue it for offline download and rename the saved file.
    img_url, img_name = get_bing_pic()
    if is_login():
        logid, bdstoken = get_logid_bdstoken()
        ready = is_dir_exist(logid, bdstoken) and is_file_exist(logid, img_name)
        if ready:
            download_offline(logid, bdstoken, img_url)
            if is_download_success(logid, img_name):
                rechristen(logid, bdstoken, img_name)
好巧,我也做过一个自动收集 Bing 壁纸的脚本,我是每天早上定时发送到邮箱,内容是图片链接和早上好的问候语。楼主传到百度网盘实在是牛 感谢大神分享源码 感谢分享 这个上传百度网盘可以哈 不错啊 确实不错啊,自动爬取 这个是在网站上运行的吗?
如果能爬取历史日期范围、存到本地岂不更美好 感谢大佬分享 爬虫写得好 牢饭少不了 有了源码, 学起来容易得多
页:
[1]
2