Honor of Kings (王者荣耀) full skin downloader
import requests
from lxml import etree
import os
from time import sleep

headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36'
}

hero_list_url = 'https://pvp.qq.com/web201605/js/herolist.json'
hero_list_resp = requests.get(hero_list_url, headers=headers)
# print(hero_list_resp.json())
for h in hero_list_resp.json():
    # print(h.get('cname'))
    cname = h.get('cname')
    # print(h.get('id_name'))
    id_name = h.get('id_name')
    ename = h.get('ename')
    if not os.path.exists(cname):
        os.makedirs(cname)
    hero_info_url = f'https://pvp.qq.com/web201605/herodetail/{id_name}.shtml'
    hero_info_resp = requests.get(hero_info_url, headers=headers)
    hero_info_resp.encoding = 'gbk'
    e = etree.HTML(hero_info_resp.text)
    names = e.xpath('//ul[@class="pic-pf-list pic-pf-list3"]/@data-imgname')
    # print(names)
    # each entry looks like '<skin name>&<number>'; keep only the part before '&'
    names = [name.split('&')[0] for name in names[0].split('|')]
    for i, n in enumerate(names):
        url = f'https://game.gtimg.cn/images/yxzj/img201606/skin/hero-info/{ename}/{ename}-bigskin-{i + 1}.jpg'
        resp = requests.get(url, headers=headers)
        with open(f'{cname}/{n}.jpg', 'wb') as f:
            f.write(resp.content)
        print(f'已下载:{n}')
        sleep(1)

import os
import requests
from lxml import etree
from concurrent.futures import ThreadPoolExecutor, as_completed
from time import sleep

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36'
}

# Fetch the hero list
def get_hero_list():
    hero_list_url = 'https://pvp.qq.com/web201605/js/herolist.json'
    response = requests.get(hero_list_url, headers=headers)
    return response.json()
# Download a single skin image
def download_skin_image(ename, cname, skin_name, index):
    skin_url = f'https://game.gtimg.cn/images/yxzj/img201606/skin/hero-info/{ename}/{ename}-bigskin-{index + 1}.jpg'
    try:
        response = requests.get(skin_url, headers=headers, timeout=10)
        if response.status_code == 200:
            # Save the image
            file_path = f'{cname}/{skin_name}.jpg'
            with open(file_path, 'wb') as f:
                f.write(response.content)
            print(f'已下载:{skin_name}')
        else:
            print(f'图片下载失败: {skin_url}')
    except Exception as e:
        print(f'下载图片时发生错误:{e}')
# Get the hero's skin names
def get_skin_names(ename, id_name):
    hero_info_url = f'https://pvp.qq.com/web201605/herodetail/{id_name}.shtml'
    try:
        hero_info_resp = requests.get(hero_info_url, headers=headers, timeout=10)
        hero_info_resp.encoding = 'gbk'
        e = etree.HTML(hero_info_resp.text)
        names = e.xpath('//ul[@class="pic-pf-list pic-pf-list3"]/@data-imgname')
        # each entry looks like '<skin name>&<number>'; keep only the part before '&'
        return [name.split('&')[0] for name in names[0].split('|')]
    except Exception as e:
        print(f'获取皮肤名称时发生错误:{e}')
        return []
# Download all skin images for one hero
def download_hero_skins(hero):
    cname = hero.get('cname')
    ename = hero.get('ename')
    id_name = hero.get('id_name')
    # Create the hero's folder
    if not os.path.exists(cname):
        os.makedirs(cname)
    # Get the list of skin names
    skin_names = get_skin_names(ename, id_name)
    if not skin_names:
        return
    # Download the skin images concurrently
    with ThreadPoolExecutor(max_workers=5) as executor:
        futures = [
            executor.submit(download_skin_image, ename, cname, skin_name, i)
            for i, skin_name in enumerate(skin_names)
        ]
        for future in as_completed(futures):
            future.result()
# Main entry point
def main():
    hero_list = get_hero_list()
    # Iterate over the heroes and download their skins
    for hero in hero_list:
        download_hero_skins(hero)
        sleep(1)  # throttle requests so the server does not block us

if __name__ == "__main__":
    main()
I tweaked it a bit, give this a try: added try-except so the script no longer aborts when individual requests fail, trimmed the duplicated call logic, and the image downloads now run concurrently instead of one after another. With the concurrency set to 5 it worked fine in my tests.

import os
import requests
from lxml import etree
from concurrent.futures import ThreadPoolExecutor, as_completed
from time import sleep
from tqdm import tqdm
import logging
import argparse

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
}

def get_hero_list():
    hero_list_url = 'https://pvp.qq.com/web201605/js/herolist.json'
    try:
        response = requests.get(hero_list_url, headers=headers, timeout=10)
        response.raise_for_status()
        return response.json()
    except requests.RequestException as e:
        logger.error(f"获取英雄列表失败: {e}")
        return []
def download_skin_image(ename, cname, skin_name, index, max_retries=3):
    skin_url = f'https://game.gtimg.cn/images/yxzj/img201606/skin/hero-info/{ename}/{ename}-bigskin-{index + 1}.jpg'
    for attempt in range(max_retries):
        try:
            response = requests.get(skin_url, headers=headers, timeout=10)
            response.raise_for_status()
            file_path = os.path.join(cname, f'{skin_name}.jpg')
            with open(file_path, 'wb') as f:
                f.write(response.content)
            logger.info(f'已下载:{skin_name}')
            return True
        except requests.RequestException as e:
            logger.warning(f'下载 {skin_name} 失败 (尝试 {attempt + 1}/{max_retries}): {e}')
            sleep(1)
    logger.error(f'下载 {skin_name} 失败,已达到最大重试次数')
    return False
def get_skin_names(ename, id_name):
    hero_info_url = f'https://pvp.qq.com/web201605/herodetail/{id_name}.shtml'
    try:
        hero_info_resp = requests.get(hero_info_url, headers=headers, timeout=10)
        hero_info_resp.raise_for_status()
        hero_info_resp.encoding = 'gbk'
        e = etree.HTML(hero_info_resp.text)
        names = e.xpath('//ul[@class="pic-pf-list pic-pf-list3"]/@data-imgname')
        if names:
            # each entry looks like '<skin name>&<number>'; keep only the part before '&'
            return [name.split('&')[0] for name in names[0].split('|')]
        else:
            logger.warning(f'未找到英雄 {ename} 的皮肤信息')
            return []
    except requests.RequestException as e:
        logger.error(f'获取英雄 {ename} 皮肤信息失败: {e}')
        return []
def download_hero_skins(hero):
    cname = hero.get('cname')
    ename = str(hero.get('ename'))
    id_name = hero.get('id_name')
    if not os.path.exists(cname):
        os.makedirs(cname)
    skin_names = get_skin_names(ename, id_name)
    if not skin_names:
        return
    with ThreadPoolExecutor(max_workers=5) as executor:
        futures = [
            executor.submit(download_skin_image, ename, cname, skin_name, i)
            for i, skin_name in enumerate(skin_names)
        ]
        for future in as_completed(futures):
            future.result()
def main(hero_name=None):
    hero_list = get_hero_list()
    if not hero_list:
        logger.error("无法获取英雄列表,程序退出")
        return
    if hero_name:
        hero_list = [hero for hero in hero_list if hero.get('cname') == hero_name]
        if not hero_list:
            logger.error(f"未找到英雄: {hero_name}")
            return
    with tqdm(total=len(hero_list), desc="下载进度", unit="英雄") as pbar:
        for hero in hero_list:
            download_hero_skins(hero)
            pbar.update(1)
            sleep(1)  # throttle requests so the server does not block us

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="下载王者荣耀英雄皮肤")
    parser.add_argument("--hero", help="指定要下载的英雄名称")
    args = parser.parse_args()
    main(args.hero)
Error handling and logging:
  - Replaced print with the logging module for more detailed log output.
  - Added exception handling around the key operations, plus a retry mechanism.
User experience:
  - Added a progress bar with the tqdm library so you can see download progress at a glance.
  - Added command-line argument parsing so you can download a specific hero only.
Performance and stability:
  - Added retries in download_skin_image to improve the download success rate (a hedged alternative using a requests Session is sketched after this list).
  - Used raise_for_status() to check the HTTP response status.
Code structure and readability:
  - Refactored some functions for better readability and maintainability.
  - Used os.path.join() to build file paths for better cross-platform compatibility.
Flexibility:
  - The --hero command-line argument lets you pick which hero to download, making the script more flexible.
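The manual retry loop above works fine. As a hedged alternative (not part of the original posts), requests can also delegate retries to a Session backed by urllib3's Retry, so every call in the script picks up the same retry policy. make_session is a hypothetical helper and the parameter values are illustrative only:

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

def make_session(total_retries=3, backoff=1):
    # Build a Session whose adapter retries transient failures with
    # exponential backoff between attempts.
    session = requests.Session()
    retry = Retry(
        total=total_retries,
        backoff_factor=backoff,
        status_forcelist=[429, 500, 502, 503, 504],
    )
    adapter = HTTPAdapter(max_retries=retry)
    session.mount('https://', adapter)
    session.mount('http://', adapter)
    return session

# Usage sketch: build the session once, then call session.get(...) wherever
# the script currently calls requests.get(...).
session = make_session()
resp = session.get('https://pvp.qq.com/web201605/js/herolist.json', timeout=10)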
Usage:
  Download every hero's skins: python script.py
  Download one hero's skins: python script.py --hero 李白

How do I use this? Does it only download skins?

What language is this? What tool do you use to debug the script?
It would be great if it could grab the animated skin images too.

Can't find an etree package to install.

ifend posted on 2024-9-15 21:42:
    Can't find an etree package to install.
Just pip it and you're set (etree ships with lxml, so pip install lxml).

小冷ice posted on 2024-9-15 17:22:
    What language is this? What tool do you use to debug the script?
It's Python.

zjcnm posted on 2024-9-15 12:30:
    import os
    import requests
    from lxml import etree
Could you help me take a look at my LoL version?

import requests
import re
from time import sleep
all_hero_url = 'https://lol.qq.com/biz/hero/champion.js'
# hero_info_js_url = 'https://lol.qq.com/biz/hero/1.js'
hero_info_js_url = 'https://game.gtimg.cn/images/lol/act/img/js/hero/1.js'
headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36'
}
hero_info_js_resp = requests.get(hero_info_js_url, headers=headers)
hero_info_js = hero_info_js_resp.json()
print(hero_info_js)
# hero_ids = re.findall(r'"id":"\d+?"', hero_info_js)
# hero_name = re.findall(r'"name":"(.+?)","chromas"', hero_info_js)
#
# print(hero_ids)
# print(hero_name)
# for id,name in zip(hero_ids,hero_name):
# # url = 'https://game.gtimg.cn/images/lol/act/img/skin/small_46358cd4-3f36-4987-9db8-aab046adf43f.jpg'
# url = f'https://game.gtimg.cn/images/lol/act/img/skin/big{id}.jpg'
# response = requests.get(url=url, headers=headers)
# name=name.encode().decode('unicode_escape')
# with open(f'./img/{name}.jpg', 'wb') as f:
# f.write(response.content)
# sleep(2)
# all_hero_js_resp = requests.get(all_hero_url, headers=headers)
# # print(all_hero_js_resp.text)
# # all_hero_name=re.findall(r'"\d+?":"(\w+?)"',all_hero_js_resp.text)
# all_hero_name=re.findall(r'"\d+?":',all_hero_js_resp.text)
# print(all_hero_name)
# # for i in all_hero_name:
# # print(i,i)
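A reply-style sketch of one way to finish this: the gtimg hero/1.js endpoint above appears to return JSON, so .json() gives you a dict and the commented-out regexes (which were written against the raw response text) no longer apply. The field names below ('skins', 'name', 'mainImg') are assumptions about that JSON, as is the download_lol_skins helper name; print the parsed keys first to confirm before relying on them.

import os
import requests

headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36'
}

def download_lol_skins(hero_id):
    # Per-hero endpoint taken from the post above; the response schema is an
    # assumption -- inspect data.keys() / data['skins'][0] first to confirm.
    url = f'https://game.gtimg.cn/images/lol/act/img/js/hero/{hero_id}.js'
    data = requests.get(url, headers=headers, timeout=10).json()
    os.makedirs('img', exist_ok=True)
    for skin in data.get('skins', []):
        name = skin.get('name', '').replace('/', '-')  # sanitize for filenames
        img_url = skin.get('mainImg')                  # assumed key for the big splash image
        if not img_url:                                # chroma entries may lack an image
            continue
        resp = requests.get(img_url, headers=headers, timeout=10)
        with open(os.path.join('img', f'{name}.jpg'), 'wb') as f:
            f.write(resp.content)
        print(f'saved {name}')

download_lol_skins(1)  # same hero id as in the URL above

If the schema turns out to be different, fall back to hero_info_js_resp.text and the regex approach that is commented out above.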