好友
阅读权限10
听众
最后登录1970-1-1
|
奈陌
发表于 2023-8-17 08:42
这个项目起源于我每天一个一个网站签到,后面突发奇想,直接抓包,将签到所需要的东西参数化,通过程序定期执行
[Python] 纯文本查看 复制代码 import json
import logging
import os
import platform
import re
import time
import requests
import urllib3
from bs4 import BeautifulSoup
# Suppress urllib3's InsecureRequestWarning (all requests below use verify=False).
urllib3.disable_warnings()
# Cap request time so an unresponsive site cannot hang the whole run.
timeout = 15
system = platform.system()
# Log format: timestamp - file[line] - level: message
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
# Path to the sign-in configuration file.
config_path = './config.json'
# Dummy calls: some code runs inside exec()/eval() from the config, so these
# imports would otherwise look unused to linters/IDEs.
BeautifulSoup('', 'html.parser')
time.sleep(0)
re.search('', '')
# End of placeholders.
def send(content, url=None):
    """Notification stub: intended to push *content* to a message channel.

    Currently a no-op placeholder; always returns None.
    """
    return None
class web:
count = 0
def __init__(self, name, url, cookie, headers, method, data, params, cmd, extra):
self.name = name
self.url = url
self.cookie = json.loads(cookie.replace("'", '"')) if len(cookie) != 0 else ''
self.headers = json.loads(headers.replace("'", '"')) if headers is not None else None
self.method = method
self.data = json.loads(data.replace("'", '"')) if data is not None else None
self.params = json.loads(params.replace("'", '"')) if params is not None else None
self.extra = extra if extra is not None else None
self.cmd = cmd
self.info = ''
web.count += 1
def __str__(self):
# logging.debug(self.name)
logging.debug(self.data)
logging.debug(self.params)
logging.debug(self.headers)
# logging.debug(self.extra)
def my_requests(self):
if self.method == 'get':
response = requests.get(self.url, headers=self.headers, cookies=self.cookie, params=self.params,
timeout=timeout, verify=False)
elif self.method == 'post':
response = requests.post(self.url, headers=self.headers, cookies=self.cookie, params=self.params,
data=self.data, timeout=timeout, verify=False)
else:
logging.warning('requests方法错误' + self.method)
response = None
return response
def run(self):
try:
response = self.my_requests()
if system == 'Windows':
logging.debug(response.content.decode('gbk'))
else:
logging.debug(response.text)
if self.extra is not None:
time.sleep(1)
exec(self.extra)
response = eval('rep')
except Exception as e:
logging.error('错误类型是', e.__class__.__name__)
logging.error('错误明细是', e)
self.info = self.name + ':签到异常'
logging.error(self.info)
return 0
try:
self.info = eval(self.cmd)
except Exception as e:
logging.error('错误类型是', e.__class__.__name__)
logging.error('错误明细是', e)
logging.error('错误代码是', self.cmd)
self.info = self.name + ':代码异常'
logging.error(response.text)
logging.info(self.info)
def main_handler(event, context):
    """Serverless entry point: load config.json, sign in to every site, send a report.

    event/context follow the cloud-function calling convention and are unused.
    Successful results and '异常' (error) results are sent as separate messages.
    """
    def optional(entry, key):
        # Return entry[key], or None when the key is missing or falsy
        # (same semantics as the old setdefault(False) + is_default pair).
        return entry.get(key) or None

    desc = ''
    error_msg = ''
    with open(config_path, encoding='utf-8') as f:
        data = json.load(f)
    os.environ['uid'] = data['uid']
    # BUG FIX: the original stuffed instances into locals()['s1'], locals()['s2'], ...
    # Writing to locals() inside a function is undefined behavior per the Python
    # docs and only worked by CPython accident; a plain list is correct and clear.
    sites = []
    for entry in data['web']:
        sites.append(web(entry['name'], entry['url'], entry['cookie'],
                         optional(entry, 'headers'), entry['method'],
                         optional(entry, 'data'),
                         optional(entry, 'parmas'),  # NOTE: config key is spelled 'parmas'
                         entry['exec'], optional(entry, 'extra')))
    logging.info('共载入%d个网站' % web.count)
    for site in sites:
        site.run()
    logging.info('签到完成')
    for site in sites:
        if '异常' in site.info:
            error_msg = error_msg + site.info + '\n'
        else:
            desc = desc + site.info + '\n'
    if len(desc) != 0:
        send(desc)
    if len(error_msg) != 0:
        send(error_msg)
配置文件
[] 纯文本查看 复制代码 {
"web": [
{
"name": "***",
"url": "http://******/zb_users/plugin/mochu_us/cmd.php",
"cookie": "{'username_***':'****','token_***':'****'}",
"headers": "{'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.5005.63 Safari/537.36'}",
"method": "get",
"parmas": "{'act':'qiandao'}",
"exec": "'%s:%s' % (self.name, re.sub('<[\\s\\S]*?>','',response.json()['msg'])+'余额:'+response.json()['giod'])"
},
{
"name": "***",
"url": "https://*******/plugin.php",
"cookie": "{'****':'******'}",
"headers": "{'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.5005.63 Safari/537.36'}",
"method": "get",
"parmas": "{'H_name':'tasks','action':'ajax','actions':'job','cid':'14'}",
"exec": "'%s:%s' % (self.name, BeautifulSoup(rep.text, 'html.parser').find('ajax').text.strip())",
"extra": "if 'success' or '未完成' in response.text:\n\ttime.sleep(1)\n\tself.params['actions']='job2'\n\trep=self.my_requests()\nelse:\n\trep=response"
}
]
}
name用于日志打印时输出网站名称,url是网站签到地址,cookie不用多解释,headers是因为有的网站的cookies是和ua绑定的,method可以选择get还是post方法,parmas是签到网址所需要的参数,exec用于解析访问签到网址的返回值,有的网站是先领取任务再完成任务的签到模式,所以这类网站的exec就用于领取任务,extra用于提交任务。
功能上虽然能稳定实现我的需求了,但感觉代码很不优雅,例如用了eval函数执行代码、cookies过期要手动重新抓包、配置文件的配置是以文本形式存储参数。所以抛砖引玉,希望大家能给出一些优化建议。 |
免费评分
-
查看全部评分
|