python爬虫下载的某网站156个网页小游戏源码
本帖最后由 三木猿 于 2021-6-29 17:35 编辑
有哪些游戏自己看吧↓
一波网页小游戏(摸鱼专用)
https://www.52pojie.cn/thread-1269936-1-1.html
压缩包内有广告,管理员不让发,难受,马上升级了还给我积分撤回了
下载网页小游戏素材版:
import requests
from bs4 import BeautifulSoup
def get_Url(url):
    """Return the URLs of every paginated listing page under /166/.

    Parameters:
        url: the first listing page (e.g. 'https://www.mycodes.net/166/'),
             used only to read the pager widget.

    Returns:
        list[str]: one URL per listing page; the first page has no
        numeric suffix, pages 2..N are '<base>/<n>.htm'.
    """
    page_urls = []
    content = requests.get(url).content
    soup = BeautifulSoup(content, 'lxml')
    pager = soup.find('span', attrs={'class': 'current'})
    # Pager text looks like "1/7" (current/total). BUG FIX: the original
    # called int() on the whole list returned by split('/'), which raises
    # TypeError; we want the total-page count after the slash.
    total_pages = int(pager.text.split('/')[-1])
    for i in range(total_pages):
        if i == 0:
            # First page is the bare section URL.
            page_urls.append('https://www.mycodes.net/166/')
        else:
            page_urls.append('https://www.mycodes.net/166/' + str(i + 1) + '.htm')
    return page_urls
def get_document(url):
    """Download the zip archive for every game listed on one index page.

    For each detail link on the listing page, fetch the detail page, read
    the game title (td.a0), find the first download anchor (td.b4) and
    save the zip to 'd:/SanMu/<title>.zip'.

    Parameters:
        url: one listing-page URL as produced by get_Url().
    """
    soup = BeautifulSoup(requests.get(url).content, 'lxml')
    links = soup.find_all('a', attrs={'style': 'color:#006BCD;font-size:14px;'})
    previous_href = ''
    for link in links:
        # Listing pages repeat each detail link (thumbnail + title);
        # skip consecutive duplicates. Idiom fix: use '==' instead of
        # calling __eq__ directly.
        if previous_href == str(link['href']):
            continue
        previous_href = link['href']
        detail = BeautifulSoup(requests.get(link['href']).content, 'lxml')
        title = detail.find('td', attrs={'class': 'a0'}).text
        print(title + ":")
        cells = detail.find_all('td', attrs={'class': 'b4'})
        for cell in cells:
            anchor = cell.find('a')
            if anchor is not None:
                download_url = 'https://www.mycodes.net' + anchor['href']
                response = requests.get(download_url)
                # NOTE(review): the title is used verbatim as a Windows
                # file name; characters like ':' or '/' in a title would
                # make open() fail — confirm titles are always safe.
                with open('d:/SanMu/' + title + ".zip", "wb") as fh:
                    fh.write(response.content)
                # Only the first working mirror is downloaded.
                break
if __name__ == '__main__':
    # Crawl every listing page of the section and download each game.
    for page_url in get_Url('https://www.mycodes.net/166/'):
        get_document(page_url)
分成了两个包
https://wwi.lanzoui.com/iwGxvgqiwzc
密码:d89r
https://wwi.lanzoui.com/i7WQvgqisqj
密码:dg3j
成品如上
获取在线网址版:
以下是代码
import requests
from bs4 import BeautifulSoup
def get_Url(url):
    """Return the URLs of every paginated listing page under /166/.

    Parameters:
        url: the first listing page (e.g. 'https://www.mycodes.net/166/'),
             used only to read the pager widget.

    Returns:
        list[str]: one URL per listing page; the first page has no
        numeric suffix, pages 2..N are '<base>/<n>.htm'.
    """
    page_urls = []
    content = requests.get(url).content
    soup = BeautifulSoup(content, 'lxml')
    pager = soup.find('span', attrs={'class': 'current'})
    # Pager text looks like "1/7" (current/total). BUG FIX: the original
    # called int() on the whole list returned by split('/'), which raises
    # TypeError; we want the total-page count after the slash.
    total_pages = int(pager.text.split('/')[-1])
    for i in range(total_pages):
        if i == 0:
            # First page is the bare section URL.
            page_urls.append('https://www.mycodes.net/166/')
        else:
            page_urls.append('https://www.mycodes.net/166/' + str(i + 1) + '.htm')
    return page_urls
def get_document(url):
    """Print the download link for every game listed on one index page.

    Online-URL variant: instead of downloading the archives, fetch each
    detail page, print the game title (td.a0) and then every download
    href found in td.b1 cells.

    Parameters:
        url: one listing-page URL as produced by get_Url().
    """
    soup = BeautifulSoup(requests.get(url).content, 'lxml')
    links = soup.find_all('a', attrs={'style': 'color:#006BCD;font-size:14px;'})
    previous_href = ''
    for link in links:
        # Listing pages repeat each detail link (thumbnail + title);
        # skip consecutive duplicates. Idiom fix: use '==' instead of
        # calling __eq__ directly.
        if previous_href == str(link['href']):
            continue
        previous_href = link['href']
        detail = BeautifulSoup(requests.get(link['href']).content, 'lxml')
        title = detail.find('td', attrs={'class': 'a0'}).text
        print(title + ":")
        cells = detail.find_all('td', attrs={'class': 'b1'})
        for cell in cells:
            anchor = cell.find('a')
            if anchor is not None:
                print(anchor['href'])
if __name__ == '__main__':
    # Crawl every listing page of the section and print each game's links.
    for page_url in get_Url('https://www.mycodes.net/166/'):
        get_document(page_url)
哈哈哈,有没有电影网站 有爬文库的吗 楼主,帮忙打包成exe哦,电脑没装python环境 天可怜见 发表于 2021-5-26 10:50
楼主,帮忙打包成exe哦,电脑没装python环境
exe就不会了,包链接发出来了 为啥是两个代码? 南归不NG 发表于 2021-5-26 11:32
为啥是两个代码?
一个只取网址,一个下载包 感谢分享 好高端的技能! 没一点 python技术的可以尝试下载不