我不知道如何把两个分开程序组合到一块 菜鸟求助
本帖最后由 wushigudan 于 2020-12-13 19:09 编辑各位大佬 求赐教因为不懂自定义函数的调用 我知道如何爬取目录链接跟 单个目录的图片地址但是不知道如何放到一块使用 我先把代码贴出来
#获取目录
import requests
import time
import sys
from bs4 import BeautifulSoup

# Crawl index pages 1-9 of the "chengshu" gallery listing and print the
# absolute URL of every gallery found on them.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36"
}
for p in range(1, 10):
    # One consolidated format string instead of "%s" % p + ".html".
    url = "https://www.nvshens.org/gallery/chengshu/%d.html" % p
    # timeout so a stalled connection cannot hang the script forever
    web_data = requests.get(url=url, headers=headers, timeout=10)
    soup = BeautifulSoup(web_data.text, 'lxml')
    # <a> tags wrapping each gallery thumbnail on the listing page
    imgs = soup.select('div.post_entry >div.listdiv > ul >li.galleryli >div.galleryli_div >a')
    for i in imgs:
        link = i.get('href')
        # hrefs are site-relative; prefix the host to get an absolute URL
        print('https://www.nvshens.org' + link)
这个为获取目录代码
输出结果如下
接下来是单个目录的图片前10页的图片地址获取
# 获取前十页的图片地址
import requests
from bs4 import BeautifulSoup

# Print every image URL from the first TEN pages of gallery 34233.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36"
}
# BUG FIX: range(1, 10) only covered pages 1-9, but the stated intent
# (and the comment above this snippet) is the first ten pages.
for ii in range(1, 11):
    url = "https://www.nvshens.org/g/34233/%d.html" % ii
    # timeout so a stalled connection cannot hang the script forever
    web_data = requests.get(url=url, headers=headers, timeout=10)
    soup = BeautifulSoup(web_data.text, 'lxml')
    # <img> tags inside the photo gallery wrapper
    imgs = soup.select('div.photos>div.gallery_wrapper > ul >img')
    for i in imgs:
        img = i.get('src')
        print(img)
目录下的单个地址获取的图片链接
或许我做错了爬取方式请大佬们帮忙看看
最后下载还没写 应该不难,新手诚心求教 麻烦各位大佬了
#获取目录
import os
import re
import sys
import time

import requests
from bs4 import BeautifulSoup
def get_pages():
    """Collect the absolute URL of every gallery on index pages 1-9.

    Returns:
        list[str]: absolute gallery URLs such as
        "https://www.nvshens.org/g/34233/".
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36"
    }
    # BUG FIX: the accumulator must be created ONCE, before the page loop,
    # and returned AFTER it — otherwise earlier pages' links are lost.
    temp = []
    for p in range(1, 10):
        url = "https://www.nvshens.org/gallery/chengshu/%d.html" % p
        # timeout so a stalled connection cannot hang the crawl
        web_data = requests.get(url=url, headers=headers, timeout=10)
        soup = BeautifulSoup(web_data.text, 'lxml')
        # <a> tags wrapping each gallery thumbnail on the listing page
        imgs = soup.select('div.post_entry >div.listdiv > ul >li.galleryli >div.galleryli_div >a')
        for i in imgs:
            link = i.get('href')
            # hrefs are site-relative; prefix the host to get an absolute URL
            temp.append('https://www.nvshens.org' + link)
    return temp
def feilNameValid(name=None):
    """Sanitise *name* so it is a legal Windows file/directory name.

    Every run of characters that are forbidden in Windows file names
    (backslash, slash, colon, star, question mark, quote, angle brackets,
    pipe, CR/LF) is collapsed into a single underscore.

    Args:
        name: the candidate file name; ``None`` is tolerated.

    Returns:
        The sanitised name, or ``None`` when *name* is ``None``
        (a message is printed in that case, matching the original contract).
    """
    if name is None:
        print("name is None!")
        return None
    # re.sub replaces each illegal run in one pass — the idiomatic form of
    # the previous findall + replace loop.
    return re.sub(r'[\\/:*?"<>|\r\n]+', "_", name)
def get_page(page_url_head):
    """Download the images of the first nine pages of one gallery.

    Args:
        page_url_head: gallery base URL ending with "/", e.g.
            "https://www.nvshens.org/g/34233/". Page numbers and ".html"
            are appended to it.

    Side effects:
        Creates a directory named after the sanitised gallery URL and
        writes one file per downloaded image into it.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36"
    }
    # BUG FIX: the target directory was never created, so open() raised
    # FileNotFoundError; create it once per gallery, up front.
    folder = feilNameValid(name=page_url_head)
    os.makedirs(folder, exist_ok=True)
    for ii in range(1, 10):
        url = page_url_head + "%d.html" % ii
        # timeout so a stalled connection cannot hang the crawl
        web_data = requests.get(url=url, headers=headers, timeout=10)
        soup = BeautifulSoup(web_data.text, 'lxml')
        imgs = soup.select('div.photos>div.gallery_wrapper > ul >img')
        for index, i in enumerate(imgs):
            img = i.get('src')
            # BUG FIX: every image of a page used to be written to the same
            # file (named after the page number alone), so only the last
            # one survived; include the image index to keep names unique.
            target = os.path.join(folder, "%d_%d.jpg" % (ii, index))
            with open(target, "wb") as f:
                # reuse the module headers instead of a duplicated inline dict
                f.write(requests.get(img, headers=headers, timeout=10).content)
# Script entry point: collect every gallery URL from the index pages,
# then download each gallery in turn.
if __name__ == '__main__':
    gallery_urls = get_pages()
    for gallery_url in gallery_urls:
        get_page(gallery_url)
能正常拿到数据这个也就对了。 看看是不是你要的结果? 上班中..不方便打开链接。
#获取目录
import requests ,time
from bs4 import BeautifulSoup
import sys
def TestHref():
    """Return the absolute URL of every gallery on index pages 1-9.

    Returns:
        list[str]: absolute gallery URLs.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36"}
    # renamed accumulator: it previously shadowed the built-in `list`
    links = []
    for p in range(1, 10):
        url = "https://www.nvshens.org/gallery/chengshu/%d.html" % p
        # timeout so a stalled connection cannot hang the crawl
        web_data = requests.get(url=url, headers=headers, timeout=10)
        soup = BeautifulSoup(web_data.text, 'lxml')
        # <a> tags wrapping each gallery thumbnail on the listing page
        imgs = soup.select('div.post_entry >div.listdiv > ul >li.galleryli >div.galleryli_div >a')
        for i in imgs:
            link = i.get('href')
            # hrefs are site-relative; prefix the host to get an absolute URL
            links.append('https://www.nvshens.org' + link)
    return links
def TestJpg(lists):
    """Print every image URL from the first nine pages of each gallery.

    Args:
        lists: iterable of gallery base URLs ending with "/" (as returned
            by :func:`TestHref`).
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36"
    }
    # renamed loop variable: it previously shadowed the built-in `list`
    for gallery_url in lists:
        for ii in range(1, 10):
            url = gallery_url + "%s.html" % ii
            print("=========================", url)
            # timeout so a stalled connection cannot hang the crawl
            web_data = requests.get(url=url, headers=headers, timeout=10)
            soup = BeautifulSoup(web_data.text, 'lxml')
            imgs = soup.select('div.photos>div.gallery_wrapper > ul >img')
            for i in imgs:
                img = i.get('src')
                print(img)
# Guard the module-level call so importing this file does not start the
# crawl as a side effect.
if __name__ == '__main__':
    TestJpg(TestHref())
是不是可以把目录链接用一个列表存起来里,然后遍历列表发送请求获取图片链接{:1_886:} 你这代码,拿不到数据啊 忧伤_ 发表于 2020-12-13 19:20
是不是可以把目录链接用一个列表存起来里,然后遍历列表发送请求获取图片链接
差不多意思
SailZhao520 发表于 2020-12-13 19:32
#获取目录
import re
感谢 不过确定可以吗 我刚试了下好像不行呀
SailZhao520 发表于 2020-12-13 19:31
你这代码,拿不到数据啊
求大佬帮忙
谢谢分享,先收藏了。 n3iuarem3t 发表于 2020-12-13 20:20
谢谢分享,先收藏了。
大哥你这收藏干嘛啊
我都没写好
wushigudan 发表于 2020-12-13 20:31
大哥你这收藏干嘛啊
我都没写好
我刚才收藏的不是这个内容啊。。。你这贴我也没看过。
页:
[1]
2