Python: how to skip an empty value and move on to the next URL while scraping
# (imports shown for completeness; headers and the mkdir helper are defined elsewhere in the OP's script)
import requests
import time
from bs4 import BeautifulSoup

def manhua(html: str):
    rr = requests.get(html, headers=headers)
    rr.encoding = "utf-8"
    duo = []
    manhua = BeautifulSoup(rr.text, 'html.parser')
    dizhi = manhua.find('ol', class_="links-of-books num_div")
    for a in dizhi.find_all('li', class_='sort_div'):
        # NOTE: duo is reassigned on every pass, so only the last volume URL
        # survives -- see the follow-up question below.
        duo = ('https://www.manhuadb.com' + str(a.find('a').get('href')))
    return duo
def download(url2: str, page: int):
    r4 = requests.get(url2 + '.html', headers=headers)
    r4.encoding = "utf-8"
    dj = BeautifulSoup(r4.text, 'lxml')
    name = dj.find('h1', class_='h2 text-center mt-3 ccdiv-m').getText()
    mkdir(name)
    for x in range(1, page):
        r3 = requests.get(url2 + 'p' + str(x) + '.html', headers=headers)
        r3.encoding = "utf-8"
        dl = BeautifulSoup(r3.text, 'lxml')
        page = dl.find('li', class_="breadcrumb-item active").getText()
        reponse = dl.find('img', class_="img-fluid show-pic").get('src')
        image = requests.get(reponse, headers=headers)
        time.sleep(0.1)
        filename = '{}/{}/{}'.format('yiteng', name, name + str(x) + '.jpg')
        with open(filename, 'wb') as fpp:
            fpp.write(image.content)
        print('成功第{}图片'.format(str(x)))  # "image {x} saved"
        # NOTE: this check runs only after reponse has already been used above,
        # so it never prevents the failure described below.
        if reponse is None:
            continue
    print('全部保存成功!')  # "all saved successfully!"
#if not os.path.exists('yiteng'):
# os.makedirs('yiteng')
#with open('teng','wb') as fpp:
# fpp.write(img.content)
#------------------------------------------
def main():
    url = 'https://www.manhuadb.com/author/370'
    dict = {}
    r = requests.get(url, headers=headers)
    r.encoding = "utf-8"
    soup = BeautifulSoup(r.text, 'lxml')
    print(soup.find('li', class_="breadcrumb-item active").getText())
    soup2 = soup.find('div', class_="comic-main-section bg-white p-3")
    for i in soup2.find_all('div', class_='media comic-book-unit'):
        dict["作品名"] = i.find('h2', class_="h3 my-0").getText().replace("\n", "")
        dict["内容简介"] = i.find('div', class_="comic-story-intro text-justify mt-3").getText()
        dict["下载地址"] = "https://www.manhuadb.com" + str(i.find('a', class_="d-block").get('href'))
        #print(dict)
        down = manhua(dict["下载地址"])
        print(down)
        if down is not None:
            #mkdir(dict["作品名"])
            print(down)
            get_pic = down.split('.')
            get_pic2 = '.'.join(get_pic)  # split/join round-trip leaves down unchanged
            download(get_pic2, 225)
        else:
            continue
down = manhua(dict["下载地址"]) gives down several URLs that all need to be scraped.
When the run reaches download(get_pic2, 225), the first manhua has only 17 images. Once those are done, the lookup comes back empty and .get() fails. Is there a way to stop scraping as soon as a value is empty and jump straight to the next URL??
Please advise, everyone!!!
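One way to get exactly that behavior without exceptions: test the result of find() for None before calling .get() on it, and break out of the page loop the moment the image tag is missing. A minimal sketch of the inner loop of download() under that assumption; img_tag is a name introduced here for illustration, and url2, page, name and headers are assumed to exist as in the OP's function:

# Sketch: stop the page loop cleanly when a volume runs out of pages.
for x in range(1, page):
    r3 = requests.get(url2 + 'p' + str(x) + '.html', headers=headers)
    r3.encoding = "utf-8"
    dl = BeautifulSoup(r3.text, 'lxml')
    img_tag = dl.find('img', class_="img-fluid show-pic")  # illustrative name
    if img_tag is None:   # page is past the end of this volume: nothing to fetch
        break             # leave the loop; the caller moves on to the next URL
    image = requests.get(img_tag.get('src'), headers=headers)
    filename = '{}/{}/{}'.format('yiteng', name, name + str(x) + '.jpg')
    with open(filename, 'wb') as fpp:
        fpp.write(image.content)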
One more follow-up question:

def manhua(html: str):
    rr = requests.get(html, headers=headers)
    rr.encoding = "utf-8"
    duo = []
    manhua = BeautifulSoup(rr.text, 'html.parser')
    dizhi = manhua.find('ol', class_="links-of-books num_div")
    for a in dizhi.find_all('li', class_='sort_div'):
        duo = ('https://www.manhuadb.com' + str(a.find('a').get('href')))
    return duo
In this function, some manhua contain three volumes, so return can't hand back more than one value. The only option I see is to collect every value into the list and then return the list to main(). If I pass that to download(), will the scrape still work?
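That is the right idea: append each volume URL instead of reassigning duo, then have main() loop over the returned list. A minimal sketch along those lines, reusing the names from the thread:

def manhua(html: str):
    rr = requests.get(html, headers=headers)
    rr.encoding = "utf-8"
    duo = []
    soup = BeautifulSoup(rr.text, 'html.parser')
    dizhi = soup.find('ol', class_="links-of-books num_div")
    for a in dizhi.find_all('li', class_='sort_div'):
        # append() collects every volume instead of keeping only the last one
        duo.append('https://www.manhuadb.com' + str(a.find('a').get('href')))
    return duo  # a list with one URL per volume

# in main(), iterate over the list and download each volume in turn:
for volume_url in manhua(dict["下载地址"]):
    download(volume_url, 225)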
Is this the right way to write it?

try:
    dosomething
except:
    showerror

Just use try/except to catch the error: nothing gets raised, and execution carries on.
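A small refinement on that advice: catching specific exceptions is safer than a bare except, so genuine bugs still surface. Assuming the failure here is the AttributeError raised when find() returns None and .get() is called on it, the page loop could look like this sketch (url2, page and headers as in download()):

for x in range(1, page):
    try:
        r3 = requests.get(url2 + 'p' + str(x) + '.html', headers=headers)
        r3.encoding = "utf-8"
        dl = BeautifulSoup(r3.text, 'lxml')
        reponse = dl.find('img', class_="img-fluid show-pic").get('src')
        image = requests.get(reponse, headers=headers)
    except AttributeError:             # find() returned None: no image on this page
        continue                       # move on to the next page / next URL
    except requests.RequestException:  # network problem fetching this page
        continue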
hahawangzi posted on 2020-4-24 11:00:
    One more follow-up question:
    def manhua(html: str):
        rr = requests.get(html, headers=headers)
Once upon a time there was a mountain; on the mountain there was a temple; in the temple lived an old monk and a young monk. One day the old monk said to the young monk: "Once upon a time there was a mountain; on the mountain there was a temple; in the temple lived an old monk and a young monk. One day the old monk said to the young monk..." (and so on, round and round). Build it on that idea. Oh, what's it called again... 当归 ("angelica")?

I'll give it a try!!!

Wrapping it in try takes care of it.
pwp posted on 2020-4-24 11:21:
    Once upon a time there was a mountain; on the mountain there was a temple; in the temple lived an old monk and a young monk ...
It's called 递归 (recursion), by the way~

萌虎乖乖 posted on 2020-4-24 12:36:
    It's called 递归 (recursion), by the way~
Ha, right! I'm getting old and can't keep these straight.

for x in range(1, page):
    try:
        r3 = requests.get(url2 + 'p' + str(x) + '.html', headers=headers)
        r3.encoding = "utf-8"
        dl = BeautifulSoup(r3.text, 'lxml')
        page = dl.find('li', class_="breadcrumb-item active").getText()
        reponse = dl.find('img', class_="img-fluid show-pic").get('src')
        image = requests.get(reponse, headers=headers)
        time.sleep(0.1)
        filename = '{}/{}/{}'.format('yiteng', name, name + str(x) + '.jpg')
        with open(filename, 'wb') as fpp:
            fpp.write(image.content)
        print('成功第{}图片'.format(str(x)))
    except:
        pass  # any failure on this page is silently skipped
print('全部保存成功!')
I added this block and now it saves everything.......... not bad!
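If the goal is literally "on a failure, jump straight to the next URL", the same pattern can also sit one level up in main(), around the download() call. A sketch, assuming manhua() returns a list of volume URLs as discussed above; volume_url is a name introduced for illustration:

for volume_url in manhua(dict["下载地址"]):
    try:
        download(volume_url, 225)
    except Exception as e:
        # log what went wrong instead of failing silently, then move on
        print('跳过 {}: {}'.format(volume_url, e))  # "skipping {url}: {error}"
        continue  # proceed straight to the next volume URL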