A Python crawler for nvshens.com ("宅男女神") images
```python
#!/usr/bin/env python
# encoding: utf-8
"""
@version: 1.0
@author: CJ
@software: PyCharm
@file: onvshen.py
@time: 2017/7/13 23:01
"""
import urllib2
import lxml.html
import time
import os
import re
def serchIndex(name):
    # Search for a model by name and return the result page HTML
    url = 'https://www.nvshens.com/girl/search.aspx?name=' + name
    print url
    html = urllib2.urlopen(url).read().decode('UTF-8')
    return html

def selectOne(html):
    # Open the album list of the first search result
    tree = lxml.html.fromstring(html)
    # cssselect() returns a list, so take the first match
    one = tree.cssselect('#DataList1 > tr > td:nth-child(1) > li > div > a')[0]
    href = one.get('href')
    url = 'https://www.nvshens.com' + href + 'album/'
    print url
    html = urllib2.urlopen(url).read().decode('UTF-8')
    # print html  # debug output
    return html

def findPageTotal(html):
    # Collect the gallery links listed on the album page
    tree = lxml.html.fromstring(html)
    lis = tree.cssselect('#photo_list > ul > li')
    hrefs = []
    for li in lis:
        a = li.cssselect('div.igalleryli_div > a')[0]
        hrefs.append(a.get('href'))
    findimage_urls = set(hrefs)
    print findimage_urls
    return findimage_urls
def downloadImage(image_url, filename):
    # Retry the download, rotating the User-Agent string on each attempt
    # (the original retries len(image_url) times; that quirk is kept)
    for i in range(len(image_url)):
        try:
            req = urllib2.Request(image_url)
            req.add_header('User-Agent', 'chrome 4{}'.format(i))
            image_data = urllib2.urlopen(req).read()
        except (urllib2.HTTPError, urllib2.URLError):
            time.sleep(0.1)
            continue
        with open(filename, 'wb') as f:
            f.write(image_data)
        break

def mkdirByGallery(path):
    # Strip leading and trailing whitespace
    path = path.strip()
    path = 'E:\\py\\photo\\' + path
    # os.mkdir(path) fails when the parent directory does not exist;
    # os.makedirs(path) creates any missing parent directories as well.
    isExists = os.path.exists(path)
    if not isExists:
        os.makedirs(path)
    return path
# This block is deliberately disabled (__name__ != '__main__'):
# it searches a model by name and downloads every gallery found.
if __name__ != '__main__':
    name = str(raw_input("name:"))
    html = serchIndex(name)
    html = selectOne(html)
    pages = findPageTotal(html)
    img_id = 1
    for page in pages:
        # Extract the numeric gallery id from the href, e.g. '/g/12345/'
        # (the original pattern r'+' is invalid; r'\d+' is the likely intent)
        path = re.search(r'\d+', page).group()
        path = mkdirByGallery(path)
        for i in range(1, 31):
            url = 'https://www.nvshens.com' + page + str(i) + '.html'
            html = urllib2.urlopen(url).read().decode('UTF-8')
            tree = lxml.html.fromstring(html)
            title = tree.cssselect('head > title')[0].text
            if title.find(u"该页面未找到") != -1:  # stop at the "page not found" title
                break
            imgs = tree.cssselect('#hgallery > img')
            image_urls = set(img.get('src') for img in imgs)
            image_id = 0
            for image_url in image_urls:
                downloadImage(image_url, path + '\\' + '2017-{}-{}-{}.jpg'.format(img_id, i, image_id))
                image_id += 1
        img_id += 1
if __name__ == '__main__':
    # Download a single gallery by its numeric id
    page = str(raw_input("pageid:"))
    path = mkdirByGallery(page)
    for i in range(1, 31):
        url = 'https://www.nvshens.com/g/' + page + '/' + str(i) + '.html'
        print url
        html = urllib2.urlopen(url).read().decode('UTF-8')
        tree = lxml.html.fromstring(html)
        title = tree.cssselect('head > title')[0].text
        if title.find(u"该页面未找到") != -1:  # stop at the "page not found" title
            break
        imgs = tree.cssselect('#hgallery > img')
        image_urls = set(img.get('src') for img in imgs)
        image_id = 0
        for image_url in image_urls:
            downloadImage(image_url, path + '\\' + '2017-{}-{}.jpg'.format(i, image_id))
            image_id += 1
# Also disabled: list all gallery links on the "meitui" category page.
if __name__ != '__main__':
    url = 'https://www.nvshens.com/gallery/meitui/'
    print url
    html = urllib2.urlopen(url).read().decode('UTF-8')
    tree = lxml.html.fromstring(html)
    lis = tree.cssselect('#listdiv > ul > li')
    hrefs = []
    for li in lis:
        a = li.cssselect('div.galleryli_div > a')[0]
        hrefs.append(a.get('href'))
    findimage_urls = set(hrefs)
    print findimage_urls
    print len(findimage_urls)
```
######################################################
For study and reference only (Python 2).
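Since the script above is Python 2 only (urllib2, print statements, raw_input), here is a minimal sketch of the same download helper under Python 3. It assumes the original's retry-and-rotate-User-Agent behaviour; the explicit `retries` parameter is an assumption replacing the original's `len(image_url)` retry count:

```python
# Python 3 sketch of the downloadImage helper above.
# Assumption: same behaviour as the Python 2 original, except the
# retry count is an explicit parameter instead of len(image_url).
import time
import urllib.request
import urllib.error

def download_image(image_url, filename, retries=3):
    for i in range(retries):
        try:
            req = urllib.request.Request(
                image_url,
                headers={'User-Agent': 'chrome 4{}'.format(i)},
            )
            image_data = urllib.request.urlopen(req).read()
        except (urllib.error.HTTPError, urllib.error.URLError):
            time.sleep(0.1)  # short pause before the next attempt
            continue
        with open(filename, 'wb') as f:
            f.write(image_data)
        break
```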
Posting as a newbie is not easy; please rate the thread.

Reply: I wrote one too. The image addresses follow a regular pattern, so there is no need to crawl page by page.
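As a quick illustration of the pattern that reply describes (inferred from the script below, which strips '001.jpg' off the first image URL to get a common prefix and then regenerates zero-padded names; the helper name and example URL are hypothetical):

```python
# Sketch: rebuild all image URLs of a gallery from its first image URL.
# Inferred naming scheme: 0.jpg, 001.jpg, 002.jpg, ... under one prefix.
def gallery_urls(first_image_url, count):
    prefix = first_image_url.replace('001.jpg', '')
    names = ['0.jpg'] + ['{:03d}.jpg'.format(i) for i in range(1, count)]
    return [prefix + n for n in names]

# e.g. gallery_urls('https://img.example.com/g/12345/001.jpg', 3)
#      -> [.../0.jpg, .../001.jpg, .../002.jpg]
```

The full script follows.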
```python
# Crawl the latest gallery image links from zngirls (nvshens.com)
import os        # file-system helpers
import requests  # HTTP library
from bs4 import BeautifulSoup
import random
import time
import re

class BeautifulPicture():
    def __init__(self):
        # Request header that mimics a Chrome browser
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1'}
        self.web_url = 'https://www.nvshens.com/gallery/'  # page to crawl
        self.folder_path = 'G:\\BP'  # where the images are saved
        self.pn = ''

    def get_pic(self):
        print('Requesting the gallery index page')
        r = self.request(self.web_url)
        r.encoding = 'utf-8'
        soup = BeautifulSoup(r.text, 'lxml')
        self.mkdir(self.folder_path)  # create the target folder
        os.chdir(self.folder_path)    # and switch into it
        for img in soup.find_all("li", {"class": "galleryli"}):
            # print(img.div.a.img['alt'])             # gallery title
            # print(img.div.a.img['data-original'])   # cover image URL
            pn = img.div.a['href']  # gallery id, e.g. '/g/12345/'
            self.pn = pn.replace('/', '')
            rliurl = 'https://www.nvshens.com' + img.div.a['href']
            # Alternative per-gallery folders, left disabled:
            # self.mkdir(self.folder_path + '\\' + pn.replace('/', ''))
            # os.chdir(self.folder_path + '\\' + pn.replace('/', ''))
            # Random 1-3 s pause between galleries to go easy on the server
            sleep_time = random.randint(1, 3)
            time.sleep(sleep_time)
            rli = requests.get(rliurl)  # GET the gallery page
            rli.encoding = 'utf-8'
            lisoup = BeautifulSoup(rli.text, 'lxml')
            # First matching div holds the gallery images
            lidiv = lisoup.find('div', {"class": "gallery_wrapper"})
            # Strip '001.jpg' from the first image URL to get the common prefix
            lipum = lidiv.ul.img.next_sibling.get('src').replace('001.jpg', '')
            lin = lisoup.find('div', {"id": "dinfo"})
            pn = int(re.sub(r"\D", "", lin.span.text))  # number of images
            lipn = ''
            for i in range(pn):
                # Rebuild the zero-padded file names: 0.jpg, 001.jpg, 002.jpg, ...
                if i == 0:
                    lipn = '0.jpg'
                elif i < 10:
                    lipn = '00' + str(i) + '.jpg'
                elif i < 100:
                    lipn = '0' + str(i) + '.jpg'
                else:
                    lipn = str(i) + '.jpg'
                lipu = lipum + lipn
                self.save_img(lipu, lipn)  # download and save the image

    def save_img(self, url, name):
        # Download one image and write it to disk
        img = self.request(url)
        file_name = self.pn + '_' + name
        f = open(file_name, 'ab')
        f.write(img.content)
        print(url, 'image saved')
        f.close()

    def request(self, url):
        # Return the response of a GET request
        # (works with or without the headers defined in __init__)
        r = requests.get(url)
        return r

    def mkdir(self, path):
        # Create the folder if it does not exist yet
        path = path.strip()
        isExists = os.path.exists(path)
        if not isExists:
            print('Creating folder', path)
            os.makedirs(path)
        else:
            print(path, 'already exists, skipping')

beauty = BeautifulPicture()  # create an instance
beauty.get_pic()             # run the crawler
```
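One note on the design: `self.headers` is built in `__init__`, but `request()` never sends it, and the comment says it works either way. If the site ever starts rejecting the default python-requests User-Agent, `requests.get` accepts a `headers` keyword; a standalone sketch of that fix:

```python
import requests

# Browser-like header copied from the script above.
HEADERS = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 '
                         '(KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1'}

def request_with_headers(url):
    # Send the browser-like User-Agent instead of the default python-requests one.
    return requests.get(url, headers=HEADERS)
```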
Reply: I don't really understand it, but it looks impressive.
Reply: Supporting the OP.
Reply: Where is this meant to be used?
Reply: No idea what it does, but it seems awesome!
Reply: Python really is powerful; it lives up to its reputation.
Reply: Nice, bookmarked. Thanks for sharing!