hululove posted on 2020-5-25 20:45

Could someone take a look at this code and tell me where it goes wrong? The images won't save, and there is no error message.

UserAgent.py, the random User-Agent header module:
import random

user_agent = [
    "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
    "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11",
    "Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
    "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (iPod; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (iPad; U; CPU OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (Linux; U; Android 2.3.7; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
    "MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
    "Opera/9.80 (Android 2.3.4; Linux; Opera Mobi/build-1107180945; U; en-GB) Presto/2.8.149 Version/11.10",
    "Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
    "Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, like Gecko) Version/6.0.0.337 Mobile Safari/534.1+",
    "Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.0; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/233.70 Safari/534.6 TouchPad/1.0",
    "Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/20.0.019; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.18124",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Titan)",
    "UCWEB7.0.2.37/28/999",
    "NOKIA5700/ UCWEB7.0.2.37/28/999",
    "Openwave/ UCWEB7.0.2.37/28/999",
    "Mozilla/4.0 (compatible; MSIE 6.0; ) Opera/UCWEB7.0.2.37/28/999",
    # iPhone 6
    "Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25",
    # newer mobile UA
    "Mozilla/5.0 (Linux;u;Android 4.2.2;zh-cn;) AppleWebKit/534.46 (KHTML,like Gecko) Version/5.1 Mobile Safari/10600.6.3 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)"
]

# return a dict with a randomly chosen User-Agent
def get_headers():
    return {'User-Agent': random.choice(user_agent)}
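
A quick sanity check for this module (a minimal sketch; it assumes the code above is saved as UserAgent.py next to the main script, which the main script's `import UserAgent` implies):

import requests
import UserAgent

# fetch the site root once and show which User-Agent was picked
resp = requests.get('http://www.netbian.com', headers=UserAgent.get_headers())
print(resp.status_code)                    # expect 200 if the site is reachable
print(resp.request.headers['User-Agent'])  # the randomly chosen UA string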





The main script (pick any file name):
import requests
from bs4 import BeautifulSoup
import os
import time
import json
import UserAgent

index = 'http://www.netbian.com' # site root URL
interval = 0.1 # delay between image downloads
firstDir = 'D:\彼岸桌面爬虫' # base download directory
classificationDict = {} # info about the site's category sub-pages

def ui():
    print('--------------netbian-------------')
    print('全部', end=' ')
    for c in classificationDict.keys():
        print(c, end=' ')
    print()
    choice = input('请输入分类名:')
    if(choice == '全部'):
        for c in classificationDict.keys():
            select_classification(c)
    elif(choice not in classificationDict.keys()):
        print("输入错误,请重新输入!")
        print('----')
        ui()
    else:
        select_classification(choice)

def select_classification(choice):
    print('---------------------------')
    print('--------------' + choice + '-------------')
    print('---------------------------')
    secondUrl = classificationDict[choice]['url']
    secondDir = classificationDict[choice]['path']

    select = '#main > div.page > span.slh'
    pageIndex = screenPage(secondUrl, select)
    lastPagenum = int(pageIndex) # number of the last page in this category
    for i in range(0, lastPagenum):
        if i == 0:
            save_list(secondUrl)
        else:
            save_list(secondUrl + 'index_%d.htm' % (i + 1))

        print('--------------' + choice + ': ' + str(i + 1) + '-------------')


def screen(url, select):
    headers = UserAgent.get_headers() # pick a random User-Agent header
    html = requests.get(url = url, headers = headers)
    html.encoding = 'gbk' # the site is GBK-encoded
    html = html.text
    soup = BeautifulSoup(html, 'lxml')
    return soup.select(select)

def init_classification():
    url = index
    select = '#header > div.head > ul > li:nth-child(1) > div > a'
    classifications = screen(url, select)
    for c in classifications:
        href = c.get('href') # relative URL of the category page
        text = c.string # category name
        if(text == '4k壁纸'): # the 4k category requires a login, skip it
            continue
        secondDir = firstDir + '/' + text # directory for this category
        url = index + href # URL of the category sub-page
        classificationDict[text] = {
            'path': secondDir,
            'url': url
        }

def screenPage(url, select):
    html = requests.get(url = url, headers = UserAgent.get_headers())
    html.encoding = 'gbk'
    html = html.text
    soup = BeautifulSoup(html, 'lxml')
    return soup.select(select)[0].next_sibling.text # the page count sits right after the '...' span

def save_list(result):
    """ Append one list-page URL to result.json """
    with open('result.json', 'a', encoding='utf-8') as f:
        f.write(json.dumps(result, ensure_ascii=False))

def download(url):
    select = 'div#main div.list ul li a'
    links = screen(url, select)
    for link in links:
        href = link.get('href')
        if(href == 'http://pic.netbian.com/'): # filter out the ad slot
            continue

        # first hop: list page -> picture page
        if('http://' in href): # a few pictures don't provide a proper relative URL
            url = href
        else:
            url = index + href
        select = 'div#main div.endpage div.pic div.pic-down a'
        link = screen(url, select)
        if(link == []):
            print(url + ' 无此图片,爬取失败')
            continue
        href = link[0].get('href')

        # second hop: picture page -> download page
        url = index + href

        # now we have the image itself
        select = 'div#main table a img'
        link = screen(url, select)
        if(link == []):
            print(url + " 该图片需要登录才能爬取,爬取失败")
            continue
        # strip characters that are illegal in Windows file names
        name = link[0].get('alt').replace('\t', '').replace('|', '').replace(':', '').replace('\\', '').replace('/', '').replace('*', '').replace('?', '').replace('"', '').replace('<', '').replace('>', '')
        print(name) # file name of the picture being downloaded
        src = link[0].get('src')
        if(requests.get(src).status_code == 404):
            print(url + ' 该图片下载链接404,爬取失败')
            continue
        response = requests.get(src)
        path2 = r'D:\彼岸桌面爬虫' + name + '.jpg'
        with open(path2, 'wb') as pic:
            for chunk in response.iter_content(128):
                pic.write(chunk)
            time.sleep(interval)

init_classification()
ui()
with open('result.json', 'r') as f:
    line = f.read().strip()
    linestr = line.split("\"")
    while '' in linestr:
        linestr.remove('')
    for url in linestr:
        download(url)

wws741 posted on 2020-5-25 20:54

In guru-speak: too long, didn't read :lol

hululove posted on 2020-5-25 20:56

wws741 posted on 2020-5-25 20:54
In guru-speak: too long, didn't read

Sob, an ocean of sadness...

liu3632778 posted on 2020-5-25 21:18

Is there an error message? Analyzing the whole script for you is a lot of work; it would be quicker if you described the actual symptom.

hululove posted on 2020-5-25 21:23

liu3632778 posted on 2020-5-25 21:18
Is there an error message? Analyzing the whole script for you is a lot of work; it would be quicker if you described the actual symptom.

That's the thing: no error is reported, it just doesn't work...

790181742 posted on 2020-5-25 21:51

Step through it in a debugger, or sprinkle in some print statements, and you'll see where it stops.
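
For instance, a minimal way to instrument the risky part of download() (the try/except and prints here are an illustrative sketch, not part of the original script):

# wrap the final save step inside download() like this
try:
    response = requests.get(src)
    print('GET', src, '->', response.status_code, len(response.content), 'bytes')
    with open(path2, 'wb') as pic:
        pic.write(response.content)
    print('saved to', path2)
except Exception as e:
    # surfacing the exception shows exactly which step fails
    print('failed on', src, ':', repr(e))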

ciker_li posted on 2020-5-25 21:56

firstDir = 'D:\彼岸桌面爬虫' # base download directory
Try changing this line to firstDir = 'D:/bianpahong'.
That worked for me.
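
A slightly more robust variant (my own sketch, not something from the thread): build the save path from firstDir with os.path.join and create the directory up front, so a missing folder or a stray separator can't break the save silently.

import os

firstDir = 'D:/bianpahong'            # forward slashes are fine on Windows
os.makedirs(firstDir, exist_ok=True)  # open() fails if the directory doesn't exist

# later, when saving ('name' comes from the scraper):
path2 = os.path.join(firstDir, name + '.jpg')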

hululove posted on 2020-5-25 23:04

ciker_li posted on 2020-5-25 21:56
firstDir = 'D:\彼岸桌面爬虫' # base download directory
Try changing this line to firstDir = 'D:/bianpahong'.
That worked for me.

Yep, it was a path problem. Do you happen to know how to make this multithreaded?

ciker_li posted on 2020-5-26 15:22

hululove posted on 2020-5-25 23:04
Yep, it was a path problem. Do you happen to know how to make this multithreaded?

import random
import requests
from bs4 import BeautifulSoup
import os
import time
import json
import threading

user_agent = [
    "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
    "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11",
    "Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
    "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (iPod; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (iPad; U; CPU OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (Linux; U; Android 2.3.7; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
    "MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
    "Opera/9.80 (Android 2.3.4; Linux; Opera Mobi/build-1107180945; U; en-GB) Presto/2.8.149 Version/11.10",
    "Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
    "Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, like Gecko) Version/6.0.0.337 Mobile Safari/534.1+",
    "Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.0; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/233.70 Safari/534.6 TouchPad/1.0",
    "Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/20.0.019; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.18124",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Titan)",
    "UCWEB7.0.2.37/28/999",
    "NOKIA5700/ UCWEB7.0.2.37/28/999",
    "Openwave/ UCWEB7.0.2.37/28/999",
    "Mozilla/4.0 (compatible; MSIE 6.0; ) Opera/UCWEB7.0.2.37/28/999",
    # iPhone 6
    "Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25",
    # newer mobile UA
    "Mozilla/5.0 (Linux;u;Android 4.2.2;zh-cn;) AppleWebKit/534.46 (KHTML,like Gecko) Version/5.1 Mobile Safari/10600.6.3 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)"
]

# return a dict with a randomly chosen User-Agent
def get_headers():
    return {'User-Agent': random.choice(user_agent)}



index = 'http://www.netbian.com' # site root URL
interval = 0.1 # delay between image downloads
firstDir = 'e:/bian' # base download directory
classificationDict = {} # info about the site's category sub-pages

def ui():
    print('--------------netbian-------------')
    print('全部', end=' ')
    for c in classificationDict.keys():
        print(c, end=' ')
    print()
    choice = input('请输入分类名:')
    if(choice == '全部'):
        for c in classificationDict.keys():
            select_classification(c)
    elif(choice not in classificationDict.keys()):
        print("输入错误,请重新输入!")
        print('----')
        ui()
    else:
        select_classification(choice)

def select_classification(choice):
    print('---------------------------')
    print('--------------' + choice + '-------------')
    print('---------------------------')
    secondUrl = classificationDict[choice]['url']
    secondDir = classificationDict[choice]['path']

    select = '#main > div.page > span.slh'
    pageIndex = screenPage(secondUrl, select)
    lastPagenum = int(pageIndex) # number of the last page in this category
    for i in range(0, lastPagenum):
        if i == 0:
            save_list(secondUrl)
        else:
            save_list(secondUrl + 'index_%d.htm' % (i + 1))

        print('--------------' + choice + ': ' + str(i + 1) + '-------------')


def screen(url, select):
    headers = get_headers() # pick a random User-Agent header
    html = requests.get(url = url, headers = headers)
    html.encoding = 'gbk' # the site is GBK-encoded
    html = html.text
    soup = BeautifulSoup(html, 'lxml')
    return soup.select(select)

def init_classification():
    url = index
    select = '#header > div.head > ul > li:nth-of-type(1) > div > a'
    classifications = screen(url, select)
    for c in classifications:
        href = c.get('href') # relative URL of the category page
        text = c.string # category name
        if(text == '4k壁纸'): # the 4k category requires a login, skip it
            continue
        secondDir = firstDir + '/' + text # directory for this category
        url = index + href # URL of the category sub-page
        classificationDict[text] = {
            'path': secondDir,
            'url': url
        }

def screenPage(url, select):
    html = requests.get(url = url, headers = get_headers())
    html.encoding = 'gbk'
    html = html.text
    soup = BeautifulSoup(html, 'lxml')
    return soup.select(select)[0].next_sibling.text # the page count sits right after the '...' span

def save_list(result):
    """ Append one list-page URL to result.json """
    with open('result.json', 'a', encoding='utf-8') as f:
        f.write(json.dumps(result, ensure_ascii=False))

def downloadpic(link):
    href = link.get('href')
    if(href == 'http://pic.netbian.com/'): # filter out the ad slot
        return

    # first hop: list page -> picture page
    if('http://' in href): # a few pictures don't provide a proper relative URL
        url = href
    else:
        url = index + href
    select = 'div#main div.endpage div.pic div.pic-down a'
    link = screen(url, select)
    if(link == []):
        print(url + ' 无此图片,爬取失败')
        return
    href = link[0].get('href')

    # second hop: picture page -> download page
    url = index + href

    # now we have the image itself
    select = 'div#main table a img'
    link = screen(url, select)
    if(link == []):
        print(url + " 该图片需要登录才能爬取,爬取失败")
        return
    # strip characters that are illegal in Windows file names
    name = link[0].get('alt').replace('\t', '').replace('|', '').replace(':', '').replace('\\', '').replace('/', '').replace('*', '').replace('?', '').replace('"', '').replace('<', '').replace('>', '')
    print(name) # file name of the picture being downloaded
    src = link[0].get('src')
    if(requests.get(src).status_code == 404):
        print(url + ' 该图片下载链接404,爬取失败')
        return
    response = requests.get(src)
    path2 = firstDir + '/' + name + '.jpg' # build the path from firstDir instead of hardcoding it
    with open(path2, 'wb') as pic:
        for chunk in response.iter_content(128):
            pic.write(chunk)
        time.sleep(interval)


def download(url):
    select = 'div#main div.list ul li a'
    links = screen(url, select)
    threads = []
    for link in links:
        # one worker thread per picture on this list page
        t = threading.Thread(target=downloadpic, args=(link,))
        t.start()
        threads.append(t)

    for t in threads:
        t.join() # wait until every picture on the page has finished

def main():
    init_classification()
    ui()
    os.makedirs(firstDir, exist_ok=True) # make sure the download directory exists
    with open('result.json', 'r') as f:
        line = f.read().strip()
        linestr = line.split("\"")
        while '' in linestr:
            linestr.remove('')

    for url in linestr:
        download(url)


if __name__ == '__main__':
    main()


Multithreaded version.
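
One caveat about this version (my note, not from the thread): download() starts an unbounded thread per link, so a page with many pictures fires them all at once. A sketch of a bounded alternative using the standard-library concurrent.futures, keeping the same function names as above:

from concurrent.futures import ThreadPoolExecutor

def download(url):
    select = 'div#main div.list ul li a'
    links = screen(url, select)
    # at most 8 downloads run concurrently; leaving the with-block joins them,
    # and consuming the map() results re-raises any exception from a worker
    with ThreadPoolExecutor(max_workers=8) as pool:
        list(pool.map(downloadpic, links))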