douabi posted on 2021-3-2 23:44

Python scraper for a job-listing site runs successfully but returns no data

The script runs without errors, but it doesn't scrape anything. Has the site upgraded its anti-scraping measures, or is the code itself broken?
Could any experts help take a look?

import os
import random
import string
import urllib.request
from urllib.parse import quote
from bs4 import BeautifulSoup
# Pool of User-Agent strings; one is picked at random for each request
headers = [
    "Mozilla/5.0 (Windows NT 6.1; Win64; rv:27.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:27.0) Gecko/20100101 Firefox/27.0",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:10.0) Gecko/20100101 Firefox/10.0",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/21.0.1180.110 Safari/537.36",
    "Mozilla/5.0 (X11; Ubuntu; Linux i686 rv:10.0) Gecko/20100101 Firefox/27.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/34.0.1838.2 Safari/537.36",
    "Mozilla/5.0 (X11; Ubuntu; Linux i686 rv:27.0) Gecko/20100101 Firefox/27.0",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36",
]
def get_content(url, headers, sub):
    '''
    @url: the page to fetch
    @headers: pool of User-Agent strings used to impersonate a browser
    @sub: subdomain of zhaopin.com, used for the Host/Referer headers
    ********************* fetch a page while mimicking a browser ********************
    Returns the page decoded as a UTF-8 string, or None on failure.
    '''
    random_header = random.choice(headers)
    req = urllib.request.Request(url)
    req.add_header("User-Agent", random_header)
    req.add_header("Host", "{0}.zhaopin.com".format(sub))
    req.add_header("Referer", "http://{0}.zhaopin.com/".format(sub))
    try:
        html = urllib.request.urlopen(req)
        contents = html.read()
        # urlopen() returns bytes; decode to a str before returning
        if isinstance(contents, bytes):
            contents = contents.decode('utf-8')
        return contents
    except Exception as e:
        print(e)
def get_links_from(job, city, page):
    '''
    @job: job keyword to search for
    @city: city name as it appears in the URL
    @page: number of result pages to walk through
    @urls: hyperlinks of all result entries, i.e. the job-detail pages
    **************** this site requires a simulated login **********************
    Returns the URLs of all job-detail pages found in the result lists.
    '''
    urls = []
    for i in range(1, page + 1):  # result pages are numbered from 1
        url = 'http://sou.zhaopin.com/jobs/searchresult.ashx?jl={0}&kw={1}&p={2}&isadv=0'.format(city, job, i)
        url = quote(url, safe=string.printable)  # percent-encode the Chinese keywords
        info = get_content(url, headers, 'sou')
        if info is None:  # request failed; skip this page
            continue
        soup = BeautifulSoup(info, 'lxml')  # use the lxml parser
        for link in soup.select('td.zwmc a'):  # job-title cells hold the links
            urls.append(link.get('href'))
    return urls
def get_recuite_info(job, city, page):
    '''
    Fetch every job-detail page and save it to disk.
    '''
    urls = get_links_from(job, city, page)
    path = '/data/zhilian/'
    if not os.path.exists(path):
        os.makedirs(path)
    for url in urls:
        print(url)
        file = url.split('/')[-1]  # use the last URL segment as the file name
        print(file)
        # subdomain, e.g. 'jobs' out of 'http://jobs.zhaopin.com/...'
        sub = url.split('/')[2].split('.')[0]
        html = get_content(url, headers, sub)
        if html is not None and file != '':
            with open(path + file, 'w', encoding='utf-8') as f:
                f.write(html)
'''
********************* fetch the job listings ***************************
'''
if __name__ == '__main__':
    city = '北京'
    # city = '北京%2b上海%2b广州%2b深圳'  # several cities joined with %2b (an encoded '+')
    get_recuite_info('大数据', city, 100)

qianshang666 posted on 2021-3-2 23:56

Setting everything else aside: this site doesn't show any data unless you are logged in, and you aren't even sending a cookie, so how would you get anything back?
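
To make that concrete in the original urllib style, here is a minimal sketch; the COOKIE string is a placeholder, and you would copy the real 'cookie' request header from your own logged-in browser session (DevTools, Network tab):

import urllib.request

# Placeholder: paste the Cookie header from a logged-in browser session here
COOKIE = 'acw_tc=...; at=...; rt=...'

req = urllib.request.Request('https://sou.zhaopin.com/')
req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)')
req.add_header('Cookie', COOKIE)

with urllib.request.urlopen(req) as resp:
    html = resp.read().decode('utf-8')
print(html[:500])  # peek at the start of the page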

qianshang666 posted on 2021-3-3 00:00

import requests
from bs4 import BeautifulSoup
from urllib.parse import quote


url = 'https://sou.zhaopin.com/?jl=763&kw=%E5%A4%A7%E6%95%B0%E6%8D%AE'
headers = {
    'cookie': 'acw_tc=2760827d16147003619821719e091b87bfaa99934c2e57611a6028fd77f632; x-zp-client-id=9e638b9b-74e3-41fb-b053-aad08a3b9cdd; FSSBBIl1UgzbN7N443S=WOQCUCqXgN2tt3qOBsWRzi3xffpk8E3.ScNuGUKIlgJH9sO.jpOnw1UaEGtk0Zgv; sajssdk_2015_cross_new_user=1; _uab_collina=161470036303486780634239; locationInfo_search={%22code%22:%22870%22%2C%22name%22:%22%E5%BC%A0%E6%8E%96%22%2C%22message%22:%22%E5%8C%B9%E9%85%8D%E5%88%B0%E5%B8%82%E7%BA%A7%E7%BC%96%E7%A0%81%22}; Hm_lvt_38ba284938d5eddca645bb5e02a02006=1614700363; 1420ba6bb40c9512e9642a1f8c243891=c3c1bee3-8727-4001-870d-4d1a45ef8dc1; zp_passport_deepknow_sessionId=32655f67s08e964d26af689043d632cff551; at=3cf58c8cc64645fe8ad173529779ba6e; rt=6802ce7457c24de28fedb25a052ad536; sts_deviceid=177f3a531bda72-08dccdb029b017-53e356a-921600-177f3a531be8b8; sts_evtseq=1; sts_sid=177f3a531c0533-043f26858fe3a4-53e356a-921600-177f3a531c16c4; sts_sg=1; sts_chnlsid=Unknown; zp_src_url=https%3A%2F%2Fpassport.zhaopin.com%2F; ZP_OLD_FLAG=false; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%221022970012%22%2C%22first_id%22%3A%22177f3a43bccb01-022ff395fa962c-53e356a-921600-177f3a43bcd19a%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%2C%22%24latest_referrer%22%3A%22%22%7D%2C%22%24device_id%22%3A%22177f3a43bccb01-022ff395fa962c-53e356a-921600-177f3a43bcd19a%22%7D; LastCity=%E5%B9%BF%E5%B7%9E; LastCity%5Fid=763; Hm_lpvt_38ba284938d5eddca645bb5e02a02006=1614700444; FSSBBIl1UgzbN7N443T=5QGTKzqkINbMjCad397mgWSfNveBqT4f63Td19X_qhqRixI71X3jZBlnOkZeQe6ymyhM9f0UfbS2HyoHFG6FMtzfP38JSfobsb5MYr.XF5yD9.sHj7HkMcANKRIuTSCVHmN.e4MSSjADOxrHevB7fXoFO7u1mkVhc_C0Sot8ZZ24nUWcoJD6VIkLWOQ7FtgSRikyKlhtSpuFMscAXsLrUfz9EY2JLs4uCt2ea0AcqK6cff7Gn0gaue9mpvrsjSzoexwdeTsHWYlsdpFdMXJkkZKvTEjSu32.rzrzwOtLYi_OApA7skvdpIrxtdu28AgHp54BNZje7JrivmB8389SKvHHedXBQWcpAKdmyPF.nKy6AEpSkbN5ZYqWOSp7GXzhBlua',
    'referer': 'https://i.zhaopin.com/',
    'sec-ch-ua': '"Chromium";v="88", "Google Chrome";v="88", ";Not A Brand";v="99"',
    'sec-ch-ua-mobile': '?0',
    'sec-fetch-dest': 'document',
    'sec-fetch-mode': 'navigate',
    'sec-fetch-site': 'same-origin',
    'sec-fetch-user': '?1',
    'upgrade-insecure-requests': '1',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.190 Safari/537.36'
}

html = requests.get(url = url,headers = headers)
print(html.text)


Fetching it this way gets the data.
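
As a small follow-up: for multi-page scraping, a requests.Session keeps the headers and any cookies the server sets across calls, so the browser cookie only has to be pasted once. A minimal sketch, reusing the headers dict from the snippet above:

import requests

session = requests.Session()
session.headers.update(headers)  # the same headers dict as above, cookie included

# Every request through the session reuses those headers and keeps
# whatever cookies the server sends back
resp = session.get('https://sou.zhaopin.com/?jl=763&kw=%E5%A4%A7%E6%95%B0%E6%8D%AE')
resp.encoding = 'utf-8'
print(resp.text[:500])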

qianshang666 posted on 2021-3-3 00:05

I'm not that familiar with the urllib library, so I can't patch it in your code directly. If you're not in a hurry, I can rewrite it with the requests library tomorrow.

bookaccount posted on 2021-3-3 00:07

Cookies, AJAX: it's almost always one of those two.
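
A quick way to tell which of the two is biting you is to turn off redirect-following and look at what comes back; a sketch using requests:

import requests

resp = requests.get('https://sou.zhaopin.com/', allow_redirects=False)
print(resp.status_code)

# A 301/302 whose Location points at a passport/login URL means the
# request is being bounced to a login page (missing/expired cookie);
# a 200 with nearly empty HTML usually means the listings are filled
# in later by AJAX calls, which you would have to replay yourself.
if resp.status_code in (301, 302):
    print('Redirected to:', resp.headers.get('Location'))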

douabi posted on 2021-3-3 00:08

qianshang666 posted on 2021-3-3 00:05
I'm not that familiar with the urllib library, so I can't patch it in your code directly. If you're not in a hurry, I can rewrite it with the requests library tomorrow.

No rush. I'm a complete beginner; I found this code online and I'm cramming at the last minute to finish the code part of my thesis :'(

qianshang666 posted on 2021-3-3 00:11

douabi posted on 2021-3-3 00:08
No rush. I'm a complete beginner; I found this code online and I'm cramming at the last minute to finish the code part of my thesis

Ha, that explains it. No worries.

q124444 posted on 2021-3-3 00:25

Last edited by q124444 on 2021-3-3 00:47

That's a lot of headers...
https://www.bilibili.com/video/BV12E411A7ZQ?from=search&seid=16902707249924159174
A web-scraping tutorial

aonima posted on 2021-3-3 02:35

Try adding a cookie.
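
And if the original urllib code is kept, http.cookiejar can at least carry server-set cookies across requests automatically; logging in to obtain a valid session is still a separate step. A sketch:

import http.cookiejar
import urllib.request

# An opener that remembers cookies the server sets between requests
jar = http.cookiejar.CookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(jar))

resp = opener.open('https://sou.zhaopin.com/')
print(len(jar), 'cookie(s) received')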

lili2312280 posted on 2021-3-3 08:07

Where's even the most basic cookie?