本人小白,有什么问题欢迎各位朋友指正
# -*- coding: utf-8 -*-
# @Time     : 2021/2/13 9:29
# @Author   : wuqi
# @File     : 链家二手房信息爬虫.py
# @Software : PyCharm
import urllib.request
import urllib.error
import re
from bs4 import BeautifulSoup
import xlwt
# Pattern that captures the href of each listing's detail-page anchor
# (<a class="" href="..." target="_blank">) on a search-results page.
findUrl=re.compile(r'<a class="" href="(.+?)" target="_blank"')
# Output path for the generated .xls workbook.
savepath = "上海二手房爬虫.xls"
def getUrl():
    """Collect detail-page links from the first four search-result pages.

    Returns:
        list[str]: every listing URL matched by the module-level
        ``findUrl`` pattern, in page order.
    """
    baseurl = 'http://sh.lianjia.com/ershoufang/pg'
    urllist = []
    for i in range(1, 5):
        url = baseurl + str(i)
        # BUG FIX: the original called urlopen(baseurl), fetching page 1
        # on every iteration; the paginated URL built above was unused.
        response = urllib.request.urlopen(url)
        html = response.read().decode('utf-8')
        # findall returns the captured hrefs directly; append them all.
        urllist.extend(re.findall(findUrl, html))
    return urllist  # all detail-page links, as a flat list
def getData(url, j):
    """Scrape one listing detail page and write its fields into row ``j``
    of the module-level worksheet ``sheet``.

    Args:
        url: absolute URL of a listing detail page.
        j: 1-based worksheet row to write (row 0 holds the headers).

    Columns written: 0 total price, 1 unit price, 2 community name,
    3 district, 4+ one cell per ``li span.label`` attribute value.

    Raises:
        IndexError: if the page layout lacks an expected element
        (caught by the caller's per-listing try/except).
    """
    # Browser-like User-Agent so the server treats us as a normal client.
    head = {
        "User-Agent": "Mozilla / 5.0(Windows NT 10.0; Win64; x64) AppleWebKit / 537.36(KHTML, like Gecko) Chrome / 80.0.3987.122 Safari / 537.36"
    }
    req = urllib.request.Request(url, headers=head)
    html = ''
    try:
        # BUG FIX: the original opened the bare `url`, so the Request with
        # the User-Agent header built above was never sent; open `req`.
        response = urllib.request.urlopen(req)
        html = response.read().decode('utf-8')
    except urllib.error.URLError as e:
        # Best-effort: report what we know and fall through with html='';
        # the parsing below will then raise and the caller skips this row.
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    bsobj = BeautifulSoup(html, 'html.parser')
    total_price = bsobj.findAll(class_='total')[0].get_text()
    sheet.write(j, 0, total_price)
    unit_price = bsobj.findAll(class_='unitPriceValue')[0].get_text()
    sheet.write(j, 1, unit_price)
    community = bsobj.select('.communityName>.info ')[0].get_text()
    sheet.write(j, 2, community)
    area = bsobj.select('.areaName>.info')[0].get_text()
    sheet.write(j, 3, area)
    # Each label's value is its next sibling text node on this site.
    attributes = bsobj.select('li span.label')
    for i, label in enumerate(attributes):
        sheet.write(j, 4 + i, label.next_sibling)
# --- Top-level driver: build the workbook, write headers, scrape rows. ---
# NOTE: `book` and `sheet` stay module-level globals because getData()
# writes to `sheet` directly.
book = xlwt.Workbook(encoding='utf-8')
sheet = book.add_sheet('二手房信息爬取', cell_overwrite_ok=True)  # create worksheet
col = ("总价", "元/平米", "小区", "所在区域")  # fixed header labels for columns 0-3
urllist = getUrl()
print(len(urllist))
for i in range(len(col)):
    sheet.write(0, i, col[i])
# Fetch one sample detail page purely to harvest the variable attribute
# labels (bedrooms, floor, ...) used as headers for columns 4-14.
url = 'http://sh.lianjia.com/ershoufang/107103021426.html'
response = urllib.request.urlopen(url)
html = response.read().decode('utf-8')
bsobj = BeautifulSoup(html, 'html.parser')
Total_type = bsobj.select('li span.label')
for i in range(4, 15):
    sheet.write(0, i, Total_type[i - 4].get_text())  # write remaining column names
for j in range(len(urllist)):
    try:
        getData(urllist[j], j + 1)  # row 0 is the header row
        print("第" + str(j + 1) + "个房源提取成功")
    except Exception as e:
        # BUG FIX: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit and hid the failure cause.
        print("第" + str(j + 1) + "个房源提取失败")
        print(e)
        continue
book.save(savepath)