Not much to say, straight to the code. I'm new to Python myself and spent two days writing this... The code isn't very tidy, so please go easy on me.
from bs4 import BeautifulSoup
import requests
import json
import re
import time
def get_one_page(url):
    try:
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36"
        }
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            response.encoding = "UTF-8"
            return response.text
    except requests.ConnectionError:
        return None
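# Note: when the status code is not 200, get_one_page falls off the end of the
# try block and implicitly returns None, same as the ConnectionError branch.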
def choice_jiekou():  # scrape the link for each style/category
    URL = "https://www.gushiwen.org/shiju/xiejing.aspx"
    html = requests.get(URL).text
    soup = BeautifulSoup(html, 'lxml')
    gushi_1 = soup.find_all("div", {"class": "cont"})
    raws = []
    for gushi in gushi_1:
        gushi = gushi.findAll(href=re.compile("shiju"))
        for jiekou in gushi:
            raws.append(jiekou['href'])
    return raws
def parse_one_page(html):  # the verses for one style/category
    soup = BeautifulSoup(html, 'lxml')
    gushi_1 = soup.find_all("div", {"class": "cont"})
    rows = []
    row = []
    raw = []
    gushibiaoti_1 = soup.find_all("div", {"class": "title"})
    for gushibiaoti in gushibiaoti_1:  # find the titles
        for biaoti1 in gushibiaoti.stripped_strings:
            biaoti = repr(biaoti1)
            row.append(biaoti)
    for gushi2 in gushi_1:  # find the ordinal label of each verse
        gushi2 = gushi2.findAll(style='float:left;')
        for jiekou in gushi2:
            raw.append(jiekou.string)
    for gushi in gushi_1:  # find the verses
        gushici = gushi.find_all('a')
        gushici_1 = gushici[0].string
        gushici_2 = gushici[1].string
        gushi1 = gushici_1 + "————" + gushici_2
        rows.append(gushi1)
    rows.pop()
    rows.pop(0)  # remove the first item
    row.pop()  # remove the last item
    # raw and rows must end up the same length here, or the join below
    # raises an IndexError
    c = [(raw[i] + rows[i]) for i in range(0, len(raw))]
    e = [' ']
    d = row + e + c + e  # titles, blank line, labelled verses, blank line
    return d
jiekou = choice_jiekou()  # pull out every category link and keep it in jiekou
changdu = len(jiekou)  # how many links there are
def dizhi():  # build the full list of addresses
    URL = ['https://www.gushiwen.org/' + x for x in jiekou]  # join each path onto the domain
    dizhi = tuple(URL)  # convert the list to a tuple
    return dizhi
dizhi1 = dizhi()
def writeInfomation(content):  # append one line to a text file
    with open("gushi3.txt", "a+", encoding='utf-8') as f:  # utf-8 so the Chinese text survives
        f.write(json.dumps(content, ensure_ascii=False) + "\n")
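# Note: json.dumps on a plain string just wraps it in quotes, so every line of
# gushi3.txt comes out quoted; plain f.write(content + "\n") would avoid that.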
def main(offset):
    url = dizhi1[offset]
    html = get_one_page(url)
    if html is None:  # skip this address if the request failed
        return
    for item in parse_one_page(html):
        print(item)
        writeInfomation(item)
if __name__ == '__main__':
    for i in range(changdu):
        main(offset=i)
        time.sleep(1)  # parse one address per second
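A small hardening idea (just a sketch, not tested against the live site): the hrefs scraped by choice_jiekou() are glued onto the domain by plain string concatenation, which produces a broken address if the site ever serves an absolute link. urllib.parse.urljoin from the standard library handles both cases, so dizhi() could instead look like this:

from urllib.parse import urljoin

def dizhi():
    # urljoin keeps absolute hrefs as-is and resolves relative ones
    # against the base, so either link style yields a valid address
    return tuple(urljoin('https://www.gushiwen.org/', x) for x in jiekou)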