本帖最后由 jing99 于 2024-7-31 22:53 编辑
好喜欢Python 尤其是爬虫 尤其是没有反爬手段的网站
上一次分享知乎盐选小说官方爬取路径之后,有佬友@铭焱 指出,我不是盐神 onehu.xyz这个网站能不能爬取?
这就安排!!!!!!!!!!!
先看结果
第一步:读取所有链接并保存下来
这是所有的链接文件,大家不用重复爬啦:https://www.123pan.com/s/cnHcVv-jalgv.html
这一步的目的如题,但是,预防请求太多封ip,于是设置请求一条保存一条,并且记录已经请求第几页了,方便断了之后再续
代码如下:有多线程 爬的很快
[Python] 纯文本查看 复制代码 import requests
from bs4 import BeautifulSoup
import re
from concurrent.futures import ThreadPoolExecutor, as_completed
from collections import OrderedDict
# Root address of the target site; every page and article URL hangs off it.
base_url = 'https://onehu.xyz'


def get_page_url(page_num):
    """Return the board-listing URL for 1-based *page_num*.

    The first page is served at the site root; every later page lives
    under /page/<n>/#board.
    """
    return base_url if page_num == 1 else f'{base_url}/page/{page_num}/#board'
# 提取单个页面中的符合条件的链接
def extract_links_from_page(page_num):
url = get_page_url(page_num)
response = requests.get(url)
response.encoding = 'utf-8' # 设置正确的编码
soup = BeautifulSoup(response.text, 'html.parser')
container = soup.select_one('#board > div > div > div')
if container:
hrefs = [a['href'] for a in container.find_all('a', href=True)]
# 筛选符合特定模式的链接
pattern = re.compile(r'^/2024/\d{2}/\d{2}/.+$')
filtered_hrefs = [href for href in hrefs if pattern.match(href)]
# 为所有的href添加前缀
prefixed_hrefs = [base_url + href for href in filtered_hrefs]
return page_num, prefixed_hrefs
else:
return page_num, []
# 保存链接到文件
def save_links_to_file(links, filename='extracted_links.txt'):
with open(filename, 'w', encoding='utf-8') as file:
for link in links:
file.write(link + '\n')
# 遍历所有页码并提取链接
def main():
total_pages = 281
all_links = OrderedDict()
unique_links = set()
with ThreadPoolExecutor(max_workers=10) as executor:
future_to_page = {executor.submit(extract_links_from_page, page_num): page_num for page_num in range(1, total_pages + 1)}
for future in as_completed(future_to_page):
page_num, links = future.result()
for link in links:
if link not in unique_links:
unique_links.add(link)
if page_num not in all_links:
all_links[page_num] = []
all_links[page_num].append(link)
print(f'正在提取第{page_num}个页面,一共提取了{len(unique_links)}个链接')
# 将所有链接按顺序保存到文件
save_links_to_file([link for links in all_links.values() for link in links])
print(f'总共提取了{len(unique_links)}个链接')
if __name__ == '__main__':
main()
第二步:逐条访问保存的链接,将文本保存为txt以及markdown(回复可查看)
[Python] 纯文本查看 复制代码 import requests
from bs4 import BeautifulSoup
import re
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
# 读取链接
def read_links_from_file(filename='extracted_links.txt'):
with open(filename, 'r', encoding='utf-8') as file:
links = [line.strip() for line in file.readlines()]
return links
# 解析单个链接的HTML内容并保存为文件
def parse_and_save_link(link):
response = requests.get(link)
response.encoding = 'utf-8' # 设置正确的编码
soup = BeautifulSoup(response.text, 'html.parser')
target_element = soup.select_one('body > main > div:nth-of-type(1) > div > div:nth-of-type(2) > div > div > article')
if target_element:
paragraphs = target_element.find_all('p')
content = "\n\n".join([p.get_text() for p in paragraphs])
# 获取页面标题并格式化为合法文件名
title = soup.title.string if soup.title else 'No Title'
title = re.sub(r'[\\/*?:"<>|]', "", title) # 去除文件名中的非法字符
# 检查并创建文件夹
txt_folder = 'txt文件'
md_folder = 'markdown文件'
os.makedirs(txt_folder, exist_ok=True)
os.makedirs(md_folder, exist_ok=True)
# 保存为txt文件
with open(os.path.join(txt_folder, f'{title}.txt'), 'w', encoding='utf-8') as file:
file.write(f'# {title}\n\n')
file.write(content)
# 保存为markdown文件
with open(os.path.join(md_folder, f'{title}.md'), 'w', encoding='utf-8') as file:
file.write(f'# {title}\n\n')
file.write(content)
print(f'内容已保存为 {title}.txt 和 {title}.md')
else:
print(f'未找到指定的元素,链接:{link}')
# 主函数:读取链接并保存内容
def main():
links = read_links_from_file()
with ThreadPoolExecutor(max_workers=10) as executor:
future_to_link = {executor.submit(parse_and_save_link, link): link for link in links}
for future in as_completed(future_to_link):
link = future_to_link[future]
try:
future.result()
except Exception as e:
print(f'链接解析失败:{link},错误:{e}')
if __name__ == '__main__':
main()
最后一步:打包下载!!!
我已经打包好了 ,地址:
打包下载地址、.txt
(42 Bytes, 下载次数: 46)
最后的最后~~~~~
提醒各位,源码仅仅做学习交流,千万别都去爬取,给网站造成负担,毕竟这个网站公益很多年了,担心跑路,打包文章。
小的还贴心的给各位看官提供了一键爬取功能,所有的代码以及过程输出都放在了谷歌的colab中,这是链接,方便小白也能体验一把爬虫的乐趣~~~(不要乱试哦)
点我直达 谷歌colab文件 一键运行 无需配置
|