17k小说网爬取书架中的小说
本帖最后由 原野OK 于 2023-10-14 08:34 编辑

# 代码主要是下载的17k小说网添加到书架内的小说,只需要把账号和密码替换成自己的账号和密码就可以了,然后在pycharm里运行就可以了。不知道怎么安装pycharm的,去B站学一下。本贴仅限交流。
import requests
from lxml import etree
import os
# One shared HTTP session so the login cookie set by login() is reused by every later request.
session = requests.session()
# Credentials are read interactively; prompts are Chinese for "enter your 17k account / password".
accountNumber = input("请输入你的17K小说网账号:")
password = input("请输入你的登录密码:")
def login():
    """Authenticate against 17k.com; the shared session keeps the login cookies."""
    credentials = {
        "loginName": accountNumber,
        "password": password,
    }
    session.post("https://passport.17k.com/ck/user/login", data=credentials)
def get_shelf_books():
    """Fetch page 1 of the logged-in user's bookshelf and return its "data" payload."""
    shelf_url = "https://user.17k.com/ck/author2/shelf?page=1&appKey=2406394919"
    response = session.get(shelf_url)
    response.encoding = "utf8"
    return response.json().get("data")
def get_books(data):
    """Create one folder per shelf book under root_path and download its chapters."""
    for book in data:
        book_id = book.get("bookId")
        book_name = book.get("bookName")
        folder = os.path.join(root_path, book_name)
        if not os.path.exists(folder):
            os.mkdir(folder)
        get_chapter(book_id, folder, book_name)
def get_chapter(bookID, book_path, bookName):
    """Scrape one book's chapter-list page and download every chapter it links to."""
    listing = requests.get(f"https://www.17k.com/list/{bookID}.html")
    listing.encoding = "utf8"
    listing_page = etree.HTML(listing.text)
    # Each <a> inside the "Volume" definition list is one chapter link.
    for link in listing_page.xpath('//dl[@class="Volume"]/dd/a'):
        chapter_href = link.xpath('./@href')[0]
        chapter_title = link.xpath('./span/text()')[0].strip()
        print("chapter_href", chapter_href)
        print("chapter_text", chapter_title)
        chapter_res = requests.get("https://www.17k.com" + chapter_href)
        chapter_res.encoding = "utf8"
        chapter_html = chapter_res.text
        print(chapter_html)
        chapter_page = etree.HTML(chapter_res.text)
        # Paragraph texts of the chapter body.
        chapter_text = chapter_page.xpath('//div/div[@class="p"]/p/text()')
        print(chapter_text)
        download(book_path, chapter_title, chapter_text)
        print(f'{bookName}书籍的{chapter_title}章节已经下载完成')
def download(book_path, chapter_title, chapter_text):
    """Write one chapter under book_path, one paragraph per line, UTF-8 encoded."""
    target = os.path.join(book_path, chapter_title)
    with open(target, "w", encoding="utf8") as out:
        out.writelines(paragraph + "\n" for paragraph in chapter_text)
# --- script entry point: log in, read the shelf, then download every book ---
login()
data = get_shelf_books()
root_path = "我的书架"
# bugfix: the posted code had "ifnot", which is a SyntaxError in Python.
if not os.path.exists(root_path):
    os.mkdir(root_path)
get_books(data)
谢谢分享
谢谢楼主分享
小白不知道咋用呀
这样发更好:
import requests
from lxml import etree
import os
# Browser-like User-Agent so the login endpoint does not reject the request as a bot.
headers= {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36",
}
# One shared session so the login cookie is reused by all later requests.
session = requests.session()
def login():
    """Log in to 17k.com; replace the placeholder credentials with your own account."""
    payload = {
        "loginName": "账号",
        "password": "密码",
    }
    session.post("https://passport.17k.com/ck/user/login", data=payload, headers=headers)
def get_shelf_books():
    """Return the "data" section of the bookshelf API response (page 1 only)."""
    res = session.get("https://user.17k.com/ck/author2/shelf?page=1&appKey=2406394919")
    res.encoding = "utf8"
    payload = res.json()
    return payload.get("data")
def get_books(data):
    """For every shelf entry, ensure a per-book folder exists and fetch its chapters."""
    for entry in data:
        book_id = entry.get("bookId")
        title = entry.get("bookName")
        target_dir = os.path.join(root_path, title)
        if not os.path.exists(target_dir):
            os.mkdir(target_dir)
        get_chapter(book_id, target_dir, title)
def get_chapter(bookID, book_path, bookName):
    """Download every chapter of one book into book_path.

    Bug fixes vs. the posted version (which crashed on the first chapter):
    - item.xpath('./@href') returns a list, so it must be indexed with [0]
      before being concatenated to the base URL (was a TypeError).
    - item.xpath('./span/text()') also returns a list; index it before
      calling .strip() (was an AttributeError). This matches the first,
      working copy of the script in this thread.
    """
    res = requests.get(f"https://www.17k.com/list/{bookID}.html")
    res.encoding = "utf8"
    selector = etree.HTML(res.text)
    items = selector.xpath('//dl[@class="Volume"]/dd/a')
    for item in items:
        chapter_href = item.xpath('./@href')[0]
        chapter_title = item.xpath('./span/text()')[0].strip()
        print("chapter_href", chapter_href)
        print("chapter_text", chapter_title)
        res = requests.get("https://www.17k.com" + chapter_href)
        res.encoding = "utf8"
        chapter_html = res.text
        print(chapter_html)
        selector = etree.HTML(res.text)
        # Paragraph texts of the chapter body.
        chapter_text = selector.xpath('//div/div[@class="p"]/p/text()')
        print(chapter_text)
        download(book_path, chapter_title, chapter_text)
        print(f'{bookName}书籍的{chapter_title}章节已经下载完成')
def download(book_path, chapter_title, chapter_text):
    """Save a chapter's paragraphs, each newline-terminated, to a file named after it."""
    out_path = os.path.join(book_path, chapter_title)
    with open(out_path, "w", encoding="utf8") as fh:
        fh.write("".join(p + "\n" for p in chapter_text))
# --- script entry point: log in, read the shelf, prepare the output folder ---
login()
data = get_shelf_books()
root_path = "我的书架"
# bugfix: the posted code had "ifnot", which is a SyntaxError in Python.
if not os.path.exists(root_path):
    os.mkdir(root_path)
get_books(data)

谢谢分享
谢谢分享,如何下载视频啊
w12928013 发表于 2023-10-13 15:54
谢谢分享,如何下载视频啊
下载视频首先要看下是哪个网站,针对性的去下载!

毛泽西 发表于 2023-10-13 14:42
小白不知道咋用呀
哈哈,不止你一个,我也没搞明白 学习学习,谢谢了