百度文库API免费下载百度文库收费资料(DOC|PPT|TXT|PDF)
本帖最后由 ccbynow 于 2018-7-17 15:38 编辑。这里还有更多接口,请移步 https://github.com/Jack-Cherish/python-spider
import requests
import re
import argparse
import sys
import json
import os
# --- Command-line interface --------------------------------------------------
# Usage: script.py <url> <type>   where type is one of DOC|PPT|TXT|PDF.
parser = argparse.ArgumentParser()
parser.add_argument("url", help="Target Url,你所需要文档的URL",type=str)
parser.add_argument('type', help="Target Type,你所需要文档的的类型(DOC|PPT|TXT|PDF)",type=str)
args = parser.parse_args()
url = args.url
# NOTE(review): this shadows the builtin `type`; kept because the __main__
# dispatch below reads this module-level name.
type = args.type
# Choose the handler function according to the document type (see __main__).
# `y` tracks the last seen y coordinate across DOC() text fragments, so a
# change in vertical position can be rendered as a line break.
y = 0
def DOC(url):
    """Download the text of a Baidu Wenku DOC page and append it to <doc_id>.txt.

    url: a wenku.baidu.com /view/<doc_id>.html address.
    Side effects: HTTP requests via `requests`; writes <doc_id>.txt in the
    current directory and prints each decoded fragment.
    """
    # Bug fix: re.findall returns a list -- take the first match; the original
    # concatenated the list itself later (TypeError at runtime).
    doc_id = re.findall('view/(.*).html', url)[0]
    html = requests.get(url).text
    # Content-chunk JSON URLs are embedded (escaped) in the page HTML.
    lists = re.findall('(https.*?0.json.*?)\\\\x22}', html)
    # Each chunk URL appears twice in the page; keep the first half only.
    lenth = len(lists) // 2
    NewLists = lists[:lenth]
    # Hoisted so the final print works even when no chunks were found.
    filename = doc_id + '.txt'
    for part_url in NewLists:
        # Bug fix: operate on each element, not on the list object itself.
        part_url = part_url.replace('\\', '')
        txts = requests.get(part_url).text
        # Pairs of (text fragment, y coordinate); a new y means a new line.
        txtlists = re.findall('"c":"(.*?)".*?"y":(.*?),', txts)
        for c, y_pos in txtlists:
            global y
            text = c.encode('utf-8').decode('unicode_escape', 'ignore')
            print(text)
            if y != y_pos:
                # Vertical position changed: start a new output line.
                y = y_pos
                n = '\n'
            else:
                n = ''
            with open(filename, 'a', encoding='utf-8') as f:
                f.write(n + text.replace('\\', ''))
    print("文档保存在" + filename)
def PPT(url):
    """Download every slide of a Baidu Wenku PPT as JPG files into <doc_id>/.

    url: a wenku.baidu.com /view/<doc_id>.html address.
    Side effects: HTTP requests; creates directory <doc_id> and writes
    img0.jpg, img1.jpg, ... inside it.
    """
    # Bug fix: take the first regex match instead of the whole list.
    doc_id = re.findall('view/(.*).html', url)[0]
    url = "https://wenku.baidu.com/browse/getbcsurl?doc_id=" + doc_id + "&pn=1&rn=99999&type=ppt"
    html = requests.get(url).text
    lists = re.findall('{"zoom":"(.*?)","page"', html)
    # Bug fix: unescape each image URL (the original called .replace on the list).
    lists = [item.replace("\\", '') for item in lists]
    try:
        os.mkdir(doc_id)
    except OSError:
        # Directory already exists -- reuse it (narrowed from a bare except).
        pass
    for i, img_url in enumerate(lists):
        img = requests.get(img_url).content
        # os.path.join instead of a hard-coded '\' so this also works off Windows.
        with open(os.path.join(doc_id, 'img' + str(i) + '.jpg'), 'wb') as m:
            m.write(img)
    print("PPT图片保存在" + doc_id + "文件夹")
def TXT(url):
    """Download a Baidu Wenku TXT document and append it to <doc_id>.txt.

    url: a wenku.baidu.com /view/<doc_id>.html address.
    Side effects: HTTP requests; writes <doc_id>.txt in the current directory.
    """
    # Bug fix: every re.findall below returned a list; the original then
    # concatenated lists into URL/filename strings (TypeError at runtime).
    doc_id = re.findall('view/(.*).html', url)[0]
    url = "https://wenku.baidu.com/api/doc/getdocinfo?callback=cb&doc_id=" + doc_id
    html = requests.get(url).text
    # NOTE(review): the captured md5sum value appears to already carry its own
    # '&md5sum=...' query fragment, hence no '&' before it in NewUrl -- confirm
    # against the live API response.
    md5 = re.findall('"md5sum":"(.*?)"', html)[0]
    pn = re.findall('"totalPageNum":"(.*?)"', html)[0]
    rsign = re.findall('"rsign":"(.*?)"', html)[0]
    NewUrl = ('https://wkretype.bdimg.com/retype/text/' + doc_id
              + '?rn=' + pn + '&type=txt' + md5 + '&rsign=' + rsign)
    txt = requests.get(NewUrl).text
    jsons = json.loads(txt)
    # Pull the 'c' (content) fields out of the parsed structure's repr.
    texts = re.findall("'c': '(.*?)',", str(jsons))
    print(texts)
    filename = doc_id + '.txt'
    with open(filename, 'a', encoding='utf-8') as f:
        for piece in texts:
            # Turn escaped CR/LF sequences back into real line breaks.
            f.write(piece.replace('\\r', '\r').replace('\\n', '\n'))
    print("文档保存在" + filename)
def PDF(url):
    """Download every page of a Baidu Wenku PDF as JPG files into <doc_id>/.

    url: a wenku.baidu.com /view/<doc_id>.html address.
    Side effects: HTTP requests; creates directory <doc_id> and writes
    img0.jpg, img1.jpg, ... inside it. (Uses the same image endpoint as PPT.)
    """
    # Bug fix: take the first regex match instead of the whole list.
    doc_id = re.findall('view/(.*).html', url)[0]
    url = "https://wenku.baidu.com/browse/getbcsurl?doc_id=" + doc_id + "&pn=1&rn=99999&type=ppt"
    html = requests.get(url).text
    lists = re.findall('{"zoom":"(.*?)","page"', html)
    # Bug fix: unescape each image URL (the original called .replace on the list).
    lists = [item.replace("\\", '') for item in lists]
    try:
        os.mkdir(doc_id)
    except OSError:
        # Directory already exists -- reuse it (narrowed from a bare except).
        pass
    for i, img_url in enumerate(lists):
        img = requests.get(img_url).content
        # os.path.join instead of a hard-coded '\' so this also works off Windows.
        with open(os.path.join(doc_id, 'img' + str(i) + '.jpg'), 'wb') as m:
            m.write(img)
    # Typo fix: message said "FPD" instead of "PDF".
    print("PDF图片保存在" + doc_id + "文件夹")
if __name__ == "__main__":
    print("""
###Athor:52pojie
###TIPS:PDF|PPT只能下载图片
""")
    # Security fix: eval() evaluated the user-supplied type string as code.
    # An explicit dispatch table accepts exactly the four supported handlers.
    handlers = {'DOC': DOC, 'PPT': PPT, 'TXT': TXT, 'PDF': PDF}
    try:
        handlers[type.upper()](url)
    # Narrowed from a bare except; keeps the original best-effort behaviour of
    # reporting a usage hint on any failure (bad type, bad URL, network error).
    except Exception:
        print("获取出错,可能URL错误\n使用格式name.exe url type\n请使用--help查看帮助")
应该是把百度文库显示的内容下载后再加工的思路,无法搞到原始文件,不过也比自己手工保存图片什么的方便多 ...
感谢提醒, 看了代码应该是这样, 如果这样的话意义就不大了,刚开始还以为能获取原始文档呢。
菜鸟表示有源码也不知道怎么用。
所以不是源文档?
看不懂源码的菜鸡只好膜拜大佬 +1+1
不会编程,有源码也没啥用。
厉害了,收藏下。
支持一下
+1+1
不会编程,有源码也没啥用 不会编程,有源码也没啥用
知道这个东西以后怎么用呢?