Download images from Weibo image search
Last edited by mxwawaawxm on 2019-2-1 23:03

What it does:
Submit a keyword to the Weibo image search page, download the pictures from the first few result pages, and save them locally. A JSON file is also created to store each picture's remaining metadata, such as the poster's user name.
The code is still rough and needs more work. I'm a beginner, so any pointers from the experts here are appreciated.
#!/usr/bin/env python3
# coding:utf-8
import requests, os, time, json


def get_resp(url, headers, params=None):
    try:
        response = requests.get(url, headers=headers, params=params)
        if response.status_code == 200:
            return response
    except requests.ConnectionError:
        print('Connection error')
    return None


def get_keyword():
    while True:
        keyword = input('Enter a Weibo search keyword: ')
        if not keyword.strip():
            print('Input is empty, please try again.')
            continue
        return keyword


def get_page_total_num():
    while True:
        page_num = input('Enter how many result pages to download (Weibo search shows at most 100 pages when not logged in): ').strip()
        if page_num.isdigit() and 1 <= int(page_num) <= 100:
            return int(page_num)
        else:
            print('Invalid input\t{}\t(the page count must be between 1 and 100)'.format(page_num))
            continue


def create_dir(keyword):
    # Create the download folder, named after the keyword
    if not os.path.isdir(os.path.join(os.getcwd(), keyword)):
        save_dir_path = os.path.join(os.getcwd(), keyword)
        os.makedirs(save_dir_path)
        print('Created folder {}'.format(save_dir_path))
    else:
        save_dir_path = os.path.join(os.getcwd(), '{}{}'.format(keyword, time.strftime('%Y%m%d%H%M%S', time.localtime())))
        print('Under {}, a folder named [{}] already exists.\nA timestamp will be appended to the folder name before creating it.'.format(os.getcwd(), keyword))
        os.makedirs(save_dir_path)
        print('Created folder {}'.format(save_dir_path))
    return save_dir_path


def save_pic(pic_resp, pic_name, each_page, index, pic_download_total_num):
    if pic_resp:
        with open(pic_name, 'wb') as f:
            f.write(pic_resp.content)
        print('Downloaded page {} picture {}\t{}'.format(each_page, index + 1, pic_resp.url))
        pic_download_total_num += 1
        return pic_download_total_num
    else:
        # pic_resp is None here, so report the target file name instead of pic_resp.url
        print('Failed to download page {} picture {}\t{}'.format(each_page, index + 1, pic_name))
        return pic_download_total_num


def save_to_json(pic_json_path, pic_data_dict):
    # Appends one JSON object per picture; the file is a log of objects, not a single JSON document
    with open(pic_json_path, 'a', encoding='utf-8') as f_obj:
        f_obj.write(json.dumps(pic_data_dict, indent=4, ensure_ascii=False))
        f_obj.write('\n' * 4)


def download_pic(base_url, keyword, headers, save_dir_path, page_total_num):
    pic_download_total_num = 0
    pic_json_path = os.path.join(save_dir_path, '{}.json'.format(keyword))
    # Build the request URL from the search keyword
    url = base_url.format(keyword)
    for each_page in range(1, page_total_num + 1):
        params = {'page': each_page}
        # Fetch one page of search results
        page_resp = get_resp(url, headers, params)
        # Parse the page's JSON payload
        page_resp_json = page_resp.json()
        # No results at all: report, remove the folder created earlier, and stop
        if each_page == 1 and page_resp_json['data']['is_end'] == 1:
            print('Weibo search\t{}\treturned no results; there are no pictures to download'.format(keyword))
            os.rmdir(save_dir_path)
            break
        # Fewer result pages than requested: report and stop
        elif each_page < page_total_num and page_resp_json['data']['is_end'] == 1:
            print('Weibo search\t{}\tonly has {} pages of results; downloaded {} pictures in total\nCreated {} for reference'.format(keyword, each_page - 1, pic_download_total_num, pic_json_path))
            break
        elif page_resp_json['data']['is_end'] == 0:
            for index, each_pic in enumerate(page_resp_json['data']['pic_list']):
                # Build the source URL of the picture (the API returns protocol-relative URLs)
                pic_address = 'https:{}'.format(each_pic['original_pic'])
                pic_data_dict = {
                    'index': 'page {} picture {}'.format(each_page, index + 1),
                    'source_url': pic_address,
                    'weibo_text': each_pic['text'],
                    'created_at': each_pic['created_at'],
                    'user_name': each_pic['user']['name'],
                    'user_profile_url': 'https:{}'.format(each_pic['user']['profile_url']),
                    'user_avatar_url': 'https:{}'.format(each_pic['user']['profile_image_url']),
                }
                save_to_json(pic_json_path, pic_data_dict)
                # Take the file name from the URL and prefix it with the page/picture index
                # (os.path.split returns a (head, tail) tuple, so use os.path.basename here)
                pic_name = os.path.join(save_dir_path, 'page{}-pic{}-{}'.format(each_page, index + 1, os.path.basename(pic_address)))
                # Fetch the picture itself
                pic_resp = get_resp(pic_address, headers)
                # Save it to disk
                pic_download_total_num = save_pic(pic_resp, pic_name, each_page, index, pic_download_total_num)
                time.sleep(0.2)
    # for/else: the loop ran through all requested pages without a break
    else:
        print('Weibo search\t{}\tsearched {} pages and downloaded {} pictures in total\nCreated {} for reference'.format(keyword, each_page, pic_download_total_num, pic_json_path))


def main():
    # Example: https://s.weibo.com/ajax_pic/list?q=%E6%B5%81%E6%98%9F%E8%8A%B1%E5%9B%AD&page=1
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:64.0) Gecko/20100101 Firefox/64.0',
    }
    base_url = 'https://s.weibo.com/ajax_pic/list?q={}'
    # Read the search keyword from the user
    keyword = get_keyword()
    # Read how many pages to download from the user
    page_total_num = get_page_total_num()
    # Create the download folder named after the keyword
    save_dir_path = create_dir(keyword)
    download_pic(base_url, keyword, headers, save_dir_path, page_total_num)


if __name__ == '__main__':
    main()
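For reference, this is the response shape the script relies on, reconstructed only from the keys the code above reads. The ajax_pic endpoint is undocumented, so the placeholder values and image domains below are assumptions:

    # Assumed payload of https://s.weibo.com/ajax_pic/list?q=<keyword>&page=<n>
    page_resp_json = {
        'data': {
            'is_end': 0,  # 1 once there are no further result pages
            'pic_list': [
                {
                    # protocol-relative URL, hence the 'https:' prefix added in the code
                    'original_pic': '//wx1.sinaimg.cn/large/xxx.jpg',
                    'text': '...',        # text of the Weibo post
                    'created_at': '...',  # post time
                    'user': {
                        'name': '...',
                        'profile_url': '//weibo.com/...',
                        'profile_image_url': '//tvax1.sinaimg.cn/...',
                    },
                },
            ],
        },
    }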
Revised again: version 2. There are still plenty of issues to iron out.
#!/usr/bin/env python3
# coding:utf-8
import requests, os, time, json, re


def get_resp(url, headers, params=None):
    '''
    Fetch a URL and return the response, or None on failure.
    '''
    try:
        response = requests.get(url, headers=headers, params=params)
        if response.status_code == 200:
            return response
    except requests.ConnectionError:
        return None


def get_keyword():
    '''
    Read the Weibo search keyword from the user.
    '''
    while True:
        keyword = input('Enter a Weibo search keyword: ')
        if not keyword.strip():
            print('Input is empty, please try again.')
            continue
        return keyword


def get_page_total_num():
    '''
    Read how many result pages to download from the user.
    '''
    while True:
        page_num = input('Enter how many result pages to download (Weibo search shows at most 100 pages when not logged in): ').strip()
        if page_num.isdigit() and 1 <= int(page_num) <= 100:
            return int(page_num)
        else:
            print('Invalid input\t{}\t(the page count must be between 1 and 100)'.format(page_num))
            continue


def create_dir(keyword, pattern):
    '''
    Create the download folder, named after the sanitized keyword.
    '''
    if not os.path.isdir(os.path.join(os.getcwd(), pattern.sub('', keyword))):
        save_dir_path = os.path.join(os.getcwd(), pattern.sub('', keyword))
        os.makedirs(save_dir_path)
        print('Created folder {}'.format(save_dir_path))
    else:
        save_dir_path = os.path.join(os.getcwd(), '{}{}'.format(pattern.sub('', keyword), time.strftime('%Y%m%d%H%M%S', time.localtime())))
        print('Under {}, a folder named [{}] already exists.\nA timestamp will be appended to the folder name before creating it.'.format(os.getcwd(), keyword))
        os.makedirs(save_dir_path)
        print('Created folder {}'.format(save_dir_path))
    return save_dir_path


def save_pic(pic_resp, pic_name, pic_address, each_page, index, pic_download_total_num):
    '''
    Save one picture to disk and update the running total.
    '''
    if pic_resp:
        with open(pic_name, 'wb') as f:
            f.write(pic_resp.content)
        print('Downloaded page {} picture {}\t{}'.format(each_page, index + 1, pic_resp.url))
        pic_download_total_num += 1
        return pic_download_total_num
    else:
        print('Failed to download page {} picture {}\t{}'.format(each_page, index + 1, pic_address))
        return pic_download_total_num


def save_to_json(pic_json_path, pic_data_dict):
    '''
    Append picture metadata (poster name, post time, etc.) to the JSON file.
    '''
    with open(pic_json_path, 'a', encoding='utf-8') as f_obj:
        f_obj.write(json.dumps(pic_data_dict, indent=4, ensure_ascii=False))
        f_obj.write('\n' * 4)


def download_pic(base_url, keyword, headers, save_dir_path, page_total_num, pattern):
    # Running total of downloaded pictures
    pic_download_total_num = 0
    # Path of the JSON file holding the picture metadata
    pic_json_path = os.path.join(save_dir_path, '{}.json'.format(pattern.sub('', keyword)))
    # Build the request URL from the search keyword
    url = base_url.format(keyword)
    for each_page in range(1, page_total_num + 1):
        params = {'page': each_page}
        # Fetch one page of search results
        page_resp = get_resp(url, headers, params)
        if page_resp:
            page_resp_json = page_resp.json()
            if page_resp_json['data']['is_end'] == 0:
                for index, each_pic in enumerate(page_resp_json['data']['pic_list']):
                    # Build the source URL of the picture (the API returns protocol-relative URLs)
                    pic_address = 'https:{}'.format(each_pic['original_pic'])
                    pic_data_dict = {
                        'index': 'page {} picture {}'.format(each_page, index + 1),
                        'source_url': pic_address,
                        'weibo_text': each_pic['text'],
                        'created_at': each_pic['created_at'],
                        'user_name': each_pic['user']['name'],
                        'user_profile_url': 'https:{}'.format(each_pic['user']['profile_url']),
                        'user_avatar_url': 'https:{}'.format(each_pic['user']['profile_image_url']),
                    }
                    # Append the metadata to the JSON file
                    save_to_json(pic_json_path, pic_data_dict)
                    # Take the file name from the URL and prefix it with the page/picture index
                    pic_name = os.path.join(save_dir_path, 'page{}-pic{}-{}'.format(each_page, index + 1, os.path.basename(pic_address)))
                    # Fetch the picture itself
                    pic_resp = get_resp(pic_address, headers)
                    # Save it to disk
                    pic_download_total_num = save_pic(pic_resp, pic_name, pic_address, each_page, index, pic_download_total_num)
                    time.sleep(0.2)
            # No results at all: report and remove the folder created earlier.
            # This case must be checked before the "fewer pages" case below,
            # which would otherwise swallow it whenever page_total_num > 1.
            elif page_resp_json['data']['is_end'] == 1 and each_page == 1:
                print('Weibo search\t{}\treturned no results; there are no pictures to download'.format(keyword))
                os.rmdir(save_dir_path)
                break
            # Fewer result pages than requested: report and stop
            elif page_resp_json['data']['is_end'] == 1 and each_page < page_total_num:
                print('Weibo search\t{}\tonly has {} pages of results; downloaded {} pictures in total\nCreated {} for reference'.format(keyword, each_page - 1, pic_download_total_num, pic_json_path))
                break
        else:
            print('Failed to fetch page {} of the Weibo search results'.format(each_page))
    # for/else: the loop ran through all requested pages without a break
    else:
        print('Weibo search\t{}\tsearched {} pages and downloaded {} pictures in total\nCreated {} for reference'.format(keyword, each_page, pic_download_total_num, pic_json_path))


def main():
    # Example: https://s.weibo.com/ajax_pic/list?q=%E6%B5%81%E6%98%9F%E8%8A%B1%E5%9B%AD&page=1
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:64.0) Gecko/20100101 Firefox/64.0',
    }
    base_url = 'https://s.weibo.com/ajax_pic/list?q={}'
    # Strip characters that are illegal in Windows file names: / \ : * ? " < > |
    pattern = re.compile(r'[\/\\\:\*\?\"\<\>\|]+')
    # Read the search keyword from the user
    keyword = get_keyword()
    # Read how many pages to download from the user
    page_total_num = get_page_total_num()
    # Create the download folder named after the keyword
    save_dir_path = create_dir(keyword, pattern)
    download_pic(base_url, keyword, headers, save_dir_path, page_total_num, pattern)


if __name__ == '__main__':
    main()

Hi OP, I get an error when I run it:

Traceback (most recent call last):
File "C:\Users\daowuya\Desktop\weibo.py", line 160, in <module>
main()
File "C:\Users\daowuya\Desktop\weibo.py", line 156, in main
downlaod_pic(base_url, keyword, headers, save_dir_path, page_total_num, pattern)
File "C:\Users\daowuya\Desktop\weibo.py", line 98, in downlaod_pic
if page_resp_json['data']['is_end'] == 0:
KeyError: 'data'

OP, it's throwing an error for me too:
Traceback (most recent call last):
File "D:/Java/Python_space/简单爬虫测试4.py", line 165, in <module>
main()
File "D:/Java/Python_space/简单爬虫测试4.py", line 161, in main
downlaod_pic(base_url, keyword, headers, save_dir_path, page_total_num, pattern)
File "D:/Java/Python_space/简单爬虫测试4.py", line 99, in downlaod_pic
if page_resp_json['data']['is_end'] == 0:
KeyError: 'data'
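The KeyError: 'data' in both reports means the endpoint answered with JSON that has no 'data' key, presumably when Weibo throttles the request or redirects to a login/verification page. A minimal guard for the top of the page loop in download_pic, assuming such a response should simply end the run:

    page_resp_json = page_resp.json()
    # 'data' can be missing when Weibo blocks or throttles the request
    # (an assumption based on the error reports above)
    page_data = page_resp_json.get('data')
    if page_data is None:
        print('Page {}: response JSON has no "data" key, stopping.'.format(each_page))
        break
    if page_data['is_end'] == 0:
        ...  # rest of the loop body unchanged

Using .get() keeps one bad page from crashing the whole run; swap the break for continue if later pages are worth retrying.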