Newbie wrote an image crawler, humbly asking the experts to help add a thread
I've recently been following forum member @wushaominkk's Python beginner tutorial https://www.52pojie.cn/thread-739688-1-1.html and wrote a crawler that scrapes an image site and downloads the pictures. Since it is single-threaded it is rather slow, so could the experts please give me some pointers on how to optimize it and add threading? Thanks!
The code is as follows:
from urllib import request
import re
import os
link_zhengze=r'<a class="img" href="(.+?)" title=".+?" target="_blank">.+?</a>'
total_page_reg = r'<a class="page-numbers" href=".+?" title="最后页">(.+?)</a>'
img_link_reg = r'<a class="image_cx_cont" .+?><img src="(.+?)".+? /></a>'
img_title_reg = r'<title>(.+?)-.+?</title>'
list_link = list()
view_page_link = ''
view_page_link_ext = ''
root_path = os.getcwd() + '/images'
img_path = ''
def http_request(url, coding='utf-8'):
    try:
        html = request.urlopen(url)
    except BaseException:
        return False
    else:
        return html.read().decode(coding)
def regular(reg, data):
    return re.findall(reg, data, re.S)
def content_link(url):
    response = http_request(url)
    if response == False:
        return False
    return regular(link_zhengze, response)
def page_link_format(url):
    # split the view-page URL into base and extension so paginated links like xxx_2.html can be built
    global view_page_link, view_page_link_ext
    format = url.split('.')
    view_page_link = format[0] + '.' + format[1]
    view_page_link_ext = format[2]
def mkdir_path(root_path, name=''):
    path = root_path + '/' + name
    is_file = os.path.exists(path)
    if is_file == False:
        os.makedirs(path)
def set_img_file(file, data):
    f = open(file, 'wb')
    f.write(data)
    f.close()
def get_down_img(imgs):
    for index in imgs:
        if index is None:
            continue
        print('download now: ' + index)
        set_img_file(img_path + '/' + index.split('/')[-1], request.urlopen(index).read())
def get_view_links(url):
    global img_path
    html = http_request(url)
    if html == False:
        return False
    view_img = regular(img_link_reg, html)
    if not view_img:
        return False
    total_page = int(regular(total_page_reg, html)[0])
    page_link_format(url)
    title = regular(img_title_reg, html)[0]
    img_path = root_path + '/' + title
    mkdir_path(img_path)
    print('Download file:' + title)
    get_down_img(view_img)
    for index in range(1, total_page):
        page_html = http_request(view_page_link + '_' + str(index + 1) + '.' + view_page_link_ext)
        if page_html == False:
            continue
        view_img = regular(img_link_reg, page_html)
        if not view_img:
            continue
        get_down_img(view_img)
def get_list_link(list_link):
    for item in list_link:
        returl = get_view_links(item)
        if returl == False:
            continue
web_link = ['http://mzsock.com/sw/page/', 'http://mzsock.com/cy/page/', 'http://mzsock.com/mv/page/',
            'http://mzsock.com/lz/page/', 'http://mzsock.com/fbx/page/', 'http://mzsock.com/ydx/page/',
            'http://mzsock.com/rzt/page/', 'http://mzsock.com/cwzp/page/']
for index in web_link:
    # reset the page counter and the error counter for each category
    error_number = 0
    list_page_number = 1
    error_status = True
    while error_status:
        list_link = content_link(index + str(list_page_number))
        list_page_number = list_page_number + 1
        if list_link == False:
            if error_number == 2:
                error_status = False
            else:
                error_number = error_number + 1
        else:
            get_list_link(list_link)
print('Success')
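Since the question above is specifically about adding threads, here is a minimal sketch (not taken from any reply in this thread) of one common approach: handing each image download to a standard-library thread pool. download_one and download_all are hypothetical helpers that play the same role as set_img_file and the loop inside get_down_img; the rest of the crawler would only need to collect the image URLs and call download_all instead of downloading them one by one.

import os
from concurrent.futures import ThreadPoolExecutor
from urllib import request

def download_one(url, save_dir):
    # fetch one image and save it under the last segment of its URL
    data = request.urlopen(url).read()
    path = save_dir + '/' + url.split('/')[-1]
    with open(path, 'wb') as f:
        f.write(data)
    return path

def download_all(urls, save_dir, workers=8):
    # download a list of image URLs concurrently with a pool of worker threads
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    with ThreadPoolExecutor(max_workers=workers) as pool:
        for path in pool.map(lambda u: download_one(u, save_dir), urls):
            print('download done: ' + path)

Threads help here even though Python runs bytecode one thread at a time, because the time is spent waiting on network and disk I/O, during which the GIL is released.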
Learning from the experts here.
Uh, this one I can't do :D
This single-threaded version is written in a rather complicated way!!
Waiting for an expert.
I'm still learning too.
The expert is the next poster.
Respect to the experts. I use E-Language (易语言) and have never made up my mind to move over to Python.
I only know Java, so I can't help here.
I'm a newbie too, so my answer is for reference only:
from gevent import monkey
# Import the monkey module from the gevent library.
monkey.patch_all()
# monkey.patch_all() switches the program to cooperative scheduling, i.e. it lets the program run asynchronously.
import gevent, time, requests
# Import gevent, time and requests.
from gevent.queue import Queue
# Import the Queue class from gevent.queue.
start = time.time()
url_list = ['https://www.baidu.com/',
            'https://www.sina.com.cn/',
            'http://www.sohu.com/',
            'https://www.qq.com/',
            'https://www.163.com/',
            'http://www.iqiyi.com/',
            'https://www.tmall.com/',
            'http://www.ifeng.com/']
work = Queue()
# Create a queue object and assign it to work.
for url in url_list:
    # Iterate over url_list.
    work.put_nowait(url)
    # put_nowait() puts each URL into the queue.
def crawler():
    while not work.empty():
        # Keep running the code below as long as the queue is not empty.
        url = work.get_nowait()
        # get_nowait() takes a URL out of the queue.
        r = requests.get(url)
        # Fetch the URL with requests.get().
        print(url, work.qsize(), r.status_code)
        # Print the URL, the current queue size and the response status code.
tasks_list = []
# Create an empty task list.
for x in range(2):
    # This is equivalent to creating 2 crawlers.
    task = gevent.spawn(crawler)
    # gevent.spawn() creates a task that runs the crawler() function.
    tasks_list.append(task)
    # Append the task to the task list.
gevent.joinall(tasks_list)
# gevent.joinall() runs all the tasks in the list, i.e. the crawlers start fetching the sites.
end = time.time()
print(end - start)

Just add this decorator to any function you want to run in a thread. For example:

@thread_run
def hello():
    print("hello")

When you call the hello function, it runs on a thread.
import threading

def thread_run(func):
    def wrapper(*args, **kwargs):
        t = threading.Thread(target=func, args=args, kwargs=kwargs)
        t.daemon = True
        t.start()
    return wrapper
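A small usage sketch of the same idea (my own assumption, not from the reply above): the wrapper starts a daemon thread and returns nothing, so the caller cannot wait for the work to finish, and daemon threads are killed as soon as the main program exits. Returning the Thread object lets the caller join() it before exiting, which matters for downloads.

import threading

def thread_run(func):
    def wrapper(*args, **kwargs):
        t = threading.Thread(target=func, args=args, kwargs=kwargs)
        t.daemon = True
        t.start()
        return t  # hand the Thread back so the caller can join() it
    return wrapper

@thread_run
def say(msg):
    print(msg)

t = say("hello from a worker thread")
t.join()  # wait for the worker to finish before the program exits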