Collecting Baidu URLs with Python
I recently wrote a Python script that collects URLs from Baidu search results. Each thread crawls all the URLs on one results page. Features: collect Baidu URLs, with a configurable page count, thread count, keyword, and output file name. It uses multithreading, a queue, and bs4, and bs4 turned out to be quite powerful: picking the result links out of a page takes a single find_all call, as in the toy sketch below.
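A minimal illustration of that selector (the HTML here is invented for the example; on a real results page each href is a Baidu redirect URL):

#coding: utf-8
# Toy demo of the bs4 selector used in the script: result links are
# <a> tags that carry a data-click attribute and have no class.
import re
from bs4 import BeautifulSoup as bs

html = '''
<a data-click="{...}" href="http://www.baidu.com/link?url=abc">a result link</a>
<a class="c-tools" href="#">not a result link</a>
'''
soup = bs(html, 'lxml')
for a in soup.find_all(name='a', attrs={'data-click': re.compile(r'.'), 'class': None}):
    print a['href']   # prints only http://www.baidu.com/link?url=abc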
The script still has plenty of room for improvement; I'll write a v2.0 when I have time.
(Note: a thread count less than or equal to the page count is enough. Because the work is handed out through a queue, a thread count greater than the page count behaves the same as one equal to it; see the standalone toy below.)
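A minimal sketch of that behavior, separate from the script itself (the names here are made up for illustration): five workers contend for three queued pages, and the two that find nothing left simply exit. The toy uses a non-blocking get_nowait() so no worker can block on an empty queue.

#coding: utf-8
# Standalone toy: more workers than queued jobs is harmless, because a
# worker that finds the queue empty just leaves its loop and finishes.
import threading
from Queue import Queue, Empty

def worker(que):
    while True:
        try:
            page = que.get_nowait()      # non-blocking grab
        except Empty:
            break                        # queue drained: this worker is done
        print '%s took page %d' % (threading.current_thread().name, page)

que = Queue()
for page in range(3):                    # only 3 jobs...
    que.put(page)
threads = [threading.Thread(target=worker, args=(que,)) for _ in range(5)]  # ...but 5 workers
for t in threads:
    t.start()
for t in threads:
    t.join()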
As usual, a screenshot of the results first:
Since it's a single file, I won't package it; if you need it, just copy the source code below. Thanks for the support!
Source code:
#coding: utf-8
import requests, re, threading, time
from bs4 import BeautifulSoup as bs
from Queue import Queue
from argparse import ArgumentParser

# Command-line options: keyword, page count, thread count, output file.
arg = ArgumentParser(description='baidu_url_collection')
arg.add_argument('keyword', help='search keyword, e.g. inurl:.asp?id=1')
arg.add_argument('-p', '--page', help='number of result pages to crawl', dest='pagecount', type=int, default=10)
arg.add_argument('-t', '--thread', help='thread count', dest='thread_count', type=int, default=10)
arg.add_argument('-o', '--outfile', help='file to save the results to', dest='outfile', default='result.txt')
result = arg.parse_args()

headers = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)'}
class Bd_url(threading.Thread):
    """Worker thread: keeps pulling result-page URLs off the shared queue."""
    def __init__(self, que):
        threading.Thread.__init__(self)
        self._que = que

    def run(self):
        # Exit as soon as the queue is drained, so surplus threads are harmless.
        while not self._que.empty():
            URL = self._que.get()
            try:
                self.bd_url_collect(URL)
            except Exception, e:
                print e
    def bd_url_collect(self, url):
        r = requests.get(url, headers=headers, timeout=5)
        soup = bs(r.content, 'lxml', from_encoding='utf-8')
        # Result links on a Baidu page are <a> tags that have a data-click
        # attribute and no class; their href is a Baidu redirect URL.
        bqs = soup.find_all(name='a', attrs={'data-click': re.compile(r'.'), 'class': None})
        for bq in bqs:
            # Follow the redirect to recover the real target URL.
            r = requests.get(bq['href'], headers=headers, timeout=5)
            if r.status_code == 200:
                print r.url
                with open(result.outfile, 'a') as f:
                    f.write(r.url + '\n')
def main():
    threads = []
    que = Queue()
    # Baidu's pn parameter is a result offset in steps of 10,
    # so page i starts at pn = i * 10.
    for i in range(result.pagecount):
        que.put('https://www.baidu.com/s?wd=' + result.keyword + '&pn=' + str(i * 10))
    for i in range(result.thread_count):
        threads.append(Bd_url(que))
    for t in threads:
        t.start()
    for t in threads:
        t.join()
if __name__ == '__main__':
    start = time.clock()
    main()
    end = time.clock()
    # Count the collected URLs and append a summary line to the output file.
    with open(result.outfile, 'rU') as f:
        urlcount = len(f.readlines())
    with open(result.outfile, 'a') as f:
        f.write('--------use time:' + str(end - start) + '-----total url: ' + str(urlcount) + '----------------')
    print "total url: " + str(urlcount)
    print str(end - start) + "s"
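Assuming the script is saved as baidu_url.py (the filename is arbitrary), a typical run looks like:

python baidu_url.py "inurl:.asp?id=1" -p 50 -t 10 -o result.txt

This crawls 50 result pages with 10 threads and appends every URL it finds to result.txt.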
Life is short, use Python!

Reply: Awesome work! I've just started with Python and am writing crawlers; I was running into some difficulties, so this is exactly what I needed to learn from. Many thanks.