Public opinion analysis: Weibo scraping, data cleaning, word cloud generation, and sentiment scoring
Public opinion analysis workflow
1. Pick a trending event
2. Crawl the Weibo comments
3. Clean the data (remove non-Chinese and non-date characters)
4. Generate word clouds
5. Score the sentiment
6. Analyse the event topics
2. Crawling Weibo comments
import sys
import requests
import random
import time
# First version finished 22:41, second version 19:47
# Crawl the comments of one Weibo post, plus (optionally) their child comments
class Main(object):
    url = 'https://m.weibo.cn/comments/hotflow?'            # main-comment endpoint
    url_chi = 'https://m.weibo.cn/comments/hotFlowChild?'   # child-comment endpoint
    user_agent = [
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
"Mozilla/5.0 (Windows NT 10.0; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0",
"Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11",
"Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
"Mozilla/5.0 (iPod; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
"Mozilla/5.0 (iPad; U; CPU OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
"Mozilla/5.0 (Linux; U; Android 2.3.7; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Opera/9.80 (Android 2.3.4; Linux; Opera Mobi/build-1107180945; U; en-GB) Presto/2.8.149 Version/11.10",
"Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
"Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, like Gecko) Version/6.0.0.337 Mobile Safari/534.1+",
"Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.0; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/233.70 Safari/534.6 TouchPad/1.0",
"Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/20.0.019; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.18124",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Titan)",
"UCWEB7.0.2.37/28/999",
"NOKIA5700/ UCWEB7.0.2.37/28/999",
"Openwave/ UCWEB7.0.2.37/28/999",
"Mozilla/4.0 (compatible; MSIE 6.0; ) Opera/UCWEB7.0.2.37/28/999",
# iPhone 6:
"Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25",
]
    cookies = []  # put one or more logged-in cookie strings here; random.choice() on an empty list raises IndexError
    headers = {'User-Agent': random.choice(user_agent)
               , 'Cookie': random.choice(cookies)
               # 'Referer': 'https://m.weibo.cn/detail/4497103885505673',
               # 'Sec-Fetch-Mode': 'navigate'
               }
    params = {}
    params_chi = {}
    list_text = []   # crawled comment texts
    data_text = {}   # comment text -> comment date
    cid_list = []    # comment ids, needed for the child-comment requests
    date_list = []
    proxies = {      # optional proxies
        'http': 'http://118.113.247.115:9999',
        'https': 'https://118.113.247.115:9999'
    }
    def get_max_id(self):
        try:
            try:
                response = requests.get(url=self.url, headers=self.headers, params=self.params).json()  # fetch one page of comments
                # print(response)
            except:
                print('request failed, retrying once')
                response = requests.get(url=self.url, headers=self.headers, params=self.params).json()  # retry once
            max_id = response['data']['max_id']            # pagination parameters for the next GET request,
            max_id_type = response['data']['max_id_type']  # taken from the previous page's JSON
            data = response['data']['data']
            for i in range(0, len(data)):
                text = data[i]['text']
                date = data[i]['created_at']
                self.data_text[text] = date  # store the comment text as key and its date as value
                cid = data[i]['id']
                # print(text)
                self.list_text.append(text)
                self.cid_list.append(cid)
            # print(self.data_text)
        except:
            max_id = self.params['max_id']
            max_id_type = self.params['max_id_type']
            print('Error!')
            print('Crawled up to:', max_id, max_id_type)
            self.save_data()
            sys.exit()
        return max_id, max_id_type
    def get_max_id_chi(self):
        try:  # the crawler keeps requesting after the last page of child comments, so catch the error and carry on
            response = requests.get(url=self.url_chi, headers=self.headers, params=self.params_chi).json()
            # print(response)
            max_id = response['max_id']            # unlike the main-comment JSON, max_id and max_id_type
            max_id_type = response['max_id_type']  # sit outside 'data' here
            data_chi = response['data']
            for i in range(0, len(data_chi)):
                text = data_chi[i]['text']
                # print(text)
                self.list_text.append(text)
        except:
            max_id = 0
            max_id_type = 0
        return max_id, max_id_type
    def __init__(self):
        num_page = int(input('How many pages do you want to crawl? '))
        ID = input('Enter the id of the post to crawl: ')
        # num_page = 555
        # ID = 4636281969312670
        return_info = ('0', '0')  # initial parameters for the first page; if the crawl stops midway,
                                  # the last printed values can be fed back in to resume
        for i in range(0, num_page):
            print(f'Crawling page {i + 1}')
            time.sleep(random.randint(0, 5))  # rest a while so Weibo does not notice us
            self.params = {
                'id': ID,
                'mid': ID,
                'max_id': return_info[0],
                'max_id_type': return_info[1]
            }
            return_info = self.get_max_id()
            print(f'Page {i + 1} done')
            # self.cid_data()  # crawl the child comments; optional
        self.save_data()
    def cid_data(self):
        return_info_chi = ('0', '0')
        print('Child comments:', 10, 'pages')
        for i in range(10):
            print(f'Crawling child comments, page {i + 1}')
            time.sleep(1)
            self.params_chi = {
                'cid': self.cid_list,
                # 'mid': self.cid_list,
                'max_id': return_info_chi[0],
                'max_id_type': return_info_chi[1]
            }
            return_info_chi = self.get_max_id_chi()
    def save_data(self):  # save the crawled comments, one "comment<TAB>date" per line
        with open('weibo_10_9.txt', 'a', encoding='utf-8') as f:
            for text in self.data_text:
                date = self.data_text[text]
                # print(date)
                f.write(text)
                f.write('\t')
                f.write(date)
                f.write('\n')

m = Main()
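The crawler above writes one "comment<TAB>date" line per comment (here into weibo_10_9.txt), while the scripts below read files that are already cleaned, such as weibo_4000.txt and new_weibo_*.txt. A minimal bridging sketch in the spirit of step 3 of the outline (drop the date column, keep only Chinese characters) could look like this; the input and output file names are examples, not necessarily the author's exact ones.

import re

keep_chinese = re.compile(r'[^\u4e00-\u9fa5]')  # matches everything that is not a Chinese character

# Example file names only; substitute whatever the crawler actually produced.
with open('weibo_10_9.txt', encoding='utf-8') as fin, \
     open('new_weibo_10_9.txt', 'w', encoding='utf-8') as fout:
    for line in fin:
        comment = line.split('\t')[0]            # drop the tab-separated date
        comment = keep_chinese.sub('', comment)  # keep Chinese characters only
        if comment:                              # skip comments that end up empty
            fout.write(comment + '\n')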
3. Data cleaning and word cloud generation
import jieba
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import PIL.Image as Image
import numpy as np
import re

# Load the text and do some light cleaning:
# strip newlines, spaces and every non-Chinese character
text = open("weibo_4000.txt", encoding='utf8').read()
pattern = re.compile(r'[^\u4e00-\u9fa5|\n]')  # \u4e00-\u9fa5 covers all Chinese characters, \n is the newline;
                                              # this matches everything that is neither Chinese nor a newline
text = re.sub(pattern, '', text)
text = text.replace('\n', "")

# Tokenize; jieba.lcut returns a list of words
text_cut = jieba.lcut(text)
# Join the tokens back into one space-separated string
text_cut = ' '.join(text_cut)

# usa_mask = np.array(Image.open('p4.jpg'))

# Load the stop words, used to drop filler words such as '啊', '你', '我'
stop_words = open("stopwords.txt", encoding="utf8").read().split("\n")
stop_words.append('说')

# Build the word cloud with WordCloud
word_cloud = WordCloud(scale=12
                       , font_path="simsun.ttc"    # font used in the cloud
                       , background_color="white"  # background colour of the image
                       , stopwords=stop_words      # stop words to drop
                       # , mask=usa_mask
                       )
wc = word_cloud.generate(text_cut)

# Show and save the result with matplotlib
plt.subplots(figsize=(12, 8))
plt.imshow(wc)
plt.axis("off")
plt.savefig('ci.png')  # save before plt.show(), otherwise the saved figure is blank
plt.show()
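If going through matplotlib is awkward, WordCloud can also write the image itself; a one-line alternative using the same wc object built above:

wc.to_file('ci.png')  # write the rendered cloud straight to disk, no matplotlib needed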
5. Sentiment scoring
import matplotlib.pyplot as plt  # plotting
import os
import re
from snownlp import SnowNLP

path = r'E:\pycharm社区版\pythonProject\l1\舆情分析'  # folder holding the cleaned comment files
files = os.listdir(path)  # list every file in the folder
t = []
for i in files:  # keep only the files whose name contains new_weibo
    if 'new_weibo' in i:
        t.append(i)

emotion = {}  # maps each event file to its average sentiment score
for i in t:
    text = open(os.path.join(path, i), 'r', encoding='utf8').read()  # read the file
    pattern = re.compile(r'[^\u4e00-\u9fa5|\n]')  # matches everything that is neither Chinese nor a newline
    text = re.sub(pattern, '', text)              # drop the non-Chinese characters
    lines = text.split('\n')                      # one comment per line
    k = 0  # running score total
    m = 0  # number of scored lines
    for line in lines:  # score each comment
        try:
            s = SnowNLP(line)     # sentiment score in [0, 1]
            k = k + s.sentiments  # accumulate the scores
            m = m + 1
        except:  # skip lines SnowNLP cannot handle
            pass
    e = round(k / m, 3)  # average score for this file, three decimals
    emotion[i] = e       # store the score under its event file name

e_score = emotion.values()
plt.plot(list(e_score))
plt.show()
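For reference, SnowNLP's sentiments attribute returns a value in [0, 1]: values near 1 read as positive, values near 0 as negative. A quick standalone check (the example phrases are mine, not from the crawled data):

from snownlp import SnowNLP

print(SnowNLP('这部电影真好看').sentiments)    # close to 1: positive
print(SnowNLP('太失望了,非常难受').sentiments)  # close to 0: negative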
6. Event topic analysis
from gensim import corpora, models  # text-mining toolkit, similar in spirit to scikit-learn
from snownlp import SnowNLP
import pandas as pd
import jieba

mycut = lambda x: ' '.join(jieba.cut(x))  # simple tokenizer: jieba-cut, then join with spaces

stop = open("stopwords.txt", 'r', encoding='UTF-8').read()
stop = stop.split('\n')
stop.append('说')
stop.append('蜡烛')

df = pd.read_csv('new_weibo_10_1(0).txt', header=None, names=['comments'])  # load the cleaned comments
df.dropna(inplace=True)
df['coms'] = df['comments'].apply(lambda x: SnowNLP(x).sentiments)
# Sentiment scores fall in [0, 1]; 0.5 is the usual dividing line (a stricter 0.6 / 0.4 split is used here)
pos_data = df[df['coms'] >= 0.6].comments  # positive subset
neg_data = df[df['coms'] < 0.4].comments   # negative subset
pos_data = pos_data.apply(mycut)
neg_data = neg_data.apply(mycut)
pos = pd.DataFrame(pos_data)
neg = pd.DataFrame(neg_data)
pos[1] = pos['comments'].apply(lambda s: s.split(' '))  # split each comment back into a word list
pos[2] = pos[1].apply(lambda x: [i for i in x if i not in stop if len(i) > 1])  # drop stop words and single characters
neg[1] = neg['comments'].apply(lambda s: s.split(' '))
neg[2] = neg[1].apply(lambda x: [i for i in x if i not in stop if len(i) > 1])

# Negative topic analysis
neg_dict = corpora.Dictionary(neg[2])               # build the dictionary (word -> integer id)
neg_corpus = [neg_dict.doc2bow(i) for i in neg[2]]  # build the bag-of-words corpus
neg_lda = models.LdaModel(neg_corpus, num_topics=2, id2word=neg_dict)  # train the LDA model
for i in range(2):
    print('neg_topic' + str(i))
    print(neg_lda.print_topic(i))  # print each topic

# Positive topic analysis
pos_dict = corpora.Dictionary(pos[2])               # build the dictionary
pos_corpus = [pos_dict.doc2bow(i) for i in pos[2]]  # build the bag-of-words corpus
pos_lda = models.LdaModel(pos_corpus, num_topics=2, id2word=pos_dict)  # train the LDA model
for i in range(2):
    print('pos_topic' + str(i))
    print(pos_lda.print_topic(i))  # print each topic
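To check which of the two topics an individual comment leans towards, gensim's get_document_topics can be queried with one bag-of-words vector at a time. A minimal sketch reusing the neg_corpus, neg and neg_lda objects built above (the five-comment slice is only for illustration):

# Dominant topic for the first few negative comments
for bow, words in zip(neg_corpus[:5], neg[2][:5]):
    topic_id, prob = max(neg_lda.get_document_topics(bow), key=lambda t: t[1])
    print(f'topic {topic_id} ({prob:.2f}):', ' '.join(words))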
The approach taken here was to analyse one event's hot comments across different time windows: several text files were crawled and then cleaned and scored separately. Note: this is the source code used in the original project, posted without modification, so you will need to adapt some parts (cookies, paths, file names) before it runs, and each numbered section above is a separate .py file. Feel free to reply to discuss or send me a private message; I am not on the forum often, so please be patient if replies are slow. Please credit the source when reposting.