Task:
1. Scrape the comment data of iQiyi's "Youth With You 2" (《青春有你2》): collect the comments under any full episode, at least 1,000 comments
2. Compute word frequencies and visualize them
3. Draw a word cloud
4. Use PaddleHub to run content moderation on the comments
Steps:
Step 1: scrape the comment data of "Youth With You 2" (reference link: https://www.iqiyi.com/v_19ryfkiv8w.html#curid=15068699100_9f9bab7e0d1e30c494622af777f4ba39)
Collect the comments under any full episode; at least 1,000 comments
Step 2: compute word frequencies and visualize them
Data preprocessing: clean special characters out of the comments (e.g. @#¥% and emoji) and store the cleaned result as a txt file
Chinese word segmentation: add new words (e.g. 青你, 奥利给, 冲鸭) and remove stopwords (e.g. 哦, 因此, 不然, 也好, 但是)
Count the top-10 high-frequency words
Visualize the high-frequency words
Step 3: draw the word cloud
Generate the word cloud from the word frequencies
Optional: add a background image and shape the word cloud to its outline
Step 4: use PaddleHub to run content moderation on the comments
Required setup and preparation:
jieba for Chinese word segmentation
wordcloud for drawing the word cloud
a Chinese font for the visualizations
a Chinese stopword list from any public source on the web
a custom new-word list built from your own segmentation results (the expected file formats are sketched right after this list)
a background image for the word cloud (optional, not required; a PaddleHub matting model can produce one)
PaddleHub setup
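A minimal sketch of the two word-list files the code below expects, namely add_words.txt and cn_stopwords.txt; the entries here are illustrative, so extend them from your own segmentation results.
new_words = ['青你', '奥利给', '冲鸭', '小姐姐']  # 小姐姐 is an assumed example entry
with open('add_words.txt', 'w', encoding='utf-8') as f:
    f.write('\n'.join(new_words))
# cn_stopwords.txt uses the same one-entry-per-line format, e.g.:
#   哦
#   因此
#   但是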
!pip install jieba
!pip install wordcloud
# Default system font directory on Linux
# !ls /usr/share/fonts/
# List the ttf-format Chinese fonts available on the system
# !fc-list :lang=zh | grep ".ttf"
# !wget https://mydueros.cdn.bcebos.com/font/simhei.ttf # download the SimHei Chinese font
# # Create the font directory .fonts
# !mkdir .fonts
# # Copy the font file into that directory
# !cp simhei.ttf .fonts/
# Upgrade PaddleHub first, then install the content-moderation model
!pip install --upgrade paddlehub
!hub install porn_detection_lstm==1.1.0
from __future__ import print_function
import requests
import json
import re  # regular expressions
import time  # time utilities
import jieba  # Chinese word segmentation
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
from PIL import Image
from wordcloud import WordCloud  # word-cloud drawing
import paddlehub as hub
from IPython.display import display  # implicit in notebooks; imported explicitly so the final display() call also runs as a script
#Request the iQiyi comment API and return the response
def getMovieinfo(url):
    '''
    Request the iQiyi comment API and return the response body
    Param url: the comment API url
    :return: the response text, or None on failure
    '''
    session=requests.Session()
headers={
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36",
"Accept":"*/*",
"Accept-Encoding":"gzip, deflate,br",
"Accept-Language": "zh-CN,zh;q=0.9",
"Connection":"keep-alive",
"Cookie":"T00404=1e4f2d665ce5643bdbae82e3a9dd033a; QC005=76e5927cfe7a6018380310701bf892c8; QP001=1; QP0013=; QC006=o8hmgc8sr7u01ffp24omnzpg; QC173=0; QC175=%7B%22upd%22%3Atrue%2C%22ct%22%3A%22%22%7D; P00004=.1587968182.a8b9f2c93f; __guid=112705862.873531527953199000.1588058719083.5146; monitor_count=1; T00700=EgcI9L-tIRABEgcIuMDtIRABEgcIz7-tIRABEgcI67-tIRABEgcIkMDtIRABEgcI77-tIRABEgcIg8DtIRABEgcI0b-tIRABEgcI8L-tIRABEgcIhcDtIRABEgcI87-tIRABEgcI7L-tIRABEgcI9sDtIRABEgcImMDtIRABEgcI07-tIRABEgcI57-tIRABEgcI6b-tIRABEgcIlMDtIRAB; QP007=420; QC159=%7B%22color%22%3A%22FFFFFF%22%2C%22channelConfig%22%3A0%2C%22hideRoleTip%22%3A1%2C%22speed%22%3A10%2C%22density%22%3A30%2C%22opacity%22%3A86%2C%22isFilterColorFont%22%3A1%2C%22proofShield%22%3A0%2C%22forcedFontSize%22%3A24%2C%22isFilterImage%22%3A1%2C%22isOpen%22%3A1%2C%22hadTip%22%3A1%7D; QILINPUSH=1; Hm_lvt_53b7374a63c37483e5dd97d78d9bb36e=1588067562,1588068851,1588069171,1588078410; Hm_lpvt_53b7374a63c37483e5dd97d78d9bb36e=1588078410; QC007=DIRECT; QC008=1587967510.1588069170.1588078410.8; nu=0; QC010=24139966; QP0027=18; IMS=IggQDxj_paH1BSokCiA3NDVmZTUyOTQ2MDUzZDIxNzQ2NmIxNTc3OGI5ZDJiMRAAciQKIDc0NWZlNTI5NDYwNTNkMjE3NDY2YjE1Nzc4YjlkMmIxEAA; __dfp=a1780760622a79476b8100a6b506317daf283ed019549df2fc6192cc8e48bc3a46@1589263511006@1587967512006",
"Host":"sns-comment.iqiyi.com",
"Referer": "https://www.iqiyi.com/v_19ryfkiv8w.html",
"Sec-Fetch-Mode":"no-cors",
"Sec-Fetch-Site": "same-site"
}
    response=session.get(url,headers=headers)  # reuse the session created above
    if response.status_code==200:
        # print(response.text)
        return response.text
    return None
#-----------------------------------
#Parse the JSON response and collect the comments
def saveMovieInfoToFile(lastId,arr):
    '''
    Parse the JSON response and collect the comments
    Params lastId: id of the last fetched comment (the pagination cursor)  arr: list accumulating the comment texts
    :return: the new lastId
    '''
    url="https://sns-comment.iqiyi.com/v3/comment/get_comments.action?agent_type=118&agent_version=9.11.5&authcookie=null&business_type=17&content_id=15068699100&hot_size=0&page_size=20&types=time&last_id="
    url+=str(lastId)
    responseTxt=getMovieinfo(url)
    responseJson=json.loads(responseTxt)  # parse the JSON string into a dict
    # print(responseJson)
    comments=responseJson['data']['comments']  # 'data' maps to a dict whose 'comments' key holds the list of comments
    # print(comments)
for val in comments:
# print(val.keys())
if 'content' in val.keys():
# print(val['content'])
arr.append(val['content'])
lastId=str(val['id'])
return lastId
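The API is cursor-paginated: the id of the last comment in one response becomes the last_id of the next request, starting from '0'. A minimal sketch of fetching a few pages (the page count and sleep interval here are arbitrary):
demo_arr=[]
cursor='0'  # '0' asks for the first page
for _ in range(3):  # fetch 3 pages as a demo
    cursor=saveMovieInfoToFile(cursor,demo_arr)
    time.sleep(0.5)  # be polite to the API
print(len(demo_arr),'comments fetched')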
#Remove special characters from the text
def clear_special_char(content):
    '''
    Strip special characters with regular expressions
    Param content: the raw text
    return: the cleaned text
    '''
    s=re.sub(r"</?(.+?)>| |\t|\r","",content)  # drop HTML tags, non-breaking spaces, tabs and carriage returns
    s=re.sub(r'\n'," ",s)  # turn newlines into spaces
    s=re.sub(r'[^a-zA-Z0-9\u4e00-\u9fa5 ]','',s)  # keep only letters, digits, Chinese characters and spaces; this removes emoji and symbols such as @#¥%
    return s
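A quick sanity check of the cleaner on a made-up comment (the input string is invented for illustration):
print(clear_special_char('小姐姐们冲鸭!!<br/>@#¥%😀'))  # expected: '小姐姐们冲鸭'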
#-----------------------------------------------
def fenci(text):
    '''
    Segment Chinese text with jieba
    Param text: the sentence or text to segment
    return: the list of tokens
    '''
    jieba.load_userdict('add_words.txt')  # load the custom dictionary (internet slang etc.) for better segmentation
    seq=jieba.lcut(text,cut_all=False)
    return seq
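With 青你 and 冲鸭 listed in add_words.txt, jieba keeps them as single tokens; the exact output otherwise depends on the jieba version and dictionary:
print(fenci('青你的小姐姐们冲鸭'))  # e.g. ['青你', '的', '小姐姐', '们', '冲鸭'] once the user dictionary is loaded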
#-------------------------------------------
def stopwordslist(file_path):
    '''
    Build the stopword list
    Param file_path: path to the stopword file
    return: the stopword list
    '''
    with open(file_path,encoding='UTF-8') as f:  # the with-block closes the file automatically
        stopwords=[line.strip() for line in f]
    return stopwords
#-----------------------------
def movestopwords(sentence,stopwords,counts):
    '''
    Drop stopwords and count word frequencies
    Params sentence: list of tokens  stopwords: the stopword list  counts: dict accumulating the word-frequency result
    return: None
    '''
    for word in sentence:
        if word not in stopwords and len(word)!=1:  # skip stopwords and single characters
            counts[word]=counts.get(word,0)+1  # accumulate the frequency
    return None
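A small check of the counting logic (the token and stopword lists are invented for illustration):
counts_demo={}
movestopwords(['冲鸭','的','冲鸭','哦'],['的','哦'],counts_demo)
print(counts_demo)  # {'冲鸭': 2}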
#--------------------------------
def drawcounts(counts,num):
    '''
    Draw the word-frequency bar chart
    Params counts: the word-frequency dict  num: how many top words to plot
    return: None
    '''
    x_axis=[]
    y_axis=[]
    c_order=sorted(counts.items(),key=lambda x:x[1],reverse=True)  # sort by frequency, descending
    #print(c_order)
    for c in c_order[:num]:
        x_axis.append(c[0])
        y_axis.append(c[1])
    # Configure Chinese rendering
    matplotlib.rcParams['font.sans-serif']=['SimHei']  # default font that can display Chinese
    matplotlib.rcParams['axes.unicode_minus']=False  # keep the minus sign from rendering as a box in saved figures
    plt.bar(x_axis,y_axis)
    plt.title('Word frequency statistics')
    plt.show()
    return
def drawcloud(word_f):
    '''
    Draw the word cloud from the word frequencies
    Param word_f: the word-frequency result
    return: None
    '''
    # Load the background image (optional)
    # cloud_mask=np.array(Image.open('cloud.png'))
    # Words to exclude from the cloud
    st=set(["东西","这是"])
    # Build the WordCloud object; wordcloud is a third-party package for drawing word clouds
    wc=WordCloud(background_color='white',
        # mask=cloud_mask,  # background shape; remove this parameter if you have no image
        max_words=150,
        font_path='simhei.ttf',
        min_font_size=10,
        max_font_size=100,
        width=400,  # image size
        relative_scaling=0.3,
        stopwords=st)
    wc.fit_words(word_f)  # draw the cloud from the frequencies
    wc.to_file('pic.png')
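For the optional background-shape step, a minimal sketch assuming a silhouette image cloud.png exists in the working directory (the filename is an assumption; non-white pixels define the outline):
def drawcloud_masked(word_f,mask_path='cloud.png'):
    cloud_mask=np.array(Image.open(mask_path))  # the mask's dark region is where words are drawn
    wc=WordCloud(background_color='white',
        mask=cloud_mask,  # width/height are taken from the mask
        font_path='simhei.ttf',
        max_words=150)
    wc.fit_words(word_f)
    wc.to_file('pic_masked.png')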
#-----------------------------------
def text_detection(text,file_path):
    '''
    Run PaddleHub content moderation over the comments
    Params text: list accumulating the comments to check  file_path: path to the comment file
    return: None (prints the flagged comments)
    '''
    porn_detection_lstm=hub.Module(name="porn_detection_lstm")
    f=open(file_path,'r',encoding='utf-8')
    for line in f:
        if len(line.strip())<=1:  # skip empty or one-character comments
            continue
        else:
            text.append(line)
    f.close()
    input_dict={'text':text}
    results=porn_detection_lstm.detection(data=input_dict,use_gpu=True,batch_size=1)  # set use_gpu=False on CPU-only machines
    print(results)
    for index,item in enumerate(results):  # iterate over the per-comment results
        if item['porn_detection_key']=='porn':
            print(item['text'],':',item['porn_probs'])
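For reference, each element of results should be a dict roughly along these lines (values invented for illustration; the exact fields come from the porn_detection_lstm module):
# {'text': '某条评论', 'porn_detection_label': 0, 'porn_detection_key': 'not_porn',
#  'porn_probs': 0.0121, 'not_porn_probs': 0.9879}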
#------------------------------
#Comments are paginated, so the iQiyi comment API has to be requested repeatedly to collect multiple pages; some comments contain emoji and other special characters
#num is the number of pages; with page_size=20 each request returns up to 20 comments, so raise num accordingly to reach the required 1,000 comments
if __name__ == "__main__":
    num=20  # pages to fetch; raise this (e.g. to 50 at 20 comments per page) for the full 1,000-comment requirement
    lastId='0'  # lastId is the API's pagination cursor
    arr=[]  # list holding all scraped comments
    with open('aqy.txt','a',encoding='utf-8') as f:  # open the file in append mode
        for i in range(num):
            lastId=saveMovieInfoToFile(lastId,arr)
            #print(i)
            time.sleep(0.5)  # frequent requests to the iQiyi API occasionally fail; sleeping 0.5s spaces them out
        for item in arr:
            Item=clear_special_char(item)
            if Item.strip()!='':  # skip comments that are empty after cleaning
                try:
                    f.write(Item+'\n')
                except Exception as e:
                    print('contains special characters')
        print('Total comments scraped:',len(arr))
    f=open('aqy.txt','r',encoding='utf-8')
    counts={}
    stopwords=stopwordslist('cn_stopwords.txt')  # load the stopword list once, not per line
    for line in f:
        words=fenci(line)
        movestopwords(words,stopwords,counts)
    f.close()
    drawcounts(counts,10)  # plot the top-10 frequent words
    drawcloud(counts)  # draw the word cloud
    '''
    Run PaddleHub content moderation over the comments
    '''
    file_path='aqy.txt'
    test_text=[]
    text_detection(test_text,file_path)
    display(Image.open('pic.png'))  # show the generated word-cloud image