亚马逊产品情感评论分析

  • 爬取亚马逊网站评论信息并存入excel表
import requests
from lxml import etree
import re
import xlwt

from openpyxl import workbook  # 写入Excel表所用
from openpyxl import load_workbook  # 读取excel
# import matplotlib.pylab as plt
from xlrd import book

# Request headers: a browser User-Agent plus a captured session cookie so that
# Amazon serves the real review page instead of a bot challenge.
# NOTE(review): the Cookie value is a hard-coded session capture and will
# expire — it must be refreshed before the scraper is re-run.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:70.0) Gecko/20100101 Firefox/70.0',
    'Cookie': 'x-wl-uid=1DVw4k4T/jAduWIfwW2jvf029Ha4Bgv/AJGjP/yRfJTdq26dr7oDdeEBdb6zOPUl0ByfsaKJ3GUY=; session-id-time=2082729601l; session-id=457-7649276-4174543; csm-hit=tb:DAHATSQRZZBWHWD4ZXYP+s-T61YJHRDEC6Y6S2VMTVZ|1573355007668&t:1573355007668&adb:adblk_no; ubid-acbcn=459-2457809-1906210; session-token="4sZGQQPKw9CJUOzJFLsTdS3FtlpqIyp0hyvhXL6RMOchbDf7p7YLDEL90YFps2Hl80fBT6uPmzQ00meCLYxsrjuoabX3+kz7OB+CLw8GaAYZB8J9oBBcJLBUsGs6LLm/EHQht5Tm0IpOKR0hz0GGtATgcpJXDfRoEdvNol+CUc3mXOMA5KmEfFWstdV+KwyzSGrGW+DdrAftisgZMl2stffIdhcOLh53B4tJwsR5awKqPrOqZF8uJg=="; lc-acbcn=zh_CN; i18n-prefs=CNY'
}  # mimic a real browser so the crawler is less likely to be blocked
hao = []    # good reviews  (currently unused — only filled by commented-out code below)
zhong = []  # neutral reviews (currently unused)
cha = []    # bad reviews   (currently unused)
all_info_list = []  # one [comment, color/size, stars, date] row per scraped review
def parge_page(url):
    """Fetch one Amazon review-list page and collect its reviews.

    Appends one ``[comment, color/size, stars, date]`` row per review to the
    module-level ``all_info_list`` and prints each row as a dict for progress.

    :param url: URL of a product-reviews page (with a ``pageNumber`` query arg)
    """
    response = requests.get(url=url, headers=headers)
    html = etree.HTML(response.text)
    # Every direct child <div> of the review list is one reviewer's entry.
    reviews = html.xpath('//div[@id="cm_cr-review_list"]/div')
    for review in reviews:
        stars_texts = review.xpath('.//span[@class="a-icon-alt"]/text()')  # e.g. "5.0 颗星"
        comment = review.xpath('.//span[@data-hook="review-body"]/span/text()')
        date = review.xpath('.//span[@data-hook="review-date"]/text()')
        color = review.xpath('.//a[@ data-hook="format-strip"]/text()')
        for stars_text in stars_texts:
            # Strip the trailing "颗星" ("stars") suffix, leaving just the number.
            stars = re.sub('颗星', '', stars_text)
            row = {'评论': comment, '颜色和尺寸': color, '评分': stars, '日期': date}
            print(row)  # progress/debug output
            all_info_list.append([comment, color, stars, date])

def main():
    """Crawl 10 pages of reviews (~10 reviews per page, ~100 total)."""
    # Amazon review pagination starts at pageNumber=1. The original loop
    # generated pages 0-9 and — the actual bug — never called parge_page,
    # so nothing was ever scraped.
    for page in range(1, 11):
        url = 'https://www.amazon.com/product-reviews/B07XGK5QXD/?pageNumber=' + str(page)
        parge_page(url)
        
if __name__ == '__main__':
    main()  # crawl the reviews; rows accumulate in all_info_list
    # xlwt can only produce legacy BIFF (.xls) workbooks; the original saved
    # one under an .xlsx extension, which Excel/openpyxl then reject as
    # corrupt. Save with the matching .xls extension instead.
    wb = xlwt.Workbook(encoding='utf-8')
    sheet = wb.add_sheet('sheet1')
    head = ['评论', '颜色和尺寸', '评分', '日期']  # header row
    for col, title in enumerate(head):
        sheet.write(0, col, title)
    # Data rows start at row 1, just below the header.
    for row_idx, row in enumerate(all_info_list, start=1):
        for col_idx, cell in enumerate(row):
            sheet.write(row_idx, col_idx, cell)
    wb.save('评论信息.xls')
    print('完成', wb)
如果还想爬取其他信息,自己添加 XPath!
  • 导入爬取的评论信息,并清洗数据
import pandas as pd

data_path = '8-8026.xlsx'
# read_excel never honoured an encoding argument for .xlsx files; the kwarg
# was removed in pandas 1.0 and raises TypeError on modern versions.
df = pd.read_excel(data_path)
print(len(df))   # number of raw rows
print(type(df))  # pandas.DataFrame
df = df.dropna()  # drop rows containing NaN (missing data)
print('清除缺失数据后:')
print(len(df))
print(type(df))
# Deduplicate the first column; iloc[:, 0] selects column 0 of every row.
df = pd.DataFrame(df.iloc[:, 0].unique())
print(len(df))

def str_unique(raw_str, reverse=False):
    """Collapse duplicate characters, keeping one occurrence of each.

    Used to squash mechanically repeated filler such as
    "我喜欢喜欢喜欢该商品" down to its distinct characters.

    :param raw_str: input string
    :param reverse: when True, dedupe from the right-hand end (keeps the
        LAST occurrence of each character instead of the first)
    :return: deduplicated string, in the original reading order
    """
    text = raw_str[::-1] if reverse else raw_str
    # dict preserves insertion order, so fromkeys keeps first occurrences.
    deduped = ''.join(dict.fromkeys(text))
    return deduped[::-1] if reverse else deduped

# Mechanical-compression dedupe: run str_unique from the left, then again
# from the right, so repeats anchored at either end of a comment collapse.
ser1 = df.iloc[:, 0].apply(str_unique)	# indexing a single column yields a Series
df2 = pd.DataFrame(ser1.apply(str_unique, reverse=True))	# wrap back into a DataFrame
print('机械压缩去词后:')
print(len(df2))
print(type(df2))
print('------------------')

# Short-sentence filter: keep only comments of at least 4 characters.
df3 = df2[df2.iloc[:, 0].apply(len) >= 4]
print('短句过滤后:')
print(len(df3))
print('------------------')
  • 导入情感分析语料库,处理好评和差评
from snownlp import SnowNLP  # 情感分析语言处理库

# SnowNLP sentiment score: probability the text is positive, in [0, 1];
# the closer to 1, the more positive the wording.
coms = df3.iloc[:, 0].apply(lambda x: SnowNLP(x).sentiments)
positive_df = df3[coms >= 0.9]  # confidently positive reviews
negative_df = df3[coms < 0.1]  # confidently negative reviews

print('好评')
print(positive_df)
print('差评')
print(negative_df)
  • jieba分词
import jieba

def _segment(sentence):
    """Tokenise one comment with jieba and rejoin the tokens with spaces."""
    return ' '.join(jieba.cut(sentence))

# One comment per line, no header row in either input file.
data1 = pd.read_csv('comments_post.txt', encoding='utf-8', header=None)
data2 = pd.read_csv('comments_neg.txt', encoding='utf-8', header=None)

data1 = data1[0].apply(_segment)
data2 = data2[0].apply(_segment)

# utf_8_sig writes a BOM so Excel/Notepad detect the encoding correctly.
data1.to_csv('好评原因.txt', index=False, header=False, encoding='utf_8_sig')
data2.to_csv('差评原因.txt', index=False, header=False, encoding='utf_8_sig')
print(data2)
  • 主题分析
from gensim import corpora, models
#
# NOTE(review): error_bad_lines was deprecated in pandas 1.3 and removed in
# 2.0 — on modern pandas use on_bad_lines='skip' instead.
post = pd.read_csv('好评原因.txt',encoding='utf-8',header=None,error_bad_lines=False)
neg = pd.read_csv('差评原因.txt',encoding='utf-8',header=None,error_bad_lines=False)
# sep='tipdm' is a token that never occurs in the file, forcing pandas to
# read one whole stop word per row instead of splitting on commas.
stop = pd.read_csv('stopwords.txt',header=None,sep='tipdm',engine='python')

stop = [' ',''] + list(stop[0])   # treat the space and empty string as stop words too

def _clean_tokens(frame):
    """Split each space-joined line into tokens and drop stop words.

    Adds column 1 (raw token list) and column 2 (filtered token list) in
    place, matching the original script's column layout.
    """
    frame[1] = frame[0].apply(lambda s: s.split(' '))
    frame[2] = frame[1].apply(lambda tokens: [t for t in tokens if t not in stop])

def _print_topics(label, docs, num_topics=3):
    """Train an LDA model on the tokenised docs and print each topic."""
    dictionary = corpora.Dictionary(docs)          # build the vocabulary
    corpus = [dictionary.doc2bow(doc) for doc in docs]
    lda = models.LdaModel(corpus, num_topics=num_topics, id2word=dictionary)
    for i in range(num_topics):
        print(label, lda.print_topic(i))

_clean_tokens(post)
_clean_tokens(neg)

_print_topics("正面主题", post[2])  # positive-review topics
_print_topics("负面主题", neg[2])   # negative-review topics

你可能感兴趣的:(python,数据分析,xpath)