TF-IDF(term frequency–inverse document frequency)是一种用于信息检索与数据挖掘的常用加权技术。TF意思是词频(Term Frequency),IDF意思是逆文本频率指数(Inverse Document Frequency)。
TF-IDF实际上是:TF * IDF,TF词频(Term Frequency),IDF逆向文件频率(Inverse Document Frequency)。
TF表示词条在文档d中出现的频率。
IDF的主要思想是:如果包含词条t的文档越少,也就是n越小,IDF越大,则说明词条t具有很好的类别区分能力。
注意事项:
userdict.txt :是我需要增加专业性的词汇的词库
stop_words.txt:是我的停用词词库
df_data :数据库中100篇文章的text文档(这里df_data是一个文档集,每篇文章占据一行,100篇文章则是100行显示。)读取的时候默认是每一行识别为一篇文章
完整代码可以直接复制使用:
#! /usr/bin/python
# -*- coding: utf8 -*-
# @Time : 2019/3/4 15:40
# @Author : yukang
# 来源:https://blog.csdn.net/a2099948768/article/details/89189587
import sys,codecs
import pandas as pd
import numpy as np
import jieba.posseg
import jieba.analyse
from pyquery import PyQuery
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from MysqlDBPool import *
from Kmeans_word import *
"""
TF-IDF权重
1,CountVectorizer 构建词频举证
2,TfidfTransformer 构建tfidf权重计算
3,文本的关键词
4,对应的tfidf矩阵
"""
jieba.load_userdict('./userdict.txt') # 加载外部 用户词典
# Text preprocessing: strip markup, segment, drop stopwords, filter by POS tag
def dataPrepos(text, stopkey):
    """Return the segmented words of *text* that are not stopwords and whose
    part-of-speech tag is in the whitelist below.

    Args:
        text: raw (possibly HTML) document text.
        stopkey: collection of stopwords to exclude.

    Returns:
        list of surviving word strings, in segmentation order.
    """
    # POS whitelist — nouns, verbs, adjectives, adverbs, etc.
    # (tag meanings: https://blog.csdn.net/a2099948768/article/details/82216906)
    kept_pos = ['n', 'nz', 'v', 'vd', 'vn', 'l', 'a', 'd']
    plain_text = PyQuery(text).text()  # strip any HTML markup first
    return [token.word
            for token in jieba.posseg.cut(plain_text)
            if token.word not in stopkey and token.flag in kept_pos]
# tf-idf: extract the top-K keywords of every document in the corpus
def getKeywords_tfidf(data, stopkey, topK):
    """Extract the topK highest tf-idf weighted keywords for each document.

    Args:
        data: DataFrame with columns 'id', 'title', 'abstract'.
        stopkey: collection of stopwords passed through to dataPrepos.
        topK: maximum number of keywords to keep per document. Clamped to
            the vocabulary size — the original indexed range(0, topK)
            unconditionally and raised IndexError when the corpus produced
            fewer than topK distinct words.

    Returns:
        DataFrame with columns ['id', 'title', 'key'] where 'key' is a
        space-joined, UTF-8 encoded keyword string.
    """
    idList, titleList, abstractList = data['id'], data['title'], data['abstract']
    # Build the corpus: one whitespace-joined, preprocessed document per row.
    corpus = []
    for index in range(len(idList)):
        text = '%s。%s' % (titleList[index], abstractList[index])
        text = dataPrepos(text, stopkey)  # segment / filter the raw text
        corpus.append(" ".join(text))
    # 1. Term-frequency matrix: X[i][j] = count of word j in document i.
    vectorizer = CountVectorizer()
    X = vectorizer.fit_transform(corpus)
    # 2. Convert raw counts to tf-idf weights.
    transformer = TfidfTransformer()
    tfidf = transformer.fit_transform(X)
    # 3. Vocabulary of the bag-of-words model.
    #    get_feature_names() was removed in scikit-learn 1.2; prefer the
    #    replacement and fall back for older installations.
    try:
        word = vectorizer.get_feature_names_out()
    except AttributeError:
        word = vectorizer.get_feature_names()
    # 4. Dense tf-idf matrix: weight[i][j] = tf-idf of word j in document i.
    weight = tfidf.toarray()
    # 5. Rank the vocabulary per document and collect the top keywords.
    ids, titles, keys = [], [], []
    for i in range(len(weight)):
        ids.append(idList[i])
        titles.append(titleList[i])
        # Pair every vocabulary word with its weight in document i.
        word_weight = pd.DataFrame({'word': list(word), 'weight': weight[i]},
                                   columns=['word', 'weight'])
        getkeywords_kmeans(word_weight, 100)  # cluster the keywords (side effect only)
        word_weight = word_weight.sort_values(by='weight', ascending=False)
        keyword = np.array(word_weight['word'])
        # Clamp to the vocabulary size to avoid IndexError on small corpora.
        word_split = " ".join(keyword[x] for x in range(min(topK, len(keyword))))
        # NOTE(review): encode() is a python-2 leftover — on python 3 this
        # stores bytes in the 'key' column. Kept as-is so the output format
        # stays unchanged for downstream consumers; confirm before removing.
        keys.append(word_split.encode("utf-8"))
    result = pd.DataFrame({"id": ids, "title": titles, "key": keys},
                          columns=["id", "title", "key"])
    return result
def main():
    """Load 100 articles from MySQL and print their tf-idf keyword table."""
    topK = 150  # number of keywords kept per article
    select_data = "SELECT ID,TITLE,FACT,RESULT,BASIS FROM `wenshu_detail` WHERE ID>0 AND ID<101"
    all_data = mysql_pool_client.exec_query(select_data)
    # One row per article: id, title, and the concatenated body fields.
    df_data = pd.DataFrame([[i[0], i[1], i[2] + i[3] + i[4]] for i in all_data],
                           columns=['id', 'title', 'abstract'])
    # # 读取数据集
    # dataFile = "./text.csv"
    # data = pd.read_csv(dataFile)
    # Stopword list, one word per line. The original called
    # codecs.open(...).readlines() without closing, leaking the file handle;
    # a context manager guarantees the file is closed.
    with codecs.open("./stop_words.txt", "r", encoding="utf-8") as f:
        stopkey = [w.strip() for w in f]
    # tf-idf keyword extraction over the whole corpus
    result = getKeywords_tfidf(df_data, stopkey, topK)
    print(result)

if __name__ == '__main__':
    main()