2021SC @SDUSC
1. Splitting the text into valid tokens
A "token" is simply a case-insensitive word, i.e. one of the strings obtained by splitting text on spaces and similar delimiters. "tokens" denotes a list of tokens; a token may occur several times in a tokens list (which means the word occurred several times in the original text). After reading in the CSV files, we compute the tokens of every record in both the Amazon and the Google datasets.
At the same time, tokens such as "is" and "of" contribute nothing to the later token analysis, so we read in stopwords.txt and remove them, as sketched below.
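A minimal sketch of this step, assuming plain-text records; simple_tokenize and its splitting regex are illustrative, not part of the assignment code:

import re

def simple_tokenize(string):
    # Split on any run of non-alphanumeric characters and lowercase everything
    return [t for t in re.split(r'\W+', string.lower()) if t]

# One stop word per line in stopwords.txt, as described above
with open('stopwords.txt', encoding='utf-8') as f:
    STOPWORDS = set(line.strip() for line in f)

def tokenize(string):
    # Keep only tokens that are not stop words
    return [t for t in simple_tokenize(string) if t not in STOPWORDS]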
2. Computing TF-IDF
Computing TF: for a token in a tokens list,
TF(token) = (number of occurrences of the token in the list) / (total number of tokens in the list)
In Python, a dictionary can hold all the TF values of a single record, as in the sketch below.
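A minimal dictionary-based TF sketch (tf is illustrative, not jieba or assignment code):

def tf(tokens):
    # Count each token, then divide by the total number of tokens
    counts = {}
    for t in tokens:
        counts[t] = counts.get(t, 0) + 1
    total = len(tokens)
    return {t: c / total for t, c in counts.items()}

# tf(['a', 'b', 'a']) -> {'a': 0.666..., 'b': 0.333...}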
Computing IDF: for a token occurring in the collection of tokens lists,
IDF(token) = (number of tokens lists) / (number of tokens lists in which the token appears)
Computing TF-IDF: TF-IDF(token) = TF(token) * IDF(token)
As these formulas show, a word's TF-IDF grows with the number of times it appears in a document and shrinks the more documents of the corpus contain it. The algorithm for automatic keyword extraction is therefore clear: compute the TF-IDF value of every word in the document, sort in descending order, and take the top few words.
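Putting the pieces together (reusing the tf sketch above; idf_dict is assumed to map each token to its IDF value):

def extract_keywords(tokens, idf_dict, top_k=5):
    # Score each token by TF * IDF, then return the top_k highest scorers
    scores = {t: w * idf_dict.get(t, 0.0) for t, w in tf(tokens).items()}
    return sorted(scores, key=scores.get, reverse=True)[:top_k]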
Pros and cons
TF-IDF's advantages are that it is simple, fast, and easy to understand. Its drawback is that word frequency alone is not always an adequate measure of a word's importance in a text: important words may not occur often, and the computation captures neither positional information nor how important a word is in its context.
STOP_WORDS is a set of semantically empty function words; setting stop words keeps them from being added as tokens.
Build the vocabulary and count words: iterate over the documents, count word occurrences, and keep the counts in a dictionary.
Then compute IDF: keep a dictionary with each word as a key, initialised to 0. While iterating over the word counts of all documents, tally for each word the number of documents it appears in, then apply the formula above to obtain its IDF value, as in the sketch below.
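A corpus-level sketch of that procedure (corpus is assumed to be a list of token lists, one per document; no logarithm is applied, matching the definition above):

def idf(corpus):
    # For each word, count the number of documents containing it
    doc_count = {}
    for tokens in corpus:
        for t in set(tokens):          # set(): each document counts at most once per word
            doc_count[t] = doc_count.get(t, 0) + 1
    n = len(corpus)
    # IDF(token) = total documents / documents containing the token
    return {t: n / c for t, c in doc_count.items()}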
The classes below are excerpted from jieba/analyse/tfidf.py; the module-level imports and helpers they rely on are shown first for context.

import os
import jieba
import jieba.posseg
from operator import itemgetter

_get_module_path = lambda path: os.path.normpath(
    os.path.join(os.getcwd(), os.path.dirname(__file__), path))
_get_abs_path = jieba._get_abs_path
DEFAULT_IDF = _get_module_path("idf.txt")

class KeywordExtractor(object):

    STOP_WORDS = set((
        "the", "of", "is", "and", "to", "in", "that", "we", "for", "an", "are",
        "by", "be", "as", "on", "with", "can", "if", "from", "which", "you", "it",
        "this", "then", "at", "have", "all", "not", "one", "has", "or", "that"
    ))

    def set_stop_words(self, stop_words_path):
        # Register a custom stop-word file: every line becomes a stop word
        abs_path = _get_abs_path(stop_words_path)
        if not os.path.isfile(abs_path):
            raise Exception("jieba: file does not exist: " + abs_path)
        content = open(abs_path, 'rb').read().decode('utf-8')
        for line in content.splitlines():
            self.stop_words.add(line)

    def extract_tags(self, *args, **kwargs):
        raise NotImplementedError
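At the package level, jieba exposes this through jieba.analyse, so callers typically write (the file name is illustrative):

import jieba.analyse

jieba.analyse.set_stop_words('stopwords.txt')   # one stop word per line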
class IDFLoader(object):

    def __init__(self, idf_path=None):
        self.path = ""
        self.idf_freq = {}        # word -> IDF value
        self.median_idf = 0.0     # fallback for words missing from the table
        if idf_path:
            self.set_new_path(idf_path)

    def set_new_path(self, new_idf_path):
        # Reload the IDF table only when the path actually changes
        if self.path != new_idf_path:
            self.path = new_idf_path
            content = open(new_idf_path, 'rb').read().decode('utf-8')
            self.idf_freq = {}
            for line in content.splitlines():
                word, freq = line.strip().split(' ')
                self.idf_freq[word] = float(freq)
            self.median_idf = sorted(
                self.idf_freq.values())[len(self.idf_freq) // 2]

    def get_idf(self):
        return self.idf_freq, self.median_idf
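set_new_path also pins down the expected IDF file format: one entry per line, a word and its precomputed IDF value separated by a single space. An illustrative fragment (words and values below are made up):

word1 11.7392043071
word2 9.6229803119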
class TFIDF(KeywordExtractor):

    def __init__(self, idf_path=None):
        self.tokenizer = jieba.dt               # default tokenizer
        self.postokenizer = jieba.posseg.dt     # part-of-speech tokenizer
        self.stop_words = self.STOP_WORDS.copy()
        self.idf_loader = IDFLoader(idf_path or DEFAULT_IDF)
        self.idf_freq, self.median_idf = self.idf_loader.get_idf()

    def set_idf_path(self, idf_path):
        # Point the extractor at a custom IDF file
        new_abs_path = _get_abs_path(idf_path)
        if not os.path.isfile(new_abs_path):
            raise Exception("jieba: file does not exist: " + new_abs_path)
        self.idf_loader.set_new_path(new_abs_path)
        self.idf_freq, self.median_idf = self.idf_loader.get_idf()
    def extract_tags(self, sentence, topK=20, withWeight=False, allowPOS=(), withFlag=False):
        if allowPOS:
            allowPOS = frozenset(allowPOS)
            words = self.postokenizer.cut(sentence)   # keep part-of-speech flags
        else:
            words = self.tokenizer.cut(sentence)
        freq = {}
        for w in words:
            if allowPOS:
                if w.flag not in allowPOS:
                    continue
                elif not withFlag:
                    w = w.word
            wc = w.word if allowPOS and withFlag else w
            # Skip single characters and stop words
            if len(wc.strip()) < 2 or wc.lower() in self.stop_words:
                continue
            freq[w] = freq.get(w, 0.0) + 1.0
        total = sum(freq.values())
        # Turn raw counts into TF-IDF: (count / total) * IDF, with the median
        # IDF as the fallback for words missing from the table
        for k in freq:
            kw = k.word if allowPOS and withFlag else k
            freq[k] *= self.idf_freq.get(kw, self.median_idf) / total
        if withWeight:
            tags = sorted(freq.items(), key=itemgetter(1), reverse=True)
        else:
            tags = sorted(freq, key=freq.__getitem__, reverse=True)
        if topK:
            return tags[:topK]
        else:
            return tags
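This method is what runs when callers use the package-level helper jieba.analyse.extract_tags, for example:

import jieba.analyse

text = '自然语言处理是人工智能领域的一个重要方向'
# Print the top 5 keywords together with their TF-IDF weights
for word, weight in jieba.analyse.extract_tags(text, topK=5, withWeight=True):
    print(word, weight)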