自然语言处理 gensim

这里我需要声明,我写的所有文章,都是为了我自己以后复习用的

一、gensim基本上分为以下三个步骤

Corpora and Vector Spaces //语料库和向量空间

Topics and Transformations //主题和变换

Similarity Queries                 //相似性查询

二、一个例子

# A minimal end-to-end gensim pipeline: a bag-of-words corpus,
# a TF-IDF transformation, and a similarity query against the indexed corpus.

# step 1: the corpus — each document is a list of (token_id, weight) pairs
corpus = [
    [(0, 1.0), (1, 1.0), (2, 1.0)],
    [(2, 1.0), (3, 1.0), (4, 1.0), (5, 1.0), (6, 1.0), (8, 1.0)],
    [(1, 1.0), (3, 1.0), (4, 1.0), (7, 1.0)],
    [(0, 1.0), (4, 2.0), (7, 1.0)],
    [(3, 1.0), (5, 1.0), (6, 1.0)],
    [(9, 1.0)],
    [(9, 1.0), (10, 1.0)],
    [(9, 1.0), (10, 1.0), (11, 1.0)],
    [(8, 1.0), (10, 1.0), (11, 1.0)],
]

# step 2: fit a TF-IDF model on the corpus and transform a query vector
from gensim import models, similarities

tfidf = models.TfidfModel(corpus)
query = [(0, 1), (4, 1)]
print(tfidf[query])

# step 3: index the TF-IDF-weighted corpus and score every document
# against the transformed query
index = similarities.SparseMatrixSimilarity(tfidf[corpus], num_features=12)
scores = index[tfidf[query]]
print(list(enumerate(scores)))

三、Corpora and Vector Spaces

  • 内存方式

# The classic gensim tutorial corpus: nine short document titles.
documents = [
    "Human machine interface for lab abc computer applications",
    "A survey of user opinion of computer system response time",
    "The EPS user interface management system",
    "System and human system engineering testing of EPS",
    "Relation of user perceived response time to error measurement",
    "The generation of random binary unordered trees",
    "The intersection graph of paths in trees",
    "Graph minors IV Widths of trees and well quasi ordering",
    "Graph minors A survey",
]

from pprint import pprint
from collections import Counter

# Lowercase, tokenize on whitespace, and drop a small set of stopwords.
stoplist = set('for a of the and to in'.split())
texts = []
for document in documents:
    tokens = document.lower().split()
    texts.append([word for word in tokens if word not in stoplist])

# Count token occurrences across the whole corpus ...
frequency = Counter(token for text in texts for token in text)
# ... and keep only the tokens that appear more than once overall.
texts = [[token for token in text if frequency[token] > 1] for text in texts]

import gensim.corpora

# Map every surviving token to an integer id and persist the mapping.
dictionary = gensim.corpora.Dictionary(texts)
dictionary.save("C:\\Users\\17768\\Desktop\\deerwester.dict")

# Convert each tokenized document into its bag-of-words representation.
corpus = [dictionary.doc2bow(document) for document in texts]
#gensim.corpora.MmCorpus.serialize('C:\\Users\\17768\\Desktop\\deerwester.mm', corpus)  # save to disk
#corpus = corpora.MmCorpus('C:\\Users\\17768\\Desktop\\deerwester.mm')  # load from disk

print(corpus)

  • 磁盘方式

import gensim.corpora

# Build the dictionary by streaming the file one line at a time, so the
# whole corpus never has to be held in memory at once.
stoplist = set('for a of the and to in'.split())
dictionary = gensim.corpora.Dictionary(
    line.lower().split() for line in open('C:\\Users\\17768\\Desktop\\mycorpus.txt'))

# Collect the ids of stopwords present in the dictionary, plus hapaxes
# (tokens whose document frequency is exactly 1), and remove them.
# NOTE: `six.iteritems` was dropped — this file is Python 3 (it uses
# print()), so the plain dict .items() view is the direct equivalent.
stop_ids = [dictionary.token2id[stopword] for stopword in stoplist
            if stopword in dictionary.token2id]
once_ids = [tokenid for tokenid, docfreq in dictionary.dfs.items() if docfreq == 1]
dictionary.filter_tokens(stop_ids + once_ids)
dictionary.compactify()  # re-assign ids to close the gaps left by removed tokens
print(dictionary)


class MyCorpus(object):
    """Memory-friendly corpus: yields one bag-of-words vector per file line."""

    def __iter__(self):
        for line in open('C:\\Users\\17768\\Desktop\\mycorpus.txt'):
            yield dictionary.doc2bow(line.lower().split())


corpus_memory_friendly = MyCorpus()
for vector in corpus_memory_friendly:
    print(vector)

四、Topics and Transformations

import gensim

# Load the serialized bag-of-words corpus and fit a TF-IDF model to it.
corpus = gensim.corpora.MmCorpus('C:\\Users\\17768\\Desktop\\program\\gensim_\\deerwester.mm')
tfidf = gensim.models.TfidfModel(corpus)
# tfidf.save('C:\\Users\\17768\\Desktop\\program\\gensim_\\deerwester.tfidf')  # save to disk
# tfidf = gensim.models.TfidfModel.load('C:\\Users\\17768\\Desktop\\program\\gensim_\\deerwester.tfidf')  # load from disk

# The transformation is applied lazily; print each re-weighted document.
corpus_tfidf = tfidf[corpus]
for document in corpus_tfidf:
    print(document)

# Other transformations that can be chained in the same way:
#model = models.TfidfModel(corpus, normalize=True)
#model = models.LsiModel(tfidf_corpus, id2word=dictionary, num_topics=300)
#model = models.RpModel(tfidf_corpus, num_topics=500)
#model = models.LdaModel(corpus, id2word=dictionary, num_topics=100)
#model = models.HdpModel(corpus, id2word=dictionary)

五、Similarity Queries

import gensim

# Rebuild a 2-topic LSI space from the saved dictionary and corpus,
# then index the whole corpus in that space.
dictionary = gensim.corpora.Dictionary.load('C:\\Users\\17768\\Desktop\\program\\gensim_\\deerwester.dict')
corpus = gensim.corpora.MmCorpus('C:\\Users\\17768\\Desktop\\program\\gensim_\\deerwester.mm')
lsi = gensim.models.LsiModel(corpus, id2word=dictionary, num_topics=2)
index = gensim.similarities.MatrixSimilarity(lsi[corpus])
# index.save('C:\\Users\\17768\\Desktop\\program\\gensim_\\deerwester.index')  # save to disk
# index = similarities.MatrixSimilarity.load('C:\\Users\\17768\\Desktop\\program\\gensim_\\deerwester.index')  # load from disk

# Project the query into LSI space and rank every document against it.
doc = "Human computer interaction"
query_bow = dictionary.doc2bow(doc.lower().split())
query_lsi = lsi[query_bow]
sims = index[query_lsi]
print(list(enumerate(sims)))

六、所有的读写

dict:

#dictionary.save("C:\\Users\\17768\\Desktop\\deerwester.dict")

#dictionary = gensim.corpora.Dictionary.load('C:\\Users\\17768\\Desktop\\deerwester.dict')

corpus:

#gensim.corpora.MmCorpus.serialize('C:\\Users\\17768\\Desktop\\deerwester.mm', corpus)  #写入

#corpus = gensim.corpora.MmCorpus('C:\\Users\\17768\\Desktop\\deerwester.mm') #读入

model:

# tfidf.save('C:\\Users\\17768\\Desktop\\program\\gensim_\\deerwester.tfidf') #写入

# tfidf = gensim.models.TfidfModel.load('C:\\Users\\17768\\Desktop\\program\\gensim_\\deerwester.tfidf') #读入

model index:

# index.save('C:\\Users\\17768\\Desktop\\program\\gensim_\\deerwester.index') #写入

# index = similarities.MatrixSimilarity.load('C:\\Users\\17768\\Desktop\\program\\gensim_\\deerwester.index') #读入

你可能感兴趣的:(机器学习,NLTK)