from gensim.models import CoherenceModel
from gensim.corpora.dictionary import Dictionary
from gensim.models.ldamodel import LdaModel
import pandas as pd
import nltk
from multiprocessing import freeze_support

# Download the stopword data required by NLTK
nltk.download('stopwords')


def main():
    # Read the Excel file
    data = pd.read_excel("Laos news overall.xlsx")

    # The '内容' (content) column holds the news text
    titles = data['内容'].tolist()

    # Initialize the tokenizer and the English stopword list
    stop_words = set(nltk.corpus.stopwords.words('english'))
    tokenizer = nltk.RegexpTokenizer(r'\w+')

    # Tokenize each document and remove stopwords
    tokenized_titles = []
    for title in titles:
        words = tokenizer.tokenize(title)
        words = [word for word in words if word.lower() not in stop_words]
        tokenized_titles.append(words)

    # Build the dictionary and the bag-of-words corpus
    dictionary = Dictionary(tokenized_titles)
    corpus = [dictionary.doc2bow(title) for title in tokenized_titles]

    # Train the LDA model
    num_topics = 10
    lda = LdaModel(corpus, num_topics=num_topics, id2word=dictionary)

    # Compute the c_v coherence score
    coherence_model = CoherenceModel(model=lda, texts=tokenized_titles,
                                     dictionary=dictionary, coherence='c_v')
    coherence_score = coherence_model.get_coherence()
    print(f"Coherence Score: {coherence_score}")


if __name__ == '__main__':
    freeze_support()
    main()
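
# --- Example: choosing num_topics by coherence ---
# A minimal sketch, not part of the original script: it reuses the same
# bag-of-words corpus, dictionary, and tokenized texts to train one LDA
# model per candidate topic count and rank the counts by c_v coherence.
# The helper name `sweep_num_topics`, the candidate range, and the fixed
# random_state are assumptions made for illustration.
def sweep_num_topics(corpus, dictionary, texts, candidates=range(5, 21, 5)):
    """Return (num_topics, coherence) pairs sorted best-first."""
    scores = []
    for k in candidates:
        model = LdaModel(corpus, num_topics=k, id2word=dictionary,
                         random_state=42)
        cm = CoherenceModel(model=model, texts=texts,
                            dictionary=dictionary, coherence='c_v')
        scores.append((k, cm.get_coherence()))
    return sorted(scores, key=lambda pair: pair[1], reverse=True)

# Possible usage (called inside main() once corpus and dictionary exist):
#     ranking = sweep_num_topics(corpus, dictionary, tokenized_titles)
#     print(f"Best num_topics by c_v coherence: {ranking[0]}")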