Original corpus (Sogou Labs): http://www.sogou.com/labs/resource/ca.php
The data used here is also shared on Baidu Cloud for convenient study.
Link: https://pan.baidu.com/s/1cu2uFwBHhKgJb3pWTruIng
Extraction code: kufj
Keyword extraction further below uses jieba.analyse.extract_tags() from the jieba.analyse module.
import pandas as pd
import jieba
import numpy as np
df_news = pd.read_table('data/train.txt', names=['category', 'theme', 'URL', 'content'], encoding='utf-8')
df_news = df_news.dropna()
df_news.shape  # (50000, 4)
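A quick sanity check on the loaded frame before segmenting:
print(df_news.category.unique())  # the news categories (ten in this corpus)
df_news.head()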
content = df_news.content.values.tolist()
content_S = []
for line in content:
    current_segment = jieba.lcut(line)
    if len(current_segment) > 1 and current_segment != ['\r\n']:  # compare with the list form so blank lines are actually skipped
        content_S.append(current_segment)
print(content_S[1000])
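To make the output of jieba.lcut concrete, here is a toy sentence (illustrative only):
print(jieba.lcut('我爱自然语言处理'))  # roughly: ['我', '爱', '自然语言', '处理']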
df_content = pd.DataFrame({'content_S':content_S})
df_content.head()
stopwords = pd.read_csv('stopwords.txt', index_col=False, sep='\t', quoting=3, names=['stopword'], encoding='utf-8')
stopwords.head()
def drop_stopwords(contents, stopwords):
    # Remove stopwords from each segmented line; also collect every kept word for frequency counting.
    contents_clean = []
    all_words = []
    for line in contents:
        line_clean = []
        for word in line:
            if word in stopwords:
                continue
            line_clean.append(word)
            all_words.append(str(word))
        contents_clean.append(line_clean)
    return contents_clean, all_words
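A toy call makes drop_stopwords concrete (hypothetical inputs, purely for illustration):
demo_clean, demo_words = drop_stopwords([['我', '爱', '自然语言', '处理']], ['我', '爱'])
print(demo_clean)  # [['自然语言', '处理']]
print(demo_words)  # ['自然语言', '处理']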
contents = df_content.content_S.values.tolist()
stopwords = stopwords.stopword.values.tolist()
contents_clean, all_words = drop_stopwords(contents, stopwords)
df_all_words = pd.DataFrame({'all_words': all_words})
df_all_words.head()
df_content = pd.DataFrame({'contents_clean': contents_clean})
df_content.head()
words_count = df_all_words.groupby('all_words')['all_words'].agg(count='size')  # dict renaming in agg was removed in pandas 1.0
words_count = words_count.reset_index().sort_values(by='count', ascending=False)
words_count.head()
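An equivalent one-liner, if you prefer: value_counts already counts and sorts in descending order.
words_count_alt = df_all_words['all_words'].value_counts().reset_index()
words_count_alt.columns = ['all_words', 'count']
words_count_alt.head()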
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['figure.figsize'] = (10.0, 5.0)  # figure size in inches
%matplotlib inline
wordcloud = WordCloud(font_path = 'data/simhei.ttf', background_color='white', max_font_size=100)
word_freq = {x[0]:x[1] for x in words_count.head(500).values}
wordcloud = wordcloud.fit_words(word_freq)
plt.imshow(wordcloud)
plt.show()
# plt.savefig('save.png', dpi=100)
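If you want the image on disk at its native resolution, the wordcloud package can also save it directly:
wordcloud.to_file('wordcloud.png')  # writes the rendered cloud as a PNG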
import jieba.analyse
index = 2000
print(df_news['content'][index])
content_S_str = ''.join(content_S[index])
print(' '.join(jieba.analyse.extract_tags(content_S_str, topK=5, withWeight=False)))
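Passing withWeight=True makes extract_tags return (keyword, TF-IDF weight) pairs, which is useful for seeing why each word was picked:
for word, weight in jieba.analyse.extract_tags(content_S_str, topK=5, withWeight=True):
    print(word, round(weight, 4))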
from gensim import corpora, models, similarities
import gensim
# Build the word-to-id mapping (essentially the bag-of-words dictionary)
dictionary = corpora.Dictionary(contents_clean)
corpus = [dictionary.doc2bow(sentence) for sentence in contents_clean]
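doc2bow maps each document to a sparse list of (token_id, count) pairs; a quick look at the first document shows the format:
print(corpus[0][:10])                  # [(token_id, count), ...]
print(dictionary[corpus[0][0][0]])     # map the first token id back to its word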
lda = gensim.models.ldamodel.LdaModel(corpus=corpus, id2word=dictionary, num_topics=20)
print(lda.print_topic(1))
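To inspect all 20 topics at once, or the topic mixture of a single document, gensim's print_topics and get_document_topics can be used:
for topic_id, topic in lda.print_topics(num_topics=20, num_words=5):
    print(topic_id, topic)
print(lda.get_document_topics(corpus[0]))  # (topic_id, probability) pairs for the first document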
df_train = pd.DataFrame({'contents_clean': contents_clean, 'label': df_news['category']})
df_train.tail()
df_train.label.unique()
label_mapping = {"汽车": 1, "财经": 2, "科技": 3, "健康": 4, "体育": 5, "教育": 6, "文化": 7, "军事": 8, "娱乐": 9, "时尚": 0}
df_train['label'] = df_train['label'].map(label_mapping)
df_train.head()
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(df_train['contents_clean'].values, df_train['label'].values)
# Split the data into random training and test sets (train_test_split defaults to a 75% / 25% split)
len(x_test)
words = []
for line_index in range(len(x_train)):
    try:
        words.append(' '.join(x_train[line_index]))
    except Exception:
        print(line_index)  # report any line that fails to join
words[0]
from sklearn.feature_extraction.text import CountVectorizer
vec = CountVectorizer(analyzer='word', max_features=4000, lowercase=False)
vec.fit(words)
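After fitting, the vocabulary and the shape of the document-term matrix are worth checking (get_feature_names_out needs scikit-learn >= 1.0; older versions use get_feature_names):
print(vec.transform(words).shape)         # (n_documents, 4000)
print(vec.get_feature_names_out()[:10])   # first few words in the vocabulary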
from sklearn.naive_bayes import MultinomialNB
classifier = MultinomialNB()
classifier.fit(vec.transform(words), y_train)
test_words = []
for line_index in range(len(x_test)):
    try:
        test_words.append(' '.join(x_test[line_index]))
    except Exception:
        print(line_index)  # report any line that fails to join
test_words[0]
classifier.score(vec.transform(test_words), y_test)  # final classification accuracy comes out around 0.83
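To classify a new (already segmented and space-joined) article, transform it with the same vectorizer and map the numeric prediction back through label_mapping; a minimal sketch reusing the objects above:
inverse_mapping = {v: k for k, v in label_mapping.items()}
pred = classifier.predict(vec.transform([test_words[0]]))[0]
print(inverse_mapping[pred])  # decoded category name of the first test article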
These notes are organized from the machine learning tutorial by Tang Yudi (唐宇迪).
Edited: 2018-10-05
I'm just a beginner; advice and corrections are very welcome.