操作平台: windows10, python37, jupyter
数据下载: https://www.lanzous.com/iaghe8f
import pandas as pd
# Naive Bayes variants available in sklearn:
# BernoulliNB  - Bernoulli (binary) distribution: coin flip, heads/tails, roughly equal odds
# MultinomialNB - multinomial distribution: dice roll, six faces, roughly equal odds
from sklearn.naive_bayes import GaussianNB,MultinomialNB,BernoulliNB
# Read the tab-separated SMS corpus (no header row) and name the two columns.
data = pd.read_csv('../data/SMSSpamCollection',sep = '\t',header=None,names=['target','message'])
data.shape #result is (5572, 2)
data.head()
| | target | message |
|---|---|---|
0 | ham | Go until jurong point, crazy.. Available only ... |
1 | ham | Ok lar... Joking wif u oni... |
2 | spam | Free entry in 2 a wkly comp to win FA Cup fina... |
3 | ham | U dun say so early hor... U c already then say... |
4 | ham | Nah I don't think he goes to usf, he lives aro... |
结果分析: 短信就只有两种类型,ham表示正常的短信,spam表示垃圾短信。
X = data['message'] #feature: the raw SMS text
y = data['target'] #label: 'ham' (normal) or 'spam'
X.unique().size #count of distinct messages after deduplication
5169
结果分析: 原数据中有5572条信息,不重复的有5169条,可以不用去重,它不会影响训练的模型。
# Bag-of-words: turn each message into a sparse vector of token counts.
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer() #stop words could be enabled with stop_words='english'
X_cv = cv.fit_transform(X)
X_cv
<5572x8713 sparse matrix of type '<class 'numpy.int64'>'
	with 74169 stored elements in Compressed Sparse Row format>
查看向量化结果:
print(X_cv)
(0, 8324) 1
(0, 1082) 1
(0, 3615) 1
(0, 7694) 1
(0, 2061) 1
(0, 1765) 1
: :
(5570, 1802) 1
(5570, 3489) 1
(5570, 2905) 1
(5570, 7099) 1
(5570, 1794) 1
(5570, 8120) 1
(5570, 2606) 1
: :
cv.vocabulary_
{'go': 3571,
'until': 8084,
'jurong': 4374,
'point': 5958,
'crazy': 2338,
'available': 1316,
'only': 5571,
'in': 4114,
'bugis': 1767,
'great': 3655,
'world': 8548,
...}
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X_cv,y,test_size =0.2 )
%%time
# NOTE(review): %%time is an IPython cell magic (notebook-only syntax, and it
# must be the first line of its cell) — this file is a notebook paste.
gNB = GaussianNB()
# GaussianNB does not accept sparse input, hence the dense .toarray() copies.
gNB.fit(X_train.toarray(),y_train)
s = gNB.score(X_test.toarray(),y_test)
print(s)
0.8977578475336323
Wall time: 3.03 s
结果分析: 该模型不建议使用高斯分布,它的准确率不高而且比较费时!
%%time
# MultinomialNB works directly on the sparse count matrix — no dense copy needed.
mNB = MultinomialNB()
mNB.fit(X_train,y_train)
print(mNB.score(X_test,y_test))
0.9820627802690582
Wall time: 111 ms
结果分析: 这个模型的可靠性就高很多了,准确率高达98%,仅用时111毫秒。
# BernoulliNB models token presence/absence rather than counts.
bNB = BernoulliNB()
bNB.fit(X_train,y_train)
bNB.score(X_test,y_test)
0.979372197309417
# Baseline comparison: k-nearest neighbours on the same count vectors.
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(X_train,y_train)
knn.score(X_test,y_test)
0.9237668161434978
# Baseline comparison: a decision tree on the same count vectors.
from sklearn.tree import DecisionTreeClassifier
clf = DecisionTreeClassifier()
clf.fit(X_train,y_train)
clf.score(X_test,y_test)
0.9641255605381166
import numpy as np
from sklearn.naive_bayes import MultinomialNB,BernoulliNB
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer
# Read the 50 sample emails: 25 'ham' files then 25 'spam' files, one text
# file each, collected into email_text in that order.
email_text = []
for i in ['ham', 'spam']:
    for j in range(1, 26):
        file_path = '../data/email/%s/%d.txt'%(i, j)
        print(file_path)
        # The with-statement closes the file on exit; the original's extra
        # f.close() after the block was redundant and has been removed.
        with open(file_path) as f:
            email_text.append(f.read())
# Build the labels y. Files were read ham-first, and 'ham' < 'spam'
# alphabetically, so sorting the list lines it up with email_text's order.
y = ['ham', 'spam']*25
y.sort()#after sorting: 25 'ham' followed by 25 'spam'
# Vectorize: bag-of-words counts with English stop words removed.
cv = CountVectorizer(stop_words='english')
X1 = cv.fit_transform(email_text)
# Split into training and test sets
X_train,X_test,y_train,y_test = train_test_split(X1,y,test_size = 0.2)
mNB = MultinomialNB()#multinomial distribution
mNB.fit(X_train,y_train)#train the model
print (mNB.score(X_test,y_test)) #evaluate the model
# TF-IDF weighting: down-weights tokens that appear in many documents.
tfidf = TfidfVectorizer(stop_words='english')
# BUG FIX: the original called tfidf.fit_transform(X), but X still holds the
# 5,572 SMS messages while y was reassigned above to the 50 email labels —
# train_test_split would raise a length-mismatch ValueError. Vectorize the
# 50 emails (email_text) so features and labels line up.
X2 = tfidf.fit_transform(email_text)
X_train,X_test,y_train,y_test = train_test_split(X2,y,test_size = 0.2)
mNB = MultinomialNB()
mNB.fit(X_train,y_train)
print (mNB.score(X_test,y_test))
sklearn 有自带的新闻数据集,直接加载来测试就可以了
import numpy as np
from sklearn.naive_bayes import MultinomialNB,BernoulliNB
import sklearn.datasets as datasets #sklearn's bundled/downloadable datasets
data = datasets.fetch_20newsgroups(subset='all',remove=('headers', 'footers', 'quotes')) #load the 20-newsgroups corpus (downloads on first use)
X = data.data
y = data.target
cv = CountVectorizer(stop_words='english') #ngram_range=(1,2) would also add bigrams
X2 = cv.fit_transform(X)
# Split into training and test sets
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X2,y,test_size = 0.2)
# Train the model
mNB = MultinomialNB()
mNB.fit(X_train,y_train)
# Evaluate the model
print (mNB.score(X_test,y_test))