'''
Create the functions that convert a vocabulary list into word vectors
'''
# Create sample training data
def loadDataSet():
    postingList=[['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],  # tokenized posts
                 ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
                 ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
                 ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
                 ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
                 ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
    classVec=[0,1,0,1,0,1]  # 1 means abusive, 0 means a normal post
    return postingList,classVec
# Build a list of all unique words that appear in the documents
def createVocabList(dataSet):
    vocabSet=set([])
    for document in dataSet:
        vocabSet=vocabSet|set(document)  # set union
    return list(vocabSet)  # return a list
# Convert a document into a vector indicating whether each vocabulary word appears in it
def setOfWords2Vec(vocabList,inputSet):
    returnVec=[0]*len(vocabList)  # a zero vector with the same length as the vocabulary
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)]=1
        else:
            print('the word: %s is not in my vocabulary!'%word)
    return returnVec
# Test
listOPosts,listClasses=loadDataSet()
myVocabList=createVocabList(listOPosts)
myVocabList  # the vocabulary
['flea', 'help', 'is', 'has', 'I', 'posting', 'garbage', 'maybe', 'stop', 'steak', 'dog',
 'stupid', 'not', 'park', 'ate', 'quit', 'mr', 'licks', 'food', 'how', 'him', 'my', 'so',
 'buying', 'problems', 'cute', 'dalmation', 'to', 'love', 'please', 'worthless', 'take']
setOfWords2Vec(myVocabList,listOPosts[0])
[1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0]
import numpy as np
def trainNB0(trainMatrix,trainCategory):
    numTrainDocs=len(trainMatrix)  # number of training documents
    numWords=len(trainMatrix[0])  # number of entries in each document vector
    pAbusive=sum(trainCategory)/float(numTrainDocs)  # probability that a document is abusive
    p0Num=np.zeros(numWords)  # word-count vectors initialized to zeros
    p1Num=np.zeros(numWords)
    p0Denom=0.0  # denominators
    p1Denom=0.0
    for i in range(numTrainDocs):
        if trainCategory[i]==1:
            p1Num+=trainMatrix[i]  # vector addition
            p1Denom+=sum(trainMatrix[i])  # accumulate counts for the abusive-class conditionals p(w1|c1),p(w2|c1),...
        else:
            p0Num+=trainMatrix[i]  # accumulate counts for the non-abusive-class conditionals p(w1|c0),p(w2|c0),...
            p0Denom+=sum(trainMatrix[i])
    p1Vect=p1Num/p1Denom
    p0Vect=p0Num/p0Denom
    return p0Vect,p1Vect,pAbusive  # conditional-probability arrays for the non-abusive and abusive classes, and the abusive-class prior
# Test
# Fill the trainMat list
trainMat=[]
for postinDoc in listOPosts:
    trainMat.append(setOfWords2Vec(myVocabList,postinDoc))
# Compute the probabilities
p0V,p1V,pAb=trainNB0(trainMat,listClasses)
pAb
0.5
p0V
array([0.04166667, 0.04166667, 0.04166667, 0.04166667, 0.04166667,
0. , 0. , 0. , 0.04166667, 0.04166667,
0.04166667, 0. , 0. , 0. , 0.04166667,
0. , 0.04166667, 0.04166667, 0. , 0.04166667,
0.08333333, 0.125 , 0.04166667, 0. , 0.04166667,
0.04166667, 0.04166667, 0.04166667, 0.04166667, 0.04166667,
0. , 0. ])
p1V
array([0. , 0. , 0. , 0. , 0. ,
0.05263158, 0.05263158, 0.05263158, 0.05263158, 0. ,
0.10526316, 0.15789474, 0.05263158, 0.05263158, 0. ,
0.05263158, 0. , 0. , 0.05263158, 0. ,
0.05263158, 0. , 0. , 0.05263158, 0. ,
0. , 0. , 0.05263158, 0. , 0. ,
0.10526316, 0.05263158])
When we use the Bayes classifier on documents, two problems arise:
1. We have to compute the product of many probabilities, i.e. p(w1|c1)p(w2|c1)...; if any one factor is 0, the whole product becomes 0. To reduce this effect we smooth the estimates: add 1 to every numerator and add Ni to the denominator, where Ni is the number of possible values of the i-th attribute. This is called Laplace smoothing. In this example each word can take 2 values (present or absent), so every denominator gets +2 and every numerator gets +1.
2. Multiplying many small numbers causes numerical underflow; the usual fix is to work with the logarithm of the product instead (see the short numeric sketch below).
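As a quick illustration of the underflow problem, here is a minimal sketch (not part of the original code; the 200 probabilities of 0.01 are made up for demonstration): the direct product underflows to 0.0 in double precision, while the sum of logs stays usable, and since log is monotonic the class comparison is unchanged.
import numpy as np
probs=np.full(200,0.01)        # 200 small conditional probabilities (made-up values)
print(np.prod(probs))          # 0.0 -- the direct product underflows
print(np.sum(np.log(probs)))   # about -921.03 -- the log of the product, computed safely as a sum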
'''
Here we modify the classifier above as follows:
p0Num=np.ones(numWords)
p1Num=np.ones(numWords)
p0Denom=2.0
p1Denom=2.0
p1Vect=np.log(p1Num/p1Denom)
p0Vect=np.log(p0Num/p0Denom)
'''
def trainNB0(trainMatrix,trainCategory):
    numTrainDocs=len(trainMatrix)  # number of training documents
    numWords=len(trainMatrix[0])  # number of entries in each document vector
    pAbusive=sum(trainCategory)/float(numTrainDocs)  # probability that a document is abusive
    p0Num=np.ones(numWords)  # word-count vectors initialized to ones (Laplace smoothing)
    p1Num=np.ones(numWords)
    p0Denom=2.0  # denominators initialized to 2 (Laplace smoothing)
    p1Denom=2.0
    for i in range(numTrainDocs):
        if trainCategory[i]==1:
            p1Num+=trainMatrix[i]  # vector addition
            p1Denom+=sum(trainMatrix[i])  # accumulate counts for the abusive-class conditionals p(w1|c1),p(w2|c1),...
        else:
            p0Num+=trainMatrix[i]  # accumulate counts for the non-abusive-class conditionals p(w1|c0),p(w2|c0),...
            p0Denom+=sum(trainMatrix[i])
    p1Vect=np.log(p1Num/p1Denom)  # take logs
    p0Vect=np.log(p0Num/p0Denom)
    return p0Vect,p1Vect,pAbusive  # conditional-probability arrays for the non-abusive and abusive classes, and the abusive-class prior
# Test: recompute the probabilities
p0V,p1V,pAb=trainNB0(trainMat,listClasses)
p0V
array([-2.56494936, -2.56494936, -2.56494936, -2.56494936, -2.56494936,
-3.25809654, -3.25809654, -3.25809654, -2.56494936, -2.56494936,
-2.56494936, -3.25809654, -3.25809654, -3.25809654, -2.56494936,
-3.25809654, -2.56494936, -2.56494936, -3.25809654, -2.56494936,
-2.15948425, -1.87180218, -2.56494936, -3.25809654, -2.56494936,
-2.56494936, -2.56494936, -2.56494936, -2.56494936, -2.56494936,
-3.25809654, -3.25809654])
p1V
array([-3.04452244, -3.04452244, -3.04452244, -3.04452244, -3.04452244,
-2.35137526, -2.35137526, -2.35137526, -2.35137526, -3.04452244,
-1.94591015, -1.65822808, -2.35137526, -2.35137526, -3.04452244,
-2.35137526, -3.04452244, -3.04452244, -2.35137526, -3.04452244,
-2.35137526, -3.04452244, -3.04452244, -2.35137526, -3.04452244,
-3.04452244, -3.04452244, -2.35137526, -3.04452244, -3.04452244,
-1.94591015, -2.35137526])
'''
Build the naive Bayes classification function
'''
def classifyNB(vec2Classify,p0Vec,p1Vec,pClass1):
    p1=sum(vec2Classify*p1Vec)+np.log(pClass1)  # element-wise product; since log(A*B)=log(A)+log(B), we add log(pClass1)
    p0=sum(vec2Classify*p0Vec)+np.log(1.0-pClass1)  # this computes p(w|Ci)*p(Ci)=p(w0,w1,...|Ci)*p(Ci)=p(w0|Ci)p(w1|Ci)...p(Ci) in log space
    if p1>p0:
        return 1
    else:
        return 0
# Test the naive Bayes classifier
def testingNB():
    listOPosts,listClasses=loadDataSet()
    myVocabList=createVocabList(listOPosts)  # build the vocabulary
    trainMat=[]
    for postinDoc in listOPosts:  # build the training matrix
        trainMat.append(setOfWords2Vec(myVocabList,postinDoc))
    p0V,p1V,pAb=trainNB0(np.array(trainMat),np.array(listClasses))  # train the classifier; note the conversion from list to array
    testEntry=['love','my','dalmation']
    thisDoc=np.array(setOfWords2Vec(myVocabList,testEntry))
    if classifyNB(thisDoc,p0V,p1V,pAb):
        print(testEntry,'classified as abusive')
    else:
        print(testEntry,'classified as not abusive')
    testEntry=['stupid','garbage']
    thisDoc=np.array(setOfWords2Vec(myVocabList,testEntry))
    if classifyNB(thisDoc,p0V,p1V,pAb):
        print(testEntry,'classified as abusive')
    else:
        print(testEntry,'classified as not abusive')
# Test
testingNB()
['love', 'my', 'dalmation'] classified as not abusive
['stupid', 'garbage'] classified as abusive
In the model above we only recorded whether each word appears or not, which is called the set-of-words model; if a word can appear more than once in a document and we record its count, we get the bag-of-words model. To support the bag-of-words model we modify setOfWords2Vec() as follows.
# Bag-of-words model
def bagOfWords2VecMN(vocabList,inputSet):
    returnVec=[0]*len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)]+=1  # increment the count each time the word appears
    return returnVec
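A quick comparison of the two representations on a made-up document that repeats some words (a minimal sketch reusing myVocabList from above; the sample sentence is invented for illustration):
sample=['my','dog','ate','my','dog','food']   # hypothetical document with repeated words
print(setOfWords2Vec(myVocabList,sample))     # set-of-words: entries are only 0 or 1
print(bagOfWords2VecMN(myVocabList,sample))   # bag-of-words: 'my' and 'dog' get a count of 2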
2.1 Prepare the data: tokenizing text
'''
Above we were given the word vectors directly; here we need to build the word list from text documents.
If we only split with split(), punctuation gets attached to the words,
so we use a regular expression instead.
'''
# Example
import re
mySent='This book is the best book on Python or M.L. I have ever laid eyes upon.'
mySent.split()
['This', 'book', 'is', 'the', 'best', 'book', 'on', 'Python', 'or', 'M.L.', 'I', 'have', 'ever', 'laid', 'eyes', 'upon.']
regEx=re.compile(r'\W+')  # one or more non-word characters (anything other than letters, digits and underscore); \W* can match empty strings and no longer works with re.split on Python 3.7+
listOfTokens=regEx.split(mySent)
listOfTokens
['This', 'book', 'is', 'the', 'best', 'book', 'on', 'Python', 'or', 'M', 'L', 'I', 'have', 'ever', 'laid', 'eyes', 'upon', '']
'''
As shown above the result contains an empty string; we can drop it by keeping only tokens whose length is greater than 0.
We can also normalize case with Python's built-in string methods (.lower() or .upper()).
'''
[tok.lower() for tok in listOfTokens if len(tok)>0]
['this', 'book', 'is', 'the', 'best', 'book', 'on', 'python', 'or', 'm', 'l', 'i', 'have', 'ever', 'laid', 'eyes', 'upon']
2.2 Test the algorithm: cross-validation with naive Bayes
'''
Here we build a simple text-parsing function
'''
import re
def textParse(bigString):
    listOfToken=re.split(r'\W+',bigString)
    return [tok.lower() for tok in listOfToken if len(tok)>2]
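For example, applying it to the sentence used earlier (a quick sanity check, not from the original text) drops the short tokens and lowercases the rest:
textParse('This book is the best book on Python or M.L. I have ever laid eyes upon.')
# ['this', 'book', 'the', 'best', 'book', 'python', 'have', 'ever', 'laid', 'eyes', 'upon']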
# Build the training and test sets, and use hold-out cross-validation to measure classification accuracy
import random
def spamTest():
    docList=[]
    classList=[]
    fullText=[]
    for i in range(1,26):  # iterate over the 25 files in each folder
        wordList=textParse(open('H:/机器学习课程资料/machinelearninginaction/Ch04/email/spam/%d.txt'%i,'r').read())
        docList.append(wordList)
        fullText.append(wordList)
        classList.append(1)  # spam is labeled 1
        wordList=textParse(open('H:/机器学习课程资料/machinelearninginaction/Ch04/email/ham/%d.txt'%i,'r').read())
        docList.append(wordList)
        fullText.append(wordList)
        classList.append(0)  # ham is labeled 0
    vocabList=createVocabList(docList)  # build the deduplicated vocabulary
    trainingSet=list(range(50))  # 50 files in total, indices 0-49, as a list
    testSet=[]
    for i in range(10):
        randIndex=int(random.uniform(0,len(trainingSet)))  # randomly pick 10 documents as the test set
        testSet.append(trainingSet[randIndex])  # add to the test-set list
        del(trainingSet[randIndex])  # remove the index from the training set
    trainMat=[]
    trainClasses=[]
    for docIndex in trainingSet:
        trainMat.append(setOfWords2Vec(vocabList,docList[docIndex]))  # add the selected documents to the training matrix
        trainClasses.append(classList[docIndex])
    p0V,p1V,pSpam=trainNB0(np.array(trainMat),np.array(trainClasses))
    errorCount=0
    for docIndex in testSet:
        wordVector=setOfWords2Vec(vocabList,docList[docIndex])
        if classifyNB(np.array(wordVector),p0V,p1V,pSpam)!=classList[docIndex]:
            errorCount+=1  # count misclassified test documents
            print('misclassified test document:',docList[docIndex])
    print('error rate: %.2f%%'%(float(errorCount)/len(testSet)*100))
# Test
spamTest()
misclassified test document: ['yeah', 'ready', 'may', 'not', 'here', 'because', 'jar', 'jar', 'has', 'plane', 'tickets', 'germany', 'for']
error rate: 10.00%
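Because the train/test split is random, the error rate changes from run to run. A common refinement, sketched below under the assumption that spamTest() is modified to end with return float(errorCount)/len(testSet) instead of only printing, is to average the error rate over repeated random splits:
# Assumes a hypothetical variant of spamTest() that returns its error rate
numRuns=10
errorSum=sum(spamTest() for _ in range(numRuns))
print('average error rate over %d runs: %.2f%%'%(numRuns,errorSum/numRuns*100))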
Reference blog post
3.1 Tokenizing Chinese sentences
Word segmentation library: jieba
Official tutorial
import jieba
import os
# Text processing
def TextProcessing(folder_path):
    folder_list=os.listdir(folder_path)
    data_list=[]
    class_list=[]
    # iterate over the subfolders
    for folder in folder_list:
        new_folder_path=os.path.join(folder_path,folder)  # path of the current subfolder
        files=os.listdir(new_folder_path)  # list of txt files
        j=1
        for file in files:
            if j>100:
                break
            with open(os.path.join(new_folder_path,file),'r',encoding='utf-8') as f:
                raw=f.read()
            word_cut=jieba.cut(raw,cut_all=False)  # accurate mode, returns an iterable generator
            word_list=list(word_cut)  # store as a list
            data_list.append(word_list)
            class_list.append(folder)
            j+=1
    print(data_list[0][:10])
    print(class_list[0])
path='H:/机器学习课程资料/Machine-Learning-master/Naive Bayes/SogouC/Sample'
TextProcessing(path)
['\u3000', '\u3000', '本报记者', '陈雪频', '实习', '记者', '唐翔', '发自', '上海', '\n']
C000008
3.2 Text feature selection
'''
Split all documents into a training set and a test set, count the frequency of every word in the training set, and sort the words by frequency in descending order
'''
import random
def TextProcessing(folder_path,test_size=0.2):  # the test set takes 20% of the data by default
    folder_list=os.listdir(folder_path)
    data_list=[]
    class_list=[]
    # iterate over the subfolders
    for folder in folder_list:
        new_folder_path=os.path.join(folder_path,folder)  # path of the current subfolder
        files=os.listdir(new_folder_path)  # list of txt files
        j=1
        for file in files:
            if j>100:
                break
            with open(os.path.join(new_folder_path,file),'r',encoding='utf-8') as f:
                raw=f.read()
            word_cut=jieba.cut(raw,cut_all=False)  # accurate mode, returns an iterable generator
            word_list=list(word_cut)  # store as a list
            data_list.append(word_list)
            class_list.append(folder)
            j+=1
    data_class_list=list(zip(data_list,class_list))  # zip the data list and the class list together
    random.shuffle(data_class_list)  # shuffle
    index=int(len(data_class_list)*test_size)  # index that splits the data into test and training sets
    train_list=data_class_list[index:]
    test_list=data_class_list[:index]
    train_data_list,train_class_list=zip(*train_list)
    test_data_list,test_class_list=zip(*test_list)
    all_words_dict={}  # word-frequency counts over the training set
    for word_list in train_data_list:
        for word in word_list:
            if word in all_words_dict.keys():
                all_words_dict[word]+=1
            else:
                all_words_dict[word]=1
    # sort by value in descending order
    all_words_tuple_list=sorted(all_words_dict.items(),key=lambda f:f[1],reverse=True)
    all_words_list,all_words_nums=zip(*all_words_tuple_list)
    all_words_list=list(all_words_list)  # convert to a list
    # return the word list sorted by descending frequency, the training data, the test data, the training labels and the test labels
    return all_words_list,train_data_list,test_data_list,train_class_list,test_class_list
# Test
all_words_list,train_data_list,test_data_list,train_class_list,test_class_list=TextProcessing(path, test_size=0.2)
print(all_words_list[:5])
[',', '的', '\u3000', '。', '\n']
The word list contains many symbols and meaningless tokens, so we need rules to remove them.
We remove the highest-frequency words (the exact number is determined later by the resulting test accuracy), and also drop digits and stop words such as '地' and '的'.
'''
Read a text file of words and deduplicate it
'''
def MakeWordsSet(words_file):
    words_set=set()
    with open(words_file,'r',encoding='utf-8') as f:
        for line in f.readlines():  # read line by line
            word=line.strip()
            if len(word)>0:
                words_set.add(word)
    return words_set
'''
Text feature selection:
all_words_list - list of all words in the training set, sorted by frequency
deleteN - number of highest-frequency words to delete
stopwords_set - the stop-word set
'''
def words_dict(all_words_list,deleteN,stopwords_set=set()):
    feature_words=[]  # feature list
    n=1
    for t in range(deleteN,len(all_words_list),1):
        if n>1000:  # stop once the feature dimension exceeds 1000
            break
        # a word is kept as a feature if it is not a number, is not a stop word, and its length is greater than 1 and less than 5
        if not all_words_list[t].isdigit() and all_words_list[t] not in stopwords_set and 1<len(all_words_list[t])<5:
            feature_words.append(all_words_list[t])
            n+=1
    return feature_words
# Vectorize the documents according to feature_words
def TextFeatures(train_data_list,test_data_list,feature_words):
    def text_features(text,feature_words):
        text_words=set(text)  # the set of words in this document
        features=[1 if word in text_words else 0 for word in feature_words]  # 1 if the feature word appears in the document
        return features
    train_feature_list=[text_features(text,feature_words) for text in train_data_list]
    test_feature_list=[text_features(text,feature_words) for text in test_data_list]
    return train_feature_list,test_feature_list
# Test
stopwords_file='H:/机器学习课程资料/Machine-Learning-master/Naive Bayes/stopwords_cn.txt'
stopwords_set=MakeWordsSet(stopwords_file)
feature_words=words_dict(all_words_list,100,stopwords_set)
feature_words[:10]
['成为', '仿制', '可能', '比赛', '工作', '主要', '复习', '选择', '远程', '问题']
len(feature_words)
714
3.3 Build a naive Bayes classifier with sklearn
Official documentation
scikit-learn provides three naive Bayes classifier classes: GaussianNB, MultinomialNB and BernoulliNB. GaussianNB assumes the features follow a Gaussian distribution, MultinomialNB a multinomial distribution, and BernoulliNB a Bernoulli distribution. How to choose among them (a short instantiation sketch follows this list):
1. If feature Xj takes discrete values, we can assume Xj follows a multinomial distribution and estimate p(Xj|Ci) as in the earlier sections.
2. If Xj is a very sparse discrete feature, i.e. each feature occurs with very low probability, we assume Xj follows a Bernoulli distribution and only record whether Xj occurs, not how many times.
3. If Xj is continuous, the usual choice is a Gaussian (normal) distribution.
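Since the feature vectors built by TextFeatures are binary presence/absence indicators, BernoulliNB would also be a reasonable fit here; the lines below are a minimal sketch (not part of the original pipeline, default parameters otherwise) showing how the three variants are instantiated:
from sklearn.naive_bayes import GaussianNB, MultinomialNB, BernoulliNB
gnb=GaussianNB()              # continuous features modeled as Gaussian
mnb=MultinomialNB(alpha=1.0)  # discrete counts (bag-of-words); alpha is the Laplace smoothing term
bnb=BernoulliNB(alpha=1.0)    # binary presence/absence features, like the 0/1 vectors built above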
Reference blog post
from sklearn.naive_bayes import MultinomialNB
import matplotlib.pyplot as plt
def TextClassifier(train_feature_list,test_feature_list,train_class_list,test_class_list):
    classifier=MultinomialNB().fit(train_feature_list,train_class_list)
    test_accuracy=classifier.score(test_feature_list,test_class_list)
    return test_accuracy
# Test
path='H:/机器学习课程资料/Machine-Learning-master/Naive Bayes/SogouC/Sample'
all_words_list,train_data_list,test_data_list,train_class_list,test_class_list=TextProcessing(path,test_size=0.2)
stopwords_file='H:/机器学习课程资料/Machine-Learning-master/Naive Bayes/stopwords_cn.txt'
stopwords_set=MakeWordsSet(stopwords_file)
test_accuracy_list=[]
deleteNs=range(0,1000,20)
for deleteN in deleteNs:
    feature_words=words_dict(all_words_list,deleteN,stopwords_set)
    train_feature_list,test_feature_list=TextFeatures(train_data_list,test_data_list,feature_words)
    test_accuracy=TextClassifier(train_feature_list,test_feature_list,train_class_list,test_class_list)
    test_accuracy_list.append(test_accuracy)
plt.figure()
plt.plot(deleteNs,test_accuracy_list)
plt.title('Relationship of deleteNs and test_accuracy')
plt.xlabel('deleteNs')
plt.ylabel('test_accuracy')
plt.show()
Although the results differ from run to run, we can repeat the experiment several times to settle on a good value of deleteN and then use that value to build the final classifier (a small sketch of this last step follows).
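A minimal sketch of that last step, assuming the deleteNs and test_accuracy_list computed above are still in scope: pick the deleteN with the highest test accuracy on this run and retrain a classifier with it.
best_deleteN=deleteNs[test_accuracy_list.index(max(test_accuracy_list))]  # deleteN with the best accuracy on this run
feature_words=words_dict(all_words_list,best_deleteN,stopwords_set)
train_feature_list,test_feature_list=TextFeatures(train_data_list,test_data_list,feature_words)
final_classifier=MultinomialNB().fit(train_feature_list,train_class_list)
print('best deleteN:',best_deleteN,'test accuracy:',final_classifier.score(test_feature_list,test_class_list))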