# TrainingSet.py: the document collection after tokenization
postingList = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
               ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
               ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
               ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
               ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
               ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
# Class labels for the documents above: 1 = abusive, 0 = not abusive
classVec = [0, 1, 0, 1, 0, 1]
import numpy as np

# Load the training set (the document collection and labels above are assumed
# to be saved as TrainingSet.py, which this module imports)
from TrainingSet import postingList, classVec

def loadDataSet():
    # Return the tokenized document collection and its class-label list
    return postingList, classVec
def createVocabList(dataSet):
    # Start from an empty set
    vocabSet = set([])
    # Add each document's words to the set
    for document in dataSet:
        # The | operator computes the union of two sets
        vocabSet = vocabSet | set(document)
    # Return a list of the distinct words that appear across all documents
    return list(vocabSet)
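# Illustrative check (not in the original listing): the six sample documents
# above contain 32 distinct tokens, so every word vector built from this
# vocabulary will have length 32.
# >>> len(createVocabList(postingList))
# 32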
def setOfWords2Vec(vocabList, inputSet):
    # Create a vector of all zeros
    returnVec = [0] * len(vocabList)
    # Walk through the words of the document
    for word in inputSet:
        # If a word of the document is in the vocabulary, set its slot to 1
        if word in vocabList:
            returnVec[vocabList.index(word)] = 1
        # Otherwise report the unknown word
        else:
            print("%s is not in the vocabulary" % word)
    # Each element is 1 or 0: whether that vocabulary word occurs in the document
    return returnVec
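# The set-of-words model above only records whether a word occurs. A common
# variant counts every occurrence instead (the bag-of-words model); the
# function below is a sketch of that variant, not part of the original listing.
def bagOfWords2VecMN(vocabList, inputSet):
    # Like setOfWords2Vec, but accumulate counts instead of setting a 0/1 flag
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] += 1
    return returnVec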
def trainNB0(trainMatrix, trainCategory):
    # Number of documents in the training set
    numTrainDocs = len(trainMatrix)
    # Number of words in the vocabulary
    numWords = len(trainMatrix[0])
    # Probability that a document is abusive (the class prior)
    pAbusive = sum(trainCategory) / float(numTrainDocs)
    # Initialize the numerators to ones rather than zeros (add-one smoothing,
    # so that an unseen word never gets probability 0)
    p0Num = np.ones(numWords)
    p1Num = np.ones(numWords)
    # Initialize the denominators to 2.0 to match the smoothing above
    p0Denom = 2.0
    p1Denom = 2.0
    # Walk through every document in trainMatrix
    for i in range(numTrainDocs):
        # For an abusive document, add its word vector to the abusive word
        # counts and its word total to the abusive denominator
        if trainCategory[i] == 1:
            p1Num += trainMatrix[i]
            p1Denom += sum(trainMatrix[i])
        # For a non-abusive document, update the non-abusive counts instead
        else:
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    # Element-wise division gives the conditional probabilities; take logs to
    # avoid floating-point underflow when many of them are combined later
    p1Vect = np.log(p1Num / p1Denom)
    p0Vect = np.log(p0Num / p0Denom)
    # Return the two class-conditional log-probability vectors and the prior
    return p0Vect, p1Vect, pAbusive
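# Written out, trainNB0 estimates (with the add-one smoothing supplied by the
# ones/2.0 initialization):
#
#   p(word_j | class c) = (count of word_j in class-c docs + 1)
#                         / (total word count of class c + 2)
#
# For the sample data, 'stupid' occurs in all 3 abusive documents, which
# contain 19 words in total, so p('stupid' | abusive) = (3 + 1) / (19 + 2),
# roughly 0.19; trainNB0 returns the logarithms of these values.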
def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    # Multiplying element-wise, summing, and adding the log prior is the
    # log-space equivalent of multiplying the probabilities together
    p1 = sum(vec2Classify * p1Vec) + np.log(pClass1)
    p0 = sum(vec2Classify * p0Vec) + np.log(1.0 - pClass1)
    # Return the class with the larger score
    if p1 > p0:
        return 'abusive'
    else:
        return 'not abusive'
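# classifyNB compares, for each class c, the unnormalized log-posterior
#
#   score(c) = log p(c) + sum_j vec2Classify[j] * log p(word_j | c)
#
# The evidence term p(document) is the same for both classes, so it is
# dropped; only the relative order of the two scores matters.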
def testingNB():
    listOPosts, listClasses = loadDataSet()
    # Build a list containing every word that appears in the corpus
    myVocabList = createVocabList(listOPosts)
    # Initialize the list of training vectors
    trainMat = []
    # Fill it with one word vector per document
    for postinDoc in listOPosts:
        trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
    # Train
    p0V, p1V, pAb = trainNB0(trainMat, listClasses)
    # Test
    testEntry1 = ['love', 'my', 'dalmation']
    thisDoc = np.array(setOfWords2Vec(myVocabList, testEntry1))
    print(testEntry1, '---> ', classifyNB(thisDoc, p0V, p1V, pAb))
    # '1111' is absent from the vocabulary, so setOfWords2Vec will report it
    testEntry2 = ['stupid', 'garbage', 'my', 'dog', 'love', '1111']
    thisDoc = np.array(setOfWords2Vec(myVocabList, testEntry2))
    print(testEntry2, '---> ', classifyNB(thisDoc, p0V, p1V, pAb))

if __name__ == '__main__':
    testingNB()
Note: the larger the training set, the more reliable the estimated probabilities, and hence the classification results.
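One way to quantify that is a hold-out test: keep a few documents out of training and measure the error rate on them. Below is a minimal sketch of that idea, assuming the functions above are available; holdOutTest and testIndices are names introduced here, not part of the original listing (with only six documents the numbers are noisy, but the procedure carries over to larger corpora).

def holdOutTest(testIndices=(0, 5)):
    # Split the sample data into training and held-out documents
    posts, labels = loadDataSet()
    vocab = createVocabList(posts)
    trainMat, trainLabels = [], []
    for i in range(len(posts)):
        if i not in testIndices:
            trainMat.append(setOfWords2Vec(vocab, posts[i]))
            trainLabels.append(labels[i])
    p0V, p1V, pAb = trainNB0(trainMat, trainLabels)
    # Classify the held-out documents and count mistakes
    errors = 0
    for i in testIndices:
        thisDoc = np.array(setOfWords2Vec(vocab, posts[i]))
        actual = 'abusive' if labels[i] == 1 else 'not abusive'
        if classifyNB(thisDoc, p0V, p1V, pAb) != actual:
            errors += 1
    print('error rate: %.2f' % (errors / float(len(testIndices))))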