使用 Python 实现朴素贝叶斯文本分类



#coding:utf-8
__author__ = 'lishuai'

import numpy
def loadDataSet():
    """Return a toy training corpus for naive-Bayes text classification.

    Returns:
        (postingList, classVec): a list of tokenized posts, and a parallel
        list of labels where 0 = non-abusive and 1 = abusive.
    """
    postingList = [
        ['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
        ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
        ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
        ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
        ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
        ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid'],
    ]
    # 0 = contains no abusive words, 1 = contains abusive words
    classVec = [0, 1, 0, 1, 0, 1]
    return postingList, classVec


#将选取的文本构建成词汇表
def createVocabList(dataSet):
    """Build the vocabulary: a list of every unique word across all documents.

    Args:
        dataSet: iterable of tokenized documents (lists of words).

    Returns:
        A list of the distinct words (order is unspecified, as with the
        original set-union implementation).
    """
    vocab = set()
    for doc in dataSet:
        vocab.update(doc)
    return list(vocab)


#对选取的文本向量化
def setOfWord2Vec(vocabList, inputData):
    """Convert a document into a set-of-words (binary) vector.

    Args:
        vocabList: list of vocabulary words; defines the vector's indexing.
        inputData: a tokenized document (list of words).

    Returns:
        A list of 0/1 flags, one per vocabulary word: 1 if the word
        appears in the document, else 0 (duplicates are not counted).
    """
    returnVec = [0] * len(vocabList)
    for word in inputData:
        if word in vocabList:
            returnVec[vocabList.index(word)] = 1
        else:
            # Fixed: Python 2 print statement -> print() function (works on
            # both 2 and 3), and typo "Vocabyulary" in the message.
            print("the word: %s is not in my Vocabulary!" % word)
    return returnVec


def makeMatrix(vocabList, inputData):
    """Vectorize a whole corpus: one set-of-words vector per document.

    Args:
        vocabList: the vocabulary list shared by all vectors.
        inputData: iterable of tokenized documents.

    Returns:
        A list of 0/1 vectors (the training matrix), one row per document.
    """
    return [setOfWord2Vec(vocabList, doc) for doc in inputData]

#对向量化后的文本进行训练
def trainNB(trainMatrix, trainCategory):
    """Train a naive-Bayes model on vectorized documents.

    Uses Laplace smoothing: word counts start at 1 and the per-class
    denominators start at 2, so no conditional probability is zero.

    Args:
        trainMatrix: list of 0/1 word vectors (rows of equal length).
        trainCategory: parallel list of labels (0 or 1).

    Returns:
        (pAbusive, logP1, logP0): the prior P(class=1), and the
        log conditional-probability vectors for class 1 and class 0.
    """
    numDocs = len(trainMatrix)
    numWords = len(trainMatrix[0])
    pAbusive = sum(trainCategory) / float(numDocs)

    # Smoothed counts: numerators start at 1, denominators at 2.
    p1Vector = numpy.ones(numWords)
    p0Vector = numpy.ones(numWords)
    p1DocumentWords = 2
    p0DocumentWords = 2

    for row, label in zip(trainMatrix, trainCategory):
        if label == 1:
            p1Vector += row
            p1DocumentWords += sum(row)
        else:
            p0Vector += row
            p0DocumentWords += sum(row)

    return (pAbusive,
            numpy.log(p1Vector / p1DocumentWords),
            numpy.log(p0Vector / p0DocumentWords))

#利用建立好的模型对文本testvector进行分类
def classifyNB(testVector, p1Vector, p0Vector, pclass):
    """Classify one vectorized document with the trained naive-Bayes model.

    Args:
        testVector: 0/1 word vector (numpy array) for the document.
        p1Vector: log conditional probabilities for class 1.
        p0Vector: log conditional probabilities for class 0.
        pclass: prior probability of class 1.

    Returns:
        1 if the class-1 log posterior is strictly greater, else 0.
    """
    logPosterior1 = numpy.log(pclass) + sum(testVector * p1Vector)
    logPosterior0 = numpy.log(1 - pclass) + sum(testVector * p0Vector)
    return 1 if logPosterior1 > logPosterior0 else 0


你可能感兴趣的：(机器学习, python)