Naive Bayes in Python

1. Basic Idea

Naive Bayes is a generative model based on Bayes' theorem and the assumption of conditional independence among features.

Given a training data set, it first learns the joint probability distribution of inputs and outputs under the feature conditional independence assumption (the parameters involved can be estimated by maximum likelihood estimation or by Bayesian estimation); then, for a new input x, it uses Bayes' theorem to find the output y with the largest posterior probability.
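
In symbols (a standard statement of the rule, consistent with the description above; X^{(j)} denotes the j-th feature and c_k a class label):

y = \arg\max_{c_k} P(Y = c_k) \prod_{j} P(X^{(j)} = x^{(j)} \mid Y = c_k)

The denominator P(X = x) is the same for every class, so it can be dropped when comparing posteriors.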

2. Advantages

2.1 Still effective with small amounts of data, and can handle multi-class problems (the code in this post only deals with binary classification).

2.2 Few parameters need to be estimated, and the model is not very sensitive to missing data.

3. Disadvantages

3.1 Fairly sensitive to how the input data is prepared.

3.2 Because of the feature conditional independence assumption, classification performance is not guaranteed to be high. In theory the naive Bayes classifier (NBC) has the smallest error rate among classifiers, but in practice this is not always the case: NBC assumes the attributes are mutually independent, an assumption that often fails in real applications (one remedy is to first group strongly correlated attributes with a clustering algorithm), and this hurts its classification accuracy. When there are many attributes or the attributes are strongly correlated, NBC does not classify as well as a decision tree model; when the correlation among attributes is small, NBC performs at its best.

4. Practical Notes

4.1 Laplace smoothing

With maximum likelihood estimation, some of the estimated probabilities may come out as 0, which distorts the computation of the posterior probabilities and biases the classification. The remedy is Laplace smoothing, i.e. the Bayesian estimate on p. 51.
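
Concretely (a standard form of the Bayesian estimate, with λ = 1 giving Laplace smoothing; S_j is the number of values the j-th feature can take, N the number of training samples, and I(·) the indicator function):

P_\lambda(X^{(j)} = a_{jl} \mid Y = c_k) = \frac{\sum_{i=1}^{N} I(x_i^{(j)} = a_{jl},\, y_i = c_k) + \lambda}{\sum_{i=1}^{N} I(y_i = c_k) + S_j \lambda}

In the training code below, initializing the word counts to ones and the denominators to 2.0 serves the same purpose.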

4.2 Underflow

The probabilities computed in step (1) of Algorithm 4.1 can be very small; multiplying n very small probabilities together is likely to underflow, i.e. the product is computed as 0. To avoid this, take the logarithm of the product.
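
Since the logarithm is monotonically increasing, maximizing the product is equivalent to maximizing the sum of logs:

\log\left( P(c) \prod_{i=1}^{n} P(w_i \mid c) \right) = \log P(c) + \sum_{i=1}^{n} \log P(w_i \mid c)

This is exactly what trainNBO (which returns log-probabilities) and classifyNB (which sums them) do in the code below.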

4.3 Set-of-words model vs. bag-of-words model

In document classification, a word occurring more than once in a document may carry information that its mere presence or absence cannot express. In the bag-of-words model each word can be counted multiple times, whereas in the set-of-words model each word is recorded at most once.
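
A minimal sketch of the difference (the vocabulary and document here are made up for illustration; the full vectorizer functions setOfWords2Vec and bagOfWords2VecMN appear in section 5):

vocab = ['stupid', 'dog', 'worthless']
doc = ['stupid', 'dog', 'stupid']
setVec = [1 if w in doc else 0 for w in vocab]   # set-of-words:  [1, 1, 0]
bagVec = [doc.count(w) for w in vocab]           # bag-of-words:  [2, 1, 0]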

5. Python Implementation (Python 2)

from numpy import *
# this module handles binary (two-class) classification


### example ###
def loadDataSet():
    postingList = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
                   ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
                   ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
                   ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
                   ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
                   ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
    classVec = [0, 1, 0, 1, 0, 1]   # 0 for normal, 1 for insult
    return postingList, classVec


### preparing data ###
# create a vocabulary list for dataSet
def createVocabList(dataSet):
    vocabSet = set([])
    for document in dataSet:
        vocabSet = vocabSet | set(document)
    return list(vocabSet)

# set-of-words model
def setOfWords2Vec(vocabList, inputSet):
    # inputSet is a new input document in the same form as the documents in dataSet
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] = 1
        else:
            print "the word: %s is not in my Vocablary!" % word
    return returnVec

# bag-of-words model
def bagOfWords2VecMN(vocabList, inputSet):
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] += 1
        else:
            print "the word: %s is not in my Vocablary!" % word
    return returnVec    


### training ###
def trainNBO(trainMatrix, trainCategory):
    numTrainDocs = len(trainMatrix)
    numWords = len(trainMatrix[0])
    pAbusive = sum(trainCategory)/float(numTrainDocs)
    p0Num = ones(numWords)  # initialize counts to 1 (Laplace smoothing / Bayesian estimate)
    p1Num = ones(numWords)  # initialize counts to 1 (Laplace smoothing / Bayesian estimate)
    p0Denom = 2.0           # initialize denominator to 2.0 for the same reason
    p1Denom = 2.0           # initialize denominator to 2.0 for the same reason
    for ii in range(numTrainDocs):
        if trainCategory[ii] == 1:
            p1Num += trainMatrix[ii]
            p1Denom += sum(trainMatrix[ii])
        else:
            p0Num += trainMatrix[ii]
            p0Denom += sum(trainMatrix[ii])
    p1Vect = log(p1Num/p1Denom)  # take the log to avoid underflow (see section 4.2)
    p0Vect = log(p0Num/p0Denom)  # take the log to avoid underflow (see section 4.2)
    return p0Vect, p1Vect, pAbusive


### testing ###
# classifying
def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    p1 = sum(vec2Classify*p1Vec) + log(pClass1)
    p0 = sum(vec2Classify*p0Vec) + log(1.0-pClass1)
    if p1 > p0:
        return 1
    else:
        return 0

def testingNB():
    listPosts, listClasses = loadDataSet()
    myVocabList = createVocabList(listPosts)
    trainMat = []
    for postinDoc in listPosts:
        trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
    p0V, p1V, pAb = trainNBO(array(trainMat), array(listClasses))
    testEntry = ['love', 'my', 'dalmation']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
    print testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb)
    testEntry = ['stupid', 'garbage']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
    print testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb)
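
Running the example (the output below is what I would expect given the toy data set above; the exact spacing of the print output may differ):

>>> testingNB()
['love', 'my', 'dalmation'] classified as:  0
['stupid', 'garbage'] classified as:  1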


### spam e-mail classification ###
def textParse(bigString):
    import re
    listOfTokens = re.split(r'\W*', bigString)
    return [tok.lower() for tok in listOfTokens if len(tok)>2]
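
A quick sanity check of textParse (the input sentence is made up; the output assumes Python 2, where re.split ignores the zero-length matches that r'\W*' can produce; tokens of length 2 or less are dropped and everything is lower-cased):

>>> textParse('Peter, could you please send me that M.L. code?')
['peter', 'could', 'you', 'please', 'send', 'that', 'code']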

def spamTest():
    docList = []
    classList = []
    fullText = []
    for ii in range(1, 26):
        wordList = textParse(open(r'F:\ResearchData\MyCode\Python\email\spam/%d.txt' % ii).read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)
        wordList = textParse(open(r'F:\ResearchData\MyCode\Python\email\ham/%d.txt' % ii).read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)
    trainingSet = range(50)
    testSet = []
    for ii in range(10):
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del(trainingSet[randIndex])
    trainMat = []
    trainClasses = []
    for docIndex in trainingSet:
        trainMat.append(setOfWords2Vec(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V, p1V, pSpam = trainNBO(array(trainMat), array(trainClasses))
    errorCount = 0
    for docIndex in testSet:
        wordVector = setOfWords2Vec(vocabList, docList[docIndex])
        if classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
    print 'the error rate is: ', float(errorCount)/len(testSet)
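
Because the 10 test documents are drawn at random, the reported error rate varies from run to run. A small sketch of how one might average it over several trials (this assumes a hypothetical modification of spamTest so that it returns float(errorCount)/len(testSet) instead of only printing it):

def averageSpamError(numTrials=10):
    # average the hold-out error rate over numTrials random train/test splits
    total = 0.0
    for ii in range(numTrials):
        total += spamTest()
    print 'average error rate over %d trials: %f' % (numTrials, total/numTrials)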


### RSS feed classification ###
def calcMostFreq(vocabList, fullText):
    import operator
    freqDict = {}
    for token in vocabList:
        freqDict[token] = fullText.count(token)
    sortedFreq = sorted(freqDict.iteritems(), key=operator.itemgetter(1), reverse=True)
    return sortedFreq[:30]

def localWords(feed1, feed0):
    import feedparser
    docList = []
    classList = []
    fullText = []
    minLen = min(len(feed1['entries']), len(feed0['entries']))
    for ii in range(minLen):
        wordList = textParse(feed1['entries'][ii]['summary'])
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)
        wordList = textParse(feed0['entries'][ii]['summary'])
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)
    top30Words = calcMostFreq(vocabList, fullText)
    for pairW in top30Words:
        if pairW[0] in vocabList:
            vocabList.remove(pairW[0])
    trainingSet = range(2*minLen)
    testSet = []
    for ii in range(20):
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del(trainingSet[randIndex])
    trainMat = []
    trainClasses = []
    for docIndex in trainingSet:
        trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V, p1V, pSpam = trainNBO(array(trainMat), array(trainClasses))
    errorCount = 0
    for docIndex in testSet:
        wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
        if classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
    print 'the error rate is: ', float(errorCount)/len(testSet)
    return vocabList, p0V, p1V
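
A usage sketch for localWords (the feed URLs below are placeholders, not from the original post; any two RSS feeds whose entries carry a 'summary' field will do, and the feedparser package must be installed):

import feedparser
feed1 = feedparser.parse('http://example.com/feed1.rss')   # class 1
feed0 = feedparser.parse('http://example.com/feed0.rss')   # class 0
vocabList, p0V, p1V = localWords(feed1, feed0)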

