Machine Learning in Action: Naive Bayes Classification

Preparing the data: building word vectors from text

Sample data used by the initial test functions

def loadDataSet():
    '''Create some example documents for experimenting'''
    postingList = [['my','dog','has','flea','problems','help','please'],
                  ['maybe','not','take','him','to','dog','park','stupid'],
                  ['my','dalmation','is','so','cute','I','love','him'],
                  ['stop','posting','stupid','worthless','garbage'],
                  ['mr','licks','ate','my','steak','how','to','stop','him'],
                  ['quit','buying','worthless','dog','food','stupid']]
    classVec = [0,1,0,1,0,1]  #0 = normal post, 1 = abusive
    return postingList,classVec
def createVocabList(dataSet):
    '''Return a list of all unique tokens that appear in the documents'''
    vocabSet = set([])
    for document in dataSet:
        vocabSet = vocabSet | set(document)   #union of the two sets
    return list(vocabSet)

Word-list-to-vector conversion function

def setOfWords2Vec(vocabList,inputSet):
    '''Take the vocabulary list and a document; return the document's word vector'''
    returnVec = [0]*len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] = 1
        else:
            print("the word: {} is not in my Vocabulary".format(word))
    return returnVec
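
Note that vocabList.index(word) rescans the whole vocabulary for every word, which is fine for this toy example but slow for a large vocabulary. A minimal alternative sketch that precomputes a word-to-index dictionary (the name setOfWords2VecFast is my own, not from the book):

def setOfWords2VecFast(vocabList,inputSet):
    '''Same output as setOfWords2Vec, but with O(1) dictionary lookups instead of list.index scans'''
    wordIndex = {word: i for i,word in enumerate(vocabList)}   #build word -> position map once
    returnVec = [0]*len(vocabList)
    for word in inputSet:
        if word in wordIndex:
            returnVec[wordIndex[word]] = 1
        else:
            print("the word: {} is not in my Vocabulary".format(word))
    return returnVec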

Testing the above

listOPosts,listClasses = loadDataSet()
myVocabList = createVocabList(listOPosts)
myVocabList
['so',
 'buying',
 'please',
 'has',
 'dalmation',
 'my',
 'cute',
 'quit',
 'love',
 'stupid',
 'park',
 'not',
 'how',
 'flea',
 'problems',
 'licks',
 'food',
 'stop',
 'help',
 'him',
 'ate',
 'maybe',
 'take',
 'I',
 'worthless',
 'to',
 'steak',
 'mr',
 'is',
 'garbage',
 'posting',
 'dog']
setOfWords2Vec(myVocabList,listOPosts[0])
[0,
 0,
 1,
 1,
 0,
 1,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 1,
 1,
 0,
 0,
 0,
 1,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 1]

Training the algorithm: computing probabilities from word vectors
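
The classifier picks the class with the higher posterior probability. For a word vector w and class c_i, Bayes' rule gives

P(c_i \mid w) = \frac{P(w \mid c_i)\,P(c_i)}{P(w)}

and the "naive" assumption is that words are conditionally independent given the class, so P(w \mid c_i) = \prod_j P(w_j \mid c_i). The training function below estimates each P(w_j \mid c_i) and the prior P(c_i) by counting over the training documents; P(w) can be ignored because it is the same for both classes.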

Naive Bayes classifier training function

from numpy import *

def trainNB0(trainMatrix,trainCategory):
    '''Take the document matrix and the vector of document class labels.
    Return two vectors of log conditional probabilities log P(w_i | c), i = 1..numWords, one per class,
    plus the prior probability of the abusive class.'''
    numTrainDocs = len(trainMatrix)
    numWords = len(trainMatrix[0])
    pAbusive = sum(trainCategory) / float(numTrainDocs)   #prior of class 1 (abusive); modify here for non-binary problems
    #p0Num = zeros(numWords);p1Num = zeros(numWords)
    #p0Denom = 0.0;p1Denom = 0.0                           #plain initialization (no smoothing)
    p0Num = ones(numWords);p1Num = ones(numWords)
    p0Denom = 2.0;p1Denom = 2.0            #initialize with Laplace-style smoothing so no probability is ever 0
    for i in range(numTrainDocs):
        if trainCategory[i] == 1:
            p1Num += trainMatrix[i]
            p1Denom += sum(trainMatrix[i])
        else:
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    #p1Vect = p1Num/p1Denom
    #p0Vect = p0Num/p0Denom
    p1Vect = log(p1Num/p1Denom)        #take natural logs so products of many small probabilities do not underflow
    p0Vect = log(p0Num/p0Denom)
    return p0Vect,p1Vect,pAbusive
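
With the ones()/2.0 initialization, each conditional probability is effectively estimated as

\hat{P}(w_j \mid c) = \frac{N_{jc} + 1}{N_c + 2}

where N_{jc} is how many times word j appears in documents of class c and N_c is the total word count of that class. This keeps a single unseen word from driving the whole probability to zero (and its log to minus infinity). Note the code adds 2 to the denominator as a simple smoothing choice; full add-one (Laplace) smoothing would add the vocabulary size instead.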

Testing the above

trainMat = []
for postinDoc in listOPosts:
    trainMat.append(setOfWords2Vec(myVocabList,postinDoc))
p0v,p1v,pAb = trainNB0(trainMat,listClasses)
pAb
0.5
p0v
array([-2.56494936, -3.25809654, -2.56494936, -2.56494936, -2.56494936,
       -1.87180218, -2.56494936, -3.25809654, -2.56494936, -3.25809654,
       -3.25809654, -3.25809654, -2.56494936, -2.56494936, -2.56494936,
       -2.56494936, -3.25809654, -2.56494936, -2.56494936, -2.15948425,
       -2.56494936, -3.25809654, -3.25809654, -2.56494936, -3.25809654,
       -2.56494936, -2.56494936, -2.56494936, -2.56494936, -3.25809654,
       -3.25809654, -2.56494936])
p1v
array([-3.04452244, -2.35137526, -3.04452244, -3.04452244, -3.04452244,
       -3.04452244, -3.04452244, -2.35137526, -3.04452244, -1.65822808,
       -2.35137526, -2.35137526, -3.04452244, -3.04452244, -3.04452244,
       -3.04452244, -2.35137526, -2.35137526, -3.04452244, -2.35137526,
       -3.04452244, -2.35137526, -2.35137526, -3.04452244, -1.94591015,
       -2.35137526, -3.04452244, -3.04452244, -3.04452244, -2.35137526,
       -2.35137526, -1.94591015])

Testing the algorithm:

Naive Bayes classification function

def classifyNB(vec2Classify,p0Vec,p1Vec,pClass1):
    '''Take the vector to classify and the trained parameters; return the predicted class'''
    #Element-wise multiplication; the sums replace products because everything is in log space.
    #Since vec2Classify holds 0s and 1s, the multiplication also selects only the conditional
    #probabilities (the trained parameters) of the words that actually appear in the document.
    p1 = sum(vec2Classify * p1Vec) + log(pClass1)
    p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)
    if p1 > p0:
        return 1
    else:
        return 0
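
Written out, the two log-scores compared above are

p_1 = \log P(c{=}1) + \sum_j x_j \log P(w_j \mid c{=}1), \qquad p_0 = \log P(c{=}0) + \sum_j x_j \log P(w_j \mid c{=}0)

where x_j is the j-th entry of vec2Classify. Multiplying by x_j keeps only the words that actually occur in the document, and with the bag-of-words vectors used later it also weights repeated words by their counts.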

Test

testEntry = ['love','my','dalmation']
thisDoc = array(setOfWords2Vec(myVocabList,testEntry))
print("{0} classified as: {1}".format(testEntry,classifyNB(thisDoc,p0v,p1v,pAb)))
testEntry = ['stupid','garbage']
thisDoc = array(setOfWords2Vec(myVocabList,testEntry))
print("{0} classified as: {1}".format(testEntry,classifyNB(thisDoc,p0v,p1v,pAb)))
['love', 'my', 'dalmation'] classified as: 0
['stupid', 'garbage'] classified as: 1

Using the bag-of-words model

The set-of-words model is what setOfWords2Vec() above implements; in the bag-of-words model each word can appear multiple times, so the vector stores counts instead of 0/1 flags (a short comparison follows the function below).

def bagOfWords2VecMN(vocabList,inputSet):
    '''Take the vocabulary list and a document; return the document's count vector'''
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] += 1
    return returnVec
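
A quick illustration of the difference, using the myVocabList built earlier and a made-up document (the repeated 'dog' is just for demonstration):

testDoc = ['dog','dog','stupid']
setVec = setOfWords2Vec(myVocabList,testDoc)     #the 'dog' position holds 1
bagVec = bagOfWords2VecMN(myVocabList,testDoc)   #the 'dog' position holds 2
print(setVec[myVocabList.index('dog')],bagVec[myVocabList.index('dog')])   #prints: 1 2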

Example 1: filtering spam email with Naive Bayes

Preparing the data: file parsing and building our own word list from the text documents

def textParse(bigString):
    '''Parse a block of text into a list of lower-cased tokens, dropping tokens shorter than 3 characters'''
    import re
    listOfTokens = re.split(r'\W+',bigString)   #\W+ avoids the empty-pattern FutureWarning raised by \W*
    return [tok.lower() for tok in listOfTokens if len(tok) > 2]
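
For example, parsing an arbitrary sentence should give something like this:

textParse('This book is the best book on Python or M.L. I have ever laid eyes upon.')
#['this', 'book', 'the', 'best', 'book', 'python', 'have', 'ever', 'laid', 'eyes', 'upon']
#tokens shorter than 3 characters ('is', 'on', 'or', 'M', 'L', 'I') are dropped and everything is lower-cased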

Automated spam filtering test

def spamTest():
    docList = [];classList = [];fullText = []
    for i in range(1,26):
        #load and parse the text files
        wordList = textParse(open(r'E:\DataMining\Project\MLBook\机器学习实战源代码\machinelearninginaction\Ch04\email\spam\{}.txt'.
                                  format(i)).read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)
        wordList = textParse(open(r'E:\DataMining\Project\MLBook\机器学习实战源代码\machinelearninginaction\Ch04\email\ham\{}.txt'.
                                  format(i),encoding='gb18030',errors='ignore').read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)  #vocabulary of unique words across all documents
    trainingSet = list(range(50));testSet = []
    for i in range(10):
        #randomly move 10 documents from the training set into the test set (hold-out cross-validation)
        randomIndex = int(random.uniform(0,len(trainingSet)))
        testSet.append(trainingSet[randomIndex])
        del(trainingSet[randomIndex])
    trainMat = [];trainClasses = []
    for docIndex in trainingSet:
        trainMat.append(setOfWords2Vec(vocabList,docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0v,p1v,pSpam = trainNB0(array(trainMat),array(trainClasses))
    errorCount = 0
    for docIndex in testSet:
        #classify the held-out documents
        wordVector = setOfWords2Vec(vocabList,docList[docIndex])
        if classifyNB(array(wordVector),p0v,p1v,pSpam) != classList[docIndex]:
            errorCount += 1
    print("the error rate is {}".format(float(errorCount)/len(testSet)))
    return float(errorCount)/len(testSet)

Repeat 10 times and average the error rate

errorPercent = 0.0
for i in range(10):
    errorPercent += spamTest()
print("the average error percent is: {}%".format(errorPercent/10 * 100))
the error rate is 0.1
the error rate is 0.0
the error rate is 0.0
the error rate is 0.2
the error rate is 0.0
the error rate is 0.0
the error rate is 0.1
the error rate is 0.1
the error rate is 0.0
the error rate is 0.0
the average error percent is: 5.0%

Example 2: using a Naive Bayes classifier to infer the region of local postings

This only exercises the classifier and looks at high-frequency words; a serious analysis would also need stop-word removal, part-of-speech analysis, and so on (a minimal stop-word sketch follows).
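
For instance, a stop-word filter might look like the sketch below; the stopWords list here is a tiny made-up example, not part of the original code, and vocabList stands for the vocabulary built inside localWords further down.

def removeStopWords(vocabList):
    '''Drop a small hand-picked set of English stop words from the vocabulary (illustrative only)'''
    stopWords = set(['the','and','for','you','that','with','this','have','from','are'])
    return [word for word in vocabList if word not in stopWords]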

Built on the RSS feed reading library feedparser

def calcMostFreq(vocabList,fullText):
    '''Count how often each vocabulary word occurs in the full text; return the 30 most frequent words'''
    import operator
    freqDict = {}
    for token in vocabList:
        freqDict[token] = fullText.count(token)
    sortedFreq = sorted(freqDict.items(),key=operator.itemgetter(1),reverse=True)
    return sortedFreq[:30]

RSS feed classifier function

def localWords(feed1,feed0):
    '''Take two RSS feeds already parsed with feedparser.parse'''
    docList = [];classList = [];fullText = []
    minLen = min(len(feed1['entries']),len(feed0['entries']))
    for i in range(minLen):
        wordList = textParse(feed1['entries'][i]['summary'])
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)
        wordList = textParse(feed0['entries'][i]['summary'])
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)
    top30Words = calcMostFreq(vocabList,fullText)
    for pairW in top30Words:
        #remove the most frequent words
        if pairW[0] in vocabList:
            vocabList.remove(pairW[0])
    trainingSet = list(range(2*minLen));testSet = []
    #print("minLen is : {}".format(minLen))
    for i in range(20):
        #randomly hold out 20 documents as the test set
        randIndex = int(random.uniform(0,len(trainingSet)))
        #print("randIndex is : {}".format(randIndex))
        testSet.append(trainingSet[randIndex])
        del (trainingSet[randIndex])
    trainMat = [];trainClasses = []
    for docIndex in trainingSet:
        trainMat.append(bagOfWords2VecMN(vocabList,docList[docIndex]))   #use the bag-of-words model
        trainClasses.append(classList[docIndex])
    p0v,p1v,pSpam = trainNB0(array(trainMat),array(trainClasses))    #train
    errorCount = 0
    for docIndex in testSet:
        wordVector = bagOfWords2VecMN(vocabList,docList[docIndex])
        if classifyNB(array(wordVector),p0v,p1v,pSpam) != classList[docIndex]:
            errorCount += 1
    print('the error rate is : {}'.format(float(errorCount)/len(testSet)))
    return vocabList,p0v,p1v

Load the RSS feeds and test

import feedparser
ny = feedparser.parse('https://newyork.craigslist.org/d/activity-partners/search/act?format=rss')
sf = feedparser.parse('https://sfbay.craigslist.org/d/activity-partners/search/act?format=rss')    #the feed content changes constantly
vocabList,pSF,pNY = localWords(ny,sf)
the error rate is : 0.35


Analyzing the data: showing the words most associated with each region at the time I tested

def getTopWords(ny,sf):
    '''Train on the two feeds and print the most characteristic words for each region'''
    vocabList,p0v,p1v = localWords(ny,sf)
    topNY = [];topSF = []
    for i in range(len(p0v)):
        #keep words whose log conditional probability is above a -5.0 threshold
        if p0v[i] > -5.0 :
            topSF.append((vocabList[i],p0v[i]))
        if p1v[i] > -5.0 :
            topNY.append((vocabList[i],p1v[i]))
    sortedSF = sorted(topSF,key = lambda pair: pair[1],reverse=True)
    print("SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**")
    for item in sortedSF:
        print(item[0])
    sortedNY = sorted(topNY,key = lambda pair:pair[1],reverse=True)
    print("NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**")
    for item in sortedNY:
        print(item[0])
getTopWords(ny,sf)
the error rate is : 0.3
SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**
really
abou
join
years
maybe
whom
one
wood
games
working
hang
fitness
early
two
also
know
june
past
level
could
but
NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**
channel
need
lady
great
our
shorter
make
little
attire
call
attend
youtube
things
participate
area
has
hair
help
got
funds
butterfly
social
vide
extra
submit
shiny
outgoing
brooklyn
there
how
long
etc
new
afternoon
noon
conversation
watching
hurry
walks
29th
youtu
back
does
dinner
moments
seeking
paddy
around
people
number
restaurant
put
couple
singers
weekends
maybe
share
when
must
love
full
name
live
then
5twfhtidasa
videos
humor
crowded
friend
articulate
info
pastime
working
starter
black
sports
show
those
considered
