《机器学习入门》 (Introduction to Machine Learning) Notes - Decision Trees

Decision Trees

Computing the Shannon entropy
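Entropy measures how mixed the class labels are. If $p_k$ is the fraction of samples belonging to class $k$, the Shannon entropy of a dataset $D$ is

$$H(D) = -\sum_{k} p_k \log_2 p_k$$

The function below computes it over the last column (the class label) of each sample.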

from math import log

def calcShannonEnt(dataSet):
    # count how many times each class label appears
    numEntries = len(dataSet)
    labelCounts = {}
    for featVec in dataSet:
        currentLabel = featVec[-1]  # the class label is the last column
        if currentLabel not in labelCounts:
            labelCounts[currentLabel] = 0
        labelCounts[currentLabel] += 1
    # entropy is the sum of -p * log2(p) over all class probabilities
    shannonEnt = 0.0
    for key in labelCounts:
        prob = float(labelCounts[key])/numEntries
        shannonEnt -= prob * log(prob,2)
    return shannonEnt

Creating a small test dataset

def createDataSet():
    # each sample: two feature values plus a class label in the last column
    dataSet = [[1, 1, 'yes'],
               [1, 1, 'yes'],
               [1, 0, 'no'],
               [0, 1, 'no'],
               [0, 1, 'no']]
    # human-readable names for the two features
    labels = ['no surfacing', 'flippers']
    return dataSet, labels

Run:
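A quick interactive check (assuming the functions above are saved in a module, here hypothetically named trees.py):

>>> import trees
>>> myDat, labels = trees.createDataSet()
>>> trees.calcShannonEnt(myDat)
0.9709505944546686

With two 'yes' and three 'no' labels, $-0.4\log_2 0.4 - 0.6\log_2 0.6 \approx 0.971$.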

Splitting the dataset

def splitDataSet(dataSet, axis, value):
    retDataSet = []
    for featVec in dataSet:
        # if the entry at index axis of featVec equals the target value
        if featVec[axis] == value:
            print 'axis: %d, value: %s, featVec:' % (axis, value), featVec
            # keep the rest of featVec, with the entry at index axis removed
            reducedFeatVec = featVec[:axis]
            reducedFeatVec.extend(featVec[axis+1:])
            retDataSet.append(reducedFeatVec)
    return retDataSet

Run:
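Splitting the test data on feature 0 (debug prints omitted; this again assumes the hypothetical trees.py module):

>>> trees.splitDataSet(myDat, 0, 1)
[[1, 'yes'], [1, 'yes'], [0, 'no']]
>>> trees.splitDataSet(myDat, 0, 0)
[[1, 'no'], [1, 'no']]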

Finding the best split

def chooseBestFeatureToSplit(dataSet):
    # number of features; the last column is the class label, not a feature
    numFeatures = len(dataSet[0]) - 1
    # entropy of the full dataset, before any split
    baseEntropy = calcShannonEnt(dataSet)
    bestInfoGain = 0.0
    bestFeature = -1
    for i in range(numFeatures):
        # collect all values of feature i into a list
        featList = [example[i] for example in dataSet]
        # deduplicate the values with a set
        uniqueVals = set(featList)
        newEntropy = 0.0
        for value in uniqueVals:
            # subset of the data where feature i equals this value
            subDataSet = splitDataSet(dataSet,i,value)
            prob = len(subDataSet)/float(len(dataSet))
            print 'subDataSet:',subDataSet,'\nlen(subDataSet):',len(subDataSet),', prob:',prob
            # weighted sum of the entropies of all subsets
            newEntropy += prob * calcShannonEnt(subDataSet)
            print "i: %d, value: %s, newEntropy: %f\n" % (i,value,newEntropy)
        # information gain is the reduction in entropy
        infoGain = baseEntropy - newEntropy
        print "i: %d, baseEntropy: %f, infoGain(baseEntropy - newEntropy): %f\n" % (i,baseEntropy,infoGain)
        if infoGain > bestInfoGain:
            bestInfoGain = infoGain
            bestFeature = i
    return bestFeature

Run:
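For the test data, splitting on feature 0 gives a weighted entropy of $0.4 \cdot 0 + 0.6 \cdot 0.918 \approx 0.551$ (gain $\approx 0.420$), while feature 1 gives $0.2 \cdot 0 + 0.8 \cdot 1.0 = 0.8$ (gain $\approx 0.171$), so feature 0 wins:

>>> trees.chooseBestFeatureToSplit(myDat)
0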

Finding the majority label

import operator

def majorityCnt(classList):
    # tally the votes for each class label
    classCount = {}
    for vote in classList:
        if vote not in classCount:
            classCount[vote] = 0
        classCount[vote] += 1
    # sort by count, descending, and return the most common label
    sortedClassCount = sorted(classCount.iteritems(),key=operator.itemgetter(1),reverse=True)
    return sortedClassCount[0][0]
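For example (again via the hypothetical trees.py):

>>> trees.majorityCnt(['yes', 'no', 'no'])
'no'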

Building the decision tree

def createTree(dataSet,labels):
    # list of class labels for the current subset
    classList = [example[-1] for example in dataSet]
    print '\nclassList',classList
    # if the first label's count equals the list length, every sample has the same class
    if classList.count(classList[0]) == len(classList):
        print 'oh! classList[0]',classList[0],'classList.count(classList[0])',classList.count(classList[0]),'len(classList)',len(classList)
        # the caller stores this return value as myTree[bestFeatLabel][value]
        return classList[0]
    # all features consumed but the classes are still mixed: return the majority class
    if len(dataSet[0]) == 1:
        return majorityCnt(classList)
    # choose the best feature to split on
    bestFeat = chooseBestFeatureToSplit(dataSet)
    # the human-readable name of that feature
    bestFeatLabel = labels[bestFeat]
    myTree = {bestFeatLabel:{}}
    # remove the chosen feature's name from the label list
    del(labels[bestFeat])
    # all values the chosen feature takes in the dataset
    featValues = [example[bestFeat] for example in dataSet]
    uniqueVals = set(featValues)
    print 'uniqueVals', uniqueVals
    for value in uniqueVals:
        subLabels = labels[:]
        # recurse on each subset
        myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet,bestFeat,value),subLabels)
        print 'bestFeatLabel',bestFeatLabel,'value',value,'myTree so far', myTree
    return myTree

Run:
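Building the tree from the test data (debug prints omitted):

>>> import trees
>>> myDat, labels = trees.createDataSet()
>>> trees.createTree(myDat, labels)
{'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}

Note that createTree deletes entries from labels in place, so pass a copy (labels[:]) if you still need the list afterwards.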

Plotting tree nodes with text annotations

Create a new file, treePlotter.py:

# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt

decisionNode = dict(boxstyle = 'sawtooth', fc = '0.8')
leafNode = dict(boxstyle = 'round4', fc = '0.8')
arrow_args = dict(arrowstyle = '<-')

def plotNode(nodeTxt, centerPt, parentPt, nodeType):
    # nodeTxt: annotation text, centerPt: end point, parentPt: start point, nodeType: box style
    # signature: annotate(s, xy, xytext=None, xycoords='data', textcoords='data', arrowprops=None, **kwargs)
    # s: the annotation text
    # xy, xytext: the start and end coordinates
    # xycoords, textcoords: the coordinate system; with 'axes fraction', (0,0) is the lower-left
    #   and (1,1) the upper-right corner of the axes; 'data' uses the data coordinate system
    # arrowstyle: '->' points at the annotated point, '<-' points at the annotation text
    createPlot.ax1.annotate(nodeTxt, xy = parentPt, xycoords = 'axes fraction', xytext = centerPt, textcoords = 'axes fraction', va = 'center', ha = 'center', bbox = nodeType, arrowprops = arrow_args)

def createPlot():
    fig = plt.figure(1, facecolor = 'white')
    fig.clf()
    createPlot.ax1 = plt.subplot(111, frameon = False)
    plotNode(U'decision node', (0.5, 0.1), (0.1, 0.5), decisionNode)
    plotNode(U'leaf node', (0.8, 0.1), (0.3, 0.8), leafNode)
    plt.show()

Run:
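Running it should pop up a matplotlib window with the two test nodes and their arrows:

>>> import treePlotter
>>> treePlotter.createPlot()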

Getting the number of leaf nodes and the depth of the tree

def getNumLeafs(myTree):
    # count the leaf nodes recursively
    numLeafs = 0
    firstStr = myTree.keys()[0]
    secondDict = myTree[firstStr]
    for key in secondDict.keys():
        # a dict value is a subtree; anything else is a leaf
        if type(secondDict[key]).__name__ == 'dict':
            numLeafs += getNumLeafs(secondDict[key])
        else:
            numLeafs += 1
    return numLeafs

def getTreeDepth(myTree):
    # the depth is 1 plus the depth of the deepest subtree
    maxDepth = 0
    firstStr = myTree.keys()[0]
    secondDict = myTree[firstStr]
    for key in secondDict.keys():
        if type(secondDict[key]).__name__ == 'dict':
            thisDepth = 1 + getTreeDepth(secondDict[key])
        else:
            thisDepth = 1
        if thisDepth > maxDepth:
            maxDepth = thisDepth
    return maxDepth

def retrieveTree(i):
    # canned trees, for testing without rebuilding
    listOfTrees =[{'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}},
                  {'no surfacing': {0: 'no', 1: {'flippers': {0: {'head': {0: 'no', 1: 'yes'}}, 1: 'no'}}}}
                  ]
    return listOfTrees[i]

Run:
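Checking against the first canned tree:

>>> import treePlotter
>>> myTree = treePlotter.retrieveTree(0)
>>> treePlotter.getNumLeafs(myTree)
3
>>> treePlotter.getTreeDepth(myTree)
2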

Plotting the tree

def plotMidText(cntrPt, parentPt, txtString):
    # label the branch between parent and child at its midpoint
    xMid = (parentPt[0] - cntrPt[0]) / 2.0 + cntrPt[0]
    yMid = (parentPt[1] - cntrPt[1]) / 2.0 + cntrPt[1]
    createPlot.ax1.text(xMid, yMid, txtString)

def plotTree(myTree, parentPt, nodeTxt):
    numLeafs = getNumLeafs(myTree)
    depth = getTreeDepth(myTree)
    firstStr = myTree.keys()[0]
    # center this node horizontally above its leaves
    cntrPt = (plotTree.xOff + (1.0 + float(numLeafs)) / 2.0 / plotTree.totalW, plotTree.yOff)
    print '1 plotTree.xOff', plotTree.xOff, 'plotTree.yOff', plotTree.yOff, 'cntrPt', cntrPt, 'parentPt', parentPt
    plotMidText(cntrPt, parentPt, nodeTxt)
    print '\tmidText "%s" drawn,' % nodeTxt, 'start', parentPt, ', end', cntrPt
    plotNode(firstStr, cntrPt, parentPt, decisionNode)
    print '\tdecisionNode "%s" drawn,' % firstStr, 'start', parentPt, ', end', cntrPt
    secondDict = myTree[firstStr]
    # move one level down
    plotTree.yOff = plotTree.yOff - 1.0 / plotTree.totalD
    print '2 plotTree.xOff', plotTree.xOff, 'plotTree.yOff', plotTree.yOff
    for key in secondDict.keys():
        if type(secondDict[key]).__name__ == 'dict':
            plotTree(secondDict[key], cntrPt, str(key))
        else:
            plotTree.xOff = plotTree.xOff + 1.0 / plotTree.totalW
            print '3 plotTree.xOff', plotTree.xOff, 'plotTree.yOff', plotTree.yOff
            plotNode(secondDict[key], (plotTree.xOff, plotTree.yOff), cntrPt, leafNode)
            print '\tleafNode "%s" drawn,' % secondDict[key], 'start (', plotTree.xOff, plotTree.yOff, '), end', cntrPt
            plotMidText((plotTree.xOff, plotTree.yOff), cntrPt, str(key))
    # move back up one level
    plotTree.yOff = plotTree.yOff + 1.0 / plotTree.totalD
    print '4 plotTree.xOff', plotTree.xOff, 'plotTree.yOff', plotTree.yOff

def createPlot(inTree):
    fig = plt.figure(1, facecolor = 'white')
    fig.clf()
    axprops = dict(xticks = [], yticks = [])
    createPlot.ax1 = plt.subplot(111, frameon = False, **axprops)
    # global layout state: total width in leaves, total depth in levels
    plotTree.totalW = float(getNumLeafs(inTree))
    plotTree.totalD = float(getTreeDepth(inTree))
    # xOff starts half a slot to the left so leaves land on slot centers
    plotTree.xOff = -0.5/plotTree.totalW
    plotTree.yOff = 1.0
    plotTree(inTree, (0.5,1.0), '')
    plt.show()

Run:
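Drawing the first canned tree (this createPlot(inTree) replaces the parameterless demo version defined earlier):

>>> import treePlotter
>>> treePlotter.createPlot(treePlotter.retrieveTree(0))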

Classifying with the decision tree

def classify(inputTree, featLabels, testVec):
    firstStr = inputTree.keys()[0]
    secondDict = inputTree[firstStr]
    # translate the feature name at the root into an index into testVec
    featIndex = featLabels.index(firstStr)
    for key in secondDict.keys():
        if testVec[featIndex] == key:
            # a dict value means another decision node; recurse, otherwise we hit a leaf
            if type(secondDict[key]).__name__ == 'dict':
                classLabel = classify(secondDict[key], featLabels, testVec)
            else:
                classLabel = secondDict[key]
    return classLabel

Run:
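Classifying two test vectors (featLabels must still contain the feature names, so use a fresh copy rather than a list consumed by createTree):

>>> import trees, treePlotter
>>> myDat, labels = trees.createDataSet()
>>> myTree = treePlotter.retrieveTree(0)
>>> trees.classify(myTree, labels, [1, 0])
'no'
>>> trees.classify(myTree, labels, [1, 1])
'yes'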

Persisting the decision tree

def storeTree(inputTree,filename):
    # serialize the tree to disk with pickle
    import pickle
    fw = open(filename, 'w')
    pickle.dump(inputTree,fw)
    fw.close()

def grabTree(filename):
    # load a previously pickled tree from disk
    import pickle
    fr = open(filename)
    return pickle.load(fr)

Run:
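Round-tripping a tree through disk (the filename is arbitrary):

>>> import trees, treePlotter
>>> myTree = treePlotter.retrieveTree(0)
>>> trees.storeTree(myTree, 'classifierStorage.txt')
>>> trees.grabTree('classifierStorage.txt')
{'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}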

Application: predicting contact lens type
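A minimal sketch of the experiment, assuming a tab-separated lenses.txt (the UCI contact-lenses data: 24 rows, four feature columns plus the lens type) sits in the working directory; the column names in lensesLabels below are assumptions chosen for readability, and the debug prints are omitted:

>>> import trees, treePlotter
>>> fr = open('lenses.txt')
>>> lenses = [inst.strip().split('\t') for inst in fr.readlines()]
>>> lensesLabels = ['age', 'prescript', 'astigmatic', 'tearRate']
>>> lensesTree = trees.createTree(lenses, lensesLabels)
>>> treePlotter.createPlot(lensesTree)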

Running this builds the lenses tree and draws it.
