[机器学习][源码]机器学习实战ch3 决策树

把代码保存于此,python3实现,详解就参考《机器学习实战》(Peter Harrington)啦...

1. trees.py

#详细注释: https://www.cnblogs.com/zy230530/p/6813250.html


# 3-1
from math import log
def calcShannonEnt(dataSet):
    """Compute the Shannon entropy of a data set.

    Args:
        dataSet: list of records; the last element of each record is the
            class label.

    Returns:
        float: entropy H = -sum(p * log2(p)) over the label distribution
        (0.0 for an empty data set).
    """
    numEntries = len(dataSet)
    # Count how often each class label (last column) occurs.
    labelCounts = {}
    for featVec in dataSet:
        currentLabel = featVec[-1]
        labelCounts[currentLabel] = labelCounts.get(currentLabel, 0) + 1
    shannonEnt = 0.0
    for key in labelCounts:
        prob = labelCounts[key] / float(numEntries)
        shannonEnt -= prob * log(prob, 2)
    return shannonEnt


def createDataSet():
    """Return the toy 'is it a fish?' data set and its feature names.

    Returns:
        tuple(dataSet, labels): dataSet is a list of [feat1, feat2, label]
        records; labels gives the meaning of the two feature columns.
    """
    dataSet = [[1, 1, 'yes'],
               [1, 1, 'yes'],
               [1, 0, 'no'],
               [0, 1, 'no'],
               [0, 1, 'no']]
    # Meaning of the first/second feature column.
    labels = ['no surfacing', 'flippers']
    return dataSet, labels


# 3-2    
def splitDataSet(dataSet, axis, value):
    """Select records whose feature `axis` equals `value`, dropping that column.

    Args:
        dataSet: list of records (lists).
        axis: index of the feature column to filter on.
        value: required value of that feature.

    Returns:
        New list of records with column `axis` removed; dataSet is unchanged.
    """
    retDataSet = []
    for featVec in dataSet:
        if featVec[axis] == value:
            # Concatenate the slices around `axis` so the column disappears.
            retDataSet.append(featVec[:axis] + featVec[axis + 1:])
    return retDataSet


# 3-3
def chooseBestFeatureToSplit(dataSet):
    """Pick the feature with the highest information gain (ID3 criterion).

    Args:
        dataSet: list of records; record[-1] is the class label.

    Returns:
        int: index of the best feature, or -1 when no split improves entropy.
    """
    numFeatures = len(dataSet[0]) - 1  # last column is the class label
    baseEntropy = calcShannonEnt(dataSet)
    bestInfoGain = 0.0
    bestFeature = -1
    for i in range(numFeatures):
        uniqueVals = set(example[i] for example in dataSet)
        # Weighted average entropy of the partitions induced by feature i.
        newEntropy = 0.0
        for value in uniqueVals:
            subDataSet = splitDataSet(dataSet, i, value)
            prob = len(subDataSet) / float(len(dataSet))
            newEntropy += prob * calcShannonEnt(subDataSet)
        infoGain = baseEntropy - newEntropy
        if infoGain > bestInfoGain:
            bestInfoGain = infoGain
            bestFeature = i
    return bestFeature


'''
from importlib import reload
reload(trees)
myDat,labels=trees.createDataSet()
feature=chooseBestFeatureToSplit(myDat)
'''


# 3-4 create the tree
import operator
#Majority vote: decide a leaf node's class by the most common label
def majorityCnt(classList):
    """Return the most frequent label in classList (majority vote).

    Used to decide a leaf's class when all features are exhausted but the
    labels are still mixed.

    Args:
        classList: non-empty list of class labels.

    Returns:
        The label with the highest count; ties keep first-seen order.
    """
    classCount = {}
    for vote in classList:
        classCount[vote] = classCount.get(vote, 0) + 1
    # Python 3: dict.iteritems() no longer exists -- use items().
    # Sort (label, count) pairs by count, descending; sort is stable, so
    # equal counts preserve insertion (first-seen) order.
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]




def createTree(dataSet, labels):
    """Recursively build an ID3 decision tree.

    Args:
        dataSet: list of records; record[-1] is the class label.
        labels: feature-name list matching the feature columns.
            NOTE: this list is mutated (the chosen feature's name is removed);
            pass a copy if the caller still needs it.

    Returns:
        Nested dict {featureName: {featureValue: subtree_or_label}}, or a
        bare class label for a leaf.
    """
    classList = [example[-1] for example in dataSet]
    # Stop: every record has the same class label.
    if classList.count(classList[0]) == len(classList):
        return classList[0]
    # Stop: all features consumed (only the label column left) -> majority vote.
    if len(dataSet[0]) == 1:
        return majorityCnt(classList)
    bestFeat = chooseBestFeatureToSplit(dataSet)
    bestFeatLabel = labels[bestFeat]
    myTree = {bestFeatLabel: {}}
    del(labels[bestFeat])  # feature is consumed at this level
    featValues = [example[bestFeat] for example in dataSet]
    uniqueVals = set(featValues)
    for value in uniqueVals:
        subLabels = labels[:]  # copy so sibling branches don't interfere
        myTree[bestFeatLabel][value] = createTree(
            splitDataSet(dataSet, bestFeat, value), subLabels)
    return myTree
'''
reload(trees)
myTree=trees.createTree(myDat,labels)
'''


#3-8
#Classify a test instance with an already-built decision tree
#@inputTree  the built decision tree
#@featLabels list of feature names
#@testVec    the test instance's feature values
def classify(inputTree, featLabels, testVec):
    """Classify a test vector by walking a built decision tree.

    Args:
        inputTree: nested dict tree, e.g. from createTree()/retrieveTree().
        featLabels: list of feature names; gives each feature's index.
        testVec: feature values for the instance to classify.

    Returns:
        The class label reached by following testVec's feature values.
    """
    # The tree's single top-level key is the feature this node splits on.
    rootFeature = list(inputTree.keys())[0]
    # Its value maps each feature value (branch) to a subtree or a label.
    branches = inputTree[rootFeature]
    # Index of the root feature within the test vector.
    featIndex = featLabels.index(rootFeature)
    for branchValue in branches.keys():
        if testVec[featIndex] != branchValue:
            continue
        child = branches[branchValue]
        if type(child).__name__ == 'dict':
            # Internal node: keep descending with the same test vector.
            classLabel = classify(child, featLabels, testVec)
        else:
            # Leaf node: the stored value is the class label.
            classLabel = child
    return classLabel
'''
myDat,labels=trees.createDataSet()
myTree=treePlotter.retrieveTree(0) #decision tree
trees.classify(myTree,labels,[1,1])
trees.classify(myTree,labels,[0,1])
'''


#3-9
#Tree persistence: serialize the decision tree with Python's pickle module
#so it can be reloaded later; for large data sets this saves rebuilding time.
#Store the decision tree with pickle:
def storeTree(inputTree, filename):
    """Serialize a decision tree to `filename` with pickle.

    Args:
        inputTree: the nested-dict tree to persist.
        filename: path of the file to (over)write.
    """
    import pickle
    # Must be 'wb': pickle.dump writes bytes; text mode 'w' raises
    # "write() argument must be str, not bytes".
    with open(filename, 'wb') as fw:
        pickle.dump(inputTree, fw)


#取决策树操作
def grabTree(filename):
    """Load a decision tree previously saved with storeTree().

    Args:
        filename: path to a file written by storeTree().

    Returns:
        The unpickled decision tree object.
    """
    import pickle
    # 'rb' mirrors the binary 'wb' mode used when the tree was stored;
    # the with-block guarantees the file handle is closed.
    with open(filename, 'rb') as fr:
        return pickle.load(fr)
'''
trees.storeTree(myTree,'1.txt')
getTree=trees.grabTree('1.txt')
myTree==getTree
'''


#ch3.4
def createLensesTree(filename):
    """Build a decision tree from the tab-separated 'lenses' data file.

    Args:
        filename: path to a text file with one record per line, fields
            separated by tabs; the last field is the class label.

    Returns:
        The decision tree produced by createTree().
    """
    # with-block ensures the data file is closed even on error.
    with open(filename) as fr:
        lenses = [inst.strip().split('\t') for inst in fr]
    lensesLabels = ['age', 'prescript', 'astigmatic', 'tearRate']
    return createTree(lenses, lensesLabels)
'''
import treePlotter
from importlib import reload
reload(trees)
lensesTree = trees.createLensesTree('lenses.txt')
treePlotter.createPlot(lensesTree)

'''

2. treePlotter.py

#参考:http://blog.csdn.net/sinat_17196995/article/details/55670932
#pay attention to 3-6


#3-5 plotting (not important)
import matplotlib.pyplot as plt
# Shared drawing styles for the plotting helpers below.
decisionNode = dict(boxstyle="sawtooth", fc="0.8")  # box style for internal (decision) nodes
leafNode = dict(boxstyle="round4", fc="0.8")        # box style for leaf (label) nodes
arrow_args = dict(arrowstyle="<-")                  # arrow style for parent->child edges


def plotNode(nodeTxt, centerPt, parentPt, nodeType):
    """Draw one annotated node at centerPt with an arrow from parentPt.

    Relies on createPlot.ax1 (the axes set up by createPlot) and the
    module-level arrow_args style.
    """
    axes = createPlot.ax1
    axes.annotate(
        nodeTxt,
        xy=parentPt, xycoords='axes fraction',
        xytext=centerPt, textcoords='axes fraction',
        va="center", ha="center",
        bbox=nodeType, arrowprops=arrow_args,
    )


'''
def createPlot(): #主函数
    fig = plt.figure(1, facecolor='white')
    fig.clf()
    axprops = dict(xticks=[], yticks=[])
    #createPlot.ax1 = plt.subplot(111, frameon=False, **axprops)  # no ticks
    createPlot.ax1 = plt.subplot(111, frameon=False) #ticks for demo puropses
    plotNode(U'decisionNode',(0.5,0.1),(0.1,0.5),decisionNode)
    plotNode(U'leafNode',(0.8,0.1),(0.8,0.1),leafNode)
    plt.show()
'''


#3-6 recursively compute the leaf count and the depth
#Annotated-tree construction: how a tree is stored as a Python dict
def getNumLeafs(myTree):
    """Count the leaf nodes of a decision tree stored as nested dicts.

    Args:
        myTree: {featureName: {featureValue: subtree_or_label}}.

    Returns:
        int: number of leaves (class labels) in the whole subtree.
    """
    leafCount = 0
    # The single top-level key is the feature this subtree splits on
    # (Python 3: wrap keys() in list() to index it).
    rootKey = list(myTree.keys())[0]
    branches = myTree[rootKey]
    for child in branches.values():
        if type(child).__name__ == 'dict':
            # A dict child is another decision node: recurse.
            leafCount += getNumLeafs(child)
        else:
            # Anything else is a class label, i.e. one leaf.
            leafCount += 1
    return leafCount


def getTreeDepth(myTree):
    """Return the depth (number of decision levels) of a nested-dict tree.

    Args:
        myTree: {featureName: {featureValue: subtree_or_label}}.

    Returns:
        int: 1 for a tree whose branches are all leaves, more otherwise.
    """
    maxDepth = 0
    # Python 3: take the first key via list() (keys() is a view).
    firstSides = list(myTree.keys())
    firstStr = firstSides[0]
    secondDict = myTree[firstStr]
    for key in secondDict.keys():
        if type(secondDict[key]).__name__ == 'dict':
            # Internal node: one level plus the deepest sub-branch.
            thisDepth = 1 + getTreeDepth(secondDict[key])
        else:
            thisDepth = 1
        # BUGFIX: this comparison must run per branch, inside the loop
        # (the pasted original had it dedented and unindented -> SyntaxError).
        if thisDepth > maxDepth:
            maxDepth = thisDepth
    return maxDepth


# Canned trees so tests/plots don't have to rebuild one every time.
def retrieveTree(i):
    """Return the i-th pre-built decision tree (i in {0, 1})."""
    cannedTrees = [
        {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}},
        {'no surfacing': {0: 'no', 1: {'flippers': {0: {'head': {0: 'no', 1: 'yes'}},
                                                    1: 'no'}}}},
    ]
    return cannedTrees[i]


'''
from importlib import reload
reload(treePlotter)
myTree=treePlotter.retrieveTree(0)
treePlotter.getNumLeafs(myTree)
treePlotter.getTreeDepth(myTree)
'''


#3-7 plotting (not important)
def plotMidText(cntrPt, parentPt, txtString):
    """Write txtString at the midpoint of the edge from cntrPt to parentPt.

    Relies on createPlot.ax1 (the axes set up by createPlot).
    """
    # Midpoint of the segment, in axes-fraction coordinates.
    xMid = cntrPt[0] + (parentPt[0] - cntrPt[0]) / 2.0
    yMid = cntrPt[1] + (parentPt[1] - cntrPt[1]) / 2.0
    createPlot.ax1.text(xMid, yMid, txtString, va="center", ha="center", rotation=30)


def plotTree(myTree, parentPt, nodeTxt):
    """Recursively draw subtree myTree attached to parentPt; nodeTxt labels the incoming edge.

    Uses layout state stored as attributes on plotTree itself
    (totalW/totalD/xOff/yOff), initialized by createPlot().
    """
    numLeafs = getNumLeafs(myTree)  # width of this subtree (leaf count; scales against plotTree.totalW)
    depth = getTreeDepth(myTree)  # subtree depth (computed but not used below)
    # Python 3: dict.keys() is a view, so materialize it to index the first key.
    firstSides = list(myTree.keys())
    firstStr = firstSides[0]  # feature name at this subtree's root
    cntrPt = (plotTree.xOff + (1.0 + float(numLeafs))/2.0/plotTree.totalW, plotTree.yOff)  # x centered over this subtree's share of the leaf slots
    plotMidText(cntrPt, parentPt, nodeTxt)  # label the edge from the parent
    plotNode(firstStr, cntrPt, parentPt, decisionNode)
    secondDict = myTree[firstStr]
    plotTree.yOff = plotTree.yOff - 1.0/plotTree.totalD  # descend one level (tree is drawn top to bottom)
    for key in secondDict.keys():
        if type(secondDict[key]).__name__=='dict':# dict child => internal node
            plotTree(secondDict[key],cntrPt,str(key))        # recurse into the branch
        else:   # non-dict child => leaf node
            plotTree.xOff = plotTree.xOff + 1.0/plotTree.totalW # advance to the next leaf slot on the x axis
            plotNode(secondDict[key], (plotTree.xOff, plotTree.yOff), cntrPt, leafNode)# draw the leaf box
            plotMidText((plotTree.xOff, plotTree.yOff), cntrPt, str(key))# label the edge with the branch value
    plotTree.yOff = plotTree.yOff + 1.0/plotTree.totalD # restore y so the caller continues at its own level


def createPlot(inTree):
    """Render the decision tree inTree with matplotlib (plotting entry point)."""
    figure = plt.figure(1, facecolor='white')
    figure.clf()
    axprops = dict(xticks=[], yticks=[])
    # Axes shared with plotNode/plotMidText via a function attribute.
    createPlot.ax1 = plt.subplot(111, frameon=False, **axprops)
    # Layout state consumed by plotTree, stored as attributes on it.
    plotTree.totalW = float(getNumLeafs(inTree))  # total width = leaf count
    plotTree.totalD = float(getTreeDepth(inTree))  # total height = depth
    plotTree.xOff = -0.5 / plotTree.totalW  # running x cursor, half a slot left of origin
    plotTree.yOff = 1.0  # start drawing at the top
    plotTree(inTree, (0.5, 1.0), '')
    plt.show()


'''
reload(treePlotter)
treePlotter.createPlot(myTree)


myTree['no surfacing'][2]='maybe'
treePlotter.createPlot(myTree)
'''

你可能感兴趣的:(python,机器学习)