Random Forest Code

1. Description

Classify the species of an iris flower from its sepal and petal measurements.
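
A minimal sketch of the task (for illustration it uses scikit-learn's built-in copy of the iris data rather than the local 8.iris.data file used in the scripts below, which is an assumption, not part of the original code):

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

iris = load_iris()
x_train, x_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.3, random_state=1)
clf = DecisionTreeClassifier(criterion='entropy', max_depth=3).fit(x_train, y_train)
print('Test accuracy: %.2f%%' % (100 * clf.score(x_test, y_test)))  # fraction of test samples classified correctly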

2. Code

1.Iris_DecisionTree.py

#!/usr/bin/python
# -*- coding:utf-8 -*-

import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline


def iris_type(s):
    it = {'Iris-setosa': 0, 'Iris-versicolor': 1, 'Iris-virginica': 2}
    return it[s]


# Feature names: sepal length, sepal width, petal length, petal width
# iris_feature = 'sepal length', 'sepal width', 'petal length', 'petal width'
iris_feature = u'花萼长度', u'花萼宽度', u'花瓣长度', u'花瓣宽度'  # Chinese labels for the plots (rendered with the SimHei font set below)

if __name__ == "__main__":
    mpl.rcParams['font.sans-serif'] = [u'SimHei']
    mpl.rcParams['axes.unicode_minus'] = False

    path = '..\\8.Regression\\8.iris.data'  # path to the data file
    data = np.loadtxt(path, dtype=float, delimiter=',', converters={4: iris_type})
    x, y = np.split(data, (4,), axis=1)
    # For visualization, use only the first two features
    x = x[:, :2]  # take the first two columns
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=1)  # hold out 30% as the test set
    #ss = StandardScaler()
    #ss = ss.fit(x_train)

    # Decision tree hyperparameters
    # min_samples_split = 10: a node may be split only if it contains at least 10 samples
    # min_samples_leaf = 10: a split is kept only if every resulting child node contains at least 10 samples; otherwise the node is not split
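    # For example (illustrative only; these pruning parameters are not used in the pipeline below):
    # clf = DecisionTreeClassifier(criterion='entropy', min_samples_split=10, min_samples_leaf=10)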
    model = Pipeline([  # Pipeline: chains the preprocessing step and the classifier into a single estimator
        ('ss', StandardScaler()),  # standardize first: zero mean, unit variance
        ('DTC', DecisionTreeClassifier(criterion='entropy', max_depth=3))])  # entropy criterion, maximum depth 3
    # clf = DecisionTreeClassifier(criterion='entropy', max_depth=3)
    model = model.fit(x_train, y_train)  # train
    y_test_hat = model.predict(x_test)   # predict on the test data

    # Save the tree structure
    # dot -Tpng -o 1.png 1.dot
    with open('.\\iris_tree.dot', 'w') as f:
        tree.export_graphviz(model.named_steps['DTC'], out_file=f)  # export the fitted tree for Graphviz visualization

    # Plotting
    N, M = 100, 100  # number of sample points along each axis
    x1_min, x1_max = x[:, 0].min(), x[:, 0].max()  # range of column 0
    x2_min, x2_max = x[:, 1].min(), x[:, 1].max()  # range of column 1
    t1 = np.linspace(x1_min, x1_max, N)  # 100 values over the range of column 0
    t2 = np.linspace(x2_min, x2_max, M)  # 100 values over the range of column 1
    x1, x2 = np.meshgrid(t1, t2)  # build the grid of sampling points from t1 and t2
    x_show = np.stack((x1.flat, x2.flat), axis=1)  # grid points: flatten each coordinate grid into a column and stack them

    # # Meaningless values, only to fill in the remaining two feature dimensions
    # # Before uncommenting this, make sure x = x[:, :2] is commented out
    # x3 = np.ones(x1.size) * np.average(x[:, 2])
    # x4 = np.ones(x1.size) * np.average(x[:, 3])
    # x_test = np.stack((x1.flat, x2.flat, x3, x4), axis=1)  # grid points
    cm_light = mpl.colors.ListedColormap(['#A0FFA0', '#FFA0A0', '#A0A0FF'])  # light colors for the decision regions
    cm_dark = mpl.colors.ListedColormap(['g', 'r', 'b'])  # dark colors for the sample points
    y_show_hat = model.predict(x_show)  # predictions over the grid, still a flat vector at this point
    y_show_hat = y_show_hat.reshape(x1.shape)  # reshape to match the grid shape
    plt.figure(facecolor='w')  # figure background: 'w' white, 'k' black (default is gray)
    plt.pcolormesh(x1, x2, y_show_hat, cmap=cm_light)  # show the predictions as background blocks
    plt.scatter(x_test[:, 0], x_test[:, 1], c=y_test.ravel(), edgecolors='k', s=100, cmap=cm_dark, marker='o')  # test samples: marker color, size and shape
    plt.scatter(x[:, 0], x[:, 1], c=y.ravel(), edgecolors='k', s=40, cmap=cm_dark)  # all samples
    plt.xlabel(iris_feature[0], fontsize=15)  # x-axis label
    plt.ylabel(iris_feature[1], fontsize=15)
    plt.xlim(x1_min, x1_max)  # x-axis plotting range
    plt.ylim(x2_min, x2_max)
    plt.grid(True)  # draw grid lines
    plt.title(u'鸢尾花数据的决策树分类', fontsize=17)
    plt.show() 

    # Prediction results on the test set
    y_test = y_test.reshape(-1)
    print y_test_hat
    print y_test
    result = (y_test_hat == y_test)   # True means the prediction is correct, False means it is wrong
    acc = np.mean(result)
    print '准确度: %.2f%%' % (100 * acc)

    # Overfitting: error rate as a function of tree depth
    depth = np.arange(1, 15)  # range of depths to sweep
    err_list = []
    for d in depth:
        clf = DecisionTreeClassifier(criterion='entropy', max_depth=d)  # decision tree of depth d
        clf = clf.fit(x_train, y_train)
        y_test_hat = clf.predict(x_test)  # predict on the test data
        result = (y_test_hat == y_test)  # True means the prediction is correct, False means it is wrong
        err = 1 - np.mean(result)  # the mean of result is the accuracy; 1 minus it is the error rate
        err_list.append(err)
        print d, ' 错误率: %.2f%%' % (100 * err)
    plt.figure(facecolor='w')
    plt.plot(depth, err_list, 'ro-', lw=2)  # plot tree depth against error rate
    plt.xlabel(u'决策树深度', fontsize=15)
    plt.ylabel(u'错误率', fontsize=15)
    plt.title(u'决策树深度与过拟合', fontsize=17)
    plt.grid(True)
    plt.show()
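
The script writes the fitted tree to iris_tree.dot, and the dot command shown in its comment converts that file to a PNG. Below is a minimal sketch of doing the same conversion from Python, assuming the graphviz package and the Graphviz binaries are installed (an assumption; this step is not part of the original script):

# Render the exported .dot file to a PNG (assumes the `graphviz` Python package is installed)
import graphviz

with open('iris_tree.dot') as f:
    dot_source = f.read()
graphviz.Source(dot_source).render('iris_tree', format='png', cleanup=True)  # writes iris_tree.png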

2.Iris_DecisionTree_Enum.py

#!/usr/bin/python
# -*- coding:utf-8 -*-

import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn.tree import DecisionTreeClassifier


def iris_type(s):
    it = {'Iris-setosa': 0, 'Iris-versicolor': 1, 'Iris-virginica': 2}
    return it[s]

# 'sepal length', 'sepal width', 'petal length', 'petal width'
iris_feature = u'花萼长度', u'花萼宽度', u'花瓣长度', u'花瓣宽度'

if __name__ == "__main__":
    mpl.rcParams['font.sans-serif'] = [u'SimHei']  # SimHei (Chinese font); alternatives: FangSong/KaiTi
    mpl.rcParams['axes.unicode_minus'] = False

    path = '..\\8.Regression\\8.iris.data'  # path to the data file
    data = np.loadtxt(path, dtype=float, delimiter=',', converters={4: iris_type})
    x_prime, y = np.split(data, (4,), axis=1)

    feature_pairs = [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]  # enumerate every pair of feature columns
    plt.figure(figsize=(10, 9), facecolor='#FFFFFF')
    for i, pair in enumerate(feature_pairs):
        # Prepare the data
        x = x_prime[:, pair]

        # Train a decision tree
        clf = DecisionTreeClassifier(criterion='entropy', min_samples_leaf=3)  # each leaf must contain at least 3 samples
        dt_clf = clf.fit(x, y)

        # Plotting
        N, M = 500, 500  # number of sample points along each axis
        x1_min, x1_max = x[:, 0].min(), x[:, 0].max()  # range of column 0
        x2_min, x2_max = x[:, 1].min(), x[:, 1].max()  # range of column 1
        t1 = np.linspace(x1_min, x1_max, N)
        t2 = np.linspace(x2_min, x2_max, M)
        x1, x2 = np.meshgrid(t1, t2)  # build the grid of sampling points
        x_test = np.stack((x1.flat, x2.flat), axis=1)  # grid points for prediction

        # Prediction results on the training set
        y_hat = dt_clf.predict(x)
        y = y.reshape(-1)
        c = np.count_nonzero(y_hat == y)    # count the number of correct predictions
        print '特征:  ', iris_feature[pair[0]], ' + ', iris_feature[pair[1]],
        print '\t预测正确数目:', c,
        print '\t准确率: %.2f%%' % (100 * float(c) / float(len(y)))

        # Display
        cm_light = mpl.colors.ListedColormap(['#A0FFA0', '#FFA0A0', '#A0A0FF'])
        cm_dark = mpl.colors.ListedColormap(['g', 'r', 'b'])
        y_hat = dt_clf.predict(x_test)  # predictions over the grid
        y_hat = y_hat.reshape(x1.shape)  # reshape to match the grid
        plt.subplot(2, 3, i+1)
        plt.pcolormesh(x1, x2, y_hat, cmap=cm_light)  # decision regions
        plt.scatter(x[:, 0], x[:, 1], c=y, edgecolors='k', cmap=cm_dark)  # samples
        plt.xlabel(iris_feature[pair[0]], fontsize=14)
        plt.ylabel(iris_feature[pair[1]], fontsize=14)
        plt.xlim(x1_min, x1_max)
        plt.ylim(x2_min, x2_max)
        plt.grid()
    plt.suptitle(u'决策树对鸢尾花数据的两特征组合的分类结果', fontsize=18)
    plt.tight_layout(pad=2)
    plt.subplots_adjust(top=0.92)
    plt.show()

Decision tree regression
3.DecisionTreeRegressor.py

#!/usr/bin/python
# -*- coding:utf-8 -*-

import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor


if __name__ == "__main__":
    N = 100
    x = np.random.rand(N) * 6 - 3     # map from [0,1) to [-3,3)
    x.sort()
    y = np.sin(x) + np.random.randn(N) * 0.05  # sine targets with a little noise
    print y
    x = x.reshape(-1, 1)  # reshape to N samples, each with a single feature
    print x

    reg = DecisionTreeRegressor(criterion='mse', max_depth=9)  # regression tree: mean squared error criterion ('squared_error' in newer scikit-learn) instead of information gain
    dt = reg.fit(x, y)  # train
    x_test = np.linspace(-3, 3, 50).reshape(-1, 1) 
    y_hat = dt.predict(x_test)
    plt.plot(x, y, 'r*', linewidth=2, label='Actual')  # actual values
    plt.plot(x_test, y_hat, 'g-', linewidth=2, label='Predict')  # predictions
    plt.legend(loc='upper left')
    plt.grid()
    plt.show()

    # Compare the effect of tree depth
    depth = [2, 4, 6, 8, 10]  # depths to sweep
    clr = 'rgbmy'
    reg = [DecisionTreeRegressor(criterion='mse', max_depth=d) for d in depth]  # one regressor per depth

    plt.plot(x, y, 'k^', linewidth=2, label='Actual')
    x_test = np.linspace(-3, 3, 50).reshape(-1, 1)
    for i, r in enumerate(reg):
        dt = r.fit(x, y)
        y_hat = dt.predict(x_test)
        plt.plot(x_test, y_hat, '-', color=clr[i], linewidth=2, label='Depth=%d' % depth[i])
    plt.legend(loc='upper left')
    plt.grid()
    plt.show()
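
To put numbers on the depth comparison above, here is a short sketch (an addition, using sklearn.metrics.mean_squared_error and data generated the same way as in the script) that prints the training error for each depth; the deeper the tree, the closer the training error gets to zero, which is the overfitting the plot shows:

# Sketch (not in the original script): training MSE shrinks as max_depth grows.
import numpy as np
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error

N = 100
x = np.sort(np.random.rand(N) * 6 - 3).reshape(-1, 1)  # same kind of data as above
y = np.sin(x.ravel()) + np.random.randn(N) * 0.05
for d in [2, 4, 6, 8, 10]:
    reg = DecisionTreeRegressor(criterion='mse', max_depth=d).fit(x, y)
    mse = mean_squared_error(y, reg.predict(x))         # error on the training data itself
    print('max_depth=%d  training MSE=%.5f' % (d, mse))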

Multi-output example: when the target variables are correlated, a single tree can predict them jointly (multi-output regression).
4.MultiOutput_DTR.py

#!/usr/bin/python
# -*- coding:utf-8 -*-

import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor

if __name__ == "__main__":
    N = 300
    x = np.random.rand(N) * 8 - 4     # [-4,4)
    x.sort()  # sort in ascending order
    y1 = np.sin(x) + 3 + np.random.randn(N) * 0.1  # sine and cosine targets, with noise
    y2 = np.cos(0.3*x) + np.random.randn(N) * 0.01
    # y1 = np.sin(x) + np.random.randn(N) * 0.05
    # y2 = np.cos(x) + np.random.randn(N) * 0.1
    y = np.vstack((y1, y2)).T  # stack vertically, then transpose to shape (N, 2): one row per sample, two outputs
    x = x.reshape(-1, 1)  # reshape to N samples, each with a single feature

    deep = 3
    reg = DecisionTreeRegressor(criterion='mse', max_depth=deep)  # MSE criterion, maximum depth 3
    dt = reg.fit(x, y)

    x_test = np.linspace(-4, 4, num=1000).reshape(-1, 1)
    print x_test
    y_hat = dt.predict(x_test)
    print y_hat
    plt.scatter(y[:, 0], y[:, 1], c='r', s=40, label='Actual')
    plt.scatter(y_hat[:, 0], y_hat[:, 1], c='g', marker='s', s=100, label='Depth=%d' % deep, alpha=1)
    plt.legend(loc='upper left')
    plt.xlabel('y1')
    plt.ylabel('y2')
    plt.grid()
    plt.show()

Random forest classification
5.Iris_RandomForest_Enum.py

#!/usr/bin/python
# -*- coding:utf-8 -*-

import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn.ensemble import RandomForestClassifier  # random forest classifier


def iris_type(s):
    it = {'Iris-setosa': 0, 'Iris-versicolor': 1, 'Iris-virginica': 2}
    return it[s]

# 'sepal length', 'sepal width', 'petal length', 'petal width'
iris_feature = u'花萼长度', u'花萼宽度', u'花瓣长度', u'花瓣宽度'

if __name__ == "__main__":
    mpl.rcParams['font.sans-serif'] = [u'SimHei']  # SimHei (Chinese font); alternatives: FangSong/KaiTi
    mpl.rcParams['axes.unicode_minus'] = False

    path = '..\\8.Regression\\8.iris.data'  # path to the data file
    data = np.loadtxt(path, dtype=float, delimiter=',', converters={4: iris_type})
    x_prime, y = np.split(data, (4,), axis=1)

    feature_pairs = [[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3]]
    plt.figure(figsize=(10, 9), facecolor='#FFFFFF')  # one figure with a white background
    for i, pair in enumerate(feature_pairs):  # iterate over the feature pairs; i is the subplot index
        # Prepare the data
        x = x_prime[:, pair]

        # Random forest
        clf = RandomForestClassifier(n_estimators=200, criterion='entropy', max_depth=4)  # 200 trees, entropy criterion, maximum depth 4
        rf_clf = clf.fit(x, y.ravel())  # train

        # Plotting
        N, M = 500, 500  # number of sample points along each axis
        x1_min, x1_max = x[:, 0].min(), x[:, 0].max()  # range of column 0
        x2_min, x2_max = x[:, 1].min(), x[:, 1].max()  # range of column 1
        t1 = np.linspace(x1_min, x1_max, N)
        t2 = np.linspace(x2_min, x2_max, M)
        x1, x2 = np.meshgrid(t1, t2)  # build the grid of sampling points
        x_test = np.stack((x1.flat, x2.flat), axis=1)  # grid points for prediction

        # Prediction results on the training set
        y_hat = rf_clf.predict(x)  # predict on the training data
        y = y.reshape(-1)
        c = np.count_nonzero(y_hat == y)    # count the number of correct predictions
        print '特征:  ', iris_feature[pair[0]], ' + ', iris_feature[pair[1]],  # print the feature pair
        print '\t预测正确数目:', c,
        print '\t准确率: %.2f%%' % (100 * float(c) / float(len(y)))

        # Display
        cm_light = mpl.colors.ListedColormap(['#A0FFA0', '#FFA0A0', '#A0A0FF'])
        cm_dark = mpl.colors.ListedColormap(['g', 'r', 'b'])
        y_hat = rf_clf.predict(x_test)  # predictions over the grid
        y_hat = y_hat.reshape(x1.shape)  # reshape to match the grid
        plt.subplot(2, 3, i+1)
        plt.pcolormesh(x1, x2, y_hat, cmap=cm_light)  # decision regions
        plt.scatter(x[:, 0], x[:, 1], c=y, edgecolors='k', cmap=cm_dark)  # samples
        plt.xlabel(iris_feature[pair[0]], fontsize=14)
        plt.ylabel(iris_feature[pair[1]], fontsize=14)
        plt.xlim(x1_min, x1_max)
        plt.ylim(x2_min, x2_max)
        plt.grid()
        
    plt.tight_layout(pad=2.5)  # padding between the subplots and the figure edge
    plt.subplots_adjust(top=0.92)
    plt.suptitle(u'随机森林对鸢尾花数据的两特征组合的分类结果', fontsize=18)  # overall title
    plt.show()
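
Because each tree in a random forest is trained on a bootstrap sample, the left-out (out-of-bag) samples give a built-in validation estimate without a separate test split. A minimal sketch using scikit-learn's oob_score option on all four features (an addition, not part of the scripts above):

# Sketch (not in the original script): out-of-bag (OOB) accuracy using all four features.
import numpy as np
from sklearn.ensemble import RandomForestClassifier

def iris_type(s):
    return {'Iris-setosa': 0, 'Iris-versicolor': 1, 'Iris-virginica': 2}[s]

data = np.loadtxt('..\\8.Regression\\8.iris.data', dtype=float, delimiter=',', converters={4: iris_type})
x, y = np.split(data, (4,), axis=1)
rf = RandomForestClassifier(n_estimators=200, criterion='entropy', max_depth=4, oob_score=True)
rf.fit(x, y.ravel())
print('OOB accuracy: %.2f%%' % (100 * rf.oob_score_))  # validation estimate from the out-of-bag samples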
