Extension: learning how CART generates a regression tree model (reference: 统计学习方法, *Statistical Learning Methods*)
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from math import log
```
```python
def create_data():
    # the loan-applicant sample data from 统计学习方法
    # (年龄 = age, 有工作 = has a job, 有自己的房子 = owns a house,
    #  信贷情况 = credit rating, 类别 = class)
    datasets = [['青年', '否', '否', '一般', '否'],
                ['青年', '否', '否', '好', '否'],
                ['青年', '是', '否', '好', '是'],
                ['青年', '是', '是', '一般', '是'],
                ['青年', '否', '否', '一般', '否'],
                ['中年', '否', '否', '一般', '否'],
                ['中年', '否', '否', '好', '否'],
                ['中年', '是', '是', '好', '是'],
                ['中年', '否', '是', '非常好', '是'],
                ['中年', '否', '是', '非常好', '是'],
                ['老年', '否', '是', '非常好', '是'],
                ['老年', '否', '是', '好', '是'],
                ['老年', '是', '否', '好', '是'],
                ['老年', '是', '否', '非常好', '是'],
                ['老年', '否', '否', '一般', '否'],
                ]
    labels = [u'年龄', u'有工作', u'有自己的房子', u'信贷情况', u'类别']
    # return the dataset and the name of each dimension
    return datasets, labels

datasets, labels = create_data()
train_data = pd.DataFrame(datasets, columns=labels)
train_data
```
|    | 年龄 | 有工作 | 有自己的房子 | 信贷情况 | 类别 |
|----|------|--------|--------------|----------|------|
| 0  | 青年 | 否     | 否           | 一般     | 否   |
| 1  | 青年 | 否     | 否           | 好       | 否   |
| 2  | 青年 | 是     | 否           | 好       | 是   |
| 3  | 青年 | 是     | 是           | 一般     | 是   |
| 4  | 青年 | 否     | 否           | 一般     | 否   |
| 5  | 中年 | 否     | 否           | 一般     | 否   |
| 6  | 中年 | 否     | 否           | 好       | 否   |
| 7  | 中年 | 是     | 是           | 好       | 是   |
| 8  | 中年 | 否     | 是           | 非常好   | 是   |
| 9  | 中年 | 否     | 是           | 非常好   | 是   |
| 10 | 老年 | 否     | 是           | 非常好   | 是   |
| 11 | 老年 | 否     | 是           | 好       | 是   |
| 12 | 老年 | 是     | 否           | 好       | 是   |
| 13 | 老年 | 是     | 否           | 非常好   | 是   |
| 14 | 老年 | 否     | 否           | 一般     | 否   |
```python
# entropy: H(D) = -Σ p_k * log2(p_k)
def calc_ent(datasets):
    data_length = len(datasets)
    label_count = {}
    for i in range(data_length):
        label = datasets[i][-1]
        if label not in label_count:
            label_count[label] = 0
        label_count[label] += 1
    ent = -sum([(p / data_length) * log(p / data_length, 2)
                for p in label_count.values()])
    return ent

# conditional entropy: H(D|A) = Σ |D_i|/|D| * H(D_i)
def cond_ent(datasets, axis=0):
    data_length = len(datasets)
    feature_sets = {}
    for i in range(data_length):
        feature = datasets[i][axis]
        if feature not in feature_sets:
            feature_sets[feature] = []
        feature_sets[feature].append(datasets[i])
    cond_ent = sum([(len(p) / data_length) * calc_ent(p)
                    for p in feature_sets.values()])
    return cond_ent

# information gain: g(D, A) = H(D) - H(D|A)
def info_gain(ent, cond_ent):
    return ent - cond_ent

def info_gain_train(datasets):
    count = len(datasets[0]) - 1
    ent = calc_ent(datasets)
    best_feature = []
    for c in range(count):
        c_info_gain = info_gain(ent, cond_ent(datasets, axis=c))
        best_feature.append((c, c_info_gain))
        print('feature({}) - info_gain - {:.3f}'.format(labels[c], c_info_gain))
    # pick the feature with the largest information gain
    best_ = max(best_feature, key=lambda x: x[-1])
    return 'feature({}) has the largest information gain and is chosen as the root feature'.format(labels[best_[0]])

info_gain_train(np.array(datasets))  # convert the dataset to an ndarray
```
```
feature(年龄) - info_gain - 0.083
feature(有工作) - info_gain - 0.324
feature(有自己的房子) - info_gain - 0.420
feature(信贷情况) - info_gain - 0.363
'feature(有自己的房子) has the largest information gain and is chosen as the root feature'
```
```python
# the tree node: an internal node stores the splitting feature and a dict of
# child subtrees keyed by feature value; note that in this implementation
# root=True marks a *leaf*, which stores only a class label
class Node:
    def __init__(self, root=True, label=None, feature_name=None, feature=None):
        self.root = root
        self.label = label
        self.feature_name = feature_name
        self.feature = feature
        self.tree = {}
        self.result = {'label': self.label, 'feature': self.feature, 'tree': self.tree}

    def __repr__(self):
        return '{}'.format(self.result)

    def add_node(self, val, node):
        self.tree[val] = node

    def predict(self, features):
        if self.root is True:
            return self.label
        return self.tree[features[self.feature]].predict(features)
```
```python
# the ID3 decision tree: split on the feature with the largest information
# gain, and stop when the gain falls below the threshold epsilon
class DTree:
    def __init__(self, epsilon=0.1):
        self.epsilon = epsilon
        self._tree = {}

    # entropy
    @staticmethod
    def calc_ent(datasets):
        data_length = len(datasets)
        label_count = {}
        for i in range(data_length):
            label = datasets[i][-1]
            if label not in label_count:
                label_count[label] = 0
            label_count[label] += 1
        ent = -sum([(p / data_length) * log(p / data_length, 2)
                    for p in label_count.values()])
        return ent

    # conditional entropy
    def cond_ent(self, datasets, axis=0):
        data_length = len(datasets)
        feature_sets = {}
        for i in range(data_length):
            feature = datasets[i][axis]
            if feature not in feature_sets:
                feature_sets[feature] = []
            feature_sets[feature].append(datasets[i])
        cond_ent = sum([(len(p) / data_length) * self.calc_ent(p)
                        for p in feature_sets.values()])
        return cond_ent

    # information gain
    @staticmethod
    def info_gain(ent, cond_ent):
        return ent - cond_ent

    def info_gain_train(self, datasets):
        count = len(datasets[0]) - 1
        ent = self.calc_ent(datasets)
        best_feature = []
        for c in range(count):
            c_info_gain = self.info_gain(ent, self.cond_ent(datasets, axis=c))
            best_feature.append((c, c_info_gain))
        # pick the feature with the largest information gain
        best_ = max(best_feature, key=lambda x: x[-1])
        return best_

    def train(self, train_data):
        """
        input: dataset D (a DataFrame), feature set A, threshold eta
        output: decision tree T
        """
        _, y_train, features = train_data.iloc[:, :-1], train_data.iloc[:, -1], train_data.columns[:-1]
        # 1. if all instances in D belong to one class Ck, T is a single-node
        #    tree labelled Ck; return T
        if len(y_train.value_counts()) == 1:
            return Node(root=True, label=y_train.iloc[0])
        # 2. if A is empty, T is a single-node tree labelled with the majority
        #    class Ck in D; return T
        if len(features) == 0:
            return Node(root=True,
                        label=y_train.value_counts().sort_values(ascending=False).index[0])
        # 3. compute the information gain of every feature; Ag is the feature
        #    with the largest gain
        max_feature, max_info_gain = self.info_gain_train(np.array(train_data))
        max_feature_name = features[max_feature]
        # 4. if Ag's gain is below the threshold eta, T is a single-node tree
        #    labelled with the majority class Ck in D; return T
        if max_info_gain < self.epsilon:
            return Node(root=True,
                        label=y_train.value_counts().sort_values(ascending=False).index[0])
        # 5. otherwise partition D into subsets by the values of Ag
        node_tree = Node(root=False, feature_name=max_feature_name, feature=max_feature)
        feature_list = train_data[max_feature_name].value_counts().index
        for f in feature_list:
            sub_train_df = train_data.loc[train_data[max_feature_name] == f].drop(
                [max_feature_name], axis=1)
            # 6. recursively build the subtree on each subset
            sub_tree = self.train(sub_train_df)
            node_tree.add_node(f, sub_tree)
        return node_tree

    def fit(self, train_data):
        self._tree = self.train(train_data)
        return self._tree

    def predict(self, X_test):
        return self._tree.predict(X_test)
```
```python
datasets, labels = create_data()
data_df = pd.DataFrame(datasets, columns=labels)
dt = DTree()
tree = dt.fit(data_df)
tree
```
```
{'label': None, 'feature': 2, 'tree': {'否': {'label': None, 'feature': 1, 'tree': {'否': {'label': '否', 'feature': None, 'tree': {}}, '是': {'label': '是', 'feature': None, 'tree': {}}}}, '是': {'label': '是', 'feature': None, 'tree': {}}}}
```
```python
dt.predict(['老年', '否', '否', '一般'])
```
```
'否'
```
Next, the same task with scikit-learn's `DecisionTreeClassifier`. Its docstring describes the `criterion` parameter:

criterion : string, optional (default="gini")
    The function to measure the quality of a split. Supported criteria are
    "gini" for the Gini impurity and "entropy" for the information gain.
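To make the two criteria concrete, here is a small self-contained sketch (the helper names `gini` and `entropy` are mine, not sklearn internals) computing both impurity measures for a toy label list:

```python
from math import log

def gini(labels):
    # Gini impurity: Gini(D) = 1 - Σ p_k^2
    n = len(labels)
    counts = {}
    for l in labels:
        counts[l] = counts.get(l, 0) + 1
    return 1 - sum((c / n) ** 2 for c in counts.values())

def entropy(labels):
    # Shannon entropy: H(D) = -Σ p_k * log2(p_k)
    n = len(labels)
    counts = {}
    for l in labels:
        counts[l] = counts.get(l, 0) + 1
    return -sum((c / n) * log(c / n, 2) for c in counts.values())

print(gini(['是', '是', '否']))     # 0.444...: 1 - (2/3)^2 - (1/3)^2
print(entropy(['是', '是', '否']))  # 0.918...: the same split, in bits
```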
```python
def create_data():
    # take the first 100 iris samples (classes 0 and 1), keep sepal
    # length/width as the two features and the class as the label
    iris = load_iris()
    df = pd.DataFrame(iris.data, columns=iris.feature_names)
    df['label'] = iris.target
    df.columns = ['sepal length', 'sepal width', 'petal length', 'petal width', 'label']
    data = np.array(df.iloc[:100, [0, 1, -1]])
    print(data)
    return data[:, :2], data[:, -1]

X, y = create_data()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
```
```
[[5.1 3.5 0. ]
[4.9 3. 0. ]
[4.7 3.2 0. ]
[4.6 3.1 0. ]
[5. 3.6 0. ]
[5.4 3.9 0. ]
[4.6 3.4 0. ]
[5. 3.4 0. ]
[4.4 2.9 0. ]
[4.9 3.1 0. ]
[5.4 3.7 0. ]
[4.8 3.4 0. ]
[4.8 3. 0. ]
[4.3 3. 0. ]
[5.8 4. 0. ]
[5.7 4.4 0. ]
[5.4 3.9 0. ]
[5.1 3.5 0. ]
[5.7 3.8 0. ]
[5.1 3.8 0. ]
[5.4 3.4 0. ]
[5.1 3.7 0. ]
[4.6 3.6 0. ]
[5.1 3.3 0. ]
[4.8 3.4 0. ]
[5. 3. 0. ]
[5. 3.4 0. ]
[5.2 3.5 0. ]
[5.2 3.4 0. ]
[4.7 3.2 0. ]
[4.8 3.1 0. ]
[5.4 3.4 0. ]
[5.2 4.1 0. ]
[5.5 4.2 0. ]
[4.9 3.1 0. ]
[5. 3.2 0. ]
[5.5 3.5 0. ]
[4.9 3.6 0. ]
[4.4 3. 0. ]
[5.1 3.4 0. ]
[5. 3.5 0. ]
[4.5 2.3 0. ]
[4.4 3.2 0. ]
[5. 3.5 0. ]
[5.1 3.8 0. ]
[4.8 3. 0. ]
[5.1 3.8 0. ]
[4.6 3.2 0. ]
[5.3 3.7 0. ]
[5. 3.3 0. ]
[7. 3.2 1. ]
[6.4 3.2 1. ]
[6.9 3.1 1. ]
[5.5 2.3 1. ]
[6.5 2.8 1. ]
[5.7 2.8 1. ]
[6.3 3.3 1. ]
[4.9 2.4 1. ]
[6.6 2.9 1. ]
[5.2 2.7 1. ]
[5. 2. 1. ]
[5.9 3. 1. ]
[6. 2.2 1. ]
[6.1 2.9 1. ]
[5.6 2.9 1. ]
[6.7 3.1 1. ]
[5.6 3. 1. ]
[5.8 2.7 1. ]
[6.2 2.2 1. ]
[5.6 2.5 1. ]
[5.9 3.2 1. ]
[6.1 2.8 1. ]
[6.3 2.5 1. ]
[6.1 2.8 1. ]
[6.4 2.9 1. ]
[6.6 3. 1. ]
[6.8 2.8 1. ]
[6.7 3. 1. ]
[6. 2.9 1. ]
[5.7 2.6 1. ]
[5.5 2.4 1. ]
[5.5 2.4 1. ]
[5.8 2.7 1. ]
[6. 2.7 1. ]
[5.4 3. 1. ]
[6. 3.4 1. ]
[6.7 3.1 1. ]
[6.3 2.3 1. ]
[5.6 3. 1. ]
[5.5 2.5 1. ]
[5.5 2.6 1. ]
[6.1 3. 1. ]
[5.8 2.6 1. ]
[5. 2.3 1. ]
[5.6 2.7 1. ]
[5.7 3. 1. ]
[5.7 2.9 1. ]
[6.2 2.9 1. ]
[5.1 2.5 1. ]
[5.7 2.8 1. ]]
```
```python
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz

clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
```
```
DecisionTreeClassifier(class_weight=None, criterion='gini', max_depth=None,
                       max_features=None, max_leaf_nodes=None,
                       min_impurity_decrease=0.0, min_impurity_split=None,
                       min_samples_leaf=1, min_samples_split=2,
                       min_weight_fraction_leaf=0.0, presort=False,
                       random_state=None, splitter='best')
```
```python
clf.score(X_test, y_test)
```
```
0.9333333333333333
```
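The score above depends on the random train/test split. As a hedged sketch, one can also try the entropy criterion and export the fitted tree with the `export_graphviz` helper imported earlier (the output file name here is illustrative):

```python
# compare the default Gini criterion with information gain (entropy);
# scores vary with the random split, so treat them as illustrative
clf_entropy = DecisionTreeClassifier(criterion='entropy')
clf_entropy.fit(X_train, y_train)
print(clf_entropy.score(X_test, y_test))

# dump the fitted Gini tree in Graphviz .dot format for visualisation
export_graphviz(clf, out_file='iris_tree.dot',
                feature_names=['sepal length', 'sepal width'])
```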
The rest of this note follows 统计学习方法 on CART:

- decision tree generation
- decision tree pruning
- regression tree: least squares (minimize the squared error)
- classification tree: Gini index
Let Y be a continuous variable and let the training dataset be

$$D=\left\{\left(x_{1}, y_{1}\right),\left(x_{2}, y_{2}\right), \cdots,\left(x_{N}, y_{N}\right)\right\}$$

Suppose the input space has already been partitioned into M units $R_1, R_2, \dots, R_M$, with a fixed output value $c_m$ on each unit $R_m$. The regression tree is then

$$f(x)=\sum_{m=1}^{M} c_{m} I\left(x \in R_{m}\right)$$
The prediction error is measured by the squared error, and the optimal output on each unit is the one minimizing

$$\sum_{x_{i} \in R_{m}}\left(y_{i}-f\left(x_{i}\right)\right)^{2}$$

The optimal value of $c_m$ on $R_m$ is the mean of the $y_i$ whose $x_i$ fall in $R_m$:

$$\hat{c}_{m}=\operatorname{ave}\left(y_{i} \mid x_{i} \in R_{m}\right)$$
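One line of calculus confirms this: setting the derivative of the squared error with respect to $c_m$ to zero yields the region mean,

$$\frac{\partial}{\partial c_{m}} \sum_{x_{i} \in R_{m}}\left(y_{i}-c_{m}\right)^{2}
= -2 \sum_{x_{i} \in R_{m}}\left(y_{i}-c_{m}\right) = 0
\quad \Longrightarrow \quad
\hat{c}_{m} = \frac{1}{N_{m}} \sum_{x_{i} \in R_{m}} y_{i}$$

where $N_m$ is the number of samples falling in $R_m$.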
Heuristic: choose the j-th variable $x^{(j)}$ and a value s as the splitting variable and splitting point, and define the two regions

$$R_{1}(j, s)=\left\{x \mid x^{(j)} \leqslant s\right\}
\quad \text{and} \quad
R_{2}(j, s)=\left\{x \mid x^{(j)}>s\right\}$$

Then find the optimal splitting variable and splitting point by solving

$$\min _{j, s}\left[\min _{c_{1}} \sum_{x_{i} \in R_{1}(j, s)}\left(y_{i}-c_{1}\right)^{2}+\min _{c_{2}} \sum_{x_{i} \in R_{2}(j, s)}\left(y_{i}-c_{2}\right)^{2}\right]$$

where, for any fixed pair (j, s), the inner minima are attained at

$$\hat{c}_{1}=\operatorname{ave}\left(y_{i} \mid x_{i} \in R_{1}(j, s)\right), \qquad
\hat{c}_{2}=\operatorname{ave}\left(y_{i} \mid x_{i} \in R_{2}(j, s)\right)$$
The two resulting regions are then partitioned in the same way, recursively, until a stopping condition is met.
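Before stating the full algorithm, here is a tiny self-contained example of this split search on hypothetical 1-D data (so j is fixed and only s varies):

```python
import numpy as np

# hypothetical 1-D training points: the best split should fall between
# the low cluster (y ≈ 1) and the high cluster (y ≈ 5)
x = np.array([1.0, 2.0, 3.0, 4.0])
y = np.array([1.0, 1.2, 4.8, 5.0])

def split_error(s):
    # squared error of the two regions R1 = {x <= s}, R2 = {x > s},
    # each predicted by its mean (the optimal constants c1, c2)
    left, right = y[x <= s], y[x > s]
    return ((left - left.mean()) ** 2).sum() + ((right - right.mean()) ** 2).sum()

for s in [1.0, 2.0, 3.0]:
    print(s, round(split_error(s), 3))
# s = 2.0 gives the smallest error: it cleanly separates the two clusters
```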
Least squares regression tree generation algorithm

Input: training dataset D
Output: regression tree f(x)

In the input space of the training data, recursively split each region into two subregions and determine the output value on each subregion, building a binary decision tree:

(1) Choose the optimal splitting variable j and splitting point s by solving

$$\min _{j, s}\left[\min _{c_{1}} \sum_{x_{i} \in R_{1}(j, s)}\left(y_{i}-c_{1}\right)^{2}+\min _{c_{2}} \sum_{x_{i} \in R_{2}(j, s)}\left(y_{i}-c_{2}\right)^{2}\right]$$

Traverse the variables j; for each fixed splitting variable j, scan the splitting points s, and choose the pair (j, s) that minimizes the expression above.

(2) Use the chosen pair (j, s) to split the region and determine the output values:

$$R_{1}(j, s)=\left\{x \mid x^{(j)} \leqslant s\right\}, \quad R_{2}(j, s)=\left\{x \mid x^{(j)}>s\right\}$$

$$\hat{c}_{m}=\frac{1}{N_{m}} \sum_{x_{i} \in R_{m}(j, s)} y_{i}, \quad x \in R_{m}, \quad m=1,2$$

(3) Repeat steps (1) and (2) on the two subregions until the stopping condition is met.

(4) Partition the input space into M regions $R_1, R_2, \dots, R_M$ and output the regression tree

$$f(x)=\sum_{m=1}^{M} \hat{c}_{m} I\left(x \in R_{m}\right)$$
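To close the loop, here is a minimal runnable sketch of this least squares generation algorithm. The class name `LSRTree`, the `min_samples` stopping rule, and the demo data are illustrative assumptions on my part; the book leaves the stopping condition open.

```python
import numpy as np

class LSRTree:
    """Least squares regression tree (CART generation), a minimal sketch.
    min_samples is an assumed stopping rule to terminate the recursion."""

    def __init__(self, min_samples=3):
        self.min_samples = min_samples
        self.tree = None

    def _best_split(self, X, y):
        # step (1): scan every variable j and every candidate point s for
        # the pair (j, s) minimizing the two-region squared error
        best = None
        for j in range(X.shape[1]):
            for s in np.unique(X[:, j]):
                left, right = y[X[:, j] <= s], y[X[:, j] > s]
                if len(left) == 0 or len(right) == 0:
                    continue
                err = ((left - left.mean()) ** 2).sum() \
                    + ((right - right.mean()) ** 2).sum()
                if best is None or err < best[0]:
                    best = (err, j, s)
        return best

    def _build(self, X, y):
        # leaf: output the mean of y, the least squares optimal constant c_m
        if len(y) < self.min_samples:
            return y.mean()
        best = self._best_split(X, y)
        if best is None:          # all points identical: cannot split
            return y.mean()
        _, j, s = best
        mask = X[:, j] <= s       # step (2): R1 = {x | x^(j) <= s}
        return {'j': j, 's': s,   # step (3): recurse on both subregions
                'left': self._build(X[mask], y[mask]),
                'right': self._build(X[~mask], y[~mask])}

    def fit(self, X, y):
        self.tree = self._build(np.asarray(X, dtype=float),
                                np.asarray(y, dtype=float))
        return self

    def predict_one(self, x):
        # walk from the root to a leaf following the stored splits
        node = self.tree
        while isinstance(node, dict):
            node = node['left'] if x[node['j']] <= node['s'] else node['right']
        return node

# usage on hypothetical 1-D data: two flat pieces with a jump at x = 5
X_demo = np.arange(1, 11).reshape(-1, 1)
y_demo = np.array([1.0, 1.1, 0.9, 1.0, 1.0, 3.0, 3.1, 2.9, 3.0, 3.0])
reg = LSRTree(min_samples=6).fit(X_demo, y_demo)
print(reg.predict_one([3]))   # 1.0: mean of the left region R1 = {x <= 5}
print(reg.predict_one([8]))   # 3.0: mean of the right region R2 = {x > 5}
```

With `min_samples=6` the root split lands at s = 5 and both children become leaves, so the tree reproduces exactly the two region means, matching step (2) of the algorithm.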