Adaboost with Decision Stump

机器学习技法作业3(2018)的编程题,Q11~Q16,Experiments with AdaBoost
题目中给出了Decision Stump的求解思路,Adaboost算法代码如下:

# coding: utf-8
import numpy as np
import math

def loaddata(file):
    """Load a whitespace-separated data file into feature/label arrays.

    Each line holds the feature values followed by the label in the last
    column.  Returns (features, labels) with features of shape
    (n_examples, n_features) and labels of shape (n_examples, 1).
    No x0 = 1 bias column is added to features.
    """
    with open(file) as f:          # context manager replaces manual try/finally close
        lines = f.readlines()

    example_num = len(lines)
    dimension = len(lines[0].strip().split()) - 1  # last column is the label

    features = np.zeros((example_num, dimension))
    labels = np.zeros((example_num, 1))

    for index, line in enumerate(lines):
        item = line.strip().split()    # use the loop variable, not lines[index]
        features[index, :] = [float(feature) for feature in item[:-1]]
        labels[index] = float(item[-1])

    return features, labels

class adaboost(object):
    """AdaBoost with decision stumps as the weak learner.

    A stump is g(x) = s * sign(x[i] - theta).  Boosting learns, per round t,
    the stump parameters (s, i, theta) that minimise the u-weighted error,
    a vote weight alpha_t, and updated example weights u.
    """

    def __init__(self, iteration, X, Y):
        self.__iter = iteration                  # number of boosting rounds
        self.__dim = X.shape[1]                  # feature dimensionality
        self.__x = X
        self.__y = Y                             # labels in {-1, +1}, shape (N, 1)
        self.__sortedx = X                       # view sorted by one feature (set by sort())
        self.__sortedy = Y
        self.__u = np.ones((len(Y), 1)) / X.shape[0]   # example weights, uniform 1/N
        self.__alpha = np.zeros((iteration, 1))  # vote weight alpha_t of each stump
        self.__s = np.zeros((iteration, 1))      # stump direction s_t
        self.__i = np.zeros((iteration, 1))      # stump feature index i_t
        self.__theta = np.zeros((iteration, 1))  # stump threshold theta_t

    def sort(self, d):
        """Sort the examples by feature dimension d (used to enumerate thresholds)."""
        index = self.__x.argsort(axis=0)[:, d]
        self.__sortedx = self.__x[index, :]
        self.__sortedy = self.__y[index, :]

    def calcu_err(self, i, s, theta):
        """Return the u-weighted (unnormalised) error of stump s*sign(x[:,i]-theta)."""
        # Vectorised: column of predictions compared against the label column.
        pred = np.sign(self.__x[:, i:i + 1] - theta) * s
        return float(np.sum(self.__u[pred != self.__y]))

    def decision_stump(self):
        """Train one decision stump on the current weights.

        Candidate thresholds are the midpoints of consecutive sorted feature
        values, plus -1e10 (effectively -inf, a stump that fires everywhere).
        Returns (best_err, best_s, best_i, best_theta).
        """
        best_theta = -1e10
        best_s = +1
        best_i = 0
        best_err = 1e10
        for i in range(self.__dim):
            self.sort(i)                         # order examples by feature i
            for j in range(self.__x.shape[0]):
                if j > 0:
                    theta = (self.__sortedx[j, i] + self.__sortedx[j - 1, i]) / 2
                else:
                    theta = -1e10                # threshold below every point
                err_pos = self.calcu_err(i, +1, theta)
                err_neg = self.calcu_err(i, -1, theta)
                s = +1 if err_pos <= err_neg else -1
                err = min(err_pos, err_neg)
                if err <= best_err:
                    best_err, best_s, best_theta, best_i = err, s, theta, i
        print('s, i, theta:', best_s, best_i, best_theta)
        return best_err, best_s, best_i, best_theta

    def bst_train(self):
        """Run AdaBoost for self.__iter rounds.

        Fixes vs the original:
        - alpha_t is computed from the *normalised* error rate
          eps = err / sum(u); the original used the raw weighted error,
          which skews alpha from the second round on (sum(u) != 1 there).
        - A perfect stump (eps == 0) now receives a large finite vote
          before the early break; the original left alpha at 0, making
          G(x) = sign(0) = 0 everywhere.
        Returns (u, alpha, s, i, theta).
        """
        for t in range(self.__iter):
            err, self.__s[t], self.__i[t], self.__theta[t] = self.decision_stump()
            eps = err / float(np.sum(self.__u))  # normalised weighted error rate
            if eps == 0:
                self.__alpha[t] = math.log(1e10)  # huge but finite vote for a perfect stump
                print('Err of', t, 'iter stump is 0!\n')
                break

            # alpha_t = ln(sqrt((1-eps)/eps)) = 0.5 * ln((1-eps)/eps)
            self.__alpha[t] = 0.5 * math.log((1 - eps) / eps)

            # Hoist the round-t parameters as plain floats (math.exp on a
            # 1-element ndarray is deprecated in modern numpy).
            s_t = float(self.__s[t])
            i_t = int(self.__i[t])
            theta_t = float(self.__theta[t])
            alpha_t = float(self.__alpha[t])
            for n in range(len(self.__u)):
                gt = float(np.sign(self.__x[n, i_t] - theta_t)) * s_t
                # mistakes scale up by exp(alpha), correct examples down by exp(-alpha)
                self.__u[n] = self.__u[n] * math.exp(-float(self.__y[n]) * alpha_t * gt)
        return self.__u, self.__alpha, self.__s, self.__i, self.__theta

    def predict_inout(self, x, y):
        """Error rate of G(x) = sign(sum_t alpha_t * g_t(x)) on dataset (x, y).

        Returns (predict_y, Err) where Err is a plain float in [0, 1].
        """
        predict_y = np.zeros((len(y), 1))
        for i in range(len(y)):
            Gx = 0.0
            for j in range(self.__iter):
                g = np.sign(x[i, int(self.__i[j])] - self.__theta[j]) * self.__s[j]
                Gx += float(self.__alpha[j] * g)
            predict_y[i] = np.sign(Gx)           # aggregate vote, once per example
        Err = float(np.mean(predict_y != y))
        print('Ein/Eout is', Err)
        return predict_y, Err

    # Accessors below are kept for debugging, matching the original interface.
    def get_x(self):
        return self.__x

    def get_y(self):
        return self.__y

    def get_sortedx(self):
        return self.__sortedx

    def get_sortedy(self):
        return self.__sortedy

    def get_u(self):
        return self.__u

    def get_i(self):
        return self.__i

    def get_s(self):
        return self.__s

    def get_theta(self):
        return self.__theta

def main():
    """Train AdaBoost on the homework data and report Ein / Eout."""
    # BUG FIX: the original swapped the files, loading hw3_test.dat.txt as
    # the training set and hw3_train.dat.txt as the test set.
    X_train, Y_train = loaddata('hw3_train.dat.txt')
    X_test, Y_test = loaddata('hw3_test.dat.txt')

    iteration = 300
    alg = adaboost(iteration, X_train, Y_train)

    u, alpha, s, i, theta = alg.bst_train()                  # AdaBoost training
    pre_ytrain, Ein = alg.predict_inout(X_train, Y_train)    # in-sample error
    pre_ytest, Eout = alg.predict_inout(X_test, Y_test)      # out-of-sample error


if __name__ == "__main__":
    main()

!!!注意样本权重 u 与样本的对应关系:最初遇到的问题是每轮训练得到的决策树桩划分总是同一个,后来发现原因是在 decision_stump 中对数据排序后,没有对权重 u 做相应的重排处理。

你可能感兴趣的:(Adaboost with Decision Stump)