1 Training samples:
For the theory, see http://ufldl.stanford.edu/wiki/index.php/Softmax_Regression and http://blog.sina.com.cn/s/blog_6982136301015asd.html. Here I will just go over the basic implementation details. The step size is set to 0.001, and you can tune this value as your experiments require. The main point is that this implements the multi-class algorithm: the observed label (y) is no longer restricted to {0, 1} but ranges over more values, {1, 2, 3, 4, 5, 6, ...}; according to the references, this kind of classification can go up to a limit of 10 classes.
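For reference, the model behind this (as defined on the UFLDL page above) assigns each of the k classes a normalized probability:

P(y^{(i)} = j \mid x^{(i)}; \theta) = \frac{\exp(\theta_j^{T} x^{(i)})}{\sum_{l=1}^{k} \exp(\theta_l^{T} x^{(i)})}

In the code below, the parameter vector of the last class is fixed at zero, so its exponential simply contributes 1 to the denominator.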
For the classification hypothesis y = theta1*x1 + theta2*x2 + theta3*x3, I will only comment on my understanding of the parameter update formula, i.e., the parameter update described at http://ufldl.stanford.edu/wiki/index.php/Softmax_Regression. In each iteration, theta is updated according to the contribution of the label (y): if sample x belongs to the class being updated, the y term (the indicator) in the update is set to 1, otherwise to 0, as written out below.
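Written out, the batch update implemented in the code below is (with step size \alpha, m training samples, and 1\{\cdot\} the indicator just described):

\theta_j := \theta_j + \frac{\alpha}{m} \sum_{i=1}^{m} x^{(i)} \left( 1\{y^{(i)} = j\} - P(y^{(i)} = j \mid x^{(i)}; \theta) \right)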
2 Recognition
Recognition computes the probability of each class with the normalization equation, i.e., the g function (the softmax). You can refer to 大尾巴龙's code below.
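The code below only prints the probability vector; to actually assign a label you take the class with the largest probability. A minimal sketch of that decision step (the helper name classify is mine, not part of the original code), using the same convention that h_value holds the first K probabilities and class K+1 gets the remaining mass:

// Hypothetical helper, not in the original code: pick the 1-based label
// with the largest probability from the normalized h_value vector.
int classify(const double* h_value, int K)
{
    double rest = 1.0;              // probability mass of class K+1
    for (int i = 0; i < K; i++)
        rest -= h_value[i];
    int best = K + 1;               // start with the implicit last class
    double best_p = rest;
    for (int i = 0; i < K; i++) {
        if (h_value[i] > best_p) {  // keep the most probable label
            best_p = h_value[i];
            best = i + 1;           // labels are 1-based
        }
    }
    return best;                    // label in {1, ..., K+1}
}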
#include <iostream>
#include <cmath>
#include <cassert>
using namespace std;

const int K = 2;  // there are K+1 classes; theta of the last class is fixed at 0
const int M = 9;  // size of the training set
const int N = 4;  // number of features, including x0

double x[M][N] = {
    {1, 47, 76, 24},  // x0 = 1 is included as the bias feature
    {1, 46, 77, 23},
    {1, 48, 74, 22},
    {1, 34, 76, 21},
    {1, 35, 75, 24},
    {1, 34, 77, 25},
    {1, 55, 76, 21},
    {1, 56, 74, 22},
    {1, 55, 72, 22},
};
double y[M] = {1, 1, 1, 2, 2, 2, 3, 3, 3};
double theta[K][N] = {
    {0.3, 0.3, 0.01, 0.01},
    {0.5, 0.5, 0.01, 0.01},
};  // theta0 (the bias weight) is included
double h_value[K];  // the vector h(x)

// compute exp(theta^T * x)
double fun_eqx(double* x, double* q)
{
    double sum = 0;
    for (int i = 0; i < N; i++) {
        sum += x[i] * q[i];
    }
    return exp(sum);
}

// fill h_value with the softmax probabilities of the first K classes
void h(double* x)
{
    int i;
    double sum = 1;  // theta of class K+1 is fixed at 0, so exp(0) = 1
    for (i = 0; i < K; i++) {
        h_value[i] = fun_eqx(x, theta[i]);
        sum += h_value[i];
    }
    assert(sum != 0);
    for (i = 0; i < K; i++) {
        h_value[i] /= sum;
    }
}

// stochastic gradient descent: update theta after every sample
void modify_stochastic()
{
    int i, j, k;
    for (j = 0; j < M; j++) {
        h(x[j]);
        for (i = 0; i < K; i++) {
            for (k = 0; k < N; k++) {
                theta[i][k] += 0.001 * x[j][k] * ((y[j] == i + 1 ? 1 : 0) - h_value[i]);
            }
        }
    }
}

// batch gradient descent: accumulate the gradient over all samples, then update
void modify_batch()
{
    int i, j, k;
    for (i = 0; i < K; i++) {
        double sum[N] = {0.0};
        for (j = 0; j < M; j++) {
            h(x[j]);
            for (k = 0; k < N; k++) {
                sum[k] += x[j][k] * ((y[j] == i + 1 ? 1 : 0) - h_value[i]);
            }
        }
        for (k = 0; k < N; k++) {
            theta[i][k] += 0.001 * sum[k] / M;  // average over the M samples (the original divided by N)
        }
    }
}

void train(void)
{
    int i;
    for (i = 0; i < 10000; i++) {
        // modify_stochastic();
        modify_batch();
    }
}

// train, then print the predicted probability vector for one sample
void predict(double* pre)
{
    int i;
    train();  // note: every call continues training from the current theta
    h(pre);
    for (i = 0; i < K; i++)
        cout << h_value[i] << " ";
    cout << 1 - h_value[0] - h_value[1] << endl;  // probability of class K+1 (here K = 2)
}

int main(void)
{
    for (int i = 0; i < M; i++) {
        predict(x[i]);
    }
    cout << endl;

    double pre[] = {1, 20, 80, 50};
    predict(pre);
    return 0;
}
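Saved as, say, softmax.cpp, this should build with any standard compiler (g++ softmax.cpp -o softmax). Each output line is the probability vector of one training sample; once training has converged, the largest entry should correspond to the sample's label. The last line is the prediction for the new point (1, 20, 80, 50). Note that predict() calls train() on every invocation, so each successive prediction is made with further-trained parameters.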