Implementing gradient descent in MATLAB

This script fits a straight line to four training samples with batch gradient descent, running the fit four times with different learning rates to compare convergence.

clear
clc
% training sample data
% (the commented lines below would instead generate synthetic samples from a known line)
% p0=3;
% p1=7;
% x=1:4;
% y=p0+p1*x;
x=[2.014,1.416,1.534,0.852];
y=[0.460,0.232,0.315,0.178];
num_sample=size(y,2);                    % number of training samples
z=[0.0001,0.0002,0.001,0.002,0.1,0.2];   % candidate learning rates
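% The model fitted below is a straight line h(x) = theta0 + theta1*x,
% trained by minimising the mean-squared-error cost
%   J = 1/(2*m) * sum_i (h(x_i) - y_i)^2,   with m = num_sample
% (this is exactly the quantity Jcost1..Jcost4 record each epoch).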
% gradient descent process
% initial parameter values: one (intercept, slope) pair per learning rate
theta0=0; theta1=0.3;   % pair trained with alpha=z(1)
theta2=0; theta3=0.3;   % pair trained with alpha=z(2)
theta4=0; theta5=0.3;   % pair trained with alpha=z(3)
theta6=0; theta7=0.3;   % pair trained with alpha=z(4)
% learning rate
% if alpha is too large, the final error will remain large;
% if alpha is too small, convergence will be slow.
epoch=2000;   % number of gradient-descent iterations
% (an outer loop over all six candidate rates was tried and left commented out)
% for n=1:6
%  alpha=z(:,n);
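% Each of the four loops below runs batch gradient descent with the updates
%   theta0 := theta0 - alpha/m * sum_i (h(x_i) - y_i)
%   theta1 := theta1 - alpha/m * sum_i (h(x_i) - y_i)*x_i
% where m = num_sample and h(x) = theta0 + theta1*x.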
alpha=z(1);                 % learning rate for this run
Jcost1=zeros(1,epoch);      % preallocate cost history
for k=1:epoch
    h_theta_x=theta0+theta1*x;                       % hypothesis function
    Jcost1(k)=sum((h_theta_x-y).^2)/(2*num_sample);  % mean-squared-error cost
    temp0=theta0-alpha*sum(h_theta_x-y)/num_sample;
    temp1=theta1-alpha*sum((h_theta_x-y).*x)/num_sample;
    theta0=temp0;
    theta1=temp1;
end
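% the three loops below repeat the same training with the larger
% rates z(2), z(3), z(4), each on its own fresh parameter pair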
alpha=z(2);
Jcost2=zeros(1,epoch);
for k=1:epoch
    h_theta_x2=theta2+theta3*x;                       % hypothesis function
    Jcost2(k)=sum((h_theta_x2-y).^2)/(2*num_sample);  % mean-squared-error cost
    temp0=theta2-alpha*sum(h_theta_x2-y)/num_sample;
    temp1=theta3-alpha*sum((h_theta_x2-y).*x)/num_sample;
    theta2=temp0;
    theta3=temp1;
end
alpha=z(3);
Jcost3=zeros(1,epoch);
for k=1:epoch
    h_theta_x3=theta4+theta5*x;                       % hypothesis function
    Jcost3(k)=sum((h_theta_x3-y).^2)/(2*num_sample);  % mean-squared-error cost
    temp0=theta4-alpha*sum(h_theta_x3-y)/num_sample;
    temp1=theta5-alpha*sum((h_theta_x3-y).*x)/num_sample;
    theta4=temp0;
    theta5=temp1;
end
alpha=z(4);
Jcost4=zeros(1,epoch);
for k=1:epoch
    h_theta_x4=theta6+theta7*x;                       % hypothesis function
    Jcost4(k)=sum((h_theta_x4-y).^2)/(2*num_sample);  % mean-squared-error cost
    temp0=theta6-alpha*sum(h_theta_x4-y)/num_sample;
    temp1=theta7-alpha*sum((h_theta_x4-y).*x)/num_sample;
    theta6=temp0;
    theta7=temp1;
end
figure(1);plot(Jcost1,'r');   % cost curves for the four learning rates
hold on
grid on
plot(Jcost2,'g');
plot(Jcost3,'k');
plot(Jcost4,'b');
% text(2000,700,'alpha=0.0001 curve');
% text(2000,100,'alpha=0.0002 curve');
% text(1000,50,'alpha=0.001 curve');
% text(500,0,'alpha=0.002 curve');
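% one way to label the curves (matches the commented text() labels above)
legend('alpha=0.0001','alpha=0.0002','alpha=0.001','alpha=0.002');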
theta=[theta0,theta1,theta2,theta3,theta4,theta5,theta6,theta7];   % all fitted parameters
for i=1:2:7
    fprintf('h_theta_x = %.4f + %.4f*x\n', theta(i), theta(i+1));
end
% figure(2);plot(h_theta_x,'r');
% hold on 
% grid on
% plot(h_theta_x2,'g');
% plot(h_theta_x3,'k');
% plot(h_theta_x4,'b');
% evaluate each fitted line on a dense grid and plot it against the data
P=linspace(0.01,5,300);
H1=theta0+theta1*P;
H2=theta2+theta3*P;
H3=theta4+theta5*P;
H4=theta6+theta7*P;
figure(2);plot(P,H1,'r');
hold on
grid on
plot(P,H2,'g');
plot(P,H3,'k');
plot(P,H4,'b');
plot(x,y,'o');
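% label the fitted lines and the data points
legend('alpha=0.0001','alpha=0.0002','alpha=0.001','alpha=0.002','training data');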
 

Knowledge points from this exercise:
- The learning rate alpha controls convergence: too large and the final error stays large, too small and convergence is slow.
- Batch gradient descent must update both parameters simultaneously, which is why the new values go into temp0/temp1 before being written back.
- The four training loops are near-duplicates; a single loop over the learning rates in z would do the same job (see the sketch below).
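A minimal sketch of that consolidated loop (my own refactoring, not part of the original script; it reuses x, y, z, num_sample, and epoch exactly as defined above):

theta_all = zeros(4,2);        % one (intercept, slope) pair per learning rate
theta_all(:,2) = 0.3;          % same initial slope as the script uses
Jcost_all = zeros(4,epoch);    % cost history per learning rate
for n = 1:4
    alpha = z(n);
    for k = 1:epoch
        h = theta_all(n,1) + theta_all(n,2)*x;            % hypothesis
        Jcost_all(n,k) = sum((h - y).^2)/(2*num_sample);  % MSE cost
        grad0 = sum(h - y)/num_sample;                    % dJ/dtheta0
        grad1 = sum((h - y).*x)/num_sample;               % dJ/dtheta1
        theta_all(n,:) = theta_all(n,:) - alpha*[grad0, grad1];
    end
end

Row n of Jcost_all then matches JcostN from the script, and the plotting can loop over the rows instead of repeating four plot calls.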
