PyTorch Learning Notes (1)

With things the way they are, there isn't much to do besides study; I can't travel over the break, so it's just me studying alone in the dorm. I've just started learning PyTorch, and I'm keeping these notes as I go.

import torch
print(torch.__version__)#1.5.1
print('gpu:',torch.cuda.is_available())
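
A natural follow-up to this check (my own sketch, not from the notes) is to pick a device based on it, so later code runs on the GPU when one is present:

device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')
t=torch.ones(2,2).to(device)#to() moves the tensor to the chosen device
print(t.device)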

1. Hand-written gradient descent

import numpy as np
#y=wx+b
def compute_error_for_line_given_points(b,w,points):
    #mean squared error of the line y=wx+b over all points
    totalError=0
    for i in range(0,len(points)):
        x=points[i,0]
        y=points[i,1]
        totalError+=(y-(w*x+b))**2
    return totalError/float(len(points))
def step_gradient(b_current,w_current,points,learningRate):
    #one gradient-descent step: average the MSE gradients over all points
    b_gradient=0
    w_gradient=0
    N=float(len(points))
    for i in range(0,len(points)):
        x=points[i,0]
        y=points[i,1]
        b_gradient+=-(2/N)*(y-((w_current*x)+b_current))
        w_gradient+=-(2/N)*x*(y-((w_current*x)+b_current))
    new_b=b_current-(learningRate*b_gradient)
    new_w=w_current-(learningRate*w_gradient)
    return [new_b,new_w]
def gradient_descent_runner(points,starting_b,starting_w,learning_rate,num_iterations):
    #run num_iterations steps of gradient descent from the given starting point
    b=starting_b
    w=starting_w
    for i in range(num_iterations):
        b,w=step_gradient(b,w,np.array(points),learning_rate)
    return [b,w]
def run():
    points=np.genfromtxt('data.csv',delimiter=',')
    learning_rate=0.001
    initial_b=0
    initial_w=0
    num_iterations=1000
    print('Starting gradient descent at b={0}, w={1}, error={2}'.format(initial_b,initial_w,compute_error_for_line_given_points(initial_b,initial_w,points)))
    print('Running')
    [b,w]=gradient_descent_runner(points,initial_b,initial_w,learning_rate,num_iterations)
    print('After {0} iterations: b={1}, w={2}, error={3}'.format(num_iterations,b,w,compute_error_for_line_given_points(b,w,points)))
if __name__ =='__main__':
    run()
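
data.csv is not included with these notes. A minimal way to fabricate a compatible file for testing (my own addition; the slope 1.5, intercept 4, and noise level are arbitrary):

#generate a synthetic data.csv so run() has something to read (my own addition)
import numpy as np
x=np.random.uniform(0,100,size=100)
y=1.5*x+4+np.random.normal(0,5,size=100)#arbitrary true line plus Gaussian noise
np.savetxt('data.csv',np.column_stack((x,y)),delimiter=',')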

  

2. PyTorch basics: tensor operations

#1. Creating tensors
import torch
import numpy as np


t1=torch.Tensor([1,2,3])#t1: tensor([1., 2., 3.])

array1=np.arange(12).reshape(3,4)

array1

torch.Tensor(array1)#convert an ndarray to a tensor

torch.empty([3,4])#3x4, uninitialized memory, so values may be arbitrarily large or small
torch.ones([3,4])#3x4, all ones
torch.zeros([3,4])#3x4, all zeros
torch.rand([3,4])#3x4 random values, uniform on [0,1)
torch.randint(low=0,high=10,size=(3,4))#3x4 random integers in [low,high)
t3=torch.randn([3,4])#3x4 random values with mean 0, variance 1, i.e. standard normal
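
One detail worth flagging (my own aside, not from the notes): torch.Tensor() always yields the default float type, while torch.tensor() infers the dtype from its argument:

torch.Tensor([1,2,3]).dtype#torch.float32, the default float type
torch.tensor([1,2,3]).dtype#torch.int64, inferred from the Python ints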

#2. Tensor methods and attributes
t1=torch.Tensor([[[1]]])
t1
t1.item()#when a tensor holds exactly one element, item() extracts it as a Python number

t2=torch.Tensor([[[1,2]]])
t=t2.numpy()#array([[[1., 2.]]], dtype=float32); numpy() converts a tensor to an ndarray
tt=torch.from_numpy(t)#from_numpy converts an ndarray to a tensor
t2.shape#shape attribute
t2.size()#size() also returns the shape; size(i) returns the length of dimension i
t2.size(2)
t3.view(4,3)#view changes the tensor's shape; like reshape in numpy, it is a shallow copy
t1.dim()#number of dimensions
t3.max()#maximum value
t3.min()
t3.std()#standard deviation
t3.t()#transpose
t4=torch.Tensor(np.arange(24).reshape(2,3,4))#build a 3-D tensor
t4.size()#torch.Size([2, 3, 4])
t4.transpose(0,1)#swap dimensions 0 and 1 >> (3,2,4)
t4.permute(2,1,0)#reorder all dimensions >> (4,3,2)
t4.dtype#torch.float32; the default is float32
torch.tensor([2,3],dtype=torch.float32)#set the dtype explicitly: tensor([2., 3.])
#dtype conversions
t4.type(torch.float)
t3.double()
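
Because view returns a view of the same storage rather than a copy, writes through one tensor are visible through the other; a quick check (my own sketch):

a=torch.zeros(2,3)
b=a.view(6)#b shares a's underlying storage
b[0]=100
a[0,0]#tensor(100.): the write through b is visible in a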

3. A two-layer neural network in NumPy

#a two-layer neural network in numpy

N,D_in,H,D_out=64,1000,100,10#batch of 64 samples, 1000-d input, 100 hidden units, 10-d output
x=np.random.randn(N,D_in)#fabricate input data
y=np.random.randn(N,D_out)
w1=np.random.randn(D_in,H)
w2=np.random.randn(H,D_out)
learning_rate=1e-6
for it in range(200):
    h=x.dot(w1)#forward pass
    h_relu=np.maximum(h,0)#ReLU activation
    y_pred=h_relu.dot(w2)
    loss=np.square(y_pred-y).sum()#sum-of-squares loss
    print(it,loss)
    grad_y_pred=2.0*(y_pred-y)#backward pass: gradients by hand
    grad_w2=h_relu.T.dot(grad_y_pred)
    grad_h_relu=grad_y_pred.dot(w2.T)
    grad_h=grad_h_relu.copy()
    grad_h[h<0]=0
    grad_w1=x.T.dot(grad_h)
    #update the parameters
    w1-=learning_rate*grad_w1
    w2-=learning_rate*grad_w2
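
To verify the hand-derived gradients, a finite-difference check on a single entry of w1 can be compared against the analytic value (my own sketch, not part of the original exercise):

#finite-difference sanity check on one weight (my own addition)
def net_loss(w1,w2,x,y):
    h_relu=np.maximum(x.dot(w1),0)
    return np.square(h_relu.dot(w2)-y).sum()
#analytic gradient at the current weights, derived the same way as in the loop
h=x.dot(w1)
grad_h=(2.0*(np.maximum(h,0).dot(w2)-y)).dot(w2.T)
grad_h[h<0]=0
analytic=x.T.dot(grad_h)[0,0]
#numeric gradient via central differences
eps=1e-6
w1[0,0]+=eps
loss_plus=net_loss(w1,w2,x,y)
w1[0,0]-=2*eps
loss_minus=net_loss(w1,w2,x,y)
w1[0,0]+=eps#restore the original weight
numeric=(loss_plus-loss_minus)/(2*eps)
print(analytic,numeric)#the two should agree to several significant digits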

  

  

4. The same two-layer network in PyTorch

N,D_in,H,D_out=64,1000,100,10#same sizes: 64 samples, 1000-d input, 100 hidden units, 10-d output
x=torch.randn(N,D_in)#fabricate input data
y=torch.randn(N,D_out)
w1=torch.randn(D_in,H)
w2=torch.randn(H,D_out)
learning_rate=1e-6
for it in range(200):
    h=x.mm(w1)#forward pass
    h_relu=h.clamp(min=0)#ReLU activation
    y_pred=h_relu.mm(w2)
    loss=(y_pred-y).pow(2).sum().item()#sum-of-squares loss
    print(it,loss)
    grad_y_pred=2.0*(y_pred-y)#backward pass: gradients by hand
    grad_w2=h_relu.t().mm(grad_y_pred)
    grad_h_relu=grad_y_pred.mm(w2.t())
    grad_h=grad_h_relu.clone()
    grad_h[h<0]=0
    grad_w1=x.t().mm(grad_h)
    #update the parameters
    w1-=learning_rate*grad_w1
    w2-=learning_rate*grad_w2
#computing gradients with autograd
x=torch.tensor(1.,requires_grad=True)
w=torch.tensor(2.,requires_grad=True)
b=torch.tensor(3.,requires_grad=True)
y=w*x+b
y.backward()
print(x.grad)#tensor(2.)
print(w.grad)#tensor(1.)
print(b.grad)#tensor(1.)
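
The manual backward pass above can be handed off to autograd entirely. Here is a sketch of the same training loop rewritten with requires_grad (my own rewrite, following standard PyTorch practice):

x=torch.randn(N,D_in)
y=torch.randn(N,D_out)
w1=torch.randn(D_in,H,requires_grad=True)
w2=torch.randn(H,D_out,requires_grad=True)
learning_rate=1e-6
for it in range(200):
    y_pred=x.mm(w1).clamp(min=0).mm(w2)#forward pass
    loss=(y_pred-y).pow(2).sum()
    print(it,loss.item())
    loss.backward()#autograd fills in w1.grad and w2.grad
    with torch.no_grad():#update weights without recording the update in the graph
        w1-=learning_rate*w1.grad
        w2-=learning_rate*w2.grad
        w1.grad.zero_()#clear accumulated gradients before the next iteration
        w2.grad.zero_()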

 


import torch
import numpy as np

#tensor >>> ndarray
a=torch.ones(5)
b=a.numpy()
b#array([1., 1., 1., 1., 1.], dtype=float32)

#ndarray>>>tensor
a=np.ones(5)
b=torch.from_numpy(a)
b#tensor([1., 1., 1., 1., 1.], dtype=torch.float64)
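
On the CPU both conversions share memory rather than copying, so mutating the ndarray mutates the tensor too (a quick demonstration of my own):

a=np.ones(5)
b=torch.from_numpy(a)
a[0]=7
b#tensor([7., 1., 1., 1., 1.], dtype=torch.float64): b sees the change to a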

#enabling gradient tracking
x=torch.randn(3,4,requires_grad=True)#method 1: set requires_grad at creation
x

x=torch.randn(3,4)
x.requires_grad=True#method 2: flip the flag afterwards
x

#summation
b=torch.randn(3,4,requires_grad=True)
t=x+b#element-wise sum of two matrices
y=t.sum()#sum of all elements, giving a scalar
y


y.backward()#backpropagate starting from y

b.grad#gradient of y with respect to b

x.requires_grad,b.requires_grad,t.requires_grad#(True, True, True)
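
t requires grad here but is not a leaf, so after backward t.grad is None by default; PyTorch only keeps it if you ask with retain_grad (standard behavior, my own sketch):

x=torch.randn(3,4,requires_grad=True)
b=torch.randn(3,4,requires_grad=True)
t=x+b
t.retain_grad()#keep the gradient of this intermediate (non-leaf) tensor
t.sum().backward()
t.grad#a 3x4 tensor of ones: d(sum)/dt is 1 for every element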

  

#a small computation
x=torch.rand(1)
b=torch.rand(1,requires_grad=True)
w=torch.rand(1,requires_grad=True)
y=w*x
z=y+b#z=wx+b
x.requires_grad,b.requires_grad,w.requires_grad,y.requires_grad#(False, True, True, True)

x.is_leaf,w.is_leaf,b.is_leaf,y.is_leaf,z.is_leaf#leaf nodes, i.e. the independent variables: (True, True, True, False, False)

z.backward(retain_graph=True)#retain the graph so backward can run again; new gradients are accumulated onto any existing values unless cleared
w.grad

b.grad
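
To see that accumulation, call backward a second time (possible because the graph was retained) and watch the gradient double; zero_ clears it in place (my own sketch):

z.backward(retain_graph=True)#second backward: new gradients are added onto the old
w.grad#now twice the value printed above
w.grad.zero_()#reset in place if accumulation is not wanted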

  

#a linear regression model
x_values=[i for i in range(11)]
x_train=np.array(x_values,dtype=np.float32)
x_train=x_train.reshape(-1,1)
x_train.shape#(11, 1),ndarray

y_values=[2*i+1 for i in x_values]
y_train=np.array(y_values,dtype=np.float32)
y_train=y_train.reshape(-1,1)
y_train.shape#(11, 1)

import torch.nn as nn
class LinearRegressionModel(nn.Module):
    def __init__(self,input_dim,output_dim):
        super(LinearRegressionModel,self).__init__()
        self.linear=nn.Linear(input_dim,output_dim)
    def forward(self,x):
        out=self.linear(x)
        return out
input_dim=1
output_dim=1
model=LinearRegressionModel(input_dim,output_dim)
model#LinearRegressionModel(  (linear): Linear(in_features=1, out_features=1, bias=True))
epochs=1000
learning_rate=0.01
optimizer=torch.optim.SGD(model.parameters(),lr=learning_rate)
criterion=nn.MSELoss()
for epoch in range(epochs):
    epoch+=1
    #convert the ndarrays to tensors
    inputs=torch.from_numpy(x_train)
    labels=torch.from_numpy(y_train)
    #gradients must be cleared on every iteration
    optimizer.zero_grad()
    #forward pass
    outputs=model(inputs)
    #compute the loss
    loss=criterion(outputs,labels)
    #backward pass
    loss.backward()
    #update the weights
    optimizer.step()
    if epoch%50==0:
        print('epoch {}, loss {}'.format(epoch,loss.item()))
#prediction
predicted=model(torch.from_numpy(x_train).requires_grad_()).data.numpy()
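
Calling requires_grad_() and reaching for .data works, but the more common modern pattern is to disable tracking with torch.no_grad() during inference (my own variant):

with torch.no_grad():#no graph is built, so no gradient bookkeeping during inference
    predicted=model(torch.from_numpy(x_train)).numpy()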

#saving and loading the model
torch.save(model.state_dict(),'model.pkl')

model.load_state_dict(torch.load('model.pkl'))
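
state_dict() stores only the parameters, so loading them requires a model of the same architecture to already exist; a sketch:

new_model=LinearRegressionModel(input_dim,output_dim)#same architecture, fresh weights
new_model.load_state_dict(torch.load('model.pkl'))
new_model.eval()#switch to evaluation mode before running inference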

  
