import numpy as np
import torch
import torch.autograd
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt
from itertools import count
torch.manual_seed(1)  # fix the RNG so the target polynomial is reproducible
POLY_DEGREE=4  # degree of the polynomial to fit
# Ground-truth polynomial coefficients (POLY_DEGREE x 1) and bias, drawn once.
W_target=torch.randn(POLY_DEGREE,1)*5
b_target=torch.randn(1)*5
def get_batch(batch_size=128):
    """Sample a batch of (features, targets) from the target polynomial.

    Args:
        batch_size: number of points to draw (default 128).

    Returns:
        (x, y): ``x`` has shape (batch_size, POLY_DEGREE) holding powers
        x^1..x^POLY_DEGREE of sorted standard-normal samples; ``y`` is the
        ground-truth polynomial ``x @ W_target + b_target[0]`` with shape
        (batch_size, 1).
    """
    # Sorting makes later curve plots of (x, y) pairs monotone in x.
    random = torch.from_numpy(np.sort(torch.randn(batch_size)))
    x = random.unsqueeze(1)
    # Feature matrix [x, x^2, ..., x^POLY_DEGREE].
    x = torch.cat([x ** i for i in range(1, POLY_DEGREE + 1)], 1)
    y = x.mm(W_target) + b_target[0]
    return Variable(x), Variable(y)
# Fit a linear layer on polynomial features with manual SGD until the
# smooth-L1 loss drops below 1e-3, then plot real vs. fitted curves.
fc = torch.nn.Linear(W_target.size(0), 1)
for batch_index in count(1):
    batch_x, batch_y = get_batch()
    fc.zero_grad()
    output = F.smooth_l1_loss(fc(batch_x), batch_y)
    loss = output.data
    output.backward()
    # Manual SGD step with learning rate 0.1.
    for param in fc.parameters():
        param.data.add_(-0.1 * param.grad.data)
    if loss < 1e-3:
        plt.cla()
        plt.scatter(batch_x.data.numpy()[:, 0], batch_y.data.numpy()[:, 0], label="real curve", color="b")
        plt.plot(batch_x.data.numpy()[:, 0], fc(batch_x).data.numpy()[:, 0], label="fitting curve", color="r")
        plt.legend()
        plt.show()
        break
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torch.optim import SGD
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
use_cuda=torch.cuda.is_available()
print("use_cuda:",use_cuda)
# Load the iris dataset from scikit-learn (features + integer class targets).
iris=load_iris()
x=iris["data"]
y=iris["target"]
print(iris.keys())
print("x.shape:",x.shape)
print("y.shape:",y.shape)
# Targets must be LongTensor for F.nll_loss; features become float32.
x=torch.FloatTensor(x)
y=torch.LongTensor(y)
x,y=Variable(x),Variable(y)
class Net(torch.nn.Module):
    """One-hidden-layer MLP returning log-probabilities (pair with F.nll_loss).

    Args:
        n_feature: input feature dimension.
        n_hidden: hidden layer width.
        n_output: number of classes.
    """

    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_feature, n_hidden)
        self.predict = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        # Sigmoid hidden activation, then log-softmax over classes.
        x = torch.sigmoid(self.hidden(x))
        x = self.predict(x)
        out = torch.log_softmax(x, dim=1)
        return out
# BUG FIX: iris has 3 classes (targets 0/1/2), so the output layer needs 3
# units — the original used n_output=4, leaving a never-used logit.
net=Net(n_feature=4,n_hidden=5,n_output=3)
print(net)
if use_cuda:
    # Move data and model to the GPU when available.
    x=x.cuda()
    y=y.cuda()
    net=net.cuda()
optimizer = SGD(net.parameters(), lr=0.5)
# px/py accumulate (iteration, loss) pairs for the live plot.
px, py = [], []
for i in range(1000):
    prediction = net(x)  # full-batch training on the whole dataset
    loss = F.nll_loss(prediction, y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    px.append(i)
    # Store a Python float, not a tensor (the original appended loss.data).
    py.append(loss.item())
    # Log and redraw every 10 iterations (the original also printed the loss
    # unconditionally each iteration, which just duplicated this line).
    if i % 10 == 0:
        print(i, "loss:", loss.item())
        plt.cla()
        plt.plot(px, py, "r-", lw=1)
        plt.text(0, 0, "loss=%.4f" % loss.item(), fontdict={
            "size": 20, "color": "red"})
        plt.pause(0.1)
# If the dataset download is slow, fetch it from AI Studio and place it under mnist_data\MNIST\raw
#coding=utf-8
import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable
# Hyper-parameters
torch.manual_seed(1)  # reproducible weight init and shuffling
input_size=784  # 28*28 flattened MNIST images
hidden_size=500
num_classes=10
num_epochs=5
batch_size=100
learning_rate=0.001
# Load MNIST (downloads the training split on first run)
train_dataset=dsets.MNIST(root="./mnist_data",
train=True,
transform=transforms.ToTensor(),
download=True)
test_dataset=dsets.MNIST(root="./mnist_data",
train=False,
transform=transforms.ToTensor())
# Batch iterators; only the training stream is shuffled
train_loader=torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)
test_loader=torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
# Fully-connected DNN classifier.
class Net(torch.nn.Module):
    """Two-layer MLP returning raw logits (pair with nn.CrossEntropyLoss).

    Args:
        input_size: flattened input dimension.
        hidden_size: hidden layer width.
        num_classes: number of output classes.
    """

    def __init__(self, input_size, hidden_size, num_classes):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        return out
# Build the network
net=Net(input_size,hidden_size,num_classes)
print(net)
# Objective and optimizer (CrossEntropyLoss applies softmax internally)
criterion=nn.CrossEntropyLoss()
optimizer=torch.optim.Adam(net.parameters(),lr=learning_rate)
# Train: standard minibatch loop over the training loader.
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Flatten 28x28 images to 784-dim vectors for the fully-connected net.
        images = Variable(images.view(-1, 28 * 28))
        labels = Variable(labels)
        optimizer.zero_grad()
        outputs = net(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
    # Report the last batch's loss once per epoch.
    print(epoch, "loss:", loss.item())
# Evaluate accuracy on the held-out test set.
correct = 0
total = 0
for images, labels in test_loader:
    images = Variable(images.view(-1, 28 * 28))
    outputs = net(images)
    _, predicted = torch.max(outputs.data, dim=1)
    total += labels.size(0)
    # .item() converts the 0-d tensor sum to a Python int, so the final
    # correct/total below is guaranteed true float division (the original
    # accumulated tensors, which on older torch truncated to 0).
    correct += (predicted == labels).sum().item()
print('Accuracy of the network on the test images: %d%%' % (100 * (correct / total)))
import torch
from torch import nn,optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision import datasets
# Hyper-parameters
torch.manual_seed(1)  # reproducible weight init and shuffling
batch_size=128
learning_rate=1e-2
num_epoches=10
# Load MNIST (downloads on first run)
train_dataset=datasets.MNIST(root="./mnist_data",
train=True,
transform=transforms.ToTensor(),
download=True)
test_dataset=datasets.MNIST(root="./mnist_data",
train=False,
transform=transforms.ToTensor(),
download=True)
# Batch iterators; only the training stream is shuffled
train_loader=DataLoader(train_dataset,batch_size=batch_size,shuffle=True)
test_loader=DataLoader(test_dataset,batch_size=batch_size,shuffle=False)
# LeNet-style CNN for 28x28 single-channel images.
class Cnn(nn.Module):
    """Conv(->6,3x3,pad1) -> ReLU -> pool -> Conv(6->16,5x5) -> ReLU -> pool -> 3 FC layers.

    Args:
        in_dim: number of input channels (1 for MNIST).
        n_class: number of output classes.
    """

    def __init__(self, in_dim, n_class):  # input is 28x28
        super(Cnn, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_dim, 6, 3, stride=1, padding=1),  # (28-3+2*1)/1+1 = 28
            # BUG FIX: in the original, this ReLU was swallowed by the trailing
            # comment on the Conv2d line, so the first conv had no activation.
            nn.ReLU(True),
            nn.MaxPool2d(2, 2),  # 28 -> 14
            nn.Conv2d(6, 16, 5, stride=1, padding=0),  # (14-5+2*0)/1+1 = 10
            nn.ReLU(True),
            nn.MaxPool2d(2, 2),  # 10 -> 5; 5*5*16 = 400 features
        )
        self.fc = nn.Sequential(
            nn.Linear(400, 120),
            nn.Linear(120, 84),
            nn.Linear(84, n_class),
        )

    def forward(self, x):
        out = self.conv(x)
        out = out.view(out.size(0), 400)  # flatten conv features
        out = self.fc(out)
        return out
model=Cnn(1,10)
print(model)
# Objective and plain-SGD optimizer for training
criterion=nn.CrossEntropyLoss()
optimizer=torch.optim.SGD(model.parameters(),lr=learning_rate)
print("train: ")
for epoch in range(num_epoches):
    running_loss = 0.0
    running_acc = 0.0
    # enumerate(..., 1) starts batch counting at 1.
    for i, data in enumerate(train_loader, 1):
        img, label = data
        out = model(img)
        loss = criterion(out, label)
        # Weight by batch size so the epoch totals average per-sample below.
        running_loss += loss.item() * label.size(0)
        _, pred = torch.max(out, 1)
        num_correct = (pred == label).sum()
        running_acc += num_correct.item()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # Per-epoch mean loss and accuracy over the whole training set.
    print(epoch + 1, " loss:", running_loss / len(train_dataset),
          " acc:", running_acc / len(train_dataset))
# Evaluate on the test set.
model.eval()
eval_loss = 0
eval_acc = 0
# no_grad: evaluation needs no autograd graph, saving time and memory.
with torch.no_grad():
    for data in test_loader:
        img, label = data
        out = model(img)
        loss = criterion(out, label)
        # Weight by batch size so the totals average per-sample below.
        eval_loss += loss.item() * label.size(0)
        _, pred = torch.max(out, 1)
        eval_acc += (pred == label).sum().item()
print(" test loss:", eval_loss / len(test_dataset), " acc:", eval_acc / len(test_dataset))