import numpy as np
import torch
list1 = [1, 2, 3]        # an ordinary Python list (illustrative values)
np.array(list1)          # convert the list to an ndarray
np.random.random([3,3])  # 3x3 array of uniform random floats in [0, 1)
np.zeros([3,3])          # 3x3 array of zeros
np.arange(1,4,0.5)       # values from 1 up to (but not including) 4, step 0.5
Slicing: nd11[3:6] selects the elements at indices 3 through 5 (the end index is exclusive); with a single number, e.g. nd11[:6], the slice runs from the start up to that index.
A*B                      # elementwise multiplication, equivalent to np.multiply(A, B)
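A small sketch tying the two notes together (nd11, A and B are illustrative names):
nd11 = np.arange(10)            # [0 1 2 ... 9]
print(nd11[3:6])                # [3 4 5]: the end index is exclusive
A = np.array([[1, 2], [3, 4]])
B = np.ones((2, 2)) * 2
print(A * B)                    # elementwise product
print(np.multiply(A, B))        # identical result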
import numpy as np
arr = np.arange(6).reshape(2,-1)
arr.resize(2, 3)         # unlike reshape, resize modifies the array in place and returns None; it cannot infer a dimension with -1
np.arange(12).reshape(3,4).T   # .T transposes the 3x4 array to 4x3
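A quick contrast of reshape and resize (a sketch):
a = np.arange(6)
a.resize(2, 3)           # in place: a.shape is now (2, 3); the call returns None
b = a.reshape(3, 2)      # reshape returns a new view; a keeps shape (2, 3)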
import numpy as np
arr = np.arange(6).reshape(2,-1)
print(arr)
print(arr.ravel('F'))    # flatten in column-major (Fortran) order
print(arr.ravel())       # flatten in row-major (C) order
a = np.floor(10*np.random.random((3,4)))
print(a)
print(a.flatten())       # flatten always returns a copy; ravel returns a view when possible
arr.squeeze().shape      # squeeze removes axes of length 1; arr is (2, 3), so the shape is unchanged
arr2 = np.arange(24).reshape(2,3,4).transpose(1,2,0)   # permute axes: shape becomes (3, 4, 2)
np.append(a,b,axis=0)    # append the rows of b to a; shapes must match along the other axes
np.concatenate((a,b),axis=0)   # axis=0 joins along rows, axis=1 joins along columns
np.stack((a,b),axis=0)   # unlike concatenate, stack creates a new axis
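A small sketch contrasting concatenate and stack (a and b here are illustrative 2x2 arrays):
a = np.ones((2, 2))
b = np.zeros((2, 2))
print(np.concatenate((a, b), axis=0).shape)   # (4, 2): joined along rows
print(np.concatenate((a, b), axis=1).shape)   # (2, 4): joined along columns
print(np.stack((a, b), axis=0).shape)         # (2, 2, 2): a new leading axis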
import numpy as np
data_train = np.random.randn(10000,2,3)   # 10000 samples of shape (2, 3)
print(data_train.shape)
np.random.shuffle(data_train)             # shuffle along the first axis
batch_size = 100
for i in range(0, len(data_train), batch_size):
    x_batch_sum = np.sum(data_train[i:i+batch_size])
    print("Batch {}: sum of this batch's data: {}".format(i, x_batch_sum))
NumPy has two basic kinds of objects: ndarray (the N-dimensional array) and ufunc (universal functions that operate elementwise).
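A brief illustration of a ufunc (np.add is one: it operates elementwise and supports broadcasting):
x = np.arange(3)            # ndarray: [0 1 2]
print(np.add(x, 10))        # ufunc applied elementwise: [10 11 12]
print(np.add.reduce(x))     # ufuncs also provide methods such as reduce: 3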
import numpy as np
A = np.arange(0,40,10).reshape(4,1)
B = np.arange(0,3)
C = A + B                # broadcasting: shapes (4,1) and (3,) combine to (4,3)
print(C)
import torch
torch.Tensor([1,2,3,4])           # 1-D tensor built from a list
torch.Tensor(2,3)                 # uninitialized 2x3 tensor (arguments are sizes, not data)
y = torch.Tensor([[1,2],[3,4]])   # 2x2 tensor from nested lists
torch.unsqueeze(y,0)              # insert a new axis at position 0: shape becomes (1, 2, 2)
view(-1) flattens a tensor; view is a method of torch.Tensor, so it is called as x.view(-1).
import torch
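A minimal example of flattening with view:
x = torch.arange(6).view(2, 3)   # a 2x3 tensor
print(x.view(-1))                # flattened to shape (6,)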
torch.manual_seed(100)
x = torch.randn(2,3)
x[0:]                       # all rows
x[:,-1]                     # the last column
mask = x > 0                # boolean mask with the same shape as x
torch.masked_select(x,mask) # 1-D tensor of the elements where mask is True
torch.nonzero(mask)         # indices of the True entries
import torch
import numpy as np
A = np.arange(0,40,10).reshape(4,1)
B = np.arange(0,3)
A1 = torch.from_numpy(A)
B1 = torch.from_numpy(B)
C = A1 + B1              # broadcasting works on tensors too: (4,1) + (3,) -> (4,3)
import torch as t
from matplotlib import pyplot as plt
import numpy
t.manual_seed(100)
x = t.unsqueeze(t.linspace(-1,1,100),dim = 1)   # shape (100, 1)
y = 3*x.pow(2) + 2 + 0.2*t.rand(x.size())       # target: y = 3x^2 + 2 plus noise
plt.scatter(x.numpy(),y.numpy())
plt.show()
w = t.randn(1,1,requires_grad=True)
b = t.randn(1,1,requires_grad=True)
lr = 0.01
for ii in range(800):
    y_pred = x.pow(2).mm(w) + b      # forward pass: y = w*x^2 + b
    loss = 0.5*(y_pred - y)**2       # squared error per point
    loss = loss.sum()
    loss.backward()                  # compute gradients of loss w.r.t. w and b
    with t.no_grad():
        w -= lr * w.grad             # manual gradient-descent step
        b -= lr * b.grad
        w.grad.zero_()               # clear the accumulated gradients
        b.grad.zero_()
plt.plot(x.numpy(),y_pred.detach().numpy(),'r',label = 'predict')
plt.scatter(x.numpy(),y.numpy())
plt.xlim(-1,1)
plt.ylim(2,6)
plt.show()
print(w,b)
You can build models with the nn toolbox by subclassing torch.nn.Module, e.g. class Net(torch.nn.Module).
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):
        super(Net, self).__init__()
        self.layer1 = nn.Sequential(nn.Linear(in_dim, n_hidden_1), nn.BatchNorm1d(n_hidden_1))
        self.layer2 = nn.Sequential(nn.Linear(n_hidden_1, n_hidden_2), nn.BatchNorm1d(n_hidden_2))
        self.layer3 = nn.Sequential(nn.Linear(n_hidden_2, out_dim))
    def forward(self, x):
        x = F.relu(self.layer1(x))
        x = F.relu(self.layer2(x))
        x = self.layer3(x)
        return x
Layers from nn.Module must be instantiated before use; functions from nn.functional are called directly and need no instantiation.
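A short contrast of the two styles (the tensor x is illustrative):
x = torch.randn(4, 3)
relu_layer = nn.ReLU()      # nn.Module style: instantiate first, then call
print(relu_layer(x))
print(F.relu(x))            # nn.functional style: a plain function, no instance needed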
For backpropagation, simply call backward() on the loss: loss.backward().
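A minimal sketch of what backward() does, on a toy scalar loss (names are illustrative):
w = torch.tensor(2.0, requires_grad=True)
loss = (w - 1)**2           # scalar loss
loss.backward()             # autograd fills in w.grad
print(w.grad)               # d(loss)/dw = 2*(w-1) = 2.0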
import torch.nn as nn
import torch.optim as optim
# example hyperparameters (illustrative values)
lr, momentum, num_epochs = 0.01, 0.9, 20
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = Net(28*28, 300, 100, 10)
model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum)
losses, acces, eval_losses, eval_acces = [], [], [], []
for epoch in range(num_epochs):
    train_loss = 0
    train_acc = 0
    model.train()                      # switch to training mode
    if epoch % 5 == 0:                 # decay the learning rate every 5 epochs
        optimizer.param_groups[0]['lr'] *= 0.1
    for img, label in train_loader:    # train_loader: a DataLoader over the training set
        img = img.to(device)
        label = label.to(device)
        img = img.view(img.size(0), -1)   # flatten each image to a 28*28 vector
        out = model(img)
        loss = criterion(out, label)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        _, pred = out.max(1)
        num_correct = (pred == label).sum().item()
        acc = num_correct / img.shape[0]   # img.shape[0] is the batch size
        train_acc += acc
    losses.append(train_loss / len(train_loader))
    acces.append(train_acc / len(train_loader))
Remember to call model.train() first, which puts every module into training mode; likewise, call model.eval() during the evaluation phase.
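A sketch of the matching evaluation phase (test_loader is an assumed DataLoader over the test set):
model.eval()                           # switch BatchNorm/Dropout to inference behavior
eval_loss, eval_acc = 0, 0
with torch.no_grad():                  # no gradients needed during evaluation
    for img, label in test_loader:
        img = img.view(img.size(0), -1).to(device)
        label = label.to(device)
        out = model(img)
        eval_loss += criterion(out, label).item()
        eval_acc += (out.max(1)[1] == label).sum().item() / img.shape[0]
eval_losses.append(eval_loss / len(test_loader))
eval_acces.append(eval_acc / len(test_loader))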
optimizer.zero_grad()   # gradients accumulate by default, so clear them first
loss.backward()         # autograd computes the gradients
optimizer.step()        # the optimizer applies the parameter update
Using nn, you avoid setting requires_grad=True by hand.
nn already implements the vast majority of layers as subclasses of nn.Module, so their parameters are detected automatically.
Optimizers are packaged in torch.optim; every optimization method inherits from the base class optim.Optimizer.
import torch.optim as optim
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum)
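Because every optimizer shares the optim.Optimizer interface, swapping one in is a one-line change (a sketch; the learning rates are illustrative):
optimizer = optim.Adam(model.parameters(), lr=0.001)   # same interface as SGD
optimizer.param_groups[0]['lr'] = 0.0001               # hyperparameters are exposed via param_groups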