PyTorch Learning (9): Some Simple Examples

#coding=utf-8
import torch
import torch.nn as nn
from torch.autograd import Variable

m = nn.LeakyReLU(0.1)
input = Variable(torch.randn(2))
print(input)
print('---' * 10)
print(m(input))
print('--' * 10)
r = nn.ReLU()
print(r(input))
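
LeakyReLU(0.1) computes max(0, x) + 0.1 * min(0, x); as a quick sanity check, here is a minimal sketch (new code, not from the original) that reproduces it with clamp:

x = Variable(torch.randn(2))
manual = x.clamp(min=0) + 0.1 * x.clamp(max=0)  # max(0, x) + 0.1 * min(0, x)
print(manual)
print(m(x))  # should match manual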

m = nn.Threshold(0.1, 20)  # y = x if x > 0.1, else 20
input = Variable(torch.randn(2))
print(input)
print(m(input))
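
Threshold(0.1, 20) keeps values strictly greater than 0.1 and writes 20 everywhere else. A minimal manual check built from a comparison mask (a sketch, added here for illustration):

mask = (input > 0.1).float()
print(mask * input + (1 - mask) * 20)  # should match m(input)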

m = nn.Sigmoid()
input = Variable(torch.randn(2))
print(input)
print(m(input))
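
Sigmoid is 1 / (1 + exp(-x)); the one-liner below reproduces it (a sketch that relies on scalar broadcasting, available in recent PyTorch versions):

print(1.0 / (1.0 + torch.exp(-input)))  # should match m(input)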

m = nn.BatchNorm1d(3, affine=False)
input = Variable(torch.Tensor([[1, 2, 3], [4, 5, 6]]))  # torch.randn takes sizes, not data; build the tensor directly
output = m(input)
print(output)
print(input)
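
With affine=False, BatchNorm1d just normalizes each feature over the batch: (x - mean) / sqrt(var + eps), with the biased variance and eps = 1e-5 by default. A minimal sketch of the same computation (assumes broadcasting, available in PyTorch >= 0.2):

mean = input.mean(0)
var = ((input - mean) ** 2).mean(0)  # biased variance, as used in training mode
print((input - mean) / torch.sqrt(var + 1e-5))  # should match output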

rnn = nn.RNN(10, 20, 2)                    # input_size,hidden_size,num_layers
input = Variable(torch.randn(5, 3, 10))    # seq_len,batch,input_size
h0 = Variable(torch.randn(2, 3, 20))       # num_layers * num_direction,batch,hidden_size
output, hn = rnn(input, h0)
print(output)                              # 5*3*20  seq_len, batch, hidden_size * num_directions
print('--' * 10)
print(hn)                                  # 2*3*20  num_layers * num_directions, batch, hidden_size
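
An LSTM follows the same calling convention but also threads a cell state through; a quick sketch for comparison (new code, same shapes as above):

lstm = nn.LSTM(10, 20, 2)                  # input_size, hidden_size, num_layers
c0 = Variable(torch.randn(2, 3, 20))       # num_layers * num_directions, batch, hidden_size
output, (hn, cn) = lstm(input, (h0, c0))
print(output.size())                       # (5, 3, 20)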

from torch.nn import functional as F

m = nn.Linear(20, 30)
input = Variable(torch.randn(128, 20))
output = m(input)
print(output.size())  # (128, 30)
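
nn.Linear stores a weight of shape (out_features, in_features) and computes y = x * weight^T + bias; the functional form gives the same result (a sketch using F.linear, imported above):

print(F.linear(input, m.weight, m.bias).size())  # (128, 30), same as m(input)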

filters = Variable(torch.randn(33, 16, 3))  # out_channels, in_channels, kernel_size
inputs = Variable(torch.randn(20, 16, 50))  # batch, in_channels, length
print(F.conv1d(inputs, filters).size())     # (20, 33, 48)
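
Without padding the output length is L_out = L_in - kernel_size + 1 = 50 - 3 + 1 = 48; padding restores the input length (a sketch using the padding argument of F.conv1d):

print(F.conv1d(inputs, filters, padding=1).size())  # (20, 33, 50)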


m = nn.Dropout(p=0.5)
input = Variable(torch.randn(5, 4))
output = m(input)
print(input)
print('--' * 10)
print(output)
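
During training nn.Dropout zeroes each element with probability p and scales the survivors by 1 / (1 - p); in eval mode it is the identity. A quick sketch:

m.eval()
print(m(input))  # identical to input: dropout is disabled in eval mode
m.train()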

m = nn.Dropout2d(p=0.2)  # randomly zeroes entire channels of the input; the dropped channels are re-chosen on every forward call
input = Variable(torch.randn(20, 16, 32, 32))
output = m(input)
print(output)
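
Since Dropout2d drops whole feature maps, roughly p = 0.2 of the 20 * 16 channels should be all-zero. A hypothetical check (new code) that counts fully-zeroed channels:

zeroed = (output.view(20, 16, -1).abs().sum(2) == 0).sum()
print(zeroed)  # on average about 0.2 * 20 * 16 = 64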


m = nn.LogSoftmax()  # newer PyTorch versions prefer the explicit nn.LogSoftmax(dim=1)
loss = nn.NLLLoss()
# input is of size nBatch x nClasses = 3 x 5
input = Variable(torch.randn(3, 5), requires_grad=True)
# each element in target has to have 0 <= value < nclasses
target = Variable(torch.LongTensor([1, 0, 4]))
output = loss(m(input), target)
print(output)
output.backward()
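
nn.CrossEntropyLoss combines LogSoftmax and NLLLoss in a single module, so it should reproduce the value above (a minimal sketch):

ce = nn.CrossEntropyLoss()
print(ce(input, target))  # same value as loss(m(input), target)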
