PyTorch数据处理工具箱

transforms

transforms提供了对PIL Image对象和Tensor对象的常用操作。

Scale/Resize

调整尺寸,长宽比保持不变。

CenterCrop、RandomCrop、RandomSizedCrop

裁剪图像。

Pad

填充。

ToTensor

把一个取值范围是[0,255]的PIL.Image转换成Tensor。

RandomHorizontalFlip

图像随机水平翻转

 

 

transforms

transforms提供了对PIL Image对象和Tensor对象的常用操作。 

2)对Tensor的常见操作

 

import torch
import torchvision
import torchvision.transforms as transforms

# Chain several image transforms into one pipeline; they are applied in order.
transform = transforms.Compose([
    # Center-crop the given PIL.Image to `size`.
    # `size` may be a tuple (target_height, target_width), or a single
    # integer, in which case a square crop is taken.
    transforms.CenterCrop(10),
    # Crop at a randomly chosen center location.
    transforms.RandomCrop(20, padding=0),
    # Convert a PIL.Image with values in [0, 255], or a numpy.ndarray of
    # shape (H, W, C), into a torch.FloatTensor of shape (C, H, W) with
    # values in [0, 1].
    transforms.ToTensor(),
    # Normalize each channel to the range [-1, 1]: out = (in - mean) / std.
    transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
])

Compose(
    CenterCrop(size=(10, 10))
    RandomCrop(size=(20, 20), padding=0)
    ToTensor()
    Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
)

 

ImageFolder

 

ImageFolder可以读取不同目录下的图像数据

 

from torchvision import transforms, utils
from torchvision import datasets
import torch
import matplotlib.pyplot as plt
# %matplotlib inline  (Jupyter magic in the original notebook)

# Random augmentation + tensor conversion pipeline for training images.
my_trans = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
])

# ImageFolder reads images from per-class subdirectories and labels each
# image with its subdirectory index.
train_data = datasets.ImageFolder('../data/torchvision_data', transform=my_trans)
train_loader = torch.utils.data.DataLoader(train_data, batch_size=8, shuffle=True)

# Visualize the first batch only, then stop.
for i_batch, img in enumerate(train_loader):
    if i_batch == 0:
        print(img[1])  # the batch of labels
        fig = plt.figure()
        grid = utils.make_grid(img[0])  # tile the batch of images into one grid
        # make_grid returns (C, H, W); matplotlib expects (H, W, C).
        plt.imshow(grid.numpy().transpose((1, 2, 0)))
        plt.show()
        utils.save_image(grid, 'test001.png')
    break

 

用TensorBoard可视化神经网络:

import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torch.utils.tensorboard import SummaryWriter


class Net(nn.Module):
    """Small CNN (LeNet-style) used to demonstrate TensorBoard graph export.

    Expects input of shape (batch, 1, 28, 28) (MNIST-sized images) and
    returns class probabilities of shape (batch, 10).
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)
        self.bn = nn.BatchNorm2d(20)

    def forward(self, x):
        x = F.max_pool2d(self.conv1(x), 2)
        # NOTE(review): the book's original uses relu(x) + relu(-x)
        # (i.e. |x|); the OCR text is ambiguous here — confirm against
        # the source material.
        x = F.relu(x) + F.relu(-x)
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = self.bn(x)
        x = x.view(-1, 320)  # flatten to (batch, 20*4*4) for the FC layers
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        x = F.softmax(x, dim=1)
        return x


input = torch.rand(32, 1, 28, 28)
# Instantiate the network
model = Net()
# Export the model as a graph so TensorBoard can visualize it
with SummaryWriter(log_dir='logs', comment='Net') as w:
    w.add_graph(model, (input,))

 

用TensorBoard可视化损失值:

import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
import numpy as np

# Hyperparameters for a 1-D linear regression demo.
input_size = 1
output_size = 1
num_epoches = 60
learning_rate = 0.01
dtype = torch.FloatTensor
writer = SummaryWriter(log_dir='logs', comment='Linear')

# Synthetic data: y = 3x^2 + 2 + noise, 100 points on [-1, 1].
np.random.seed(100)
x_train = np.linspace(-1, 1, 100).reshape(100, 1)
y_train = 3 * np.power(x_train, 2) + 2 + 0.2 * np.random.rand(x_train.size).reshape(100, 1)

model = nn.Linear(input_size, output_size)
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

for epoch in range(num_epoches):
    inputs = torch.from_numpy(x_train).type(dtype)
    targets = torch.from_numpy(y_train).type(dtype)
    output = model(inputs)
    loss = criterion(output, targets)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # Log the loss value against the epoch number for TensorBoard.
    writer.add_scalar('训练损失值', loss, epoch)

 

用TensorBoard可视化特征图:

import torchvision.utils as vutils

# NOTE(review): this fragment relies on `trainloader`, `device`, `net`,
# `torch` and `SummaryWriter` being defined earlier (outside this excerpt).
writer = SummaryWriter(log_dir='logs', comment='feature map')

# Grab a single sample image from the training loader.
for i, data in enumerate(trainloader, 0):
    # Get one batch of training data.
    inputs, labels = data
    inputs, labels = inputs.to(device), labels.to(device)
    x = inputs[0].unsqueeze(0)  # keep just the first image, as a batch of 1
    break

img_grid = vutils.make_grid(x, normalize=True, scale_each=True, nrow=2)
net.eval()
# Push the sample through the network layer by layer, logging the
# intermediate feature maps of conv/residual layers.
for name, layer in net._modules.items():
    with torch.no_grad():
        # Flatten before fully-connected layers ("fc" in the layer name).
        x = x.view(x.size(0), -1) if "fc" in name else x
        print(x.size())
        x = layer(x)
        print(f'{name}')
    # Visualize this layer's feature maps.
    if 'layer' in name or 'conv' in name:
        # (B, C, H, W) -> (C, B, H, W): show each channel as its own image.
        x1 = x.transpose(0, 1)
        img_grid = vutils.make_grid(x1, normalize=True, scale_each=True, nrow=4)
        writer.add_image(f'{name}_feature_maps', img_grid, global_step=0)

 

 

 

 

 

·如果要对数据集进行多个操作,可通过Compose将这些操作像管道一样拼接起来,类似于nn.Sequential

Normalize

标准化,即减均值,除以标准差。

ToPILImage

将Tensor转为PIL Image对象。

你可能感兴趣的:(笔记)