pytorch学习

两个需要注意的点

  1. mac 不支持 cuda drivers
  2. cuda toolkit版本和机器上的cuda driver版本一致
    查看显卡驱动版本 nvidia-smi

使用package的两个命令

  1. dir(torch)
  2. help(torch)

数据加载
Dataset 获取数据和label

from torch.utils.data import Dataset
import os
# NOTE(review): also requires `from PIL import Image` at file top — PIL is
# only imported in a later snippet of these notes.

class MyData(Dataset):
	"""Folder-per-class image dataset.

	Each sample is ``(PIL image, label)`` where the label is the name of
	the sub-directory (``label_dir``) the image file lives in.
	"""
	def __init__(self,root_dir,label_dir):
		# Dataset root and the per-class sub-directory name.
		self.root_dir = root_dir
		self.label_dir = label_dir
		self.path = os.path.join(self.root_dir,self.label_dir)
		# File names of all images under root_dir/label_dir.
		self.img_path = os.listdir(self.path)

	def __getitem__(self,idx):
		"""Return (image, label) for the idx-th file in the directory."""
		img_name = self.img_path[idx]
		# BUG FIX: original wrote `self,label_dir` (comma instead of dot),
		# which passed a tuple to os.path.join / raised NameError.
		img_item_path = os.path.join(self.root_dir,self.label_dir,img_name)
		img = Image.open(img_item_path)
		label = self.label_dir
		return img,label

	def __len__(self):
		"""Number of images in this class directory."""
		return len(self.img_path)

# Build the "ants" dataset from root_dir/ants.
# BUG FIX: the variable was misspelled `ants_label_diir` on definition but
# used as `ants_label_dir` below (NameError), and root_dir pointed at a
# single image file — MyData expects a directory it can listdir().
root_dir = "/data/img"
ants_label_dir = "ants"
ants_dataset = MyData(root_dir,ants_label_dir)

Dataloader 为后面的层提供不同的数据方式

from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import torchvision

# BUG FIXES vs the original notes: missing closing parenthesis, the module
# is `torchvision.transforms` (plural), ToTensor takes no argument, the
# kwarg is `num_workers=0` (original had `num_worker==0`), the writer was
# never created (`write` vs `writer`), and Python has no `++` operator.
test_data = torchvision.datasets.CIFAR10(
	"./dataset", train=False,
	transform=torchvision.transforms.ToTensor())
# batch_size=4 groups 4 images per step; drop_last=False keeps the final,
# possibly smaller, batch.
test_loader = DataLoader(dataset=test_data, batch_size=4, shuffle=True,
						 num_workers=0, drop_last=False)

writer = SummaryWriter("logs")
step = 0
for data in test_loader:
	imgs, targets = data
	writer.add_images("test_data_drop_last", imgs, step)
	step = step + 1
writer.close()

TensorBoard(查看训练过程中loss的变化)

# BUG FIXES vs the original notes: the first line was invalid syntax
# (`import ... import summaryWriter`) and the class is `SummaryWriter`;
# `np.arrary` -> `np.array`; `writer.cose()` -> `writer.close()`.
from torch.utils.tensorboard import SummaryWriter
import numpy as np
from PIL import Image

writer = SummaryWriter("logs")
image_path = "/data/img/a.jpg"
img_PIL = Image.open(image_path)
# add_image wants an array; dataformats='HWC' because PIL images are
# height x width x channel, not the CHW default.
img_array = np.array(img_PIL)
writer.add_image("test",img_array,2,dataformats='HWC')
# Log a simple scalar curve (y = x) to inspect in TensorBoard.
for i in range(100):
	writer.add_scalar("y =x",i,i)
writer.close()

transform.py

# BUG FIXES vs the original notes: the package is `torchvision` (was
# `torchversion`); the class is `transforms.ToTensor` (was
# `transform.toTensor`); Normalize takes two list arguments separated by a
# comma; several variable names were misspelled (`imag_tensor`,
# `trsns_norm`); `print` was missing a closing parenthesis; and the writer
# was never created.
from torchvision import transforms
from PIL import Image
from torch.utils.tensorboard import SummaryWriter

img_path = "/data/img/a.img"
img = Image.open(img_path)

# transforms usage: ToTensor converts a PIL image to a CHW float tensor
# with values scaled into [0, 1].
tensor_trans = transforms.ToTensor()
img_tensor = tensor_trans(img)

# Normalize: output = (input - mean) / std per channel; with mean=std=0.5
# this maps [0, 1] onto [-1, 1].
print(img_tensor[0][0][0])
trans_norm = transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
img_norm = trans_norm(img_tensor)
print(img_norm[0][0][0])

writer = SummaryWriter("logs")
writer.add_image("Normalization", img_norm)
writer.close()

python里面的内置方法使用

class Person:
	"""Demonstrates the __call__ dunder versus an ordinary method."""
	# Dunder method: invoked when the instance itself is called.
	def __call__(self,name):
		print("__call__  hello " + name)
	# Ordinary method.
	# BUG FIX: `self` was missing from the signature, so
	# person.sayHello("lisi") raised TypeError — the instance is always
	# passed implicitly as the first argument.
	def sayHello(self,name):
		print("hello " + name)
person = Person()
# Calling the instance directly dispatches to __call__
person("zhangsan")
# Ordinary method call
person.sayHello("lisi")

torchvision数据集的使用

import torchvision
# Download CIFAR10 into ./dataset; train=True selects the 50k training
# split, train=False the 10k test split. No transform is given, so each
# sample is a (PIL image, class-index) pair.
train_set = torchvision.datasets.CIFAR10(root="./dataset",train = True,download =True)
test_set = torchvision.datasets.CIFAR10(root="./dataset",train = False,download = True)

神经网络的基本骨架 nn.Module (nn = neural network)

import torch
import torch.nn as nn

class myModule(nn.Module):
	"""Minimal nn.Module example: forward simply adds 1 to its input."""
	def __init__(self):
		# BUG FIX: the original spelled this `__int__`, so nn.Module's
		# own __init__ never ran and the module could not be used.
		super().__init__()
	def forward(self,input):
		return input+1
mymodule = myModule()
input = torch.tensor(1.0)
# nn.Module.__call__ dispatches to forward().
output = mymodule(input)
print(output)

卷积操作convolution

import torch
import torch.nn.functional as F
# BUG FIXES vs the original notes: `funcional` -> `functional`; a stray
# double comma and a missing `]` in the input literal; reshape() returns a
# new tensor, so its result must be assigned; conv2d on CPU requires a
# floating dtype, hence dtype=torch.float32.
input = torch.tensor([[1, 2, 0, 3, 1],
					[0, 1, 2, 3, 1],
					[1, 2, 1, 0, 0],
					[5, 2, 3, 1, 1],
					[2, 1, 0, 1, 1]], dtype=torch.float32)
kernel = torch.tensor([[1, 2, 1],
						[0, 1, 0],
						[2, 1, 0]], dtype=torch.float32)
# conv2d expects (N, C, H, W) for both input and weight.
input = input.reshape(1, 1, 5, 5)
kernel = kernel.reshape(1, 1, 3, 3)
# Convolution: a 5x5 input with a 3x3 kernel at stride 1 yields 3x3 output.
output = F.conv2d(input, kernel, stride=1)
print(output)

池化操作 pooling

import torch
import torchvision
from torch import nn
from torch.nn import MaxPool2d
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# BUG FIXES vs the original notes: `torchvision.transform` ->
# `torchvision.transforms`; the base class is `nn.Module` (was
# `nn.modle`); `for data in dataloader;` used a semicolon instead of a
# colon; SummaryWriter was never imported.
dataset = torchvision.datasets.CIFAR10("../data", train=False, download=True,
									   transform=torchvision.transforms.ToTensor())
dataloader = DataLoader(dataset, batch_size=64)

class myModule(nn.Module):
	"""Single max-pooling layer: 3x3 window, floor (non-ceil) mode."""
	def __init__(self):
		super(myModule, self).__init__()
		# ceil_mode=False drops incomplete windows at the border.
		self.maxpool1 = MaxPool2d(kernel_size=3, ceil_mode=False)
	# Pooling computation.
	def forward(self, input):
		return self.maxpool1(input)

mymodule = myModule()
writer = SummaryWriter("logs_maxpool")
step = 0
for data in dataloader:
	imgs, target = data
	writer.add_images("input", imgs, step)
	output = mymodule(imgs)
	writer.add_images("output", output, step)
	step = step + 1
writer.close()

simple example

import torch
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear

class myModule(nn.Module):
	"""CIFAR10 classifier: 3 x (5x5 conv + 2x2 maxpool) -> flatten -> 2 FC.

	BUG FIXES vs the original notes: the import spelled `MaxPoo2d` and was
	missing `nn` and `Flatten`; `def forward(self,x)` lacked its colon;
	the body referenced `self.maxpoo1`; Linear's input size is
	64 channels * 4 * 4 = 1024 (was 10024); and a CIFAR10 batch is
	(N, 3, 32, 32) — the original (64, 32, 32) had no channel dimension.
	"""
	def __init__(self):
		super(myModule, self).__init__()
		# padding=2 keeps the spatial size through each 5x5 conv;
		# each MaxPool2d(2) then halves it: 32 -> 16 -> 8 -> 4.
		self.conv1 = Conv2d(3, 32, 5, padding=2)
		self.maxpool1 = MaxPool2d(2)
		self.conv2 = Conv2d(32, 32, 5, padding=2)
		self.maxpool2 = MaxPool2d(2)
		self.conv3 = Conv2d(32, 64, 5, padding=2)
		self.maxpool3 = MaxPool2d(2)
		self.flatten = Flatten()
		self.linear1 = Linear(1024, 64)
		self.linear2 = Linear(64, 10)
	def forward(self, x):
		x = self.conv1(x)
		x = self.maxpool1(x)
		x = self.conv2(x)
		x = self.maxpool2(x)
		x = self.conv3(x)
		x = self.maxpool3(x)
		x = self.flatten(x)
		x = self.linear1(x)
		x = self.linear2(x)
		return x
mymodule = myModule()
x = torch.ones(64, 3, 32, 32)
output = mymodule(x)
print(output)

你可能感兴趣的:(pytorch,学习,深度学习)