# NN — fully-connected baseline (notebook section header)
import torch
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
# from torch import nn
import torch.nn as nn
# FashionMNIST, downloaded to ./data and converted to float tensors in [0, 1].
train_data = datasets.FashionMNIST(root='data', train=True, download=True, transform=ToTensor())
test_data = datasets.FashionMNIST(root='data', train=False, download=True, transform=ToTensor())
print(len(test_data))

# Mini-batch iterators; shuffling re-orders samples every epoch.
batch_size = 64
train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset=test_data, batch_size=batch_size, shuffle=True)

# Peek at one batch to confirm tensor shapes and label format.
x, y = next(iter(train_loader))
print('x::', x, x.shape)
print('y::', y)
class NN(nn.Module):
    """Simple MLP classifier for FashionMNIST.

    Input:  (batch, 1, 28, 28) image tensor.
    Output: (batch, 10) unnormalised class logits.
    """

    def __init__(self):
        super().__init__()
        # Flatten is stateless — build it once here rather than
        # instantiating a new nn.Flatten() on every forward() call.
        # (Adds no parameters, so saved state_dict keys are unchanged.)
        self.flatten = nn.Flatten()
        self.line = nn.Sequential(
            nn.Linear(28 * 28, 512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Linear(512, 10),
        )

    def forward(self, x):
        """Flatten each image to 784 features and return the logits."""
        x = self.flatten(x)
        return self.line(x)
nn_ins = NN()
print(nn_ins.parameters())

# Dump every learnable tensor together with its shape.
for param in nn_ins.parameters():
    print(param, param.shape)

loss_fun = nn.CrossEntropyLoss()
# Plain SGD; the commented Adam line below is an alternative to try.
optimer = torch.optim.SGD(lr=0.01, params=nn_ins.parameters())
# optimer = torch.optim.Adam(lr=0.001,params=nn_ins.parameters())
# Train for 3 epochs; after each epoch, measure loss/accuracy on the test set.
for j in range(3):
    nn_ins.train()
    for index, (x, y) in enumerate(train_loader):
        pred = nn_ins(x)
        loss = loss_fun(pred, y)
        loss.backward()
        optimer.step()
        optimer.zero_grad()
        if index % 100 == 0:
            print('loss', loss.item())
        # break
    nn_ins.eval()
    loss_test = 0
    accuracy = 0
    # no_grad(): inference only — skips autograd bookkeeping. Accumulating
    # .item() floats (not tensors) also avoids keeping every batch's
    # computation graph alive, which the original version did.
    with torch.no_grad():
        for x, y in test_loader:
            pred = nn_ins(x)
            loss_test += loss_fun(pred, y).item()
            accuracy += (pred.argmax(1) == y).type(torch.float).sum().item()
            # break
    total_length = len(test_loader.dataset)   # number of test samples
    batch_length = len(test_loader)           # number of test batches
    print(total_length, batch_length)
    print(j, 'loss::', f'{loss_test/batch_length:.5f}', 'accuracy::', f'{accuracy/total_length:.2f}')
# Persist the trained weights, then reload them into a fresh model instance.
torch.save(nn_ins.state_dict(), "model.pth")
print("Saved PyTorch Model State to model.pth")
# (Removed two discarded torch.cuda/mps availability probes — their return
# values were never used, so the statements had no effect.)
model = NN()
# map_location='cpu' makes the load safe even if the checkpoint was written
# on a different device; everything in this script runs on CPU.
model.load_state_dict(torch.load("model.pth", map_location="cpu"))

# FashionMNIST label names, indexed by class id 0-9.
classes = [
    "T-shirt/top",
    "Trouser",
    "Pullover",
    "Dress",
    "Coat",
    "Sandal",
    "Shirt",
    "Sneaker",
    "Bag",
    "Ankle boot",
]

# Predict the class of the first test image and compare with ground truth.
model.eval()
x, y = test_data[0][0], test_data[0][1]
with torch.no_grad():
    pred = model(x)
    predicted, actual = classes[pred[0].argmax(0)], classes[y]
    print(f'Predicted: "{predicted}", Actual: "{actual}"')
# CNN — convolutional variant (notebook section header)
import torch
from torchvision import datasets
from torch.utils.data import DataLoader
from torch import nn
from torchvision.transforms import ToTensor
# FashionMNIST splits for the CNN section, served as tensors from ./data.
train_data = datasets.FashionMNIST(root='data', download=True, train=True, transform=ToTensor())
test_data = datasets.FashionMNIST(root='data', download=True, train=False, transform=ToTensor())

batch_size = 64
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=True)
class CNN(nn.Module):
    """Small conv net for FashionMNIST: (batch, 1, 28, 28) -> (batch, 10) logits."""

    def __init__(self):
        super().__init__()
        # Two conv/pool stages halve the spatial size each time
        # (28x28 -> 14x14 -> 7x7), then a linear head maps to 10 classes.
        stages = [
            nn.Conv2d(in_channels=1, out_channels=16, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(in_channels=16, out_channels=8, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),
            nn.Flatten(),
            nn.Linear(8 * 7 * 7, 10),
        ]
        self.conv2d = nn.Sequential(*stages)

    def forward(self, x):
        """Run the whole pipeline and return unnormalised class scores."""
        return self.conv2d(x)
cnn = CNN()

# Show the shape of each learnable tensor in the network.
for weight in cnn.parameters():
    print(weight.shape)

loss_fun = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(params=cnn.parameters(), lr=0.001)
# --- training: 3 epochs of SGD steps with Adam ---
cnn.train()
for epoch in range(3):
    for batch_index, (x, y) in enumerate(train_loader):
        pred = cnn(x)
        loss = loss_fun(pred, y)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        if batch_index % 100 == 0:
            print('loss', loss.item())

# --- evaluation on the held-out test split ---
cnn.eval()
size = len(test_loader.dataset)   # number of test samples
batch_num = len(test_loader)      # number of test batches
test_loss = 0
accuracy = 0
# no_grad(): inference only — and accumulating .item() floats instead of
# loss tensors stops every batch's autograd graph from being retained,
# which the original accumulation did.
with torch.no_grad():
    for x, y in test_loader:
        pred = cnn(x)
        test_loss += loss_fun(pred, y).item()
        accuracy += (pred.argmax(1) == y).type(torch.float).sum().item()
print('test loss::', test_loss / batch_num, 'accuracy::', accuracy / size)