# 机器学习李宏毅2020hw3可运行 (Machine Learning, Hung-yi Lee 2020, HW3 — runnable)

import os
import numpy as np
import cv2
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Dataset
import time

#读取图片
def readfile(path,label):
    #label确定训练集或测试集
    image_dir=sorted(os.listdir(path))
    x=np.zeros((len(image_dir),128,128,3),dtype=np.uint8)
    y=np.zeros((len(image_dir)),dtype=np.uint8)

    for i,file in enumerate(image_dir):
        img=cv2.imread(os.path.join(path,file))
        #统一图片大小
        x[i,:,:]=cv2.resize(img,(128,128))

        if label:
            y[i]=int(file.split('_')[0])
            #以下划线分开,取前面的标签
    if label:
        return x,y
    else:
        return x

# Dataset root; adjust this path to wherever the food-11 dataset lives locally.
workspace_dir='/home/lizheng/Study/yolov5-5.0/food-11'
print('Reading data')
# Training and validation sets carry labels (label=True); the test set does not.
train_x,train_y=readfile(os.path.join(workspace_dir,'training'),True)
print('Size of training data={}'.format(len(train_x)))
val_x,val_y=readfile(os.path.join(workspace_dir,'validation'),True)
print('Size of validation data={}'.format(len(val_x)))
test_x=readfile(os.path.join(workspace_dir,'testing'),False)
print('Size of Testing data={}'.format(len(test_x)))

# transforms.Compose chains the steps into one callable, used for data augmentation.
training_transform=transforms.Compose([
    transforms.ToPILImage(),
    transforms.RandomHorizontalFlip(),# randomly flip the image horizontally
    transforms.RandomRotation(15.0),# randomly rotate the image (up to 15 degrees)
    transforms.ToTensor()
])
# No augmentation for validation/testing.
test_transform=transforms.Compose([
    transforms.ToPILImage(),
    transforms.ToTensor(),
])
# Dataset packs the data as [[X], Y]: X has shape num_images * 3 (RGB) * 128*128
# (pixels), and each X is paired with one label Y. E.g. train_set[0] is
# [<first image's data>, <first image's class id>] and train_set[1] is
# [<second image's data>, <second image's class id>].
# DataLoader then groups every 128 images into one batch of the form [[X], Y]:
# X is 128 (images) * 3 (RGB) * 128*128 (pixels) and Y holds the 128 matching labels.

class ImgDataset(Dataset):
    """Wrap image arrays (and optional labels) for use with a DataLoader.

    Each item is (image, label) when labels were given, else just the image.
    An optional transform is applied to the image on every access.
    """

    def __init__(self, x, y=None, transform=None):
        self.x = x
        # CrossEntropyLoss expects integer class targets as a LongTensor.
        self.y = None if y is None else torch.LongTensor(y)
        self.transform = transform

    def __len__(self):
        return len(self.x)

    def __getitem__(self, index):
        sample = self.x[index]
        if self.transform is not None:
            sample = self.transform(sample)
        if self.y is None:
            # Unlabeled (test) dataset: yield the image alone.
            return sample
        return sample, self.y[index]

class classifier(nn.Module):
    """CNN classifier for 11 food categories on 128x128 RGB inputs.

    Five conv/bn/relu/pool stages halve the spatial size each time
    (128 -> 64 -> 32 -> 16 -> 8 -> 4), followed by a 3-layer MLP head.
    """

    @staticmethod
    def _stage(c_in, c_out):
        # Conv2d(3x3, stride 1, pad 1) keeps the spatial size;
        # MaxPool2d(2, 2, 0) halves it.
        return [
            nn.Conv2d(c_in, c_out, 3, 1, 1),
            nn.BatchNorm2d(c_out),
            nn.ReLU(),
            nn.MaxPool2d(2, 2, 0),
        ]

    def __init__(self):
        super(classifier, self).__init__()
        # Channel progression per stage:
        # [3,128,128] -> [64,64,64] -> [128,32,32] -> [256,16,16]
        # -> [512,8,8] -> [512,4,4]
        layers = []
        for c_in, c_out in [(3, 64), (64, 128), (128, 256), (256, 512), (512, 512)]:
            layers.extend(self._stage(c_in, c_out))
        # Flat Sequential keeps the same module order (and state_dict keys)
        # as listing every layer inline.
        self.cnn = nn.Sequential(*layers)

        # Fully connected head producing logits for the 11 classes.
        self.fc = nn.Sequential(
            nn.Linear(512 * 4 * 4, 1024),
            nn.ReLU(),
            nn.Linear(1024, 512),
            nn.ReLU(),
            nn.Linear(512, 11),
        )

    def forward(self, x):
        features = self.cnn(x)                       # (N, 512, 4, 4)
        flat = features.view(features.size(0), -1)   # (N, 512*4*4)
        return self.fc(flat)

#DataLoader
batch_size=128
# Wrap the arrays into Dataset objects (training set gets augmentation).
train_set=ImgDataset(train_x,train_y,training_transform)
val_set=ImgDataset(val_x,val_y,test_transform)
print(train_set.x)
print(train_set.y)
# DataLoader splits the packed data into batches of 128 images each;
# one batch has the form [[X], Y] with X of shape 128*3*128*128.
train_loader=DataLoader(train_set,batch_size=batch_size,shuffle=True)# reshuffle at the start of every epoch
val_loader=DataLoader(val_set,batch_size=batch_size,shuffle=False)

# nn.Module's __call__ invokes forward(), so a model instance can be called
# like a function: model(x) runs the forward pass.
model=classifier().cuda()# move the model to the GPU
loss=nn.CrossEntropyLoss()# classification loss
optimizer=torch.optim.Adam(model.parameters(),lr=0.001)# optimizer
num_epoch=30

# Main training loop: one optimization pass over train_loader per epoch,
# followed by an evaluation pass over val_loader.
for epoch in range(num_epoch):
    epoch_start_time=time.time()
    train_acc=0.0
    train_loss=0.0
    val_acc=0.0
    val_loss=0.0

    model.train()
    for i,data in enumerate(train_loader):# one batch at a time; data[0]=images, data[1]=labels
        optimizer.zero_grad()# clear gradients left from the previous step
        train_pred=model(data[0].cuda())# forward pass (nn.Module.__call__ runs forward)
        batch_loss=loss(train_pred,data[1].cuda())
        batch_loss.backward()# backpropagate all gradients
        optimizer.step()# update parameters
        # train_pred is (batch_size, 11); np.argmax over axis=1 picks the
        # highest-scoring class per row, which is compared against the true
        # labels; the sum is the number of correct predictions in this batch.
        train_acc+=np.sum(np.argmax(train_pred.cpu().data.numpy(),axis=1)==data[1].numpy())
        # BUG FIX: accumulate with += — the original overwrote train_loss each
        # batch, so the reported epoch loss was only the last batch's loss.
        train_loss+=batch_loss.item()# item() reads the single scalar out of the tensor


    model.eval()
    with torch.no_grad():# no gradients needed during validation
        for i,data in enumerate(val_loader):
            val_pred=model(data[0].cuda())
            batch_loss=loss(val_pred,data[1].cuda())

            val_acc+=np.sum(np.argmax(val_pred.cpu().data.numpy(),axis=1)==data[1].numpy())
            # BUG FIX: accumulate inside the loop — the original assigned
            # val_loss once after the loop, keeping only the last batch's loss.
            val_loss+=batch_loss.item()

    # Report per-epoch metrics (losses/accuracies averaged over dataset size).
    print('[%03d/%03d] %2.2f sec(s) Train Acc: %3.6f Loss: %3.6f|Val Acc:%3.6f loss: %3.6f' %
          (epoch+1,num_epoch,time.time()-epoch_start_time,train_acc/train_set.__len__(),
          train_loss/train_set.__len__(),val_acc/val_set.__len__(),
          val_loss/val_set.__len__()))
    torch.cuda.empty_cache()# release cached GPU memory between epochs

# Retrain on training + validation data combined, to squeeze out extra accuracy.
train_val_x=np.concatenate((train_x,val_x),axis=0)# merge along the sample axis
train_val_y=np.concatenate((train_y,val_y),axis=0)
train_val_set=ImgDataset(train_val_x,train_val_y,training_transform)# combined training set
train_val_loader=DataLoader(train_val_set,batch_size=batch_size,shuffle=True)# batched loader

model_best=classifier().cuda()
loss=nn.CrossEntropyLoss()
optimizer=torch.optim.Adam(model_best.parameters(),lr=0.001)

num_epoch=30

for epoch in range(num_epoch):
    epoch_start_time=time.time()
    train_acc=0.0
    train_loss=0.0

    model_best.train()
    for i,data in enumerate(train_val_loader):# data[0]=images, data[1]=labels
        optimizer.zero_grad()# clear gradients from the previous step
        train_pred=model_best(data[0].cuda())# forward pass
        batch_loss=loss(train_pred,data[1].cuda())
        batch_loss.backward()# backpropagate
        optimizer.step()# update parameters

        # argmax over the 11 class logits gives the predicted label per row.
        train_acc+=np.sum(np.argmax(train_pred.cpu().data.numpy(),axis=1)==data[1].numpy())
        train_loss+=batch_loss.item()# accumulate the scalar batch loss

    print('[%03d/%03d] %2.2f sec(s) Train Acc: %3.6f Loss: %3.6f' %
          (epoch+1,num_epoch,time.time()-epoch_start_time,
           train_acc/train_val_set.__len__(),train_loss/train_val_set.__len__()))

# Inference on the unlabeled test set, then export predictions to CSV.
test_set=ImgDataset(test_x,transform=test_transform)
test_loader=DataLoader(test_set,batch_size=batch_size,shuffle=False)

model_best.eval()
prediction=[]

with torch.no_grad():
    for batch in test_loader:
        logits=model_best(batch.cuda())
        # The index of the largest logit is the predicted class id;
        # collect the whole batch of labels at once.
        prediction.extend(np.argmax(logits.cpu().data.numpy(),axis=1))

# Kaggle-style submission: one "Id,Category" row per test image.
with open('prediction.csv','w') as f:
    f.write('Id,Category\n')
    for i,pred in enumerate(prediction):
        f.write('{},{}\n'.format(i,pred))
torch.cuda.empty_cache()

# 机器学习李宏毅2020hw3可运行_第1张图片 (figure 1 from the original post)
# 机器学习李宏毅2020hw3可运行_第2张图片 (figure 2 from the original post)

# 你可能感兴趣的:(机器学习,python,深度学习) (related tags: machine learning, python, deep learning)