首先从Kaggle官网下载数据集https://www.kaggle.com/c/digit-recognizer/data里面包含三个CSV文档。train.csv是带标签的数据,用于训练和调参,test.csv是无标签的数据,在提交测试文档的时候才需要用到。
这里,我先把train里面的数据又随机划分为两个表,一个用于训练一个用于交叉验证,代码很简单,主要是pandas的一些简单功能。
#use torch.utils.data.Dataset to build my dataset from train.csv and test.csv
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import torch
from torch.utils.data import DataLoader
from torch import nn
from torch.autograd import Variable
def train_val_split(train='train.csv', train_file='train_set.csv', val_file='val_set.csv', test_size=0.2):
    """Split a labelled Kaggle CSV into training and validation CSV files.

    Parameters
    ----------
    train : str
        Path to the labelled source CSV (Kaggle's ``train.csv``; first
        column is the label, remaining columns are pixel values).
    train_file : str
        Output path for the training split.
    val_file : str
        Output path for the validation split.
    test_size : float
        Fraction of rows held out for validation (default 0.2).
    """
    train_data = pd.read_csv(train)
    # Randomly sample the validation rows; everything else becomes the
    # training set. This is a pandas-native equivalent of
    # sklearn.model_selection.train_test_split(train_data, test_size=test_size).
    val_set = train_data.sample(frac=test_size)
    train_set = train_data.drop(val_set.index)
    # Write both splits back to disk without the pandas index column.
    train_set.to_csv(train_file, index=False)
    val_set.to_csv(val_file, index=False)
    print('train_data.shape:', train_data.shape)
    print('train_set.shape:', train_set.shape)
    print('val_set.shape:', val_set.shape)
train_val_split('train.csv','train_set.csv','val_set.csv')
运行结果为:
train_data.shape: (42000, 785)
train_set.shape: (33600, 785)
val_set.shape: (8400, 785)
这样,训练数据和交叉验证的数据就分别存在两个表里面了。
下一步,我们需要重写Pytorch的数据集类,构建我们的数据集。
# Pixel preprocessing: linearly rescale raw values from [0, 255] to [-1, 1].
def data_tf(x):
    """Convert pixel data to a float32 tensor scaled into [-1, 1]."""
    arr = np.array(x, dtype='float32')
    arr = arr / 255        # map [0, 255] -> [0, 1]
    arr = (arr - 0.5) / 0.5  # map [0, 1] -> [-1, 1]
    return torch.from_numpy(arr)
# Custom Dataset wrapping a Kaggle MNIST CSV file.
class MyMNIST(torch.utils.data.Dataset):
    """MNIST dataset backed by a Kaggle-format CSV.

    In training mode (``train=True``) the CSV's first column is the digit
    label and the remaining 784 columns are pixel values; in test mode
    every column is a pixel and no labels are returned.
    """

    def __init__(self, datatxt, train=True, transform=data_tf, target_transform=None):
        self.data = pd.read_csv(datatxt)
        self.transform = transform
        self.train = train
        if self.train:
            # Column 0 holds the label; the rest are pixel values.
            self.y = np.array(self.data.iloc[:, 0])
            self.X = np.array(self.data.iloc[:, 1:])
        else:
            # Unlabelled test data: every column is a pixel.
            self.X = np.array(self.data)

    def __getitem__(self, index):
        # Fetch one sample by index, applying the transform if configured.
        sample = torch.tensor(self.X[index], dtype=torch.float)
        if self.transform is not None:
            sample = self.transform(sample)
        if not self.train:
            return sample
        return sample, torch.tensor(self.y[index], dtype=torch.long)

    def __len__(self):
        # Number of rows in the underlying CSV.
        return len(self.data)
#Build trainset validset and testset from csv and perform data preprocessing
# train/val splits carry labels (train=True); the Kaggle test set does not.
X_train = MyMNIST(datatxt = 'train_set.csv',train=True, transform = data_tf)
X_val = MyMNIST(datatxt= 'val_set.csv',train=True, transform = data_tf)
X_test = MyMNIST(datatxt = 'test.csv',train=False, transform = data_tf)
#iterator of our dataset
# Only the training loader is shuffled; evaluation loaders keep a fixed order
# so predictions line up with the CSV rows (required for the Kaggle submission).
train_data = DataLoader(X_train, batch_size=64, shuffle=True)
val_data = DataLoader(X_val, batch_size=64, shuffle=False)
test_data = DataLoader(X_test, batch_size=1000, shuffle=False)
以上是构建数据集的全部代码。
下一步就可以构建网络结构。