A Simple Hands-on Practice with MindSpore - Educoder Exercise Answers

First, why this is worth doing: MindSpore is a relatively easy-to-pick-up open-source deep learning framework, somewhat simpler than its peers, and it offers noticeable acceleration for some algorithms. Working through it hands-on also makes it easier to understand and master other frameworks, and these introductory exercises are a helpful stepping stone for getting deeper into deep learning later on. MindSpore is still in its first generation, and it should keep improving as more people join in.

Task 1: Installing MindSpore and Basic Concepts

	According to the hints, fill in the code between Begin and End in the editor on the right: define a rank-3 tensor
	of type mindspore.int32 (the values inside are not restricted) and return it from the function.

Tensors in MindSpore are defined with the Tensor class; note the difference between a tensor and a matrix. Tensors are the central way data is stored in machine learning and deep learning, so they are worth getting familiar with, including how to convert to and from numpy (a supplementary sketch follows the answer below). A more detailed look at numpy will come in a later post.

import mindspore
from mindspore import Tensor
def define_tensor():
    # Define a rank-3 tensor of type int32; the contents are not restricted
    # Return this tensor from the function
    ########## Begin ##########

    tensor = Tensor([[[1, 2, 3], [2, 3, 4]], [[4, 5, 5], [3, 4, 5]]], mindspore.int32)  # define a rank-3 tensor

    ########## End ##########
    return tensor
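
Since the hint above mentions converting between MindSpore tensors and numpy arrays, here is a minimal round-trip sketch (assuming the standard Tensor.asnumpy() method of this MindSpore version):

import numpy as np
import mindspore
from mindspore import Tensor

np_data = np.arange(12, dtype=np.int32).reshape(2, 2, 3)  # a rank-3 numpy array
t = Tensor(np_data, mindspore.int32)                       # numpy -> Tensor
back = t.asnumpy()                                         # Tensor -> numpy
print(t.shape, back.dtype)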

Task 2: Data Storage and Usage in MindSpore

This task is mainly about understanding how data is given a schema and written out in MindSpore; it is not difficult.

from mindspore.mindrecord import FileWriter
import numpy as np


def main():
    # define the data
    data = [{"label": np.array([0, 1]), "count": 12.3, "title": "长江流域遭遇洪峰袭击"},
            {"label": np.array([6, 7]), "count": 22.4, "title": "全球新冠确诊病例突破1000万"},
            {"label": np.array([0, 8]), "count": 32.1, "title": "全国高考今日结束"}]
    # Requirements:
    # 1. Define a reasonable data schema based on the contents of data
    # 2. Then write the data to the relative path "news.mindrecord" on disk
    ########## Begin ##########
    data_structure = {"label": {"type": "int32", "shape": [-1]},
                      "count": {"type": "float32"},
                      "title": {"type": "string"}}
    
    writer = FileWriter(file_name="news.mindrecord")
    writer.add_schema(data_structure,"test_schema")
    writer.write_raw_data(data)
    writer.commit()

    ########## End ##########
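
To check that the records were actually written, here is a minimal read-back sketch (assuming mindspore.mindrecord.FileReader and its get_next() iterator, the counterpart of the FileWriter used above):

from mindspore.mindrecord import FileReader

reader = FileReader(file_name="news.mindrecord")
for item in reader.get_next():  # each item is a dict keyed by the schema fields
    print(item["title"], item["count"], item["label"])
reader.close()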

Task 3: MindSpore - First Experience

Level 1: Dataset Loading
This level lets you experience how to load datasets with MindSpore.

# import the basic libraries
import mindspore.dataset as ds
import numpy as np

def get():
    # loading a common dataset
    DATA_DIR = "./MindSpore/src/step1/cifar-10-binary/cifar-10-batches-bin"

    # Add your code here to complete this level
    # ********** Begin *********#
    # Hint: initialize the dataset cifar10_dataset from the data path
    cifar10_dataset = ds.Cifar10Dataset(DATA_DIR)

    # ********** End **********#

    for data in cifar10_dataset.create_dict_iterator():
        # In CIFAR-10 dataset, each dictionary of data has keys "image" and "label".
        # the image data is large, so it is not convenient to display it on the platform
        #print(data["image"])
        #print(data["label"])
        break
         

    # loading custom data
    def Generator1D():
        for i in range(64):
            yield (np.array([i]),)  # Notice, tuple of only one element needs following a comma at the end.

    # Add your code here to complete this level
    # ********** Begin *********#
    # Hint: initialize the custom dataset named dataset
    dataset = ds.GeneratorDataset(Generator1D, ["data"])
    # ********** End **********#
    # only show part of the data
    iter = 0
    for data in dataset.create_tuple_iterator():  # each data is a sequence
        print(data)
        iter += 1
        if (iter > 5):
            break     
    # only show part of the data
    iter = 0
    for data in dataset.create_dict_iterator():  # each data is a dictionary
        print(data["data"])
        iter += 1
        if (iter > 5):
            break

Level 2: Data Format Conversion
Experience how to convert a dataset into the MindSpore format.

from mindspore.mindrecord import Cifar10ToMR

def trans():
    CIFAR10_DIR = "./cifar-10-python/cifar-10-batches-py"
    MINDRECORD_FILE = "./cifar10.mindrecord"
    # Add your code here to complete this level
    # ********** Begin *********#
    ## Hint: fill in the dataset format conversion code

    cifar10_transformer = Cifar10ToMR(CIFAR10_DIR,MINDRECORD_FILE)
    cifar10_transformer.transform(['label'])

    # ********** End **********#
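
To confirm the conversion, a minimal sketch of loading the generated file back with MindDataset (the dataset_file parameter name and the "data"/"label" column names are assumptions about this MindSpore version's Cifar10ToMR output):

import mindspore.dataset as ds

data_set = ds.MindDataset(dataset_file="./cifar10.mindrecord")
for item in data_set.create_dict_iterator():
    print(item["label"])  # each converted record is assumed to carry "data" and "label" columns
    break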

Level 3: Data Processing
MindSpore supports many data processing operations, including repeat, batch, shuffle, map, and so on.

import mindspore.dataset as ds
import numpy as np

def data():
    def generator_func():
        for i in range(5):
            yield (np.array([i, i + 1, i + 2]),)

    ds1 = ds.GeneratorDataset(generator_func, ["data"])
    print("ds1:")
    for data in ds1.create_dict_iterator():
        print(data["data"])

    #### call repeat to double the amount of data
    ds2 = ds.GeneratorDataset(generator_func, ["data"])
    # Add your code here to complete this level
    # ********** Begin *********#
    ## Hint: call repeat with a factor of 2
    ds2 = ds2.repeat(2)
    # ********** End **********#
    print("ds2:")
    for data in ds2.create_dict_iterator():
        print(data["data"])

    ### using batch
    # Add your code here to complete this level
    # **********Begin*********#
    ## Hint: create ds2 from ds1 with batch_size=2
    ds2 = ds1.batch(batch_size=2)
    # **********End**********#
    print("batch size:2    drop remainder:False")
    for data in ds2.create_dict_iterator():
        print(data["data"])

    #### shuffle
    print("Before shuffle:")
    for data in ds1.create_dict_iterator():
        print(data["data"])
        
    ds1 = ds.GeneratorDataset(generator_func, ["data"])

    # Add your code here to complete this level
    # ********** Begin *********#
    ## Hint: shuffle ds1 with buffer_size=5
    ds2 = ds1.shuffle(buffer_size=5)
    # ********** End **********#
    print("After shuffle:")
    print("try to print the data after shuffle,but you cannot go through this step") 
    #for data in ds2.create_dict_iterator():
        #print(data["data"])

    ### map
    func = lambda x: x * 2  # Define lambda function to multiply each element by 2.
    ds1 = ds.GeneratorDataset(generator_func, ["data"])
    # Add your code here to complete this level
    # ********** Begin *********#
    ## Hint: apply map to ds1
    ds2 = ds1.map(input_columns="data",operations=func)
    # ********** End **********#
    for data in ds2.create_dict_iterator():
        print(data["data"])

    ### zip
    def generator_func2():
        for i in range(5):
            yield (np.array([i - 3, i - 2, i - 1]),)

    ds1 = ds.GeneratorDataset(generator_func, ["data"])
    ds2 = ds.GeneratorDataset(generator_func2, ["data2"])
    # Add your code here to complete this level
    # ********** Begin *********#
    ## Hint: ds3 is the zip of ds1 and ds2
    ds3 = ds.zip((ds1,ds2))
    # ********** End **********#
    for data in ds3.create_dict_iterator():
        print(data)

Level 4: Data Augmentation

import mindspore.dataset as ds
import matplotlib.pyplot as plt
import mindspore.dataset.transforms.vision.c_transforms as c_transforms
import mindspore.dataset.transforms.vision.py_transforms as py_transforms
from mindspore.dataset.transforms.vision import Inter

def data():
    #### data augmentation with the c_transforms module
    # DATA_DIR (the image folder path) is assumed to be defined by the platform template
    dataset = ds.ImageFolderDatasetV2(DATA_DIR, decode=True)  # decode images
    # Add your code here to complete this level
    # ********** Begin *********#
    ## Hint: use the c_transforms module to resize the images to 500*500 pixels
    resize_op = c_transforms.Resize(size=(500, 500), interpolation=Inter.LINEAR)
    dataset = dataset.map(input_columns="image", operations=resize_op)
    # ********** End **********#

    for data in dataset.create_dict_iterator():
        imgplot_resized = plt.imshow(data["image"])
        plt.show()

    ### data augmentation with the py_transforms module
    dataset = ds.ImageFolderDatasetV2(DATA_DIR)

    transforms_list = [
        py_transforms.Decode(),  # Decode images to PIL format.
        py_transforms.RandomCrop(size=(500, 500)),
        py_transforms.ToTensor()  # Convert PIL images to Numpy ndarray.
    ]
    # Add your code here to complete this level
    # ********** Begin *********#
    ## Hint: use the py_transforms module
    compose = py_transforms.ComposeOp(transforms_list)
    dataset = dataset.map(input_columns="image",operations=compose)
    # ********** End **********#
    for data in dataset.create_dict_iterator():
        print(data["image"])
        imgplot_resized = plt.imshow(data["image"].transpose(1, 2, 0))
        plt.show()

Task 4: Data Preprocessing in MindSpore

This task is a light exercise of MindSpore's data preprocessing features (a usage sketch follows the answer code below):

	repeat: duplicates the dataset to multiply the amount of data;
	batch: splits the data into batches, which helps speed up training;
	shuffle: randomly reorders the data;
	map: applies a given function or operator to the specified columns;
	zip: merges multiple datasets into one.

multi_func = lambda x : x*10

def data_pre_handle(data):
    # Apply the following operations to the input data in order, then return the result
    # 1. map, using the multi_func function defined above
    # 2. batch, 3 records per group, dropping any incomplete group at the end
    # 3. repeat, repeating the data 3 times
    ########## Begin ##########
    ds1 = data.map(operations=multi_func)
    ds2 = ds1.batch(batch_size=3,drop_remainder=True)
    ds3 = ds2.repeat(3)
    return ds3
    ########## end ##########
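
A minimal usage sketch for data_pre_handle (the generator source and the "data" column name below are illustrative assumptions, not part of the exercise):

import numpy as np
import mindspore.dataset as ds

def gen():
    for i in range(7):
        yield (np.array([i]),)

raw = ds.GeneratorDataset(gen, ["data"])  # hypothetical input dataset
out = data_pre_handle(raw)                # map -> batch(3, drop remainder) -> repeat(3)
for item in out.create_dict_iterator():
    print(item["data"])                   # 7 records give 2 full batches, repeated 3 times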

Task 5: MindSpore - Model Development

Level 1: Defining the Neural Network
This level covers initializing the network parameters and defining the network structure.

import mindspore.nn as nn
from mindspore.common.initializer import TruncatedNormal

def weight_variable():
    """
    weight initial
    """
    return TruncatedNormal(0.02)

def conv(in_channels, out_channels, kernel_size, stride=1, padding=0):
    """
    conv layer weight initial
    """
    weight = weight_variable()
    return nn.Conv2d(in_channels, out_channels,
                     kernel_size=kernel_size, stride=stride, padding=padding,
                     weight_init=weight, has_bias=False, pad_mode="valid")

def fc_with_initialize(input_channels, out_channels):
    """
    fc layer weight initial
    """
    # Add your code here to complete this level
    #********** Begin *********#
    ## Hint: fill in the initialization code
    weight = weight_variable()
    bias = weight_variable()
    return nn.Dense(input_channels, out_channels, weight, bias)
    #********** End **********#

class LeNet5(nn.Cell):
    """
    Lenet network structure
    """
    # define the operator required
    def __init__(self):
        super(LeNet5, self).__init__()
        self.conv1 = conv(1, 6, 5)
        self.conv2 = conv(6, 16, 5)
        self.fc1 = fc_with_initialize(16 * 5 * 5, 120)
        self.fc2 = fc_with_initialize(120, 84)
        self.fc3 = fc_with_initialize(84, 10)
        self.relu = nn.ReLU()
        self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
        self.flatten = nn.Flatten()

    #use the preceding operators to construct networks
    def construct(self, x):
        # Add your code here to complete this level
        #********** Begin *********#
        ## Hint: define the network structure following the tutorial
        x = self.conv1(x)  # convolution
        x = self.relu(x)   # activation
        x = self.max_pool2d(x)  # pooling

        x = self.conv2(x)  # convolution
        x = self.relu(x)   # activation
        x = self.max_pool2d(x)  # pooling

        x = self.flatten(x)  # flatten, connecting the conv layers to the fully connected layers

        x = self.fc1(x)    # fully connected
        x = self.relu(x)   # activation
        x = self.fc2(x)    # fully connected
        x = self.relu(x)   # activation
        x = self.fc3(x)    # fully connected
        #********** End **********#
        return x
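
As a quick sanity check of the structure above, a minimal forward-pass sketch (the dummy 1x1x32x32 input is an assumption chosen to match the 16 * 5 * 5 flatten size expected by fc1):

import numpy as np
from mindspore import Tensor

net = LeNet5()
dummy = Tensor(np.zeros((1, 1, 32, 32), dtype=np.float32))  # one 32x32 single-channel image
out = net(dummy)
print(out.shape)  # expected (1, 10): one logit per class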

Level 2: Defining the Loss Function and the Optimizer
In other words, define the cost (loss) function and the optimizer that updates the parameters. The snippet below is an excerpt of the script's main block; the imports and helper functions it relies on (download_dataset, LeNet5, train_net, test_net, and so on) appear in the full listings of Levels 3 and 4.

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='MindSpore LeNet Example')
    parser.add_argument('--device_target', type=str, default="CPU", choices=['Ascend', 'GPU', 'CPU'],
                        help='device where the code will be implemented (default: CPU)')
    args = parser.parse_args()
    context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
    # download mnist dataset
    download_dataset()
    # learning rate setting
    lr = 0.01
    momentum = 0.9
    epoch_size = 1
    mnist_path = "./MNIST_Data"
    # Add your code here to complete this level
    #********** Begin *********#
    ## Hint: fill in the loss function definition
    net_loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction='mean')
    #********** End **********#
    repeat_size = epoch_size
    # create the network
    network = LeNet5()
    # define the optimizer
    # Add your code here to complete this level
    #********** Begin *********#
    ## Hint: fill in the optimizer definition, using the Momentum optimizer
    net_opt = nn.Momentum(network.trainable_params(), lr, momentum)
    #********** End **********#
    config_ck = CheckpointConfig(save_checkpoint_steps=1875, keep_checkpoint_max=10)
    
    # save the network model and parameters for subsequence fine-tuning
    ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet", config=config_ck)
    # group layers into an object with training and evaluation features
    model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})

    train_net(args, model, epoch_size, mnist_path, repeat_size, ckpoint_cb)
    test_net(args, network, model, mnist_path)

Level 3: Training the Network
1. How to configure model saving; 2. How to configure the training network.

import os
import urllib.request
from urllib.parse import urlparse
import gzip
import argparse
import mindspore.dataset as ds
import mindspore.nn as nn
from mindspore import context
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor
from mindspore.train import Model
from mindspore.common.initializer import TruncatedNormal
import mindspore.dataset.transforms.vision.c_transforms as CV
import mindspore.dataset.transforms.c_transforms as C
from mindspore.dataset.transforms.vision import Inter
from mindspore.nn.metrics import Accuracy
from mindspore.common import dtype as mstype
from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits


def unzipfile(gzip_path):
    """unzip dataset file
    Args:
        gzip_path: dataset file path
    """
    open_file = open(gzip_path.replace('.gz',''), 'wb')
    gz_file = gzip.GzipFile(gzip_path)
    open_file.write(gz_file.read())
    gz_file.close()


def download_dataset():
    """Download the dataset from http://yann.lecun.com/exdb/mnist/."""
    print("******Downloading the MNIST dataset******")
    train_path = "./MNIST_Data/train/"
    test_path = "./MNIST_Data/test/"
    train_path_check = os.path.exists(train_path)
    test_path_check = os.path.exists(test_path)
    if train_path_check == False and test_path_check ==False:
        os.makedirs(train_path)
        os.makedirs(test_path)
    train_url = {"http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz", "http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz"}
    test_url = {"http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz", "http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz"}
    for url in train_url:
        url_parse = urlparse(url)
        # split the file name from url
        file_name = os.path.join(train_path,url_parse.path.split('/')[-1])
        if not os.path.exists(file_name.replace('.gz','')):
            file = urllib.request.urlretrieve(url, file_name)
            unzipfile(file_name)
            os.remove(file_name)
    for url in test_url:
        url_parse = urlparse(url)
        # split the file name from url
        file_name = os.path.join(test_path,url_parse.path.split('/')[-1])
        if not os.path.exists(file_name.replace('.gz','')):
            file = urllib.request.urlretrieve(url, file_name)
            unzipfile(file_name)
            os.remove(file_name)


def create_dataset(data_path, batch_size=32, repeat_size=1,
                   num_parallel_workers=1):
    """ create dataset for train or test
    Args:
        data_path: Data path
        batch_size: The number of data records in each group
        repeat_size: The number of replicated data records
        num_parallel_workers: The number of parallel workers
    """
    # define dataset
    mnist_ds = ds.MnistDataset(data_path)

    # define operation parameters
    resize_height, resize_width = 32, 32
    rescale = 1.0 / 255.0
    shift = 0.0
    rescale_nml = 1 / 0.3081
    shift_nml = -1 * 0.1307 / 0.3081

    # define map operations
    resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR)  # Resize images to (32, 32)
    rescale_nml_op = CV.Rescale(rescale_nml, shift_nml) # normalize images
    rescale_op = CV.Rescale(rescale, shift) # rescale images
    hwc2chw_op = CV.HWC2CHW() # change shape from (height, width, channel) to (channel, height, width) to fit network.
    type_cast_op = C.TypeCast(mstype.int32) # change data type of label to int32 to fit network

    # apply map operations on images
    mnist_ds = mnist_ds.map(input_columns="label", operations=type_cast_op, num_parallel_workers=num_parallel_workers)
    mnist_ds = mnist_ds.map(input_columns="image", operations=resize_op, num_parallel_workers=num_parallel_workers)
    mnist_ds = mnist_ds.map(input_columns="image", operations=rescale_op, num_parallel_workers=num_parallel_workers)
    mnist_ds = mnist_ds.map(input_columns="image", operations=rescale_nml_op, num_parallel_workers=num_parallel_workers)
    mnist_ds = mnist_ds.map(input_columns="image", operations=hwc2chw_op, num_parallel_workers=num_parallel_workers)

    # apply DatasetOps
    buffer_size = 10000
    mnist_ds = mnist_ds.shuffle(buffer_size=buffer_size)  # 10000 as in LeNet train script
    mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)
    mnist_ds = mnist_ds.repeat(repeat_size)

    return mnist_ds


def conv(in_channels, out_channels, kernel_size, stride=1, padding=0):
    """Conv layer weight initial."""
    weight = weight_variable()
    return nn.Conv2d(in_channels, out_channels,
                     kernel_size=kernel_size, stride=stride, padding=padding,
                     weight_init=weight, has_bias=False, pad_mode="valid")


def fc_with_initialize(input_channels, out_channels):
    """Fc layer weight initial."""
    weight = weight_variable()
    bias = weight_variable()
    return nn.Dense(input_channels, out_channels, weight, bias)


def weight_variable():
    """Weight initial."""
    return TruncatedNormal(0.02)


class LeNet5(nn.Cell):
    """Lenet network structure."""
    # define the operator required
    def __init__(self):
        super(LeNet5, self).__init__()
        self.conv1 = conv(1, 6, 5)
        self.conv2 = conv(6, 16, 5)
        self.fc1 = fc_with_initialize(16 * 5 * 5, 120)
        self.fc2 = fc_with_initialize(120, 84)
        self.fc3 = fc_with_initialize(84, 10)
        self.relu = nn.ReLU()
        self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
        self.flatten = nn.Flatten()

    # use the preceding operators to construct networks
    def construct(self, x):
        x = self.conv1(x)
        x = self.relu(x)
        x = self.max_pool2d(x)
        x = self.conv2(x)
        x = self.relu(x)
        x = self.max_pool2d(x)
        x = self.flatten(x)
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        x = self.relu(x)
        x = self.fc3(x)
        return x


def train_net(args, model, epoch_size, mnist_path, repeat_size, ckpoint_cb):
    """Define the training method."""
    print("============== Starting Training ==============")
    # load training dataset
    # Add your code here to complete this level
    #********** Begin *********#
    ## Hint: complete the training configuration
    ds_train = create_dataset(os.path.join(mnist_path,"train"),32,repeat_size)
    model.train(epoch_size, ds_train, callbacks=[ckpoint_cb, LossMonitor()], dataset_sink_mode=False)
    #********** End **********#

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='MindSpore LeNet Example')
    parser.add_argument('--device_target', type=str, default="CPU", choices=['Ascend', 'GPU', 'CPU'],
                        help='device where the code will be implemented (default: CPU)')
    args = parser.parse_args()
    context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
    # download mnist dataset
    download_dataset()
    # learning rate setting
    lr = 0.01
    momentum = 0.9
    epoch_size = 1
    mnist_path = "./MNIST_Data"
    # define the loss function
    net_loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction='mean')
    repeat_size = epoch_size
    # create the network
    network = LeNet5()
    # define the optimizer
    net_opt = nn.Momentum(network.trainable_params(), lr, momentum)
    # Add your code here to complete this level
    #********** Begin *********#
    ## Hint: configure model saving
    config_ck = CheckpointConfig(save_checkpoint_steps=1875, keep_checkpoint_max=10) 
    ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet", config=config_ck) 
    #********** End **********#
    # group layers into an object with training and evaluation features
    model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})

    train_net(args, model, epoch_size, mnist_path, repeat_size, ckpoint_cb)

Level 4: Model Validation
After the model file has been produced, run the test dataset through the model and use the results to validate its generalization ability.

"""Lenet Tutorial
The sample can be run on CPU, GPU and Ascend 910 AI processor.
"""
import os
import urllib.request
from urllib.parse import urlparse
import gzip
import argparse
import mindspore.dataset as ds
import mindspore.nn as nn
from mindspore import context
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor
from mindspore.train import Model
from mindspore.common.initializer import TruncatedNormal
import mindspore.dataset.transforms.vision.c_transforms as CV
import mindspore.dataset.transforms.c_transforms as C
from mindspore.dataset.transforms.vision import Inter
from mindspore.nn.metrics import Accuracy
from mindspore.common import dtype as mstype
from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits


def unzipfile(gzip_path):
    """unzip dataset file
    Args:
        gzip_path: dataset file path
    """
    open_file = open(gzip_path.replace('.gz',''), 'wb')
    gz_file = gzip.GzipFile(gzip_path)
    open_file.write(gz_file.read())
    gz_file.close()


def download_dataset():
    """Download the dataset from http://yann.lecun.com/exdb/mnist/."""
    print("******Downloading the MNIST dataset******")
    train_path = "./MNIST_Data/train/"
    test_path = "./MNIST_Data/test/"
    train_path_check = os.path.exists(train_path)
    test_path_check = os.path.exists(test_path)
    if train_path_check == False and test_path_check ==False:
        os.makedirs(train_path)
        os.makedirs(test_path)
    train_url = {"http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz", "http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz"}
    test_url = {"http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz", "http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz"}
    for url in train_url:
        url_parse = urlparse(url)
        # split the file name from url
        file_name = os.path.join(train_path,url_parse.path.split('/')[-1])
        if not os.path.exists(file_name.replace('.gz','')):
            file = urllib.request.urlretrieve(url, file_name)
            unzipfile(file_name)
            os.remove(file_name)
    for url in test_url:
        url_parse = urlparse(url)
        # split the file name from url
        file_name = os.path.join(test_path,url_parse.path.split('/')[-1])
        if not os.path.exists(file_name.replace('.gz','')):
            file = urllib.request.urlretrieve(url, file_name)
            unzipfile(file_name)
            os.remove(file_name)


def create_dataset(data_path, batch_size=32, repeat_size=1,
                   num_parallel_workers=1):
    """ create dataset for train or test
    Args:
        data_path: Data path
        batch_size: The number of data records in each group
        repeat_size: The number of replicated data records
        num_parallel_workers: The number of parallel workers
    """
    # define dataset
    mnist_ds = ds.MnistDataset(data_path)

    # define operation parameters
    resize_height, resize_width = 32, 32
    rescale = 1.0 / 255.0
    shift = 0.0
    rescale_nml = 1 / 0.3081
    shift_nml = -1 * 0.1307 / 0.3081

    # define map operations
    resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR)  # Resize images to (32, 32)
    rescale_nml_op = CV.Rescale(rescale_nml, shift_nml) # normalize images
    rescale_op = CV.Rescale(rescale, shift) # rescale images
    hwc2chw_op = CV.HWC2CHW() # change shape from (height, width, channel) to (channel, height, width) to fit network.
    type_cast_op = C.TypeCast(mstype.int32) # change data type of label to int32 to fit network

    # apply map operations on images
    mnist_ds = mnist_ds.map(input_columns="label", operations=type_cast_op, num_parallel_workers=num_parallel_workers)
    mnist_ds = mnist_ds.map(input_columns="image", operations=resize_op, num_parallel_workers=num_parallel_workers)
    mnist_ds = mnist_ds.map(input_columns="image", operations=rescale_op, num_parallel_workers=num_parallel_workers)
    mnist_ds = mnist_ds.map(input_columns="image", operations=rescale_nml_op, num_parallel_workers=num_parallel_workers)
    mnist_ds = mnist_ds.map(input_columns="image", operations=hwc2chw_op, num_parallel_workers=num_parallel_workers)

    # apply DatasetOps
    buffer_size = 10000
    mnist_ds = mnist_ds.shuffle(buffer_size=buffer_size)  # 10000 as in LeNet train script
    mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)
    mnist_ds = mnist_ds.repeat(repeat_size)

    return mnist_ds


def conv(in_channels, out_channels, kernel_size, stride=1, padding=0):
    """Conv layer weight initial."""
    weight = weight_variable()
    return nn.Conv2d(in_channels, out_channels,
                     kernel_size=kernel_size, stride=stride, padding=padding,
                     weight_init=weight, has_bias=False, pad_mode="valid")


def fc_with_initialize(input_channels, out_channels):
    """Fc layer weight initial."""
    weight = weight_variable()
    bias = weight_variable()
    return nn.Dense(input_channels, out_channels, weight, bias)


def weight_variable():
    """Weight initial."""
    return TruncatedNormal(0.02)


class LeNet5(nn.Cell):
    """Lenet network structure."""
    # define the operator required
    def __init__(self):
        super(LeNet5, self).__init__()
        self.conv1 = conv(1, 6, 5)
        self.conv2 = conv(6, 16, 5)
        self.fc1 = fc_with_initialize(16 * 5 * 5, 120)
        self.fc2 = fc_with_initialize(120, 84)
        self.fc3 = fc_with_initialize(84, 10)
        self.relu = nn.ReLU()
        self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
        self.flatten = nn.Flatten()

    # use the preceding operators to construct networks
    def construct(self, x):
        x = self.conv1(x)
        x = self.relu(x)
        x = self.max_pool2d(x)
        x = self.conv2(x)
        x = self.relu(x)
        x = self.max_pool2d(x)
        x = self.flatten(x)
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        x = self.relu(x)
        x = self.fc3(x)
        return x


def train_net(args, model, epoch_size, mnist_path, repeat_size, ckpoint_cb):
    """Define the training method."""
    print("============== Starting Training ==============")
    # load training dataset
    ds_train = create_dataset(os.path.join(mnist_path, "train"), 32, repeat_size)
    model.train(epoch_size, ds_train, callbacks=[ckpoint_cb, LossMonitor()], dataset_sink_mode=False)


def test_net(args, network, model, mnist_path):
    """Define the evaluation method."""
    print("============== Starting Testing ==============")
    # load the saved model for evaluation
    # Add your code here to complete this level
    #********** Begin *********#
    ## Hint: fill in the evaluation code
    param_dict = load_checkpoint("checkpoint_lenet-1_1875.ckpt")
    load_param_into_net(network,param_dict)
    ds_eval = create_dataset(os.path.join(mnist_path,"test"))
    acc = model.eval(ds_eval,dataset_sink_mode=False)
    #********** End **********#
    print("============== Accuracy:{} ==============".format(acc))


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='MindSpore LeNet Example')
    parser.add_argument('--device_target', type=str, default="CPU", choices=['Ascend', 'GPU', 'CPU'],
                        help='device where the code will be implemented (default: CPU)')
    args = parser.parse_args()
    context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
    # download mnist dataset
    download_dataset()
    # learning rate setting
    lr = 0.01
    momentum = 0.9
    epoch_size = 1
    mnist_path = "./MNIST_Data"
    # define the loss function
    net_loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction='mean')
    repeat_size = epoch_size
    # create the network
    network = LeNet5()
    # define the optimizer
    net_opt = nn.Momentum(network.trainable_params(), lr, momentum)
    config_ck = CheckpointConfig(save_checkpoint_steps=1875, keep_checkpoint_max=10)
    # save the network model and parameters for subsequence fine-tuning
    ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet", config=config_ck)
    # group layers into an object with training and evaluation features
    model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})

    train_net(args, model, epoch_size, mnist_path, repeat_size, ckpoint_cb)
    test_net(args, network, model, mnist_path)
