Building VGG and ResNet in Keras to recognize MNIST and CIFAR10: example programs

The two ways to build models in Keras

The first is the sequential model: load the Sequential class from keras.models and stack layers one by one with add(). Example below:

"""
只是样例,跑不通的
"""
import keras
from keras.layers import Dense,Conv2D
from keras.models import Sequential
import keras.optimizers as optimizers
import keras.losses as losses

model = Sequential()
model.add(Conv2D(32,(3,3),activation='relu',input_dim =100))
model.add(Dense(units =64,activation ='relu'))
model.add(Dense(units = 32,activation ='softmax'))
SGD = optimizers.SGD(lr =0.01,momentum=0.9,nesterov =True)
model.compile(loss =losses.categorical_crossentropy,optimizer = SGD,metrics = {'accuracy'})
model.fit(x_train,y_train,epoch = 5,batch_size=16)
loss_and_metrics = model.evaluate(x_test,y_test,batch_size = 16)
classes = model.predict(x,batch_size=1)

The nested (functional) style built on Model is also very popular. Its hallmark is that every layer call ends with (x), where x is the feature map output by the previous layer:

import keras
from keras.layers import Input, Conv2D, MaxPooling2D, Dense, Flatten, Dropout
import keras.optimizers as optimizers
from keras.models import Model

inputs = Input(shape=(100, 100, 3))
x = Conv2D(32, (3, 3), activation='relu')(inputs)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Conv2D(32, (3, 3), activation='relu')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)

x = Flatten()(x)
x = Dropout(0.5)(x)
x = Dense(256, activation='relu')(x)
out = Dense(10, activation='softmax')(x)

model = Model(inputs=[inputs], outputs=[out])
sgd = optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5, batch_size=16)

loss_and_metrics = model.evaluate(x_test, y_test, batch_size=16)
classes = model.predict(x, batch_size=1)

Practice: building neural networks in Keras to recognize three datasets

GitHub repo

Environment

  1. Keras 2.0
  2. TensorFlow-GPU 1.6 as the backend
  3. Python 3.5
  4. Ubuntu 16.04

搭建网络

  1. MLP
  2. A shallow CNN thrown together by hand
  3. VGG
  4. ResNet

Datasets

  1. mnist (downloaded automatically via keras.datasets; outside the lab network you may need a proxy to reach the download server)
  2. cifar10 (same as above)
  3. asl, an American Sign Language dataset; the task is to recognize the five classes A-E. 15,000 images in total: 13,600 training, 1,280 validation, and 120 test (expected folder layout sketched below)
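
For reference, the loaders below expect each split to be a flat folder of images whose names end in the class letter, which is the naming produced by shuffle() in dataloader.py. A sketch of the layout (the individual file names here are made up):

./train
    0_A.jpg
    1_C.jpg
    2_E.jpg
    ...
./valid
    0_B.jpg
    ...
./test/asltest
    ...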

The five scripts

The important details are all explained in the code comments, so I won't repeat them here; read through the code and it will become clear.

Data loading script: dataloader.py

import cv2
import numpy as np
import random
import keras
import os

class Dataloader():
    def __init__(self,path,n_classes = None):
        self.path = path
        self.files = os.listdir(self.path)
        self.n_classes = n_classes


    def load_data(self,name):
        """
        负责将数据一张一张读入并根据文件名(例23_D)生成标签,23只是数据的序号,根据
        最后的字母D打标签
        :param name:数据的路径
        :return:图像数据和标签
        """
        im = cv2.imread(name)
        im = cv2.resize(im,(224,224))
        label = name.split('_')[-1][0]
        # map the trailing class letter A-E to the index 0-4
        label = 'ABCDE'.index(label)
        im = np.array(im).astype('float')/255
        label = keras.utils.to_categorical(label,self.n_classes)
        return im,label


    def load_predict_data(self,name,isgray= False,input_size=(28,28)):
        """
        测试模型的时候使用,一次测一张
        :param name: 文件名和路径
        :param isgray:如果像mnist一类,输入数据是灰度图赋值true
        :param input_size:传入图像的长宽
        :return: 格式如(1,28,28,1)的数据
        """
        im = cv2.imread(name)
        im = cv2.resize(im,input_size)
        if isgray:
            im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
            im = np.expand_dims(im, axis=2)

        cv2.imshow('im_show', im)
        cv2.waitKey(1000)
        im = np.expand_dims(im, axis=0)
        im = np.array(im).astype('float') / 255
        return im


    def traverse_each_folder(self):
        # Walks every class sub-folder under self.path, reading each image
        # (only the last one read survives) and counting the folders;
        # returns that last image and the folder count.
        folders = sorted(os.listdir(self.path))
        sign = 0
        for folder in folders:
            files = os.listdir(self.path+'/'+folder)
            for i in range(len(files)):
                im = cv2.imread(self.path+'/'+folder+'/'+files[i])
                im = cv2.resize(im,(224,224))
            sign+=1
        return np.array(im),sign


    def shuffle(self,path):
        """
        Shuffles a folder in place: every file is renamed to
        <index>_<letter>.jpg, where the letter is the file's original
        leading class letter (A-E) and the index follows the shuffled order.
        """
        files = os.listdir(path)
        random.shuffle(files)
        cnt = 0
        for file in files:
            if file[0] in 'ABCDE':
                os.rename(path+'/'+file, path+'/'+str(cnt)+'_'+file[0]+'.jpg')
            cnt += 1

class DataGenerator(Dataloader):
    """
    Generator class inheriting from Dataloader. When there is too much
    training data to read into memory in one go, Keras can instead be fed
    from a Python generator that yields one batch at a time. Only
    train_generator is a generator here, because the training split is by
    far the largest; valid_generator just returns a (data, labels) tuple.
    """
    def __init__(self,path,n_classes):
        Dataloader.__init__(self,path,n_classes)


    def train_generator(self,batch_size):
        X = []
        Y = []
        cnt = 0
        while 1:
            for file in self.files:
                data,label = self.load_data(os.path.join(self.path,file))
                X.append(data)
                Y.append(label)
                cnt+=1
                if cnt==batch_size:
                    cnt=0
                    yield (np.array(X),np.squeeze(np.array(Y)))
                    X = []
                    Y = []


    def valid_generator(self):
        X = []
        Y = []
        for file in self.files:
            data, label = self.load_data(os.path.join(self.path, file))
            X.append(data)
            Y.append(label)
        X = np.array(X)
        Y = np.squeeze(np.array(Y))
        return (X,Y)
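
A quick way to sanity-check the generator before training is to pull a single batch by hand. This is a sketch, not part of the original script; it assumes a ./train folder whose images are named the way shuffle() produces them (e.g. 0_A.jpg):

if __name__ == '__main__':
    # Pull one batch and inspect the shapes; ./train and the five-class
    # setup are assumptions that mirror asl.py below.
    gen = DataGenerator('./train', n_classes=5)
    x_batch, y_batch = next(gen.train_generator(batch_size=4))
    print(x_batch.shape, y_batch.shape)  # expected: (4, 224, 224, 3) (4, 5)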



Model script: myNet.py

from keras.models import Sequential,Model
from keras.layers import Input,Flatten,Dropout,Add
from keras.layers import Conv2D,BatchNormalization,Activation,MaxPool2D,MaxPooling2D,Dense
from keras.layers import ZeroPadding2D,AveragePooling2D
from keras import regularizers
from keras.optimizers import SGD

class Nets():
    def __init__(self,n_classes,shape = None,fc_size = None):
        self.n_classes = n_classes
        self.shape = shape
        self.fc_size = fc_size

    def MLP(self):
        model = Sequential()
        model.add(Dense(self.fc_size,input_dim = self.fc_size,activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(self.n_classes,activation='softmax'))

        model.compile(loss = 'categorical_crossentropy',optimizer='Adam',metrics=['accuracy'])
        return model

    def CNN(self):
        """
        随意搭建的普通CNN,mnist直接使用时过拟合现象严重,加入了Dropout和l2正则缓解。
        另外使用SGD时经常出现更新停滞,可能是陷入了局部极小值,Adam比较稳定,每次都能更新
        :return: model
        """
        model = Sequential()
        model.add(Conv2D(16,(3,3),activation='relu',input_shape = self.shape))
        model.add(Conv2D(32,(3,3),activation='relu'))
        model.add(MaxPooling2D(pool_size=(2,2)))

        model.add(Conv2D(64,(3,3),activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Conv2D(128,(3,3),kernel_regularizer=regularizers.l2(0.1),activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Flatten())
        model.add(Dense(256,activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(self.n_classes,activation='softmax'))

        # sgd is kept for reference only; as noted in the docstring, Adam
        # proved more stable in practice and is what is actually used below.
        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
        return model


    def VGG(self):
        """
        由于使用自己的笔记本显卡1050-4G显存实验,所以只能跑得动VGG11,层数再多就OOM了。实验中由于网络过深出现了梯度
        消失,导致损失不在下降,所以每层后面加入了BN操作后解决。
        :return: VGG11模型
        """
        model = Sequential()
        model.add(Conv2D(64, (3, 3),input_shape=self.shape))
        model.add(BatchNormalization(axis=3))
        model.add(Activation('relu'))
        model.add(MaxPool2D(pool_size=(2, 2),strides=(2,2)))

        model.add(Conv2D(128, (3, 3)))
        model.add(BatchNormalization(axis=3))
        model.add(Activation('relu'))
        model.add(MaxPool2D(pool_size=(2, 2),strides=(2,2)))

        model.add(Conv2D(256, (3, 3)))
        model.add(BatchNormalization(axis=3))
        model.add(Activation('relu'))
        model.add(Conv2D(256, (3, 3)))
        model.add(BatchNormalization(axis=3))
        model.add(Activation('relu'))
        model.add(MaxPool2D(pool_size=(2, 2),strides=(2,2)))

        model.add(Conv2D(512, (3, 3)))
        model.add(BatchNormalization(axis=3))
        model.add(Activation('relu'))
        model.add(Conv2D(512, (3, 3)))
        model.add(BatchNormalization(axis=3))
        model.add(Activation('relu'))
        model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))

        model.add(Conv2D(512, (3, 3)))
        model.add(BatchNormalization(axis=3))
        model.add(Activation('relu'))
        model.add(Conv2D(512, (3, 3)))
        model.add(BatchNormalization(axis=3))
        model.add(Activation('relu'))
        model.add(MaxPool2D(pool_size=(2, 2),strides=(2,2)))

        model.add(Flatten())
        model.add(Dense(1024,activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(256, activation='relu'))
        model.add(Dense(self.n_classes, activation='softmax'))

        model.compile(loss = 'categorical_crossentropy',optimizer='Adam',metrics=['accuracy'])
        return model


    def identity_block(self,x,filters):
        """
        Bottleneck residual block with an identity shortcut; the input and
        output shapes must already match.
        """
        shortcut = x
        f1,f2,f3 = filters

        x = Conv2D(f1,(1,1),padding='valid')(x)
        x = BatchNormalization(axis=3)(x)
        x = Activation('relu')(x)

        x = Conv2D(f2,(3,3),padding='same')(x)
        x = BatchNormalization(axis = 3)(x)
        x = Activation('relu')(x)

        x = Conv2D(f3,(1,1),padding='valid')(x)
        x = BatchNormalization(axis=3)(x)
        x = Add()([x,shortcut])
        x = Activation('relu')(x)

        return x


    def convolutional_block(self,x,filters,stride):
        """
        Bottleneck residual block that can change the spatial size and
        channel count; the shortcut is projected through a strided 1x1
        conv so that it matches the main path before the Add.
        """
        shortcut = x
        f1,f2,f3 = filters

        x = Conv2D(f1,(1,1),padding='valid',strides=stride)(x)
        x = BatchNormalization(axis = 3)(x)
        x = Activation('relu')(x)

        x = Conv2D(f2, (3,3), padding='same',strides =1)(x)
        x = BatchNormalization(axis=3)(x)
        x = Activation('relu')(x)

        x = Conv2D(f3, (1, 1), padding='valid',strides = 1)(x)
        x = BatchNormalization(axis=3)(x)
        shortcut = Conv2D(f3,(1,1),padding = 'valid',strides = stride)(shortcut)
        shortcut = BatchNormalization(axis=3)(shortcut)

        x = Add()([x,shortcut])
        x = Activation('relu')(x)

        return x


    def basic_block(self,x,filters,stride,name):
        """
        Two 3x3 convs plus a shortcut (the ResNet18/34-style block); when
        the block changes shape, the shortcut is projected by a 1x1 conv.
        """
        shortcut = x

        x = Conv2D(filters,(3,3),strides=stride,padding='same')(x)
        x = BatchNormalization(axis=3)(x)
        x = Activation('relu')(x)

        x = Conv2D(filters, (3, 3), strides=1,padding='same' )(x)
        x = BatchNormalization(axis=3)(x)

        if x.shape != shortcut.shape:
            shortcut = Conv2D(filters,(1,1),strides = stride,name=name)(shortcut)
            shortcut = BatchNormalization(axis=3)(shortcut)

        x = Add()([x,shortcut])
        x = Activation('relu')(x)

        return x


    def ResNet18(self):
        """
        还是太深的话,笔记本带不动,ResNet18还可以,该模型比较稳定,参数量小,没有梯度消失现象
        :return:ResNet18模型
        """
        input = Input(self.shape)

        x = ZeroPadding2D((3,3))(input)
        x = Conv2D(64,(7,7),strides=2)(x)
        x = BatchNormalization(axis = 3)(x)
        x = Activation('relu')(x)
        x = MaxPool2D(pool_size=(3,3),strides = (2,2),padding='same')(x)

        x = self.basic_block(x,64,1,name='shortcut1')
        x = self.basic_block(x,64,1,name='shortcut2')

        x = self.basic_block(x, 128, 2,name='shortcut3')
        x = self.basic_block(x, 128, 1,name='shortcut4')

        x = self.basic_block(x, 256, 2,name='shortcut5')
        x = self.basic_block(x, 256, 1,name='shortcut6')

        x = self.basic_block(x, 512, 2,name='shortcut7')
        x = self.basic_block(x, 512, 1,name='shortcut8')

        size = int(x.shape[1])
        x = AveragePooling2D(pool_size=(size,size))(x)

        x = Flatten()(x)
        x = Dense(self.n_classes,activation='softmax')(x)

        model = Model(inputs = input,outputs=x)
        model.compile(loss = 'categorical_crossentropy',optimizer='Adam',metrics=['accuracy'])

        return model

    def ResNet50(self):
        """
        注释了好多后笔记本才带的动
        :return: ResNet50模型
        """
        input = Input(self.shape)

        x = ZeroPadding2D((3,3))(input)
        x = Conv2D(64,(7,7),strides=(2,2))(x)
        x = BatchNormalization(axis = 3)(x)
        x = Activation('relu')(x)
        x = MaxPool2D(pool_size =(3,3),strides=(2,2),padding='same')(x)

        x = self.convolutional_block(x,[64,64,256],stride=1)
        x = self.identity_block(x,[64,64,256])
        # x = self.identity_block(x, [64, 64, 256])

        # NOTE: the canonical ResNet50 downsamples here with stride=2;
        # stride=1 keeps the feature map larger in this reduced version.
        x = self.convolutional_block(x,[128,128,512],stride=1)
        x = self.identity_block(x,[128,128,512])
        # x = self.identity_block(x, [128, 128, 512])
        # x = self.identity_block(x, [128, 128, 512])
        #
        x = self.convolutional_block(x,[256,256,1024],stride=2)
        x = self.identity_block(x,[256,256,1024])
        # x = self.identity_block(x, [256, 256, 1024])
        # x = self.identity_block(x, [256, 256, 1024])
        # x = self.identity_block(x, [256, 256, 1024])

        x = self.convolutional_block(x,[512,512,2048],stride=2 )
        x = self.identity_block(x,[512,512,2048])
        # x = self.identity_block(x, [512, 512, 2048])

        size = int(x.shape[1])
        x = AveragePooling2D(pool_size=(size, size))(x)

        x = Flatten()(x)
        x = Dense(self.n_classes,activation='softmax')(x)

        model = Model(inputs = input,outputs= x)
        model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])

        return model
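
As a quick smoke test for this script, you can build the models and probe a block directly. This is a sketch, not part of the original flow: the five classes and 224x224x3 input mirror the ASL experiment below, and the 56x56x64 block input is an arbitrary assumption.

if __name__ == '__main__':
    nets = Nets(n_classes=5, shape=(224, 224, 3))

    # Compare model sizes; the laptop-memory remarks in the docstrings
    # roughly track these parameter counts.
    for build in (nets.VGG, nets.ResNet18, nets.ResNet50):
        print(build.__name__, build().count_params())

    # Check that basic_block projects the shortcut when it downsamples.
    inp = Input((56, 56, 64))
    out = nets.basic_block(inp, 128, stride=2, name='proj_demo')
    print(Model(inp, out).output_shape)  # expected: (None, 28, 28, 128)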

MNIST experiment: mnist.py

import keras
import cv2
from keras.datasets import mnist
from myNet import Nets
from dataloader import Dataloader

learning_rate = 0.001
training_iters = 20
batch_size = 128
display_step = 10

H, W, C = 28, 28, 1
input_size = H*W
shape = (H,W,C)
n_classes = 10

nets = Nets(n_classes,shape)
model = nets.CNN()
datasets = Dataloader('./test/mnisttest')

def train():
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # cv2.imshow('im',x_train[0])
    # cv2.waitKey(1000)
    x_train = x_train.reshape(-1, H, W, 1).astype('float') / 255
    x_test = x_test.reshape(-1, H, W, 1).astype('float') / 255
    y_train = keras.utils.to_categorical(y_train, n_classes)
    y_test = keras.utils.to_categorical(y_test, n_classes)

    model.summary()
    model.fit(x_train,y_train,validation_data=(x_test,y_test),epochs=training_iters,batch_size = batch_size)
    # model.save('./model/mnistCNN.h5')
    scores = model.evaluate(x_test,y_test,batch_size=batch_size)
    print(scores)


def test():
    model.load_weights('./model/mnistCNN.h5')
    import os
    import numpy as np
    files = os.listdir(datasets.path)
    for file in files:
        x = datasets.load_predict_data(os.path.join(datasets.path,file),isgray=True)
        y_pred = model.predict(x)
        print(np.argmax(y_pred))


if __name__=='__main__':
    train()
    # test()

CIFAR10 experiment: cifar10.py

import keras
from keras.datasets import cifar10
from myNet import Nets

H,W,C = 32,32,3
n_classes = 10
shape = (H,W,C)
input_size = H*W*C
batch_size = 128
epoch=30

(x_train,y_train),(x_test,y_test) = cifar10.load_data()
x_train = x_train.reshape(-1,H,W,C).astype('float')/255
x_test = x_test.reshape(-1,H,W,C).astype('float')/255
y_train = keras.utils.to_categorical(y_train,n_classes)
y_test = keras.utils.to_categorical(y_test,n_classes)

nets = Nets(n_classes,shape)

model = nets.ResNet18()
model.summary()

model.fit(x_train,y_train,validation_data=(x_test,y_test),epochs=epoch,batch_size=batch_size)
# model.save('./model/cifar10CNN.h5')
score = model.evaluate(x_test,y_test,batch_size=batch_size)
print(score)

ASL experiment: asl.py

from dataloader import DataGenerator,Dataloader
from myNet import Nets
import numpy as np
import os
import time

H,W,C = 224,224,3
shape = (H,W,C)
n_classes = 5
batch_size = 16
epochs = 10
save_model = './model/aslResNet18.h5'

train_data = DataGenerator('./train',n_classes)
valid_data = DataGenerator('./valid',n_classes).valid_generator()

nets = Nets(n_classes,shape)
model = nets.VGG()

def train():
    model.summary()
    # fit_generator requires a generator for the training data
    model.fit_generator(train_data.train_generator(batch_size),
                        epochs=epochs,
                        validation_data=valid_data,
                        steps_per_epoch=len(train_data.files)//batch_size)
    # model.save_weights(save_model)

def test(path):
    model.load_weights(save_model)
    dataloader = Dataloader(path)
    files = os.listdir(path)
    for file in files:
        test_data = dataloader.load_predict_data(os.path.join(path, file), input_size=(H, W))
        time_start = time.time()
        result = model.predict(test_data)
        time_end = time.time()
        pred = np.argmax(result, axis=1)
        print('classes : %s \t cost : %04f s'%(pred,time_end-time_start))


if __name__ =='__main__':
    train()
    data_path= './test/asltest'
    # test(data_path)


