# 1. Imports
import numpy as np
import tensorflow as tf
import h5py
from keras import layers
from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
from keras.models import Model, load_model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from keras.initializers import glorot_uniform
import scipy.misc
from matplotlib.pyplot import imshow
import keras.backend as K

K.set_image_data_format('channels_last')
K.set_learning_phase(1)

import resnets_utils


# GRADED FUNCTION: identity_block
def identity_block(X, f, filters, stage, block):
    """
    Implementation of the ResNet identity block (the shortcut path has no conv).

    Arguments:
    X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
    f -- integer, kernel size of the middle CONV layer of the main path
    filters -- list of integers, number of filters in each CONV layer of the main path
    stage -- integer, used together with block to name the layers by their position
    block -- string, used together with stage to name the layers by their position

    Returns:
    X -- output tensor of the identity block, shape (n_H, n_W, n_C)
    """
    # Naming convention for the layers inside this block
    conv_name_base = "res" + str(stage) + block + "_branch"
    bn_name_base = "bn" + str(stage) + block + "_branch"

    # Retrieve the filter counts
    F1, F2, F3 = filters

    # Save the input; it is added back onto the main path as the shortcut
    X_shortcut = X

    # First component of the main path: 1x1 conv -> BN -> ReLU
    X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(1, 1), padding="valid",
               name=conv_name_base + "2a",
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + "2a")(X)
    X = Activation("relu")(X)

    # Second component: fxf conv with "same" padding -> BN -> ReLU
    X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding="same",
               name=conv_name_base + "2b",
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + "2b")(X)
    X = Activation("relu")(X)

    # Third component: 1x1 conv -> BN, deliberately no ReLU before the addition
    X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding="valid",
               name=conv_name_base + "2c",
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + "2c")(X)

    # Final step: add the shortcut to the main path, then apply ReLU
    X = Add()([X, X_shortcut])
    X = Activation("relu")(X)

    return X


# Smoke test for identity_block (TF1 graph/session API)
tf.reset_default_graph()

with tf.Session() as test:
    np.random.seed(1)
    A_prev = tf.placeholder("float", [3, 4, 4, 6])
    X = np.random.randn(3, 4, 4, 6)
    A = identity_block(A_prev, f=2, filters=[2, 4, 6], stage=1, block="a")
    test.run(tf.global_variables_initializer())
    out = test.run([A], feed_dict={A_prev: X, K.learning_phase(): 0})
    print("out = " + str(out[0][1][1][0]))

    test.close()


def convolutional_block(X, f, filters, stage, block, s=2):
    """
    Implementation of the ResNet convolutional block (the shortcut path has a conv).

    Arguments:
    X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
    f -- integer, kernel size of the middle CONV layer of the main path
    filters -- list of integers, number of filters in each CONV layer of the main path
    stage -- integer, used together with block to name the layers by their position
    block -- string, used together with stage to name the layers by their position
    s -- integer, stride used by the first main-path conv and the shortcut conv

    Returns:
    X -- output tensor of the convolutional block, shape (n_H, n_W, n_C)
    """
    # Naming convention for the layers inside this block
    conv_name_base = "res" + str(stage) + block + "_branch"
    bn_name_base = "bn" + str(stage) + block + "_branch"

    # Retrieve the filter counts
    F1, F2, F3 = filters

    # Save the input for the shortcut path
    X_shortcut = X

    # Main path, first component: 1x1 conv with stride s -> BN -> ReLU
    X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(s, s), padding="valid",
               name=conv_name_base + "2a",
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + "2a")(X)
    X = Activation("relu")(X)

    # Main path, second component: fxf conv with "same" padding -> BN -> ReLU
    X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding="same",
               name=conv_name_base + "2b",
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + "2b")(X)
    X = Activation("relu")(X)

    # Main path, third component: 1x1 conv -> BN (no ReLU before the addition)
    X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding="valid",
               name=conv_name_base + "2c",
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + "2c")(X)

    # Shortcut path: 1x1 conv with stride s -> BN, to match the main path's shape
    X_shortcut = Conv2D(filters=F3, kernel_size=(1, 1), strides=(s, s), padding="valid",
                        name=conv_name_base + "1",
                        kernel_initializer=glorot_uniform(seed=0))(X_shortcut)
    X_shortcut = BatchNormalization(axis=3, name=bn_name_base + "1")(X_shortcut)

    # Final step: add the shortcut to the main path, then apply ReLU
    X = Add()([X, X_shortcut])
    X = Activation("relu")(X)

    return X


# Smoke test for convolutional_block (TF1 graph/session API)
tf.reset_default_graph()

with tf.Session() as test:
    np.random.seed(1)
    A_prev = tf.placeholder("float", [3, 4, 4, 6])
    X = np.random.randn(3, 4, 4, 6)
    A = convolutional_block(A_prev, f=2, filters=[2, 4, 6], stage=1, block="a")
    test.run(tf.global_variables_initializer())
    out = test.run([A], feed_dict={A_prev: X, K.learning_phase(): 0})
    print("out = " + str(out[0][1][1][0]))

    test.close()


# GRADED FUNCTION: ResNet50
def ResNet50(input_shape=(64, 64, 3), classes=6):
    """
    Implementation of the popular ResNet50 the following architecture:
    CONV2D -> BATCHNORM -> RELU -> MAXPOOL -> CONVBLOCK -> IDBLOCK*2 -> CONVBLOCK -> IDBLOCK*3
    -> CONVBLOCK -> IDBLOCK*5 -> CONVBLOCK -> IDBLOCK*2 -> AVGPOOL -> TOPLAYER

    Arguments:
    input_shape -- shape of the images of the dataset
    classes -- integer, number of classes

    Returns:
    model -- a Model() instance in Keras
    """
    # Define the input as a tensor with shape input_shape
    X_input = Input(input_shape)

    # Zero-Padding
    X = ZeroPadding2D((3, 3))(X_input)

    # Stage 1
    X = Conv2D(64, (7, 7), strides=(2, 2), name='conv1',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name='bn_conv1')(X)
    X = Activation('relu')(X)
    X = MaxPooling2D((3, 3), strides=(2, 2))(X)

    # Stage 2
    X = convolutional_block(X, f=3, filters=[64, 64, 256], stage=2, block='a', s=1)
    X = identity_block(X, 3, [64, 64, 256], stage=2, block='b')
    X = identity_block(X, 3, [64, 64, 256], stage=2, block='c')

    # Stage 3
    X = convolutional_block(X, f=3, filters=[128, 128, 512], stage=3, block='a', s=2)
    X = identity_block(X, 3, [128, 128, 512], stage=3, block='b')
    X = identity_block(X, 3, [128, 128, 512], stage=3, block='c')
    X = identity_block(X, 3, [128, 128, 512], stage=3, block='d')

    # Stage 4
    X = convolutional_block(X, f=3, filters=[256, 256, 1024], stage=4, block='a', s=2)
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='b')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='c')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='d')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='e')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='f')

    # Stage 5 (original had a redundant "X = X = ..." double assignment; fixed)
    X = convolutional_block(X, f=3, filters=[512, 512, 2048], stage=5, block='a', s=2)
    X = identity_block(X, 3, [512, 512, 2048], stage=5, block='b')
    X = identity_block(X, 3, [512, 512, 2048], stage=5, block='c')

    # AVGPOOL
    X = AveragePooling2D(pool_size=(2, 2), padding='same')(X)

    # Output layer
    X = Flatten()(X)
    X = Dense(classes, activation='softmax', name='fc' + str(classes),
              kernel_initializer=glorot_uniform(seed=0))(X)

    # Create model
    model = Model(inputs=X_input, outputs=X, name='ResNet50')

    return model


# Build and compile the model
model = ResNet50(input_shape=(64, 64, 3), classes=6)
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])


def load_dataset():
    """
    Load the SIGNS dataset from the two HDF5 files in the working directory.

    Returns:
    train_set_x_orig -- training images
    train_set_y_orig -- training labels, reshaped to (1, m_train)
    test_set_x_orig -- test images
    test_set_y_orig -- test labels, reshaped to (1, m_test)
    classes -- array listing the class labels
    """
    train_dataset = h5py.File('train_signs.h5', "r")
    train_set_x_orig = np.array(train_dataset["train_set_x"][:])  # your train set features
    train_set_y_orig = np.array(train_dataset["train_set_y"][:])  # your train set labels

    test_dataset = h5py.File('test_signs.h5', "r")
    test_set_x_orig = np.array(test_dataset["test_set_x"][:])  # your test set features
    test_set_y_orig = np.array(test_dataset["test_set_y"][:])  # your test set labels

    classes = np.array(test_dataset["list_classes"][:])  # the list of classes

    # Reshape labels to row vectors of shape (1, m)
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))

    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes


X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()

# Normalize image vectors to [0, 1]
X_train = X_train_orig / 255.
X_test = X_test_orig / 255.
def convert_to_one_hot(Y, C):
    """
    Convert a label row vector Y (integer values in 0..C-1) into a one-hot
    matrix of shape (C, number_of_labels): one column per label.
    """
    return np.eye(C)[Y.reshape(-1)].T


# Convert training and test labels to one hot matrices
# (transpose so examples are rows: shape (m, 6))
Y_train = convert_to_one_hot(Y_train_orig, 6).T
Y_test = convert_to_one_hot(Y_test_orig, 6).T

print("number of training examples = " + str(X_train.shape[0]))
print("number of test examples = " + str(X_test.shape[0]))
print("X_train shape: " + str(X_train.shape))
print("Y_train shape: " + str(Y_train.shape))
print("X_test shape: " + str(X_test.shape))
print("Y_test shape: " + str(Y_test.shape))

# Train (only 2 epochs in this experiment)
model.fit(X_train, Y_train, epochs=2, batch_size=32)

# Save the parameters trained above
print("saving...")
model.save("model2.h5")
model.save_weights('ModelWeights.h5')
# Evaluate the model trained above on the test set
preds = model.evaluate(X_test, Y_test)
print("Loss = " + str(preds[0]))
print("Test Accuracy = " + str(preds[1]))

# Load pretrained parameters (a model trained elsewhere for more epochs)
model = load_model('ResNet50.h5')

preds = model.evaluate(X_test, Y_test)
print("Loss = " + str(preds[0]))
print("Test Accuracy = " + str(preds[1]))

# Try the model on your own image
img_path = 'images/my_image.jpg'

img = image.load_img(img_path, target_size=(64, 64))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
print('Input image shape:', x.shape)

# NOTE: scipy.misc.imread was deprecated in SciPy 1.0 and removed in 1.2,
# so it raises AttributeError on modern installs. Read the image with
# matplotlib instead (matplotlib is already a dependency of this file).
import matplotlib.pyplot as plt
my_image = plt.imread(img_path)
imshow(my_image)

print("class prediction vector [p(0), p(1), p(2), p(3), p(4), p(5)] = ")
print(model.predict(x))

model.summary()

plot_model(model, to_file='model.png')