Straight to the code:
from keras.datasets import fashion_mnist
from keras.utils import to_categorical
from keras import models
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout
import cv2
import numpy as np
from keras import optimizers
# Load the Fashion-MNIST data
(x_train,y_train),(x_test,y_test)=fashion_mnist.load_data()
# The default input size for VGG16 (whose published weights were trained on ImageNet)
# is 224x224, but the smallest size it accepts is 48x48.
# Resize the images to 48x48 and convert them from grayscale to 3-channel RGB.
x_train = [cv2.cvtColor(cv2.resize(i, (48, 48)), cv2.COLOR_GRAY2BGR) for i in x_train]
x_test = [cv2.cvtColor(cv2.resize(i, (48, 48)), cv2.COLOR_GRAY2BGR) for i in x_test]
# Step 1: use np.newaxis to give each (48, 48, 3) image an extra leading dimension,
#         turning it into (1, 48, 48, 3) -- hence arr[np.newaxis] below.
# Step 2: use np.concatenate to join the per-image arrays into single arrays:
#         x_train becomes (60000, 48, 48, 3) and x_test becomes (10000, 48, 48, 3).
x_train = np.concatenate([arr[np.newaxis] for arr in x_train])
x_test = np.concatenate([arr[np.newaxis] for arr in x_test])
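# (Equivalent shortcut, just as a sketch: np.stack does the newaxis-plus-concatenate
#  combination in one call, e.g. x_train = np.stack(x_train), and yields the same
#  (N, 48, 48, 3) array.)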
# Scale pixel values to the [0, 1] range
x_train = x_train.astype("float32") / 255
x_train = x_train.reshape((60000, 48, 48, 3))
x_test = x_test.astype("float32") / 255
x_test = x_test.reshape((10000, 48, 48, 3))
y_train=to_categorical(y_train)
y_test=to_categorical(y_test)
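# For reference, to_categorical turns each integer label into a one-hot row, e.g.
#   to_categorical([3], num_classes=10) -> [[0, 0, 0, 1, 0, 0, 0, 0, 0, 0]]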
# Carve a validation set out of the training data
x_val=x_train[:10000]
y_val=y_train[:10000]
x_train=x_train[10000:]
y_train=y_train[10000:]
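# Quick sanity check on the resulting shapes (expected: (50000, 48, 48, 3) for x_train,
# (10000, 48, 48, 3) for x_val and x_test):
print(x_train.shape, x_val.shape, x_test.shape)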
# Build the model (VGG16, written out layer by layer)
def VGG(input_shape, class_o=10):
    model = models.Sequential()
    # layer 1
    model.add(Conv2D(64, (3, 3), strides=(1, 1), padding="same", activation="relu", input_shape=input_shape))
    # layer 2
    model.add(Conv2D(64, (3, 3), strides=(1, 1), padding="same", activation="relu"))
    model.add(MaxPooling2D((2, 2), strides=2, padding="valid"))
    # layer 3
    model.add(Conv2D(128, (3, 3), strides=(1, 1), padding="same", activation="relu"))
    # layer 4
    model.add(Conv2D(128, (3, 3), strides=(1, 1), padding="same", activation="relu"))
    model.add(MaxPooling2D((2, 2), strides=2, padding="valid"))
    # layer 5
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding="same", activation="relu"))
    # layer 6
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding="same", activation="relu"))
    # layer 7
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding="same", activation="relu"))
    model.add(MaxPooling2D((2, 2), strides=2, padding="valid"))
    # layer 8
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding="same", activation="relu"))
    # layer 9
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding="same", activation="relu"))
    # layer 10
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding="same", activation="relu"))
    model.add(MaxPooling2D((2, 2), strides=2, padding="valid"))
    # layer 11
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding="same", activation="relu"))
    # layer 12
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding="same", activation="relu"))
    # layer 13
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding="same", activation="relu"))
    model.add(MaxPooling2D((2, 2), strides=2, padding="valid"))
    # layer 14
    model.add(Flatten())
    model.add(Dense(4096, activation="relu"))
    model.add(Dropout(0.5))
    # layer 15
    model.add(Dense(4096, activation="relu"))
    model.add(Dropout(0.5))
    # layer 16
    model.add(Dense(class_o, activation="softmax"))
    model.summary()
    return model
input_shape=(48,48,3)
model=VGG(input_shape)
sgd = optimizers.SGD(lr=0.01, decay=1e-5)
# Compile the model -- pass the SGD instance defined above rather than the string "sgd",
# otherwise the custom learning rate and decay are silently ignored
model.compile(optimizer=sgd, loss="categorical_crossentropy", metrics=["accuracy"])
# Train the model
model.fit(x_train,y_train,batch_size=64,epochs=50,validation_data=(x_val,y_val))
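# (Optional, as a sketch rather than part of the original recipe: 50 epochs of a full
#  VGG16 on 48x48 inputs takes a long time; an EarlyStopping callback can halt training
#  once val_loss stops improving, e.g.
#      from keras.callbacks import EarlyStopping
#      model.fit(..., callbacks=[EarlyStopping(monitor="val_loss", patience=3)])
# )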
# Evaluate on the held-out test set
test_loss, test_acc = model.evaluate(x_test, y_test)
print("The test accuracy is: " + str(test_acc))