Tutorial: Detecting Facial Keypoints with a CNN (Part 2)

Part 3: The Model: A Convolutional Neural Network

Let's start this part with the code:


import keras
import pandas as pd
import numpy as np
from keras import Sequential
from keras.layers import Conv2D, Activation, MaxPooling2D, Dropout, Flatten, Dense
from keras.models import load_model
from matplotlib import pyplot

TRAIN_FILE = 'training.csv'
TEST_FILE = 'test.csv'
SAVE_PATH = 'model'

VALIDATION_SIZE = 100     # size of the validation set
EPOCHS = 100              # number of training epochs
BATCH_SIZE = 64           # batch size; a slightly larger batch is more stable
EARLY_STOP_PATIENCE = 10  # patience parameter for early stopping

def input_data(test=False):
    file_name = TEST_FILE if test else TRAIN_FILE
    df = pd.read_csv(file_name)
    cols = df.columns[:-1]

    # dropna() discards samples with missing values; of the 7000+ training
    # samples only 2140 complete ones remain.
    df = df.dropna()
    df['Image'] = df['Image'].apply(lambda img: np.fromstring(img, sep=' ') / 255.0)

    X = np.vstack(df['Image'])
    X = X.reshape((-1, 96, 96, 1))

    if test:
        y = None
    else:
        y = df[cols].values / 96.0      # scale the y values into the [0, 1] range
    return X, y

# needed later when generating the submission file
keypoint_index = {
    'left_eye_center_x': 0,
    'left_eye_center_y': 1,
    'right_eye_center_x': 2,
    'right_eye_center_y': 3,
    'left_eye_inner_corner_x': 4,
    'left_eye_inner_corner_y': 5,
    'left_eye_outer_corner_x': 6,
    'left_eye_outer_corner_y': 7,
    'right_eye_inner_corner_x': 8,
    'right_eye_inner_corner_y': 9,
    'right_eye_outer_corner_x': 10,
    'right_eye_outer_corner_y': 11,
    'left_eyebrow_inner_end_x': 12,
    'left_eyebrow_inner_end_y': 13,
    'left_eyebrow_outer_end_x': 14,
    'left_eyebrow_outer_end_y': 15,
    'right_eyebrow_inner_end_x': 16,
    'right_eyebrow_inner_end_y': 17,
    'right_eyebrow_outer_end_x': 18,
    'right_eyebrow_outer_end_y': 19,
    'nose_tip_x': 20,
    'nose_tip_y': 21,
    'mouth_left_corner_x': 22,
    'mouth_left_corner_y': 23,
    'mouth_right_corner_x': 24,
    'mouth_right_corner_y': 25,
    'mouth_center_top_lip_x': 26,
    'mouth_center_top_lip_y': 27,
    'mouth_center_bottom_lip_x': 28,
    'mouth_center_bottom_lip_y': 29
}

X, y = input_data()
print(X)
print(y)

X_test, y_test = input_data(test=True)
print(X_test)
print(y_test)

y_pred = []

# The block below writes the Kaggle submission file. It is commented out here
# because y_pred is only filled after model.predict() at the end of the script;
# uncomment it and run it after prediction to produce submit.csv.
#
# output_file = open('submit.csv', 'w')
# output_file.write('RowId,Location\n')
#
# IdLookupTable = open('IdLookupTable.csv')
# IdLookupTable.readline()
#
# for line in IdLookupTable:
#     RowId, ImageId, FeatureName = line.rstrip().split(',')
#     image_index = int(ImageId) - 1
#     feature_index = keypoint_index[FeatureName]
#     feature_location = y_pred[image_index][feature_index] * 96
#     output_file.write('{0},{1}\n'.format(RowId, feature_location))
#
# output_file.close()
# IdLookupTable.close()

model = Sequential()

model.add(Conv2D(32, (3, 3), padding='same',
                 input_shape=X.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(30))    # 30 outputs: the x/y coordinates of the 15 keypoints

# initiate the RMSprop optimizer
opt = keras.optimizers.RMSprop(lr=0.0001, decay=1e-6)

# compile the model with mean squared error as the loss
model.compile(loss='mse',
              optimizer=opt,
              metrics=['accuracy'])

x_train = X.astype('float32')
x_test = X_test.astype('float32')
# the pixel values were already scaled to [0, 1] in input_data(), so no extra division is needed here
# x_train /= 255
# x_test /= 255

history = model.fit(x_train, y,
                    batch_size=BATCH_SIZE,
                    epochs=EPOCHS,
                    validation_split=0.2,
                    shuffle=True)

pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
pyplot.title('model train vs validation loss')
pyplot.ylabel('loss')
pyplot.xlabel('epoch')
pyplot.legend(['train_loss', 'test_loss'], loc='upper right')
pyplot.show()

model.save("keypointdetection")

model = load_model("keypointdetection")

y_test = model.predict(x_test)

print(y_test)

y_pred  = y_test

def plot_sample(x, y, axis):

img = x.reshape(96, 96)

axis.imshow(img, cmap='gray')

axis.scatter(y[0::2] *96, y[1::2] *96, marker='x', s=10)

fig = pyplot.figure(figsize=(6, 6))

fig.subplots_adjust(

left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)

for iin range(16):

ax = fig.add_subplot(4, 4, i +1, xticks=[], yticks=[])

plot_sample(X_test[i], y_pred[i], ax)

pyplot.show()

Part 4: Testing the CNN's Performance

The call to model.fit we just ran returns a History object that keeps the per-epoch metrics printed to the console during training; we can read them from its history attribute. Let's plot the two loss curves.

pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
pyplot.title('model train vs validation loss')
pyplot.ylabel('loss')
pyplot.xlabel('epoch')
pyplot.legend(['train_loss', 'test_loss'], loc='upper right')
pyplot.show()



[Figure 1: training loss vs. validation loss]
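As a side note, because the target coordinates were divided by 96 and the loss is mean squared error, the validation loss can be converted back into an error measured in pixels. A minimal sketch, not part of the original script, using the history object from the fit above:

import numpy as np

val_mse = history.history['val_loss'][-1]   # last-epoch validation MSE on the [0, 1]-scaled targets
rmse_pixels = 96 * np.sqrt(val_mse)         # RMSE expressed in 96x96 pixel coordinates
print('validation RMSE: {:.2f} pixels'.format(rmse_pixels))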
We can see that the network fits quite well. So what do its predictions actually look like? Let's pick a few samples and take a look.


y_test = model.predict(x_test)
print(y_test)
y_pred = y_test

def plot_sample(x, y, axis):
    img = x.reshape(96, 96)
    axis.imshow(img, cmap='gray')
    axis.scatter(y[0::2] * 96, y[1::2] * 96, marker='x', s=10)

fig = pyplot.figure(figsize=(6, 6))
fig.subplots_adjust(
    left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)

for i in range(16):
    ax = fig.add_subplot(4, 4, i + 1, xticks=[], yticks=[])
    plot_sample(X_test[i], y_pred[i], ax)

pyplot.show()

[Figure 2: predictions of the convolutional neural network on 16 samples drawn from the test set]



The predictions look fairly good, but they are still a little off at times. Let's try to do better.
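One low-cost change worth trying first, hinted at by the EARLY_STOP_PATIENCE constant that the script defines but never uses, is to stop training once the validation loss stops improving. A minimal sketch, not part of the original script, assuming the same model, data, and constants as in the listing above:

from keras.callbacks import EarlyStopping

# stop when val_loss has not improved for EARLY_STOP_PATIENCE consecutive epochs
early_stop = EarlyStopping(monitor='val_loss', patience=EARLY_STOP_PATIENCE)

history = model.fit(x_train, y,
                    batch_size=BATCH_SIZE,
                    epochs=EPOCHS,
                    validation_split=0.2,
                    shuffle=True,
                    callbacks=[early_stop])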
