import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Flatten, Dense, Conv2D, MaxPooling2D, Dropout
from tensorflow.keras.datasets import fashion_mnist
%matplotlib inline
tf.__version__
'2.0.0-alpha0'
# Load the dataset
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
print( x_train.shape )
print( x_test.shape )
print( y_train.shape )
print( y_test.shape )
(60000, 28, 28)
(10000, 28, 28)
(60000,)
(10000,)
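For later reference when inspecting predictions, it helps to keep the 10 class names at hand. The dataset itself only ships integer labels 0-9; the class_names list below is our own addition, following the documented Fashion-MNIST label order.
# Map integer labels 0-9 to human-readable Fashion-MNIST class names.
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']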
# Build a simple model (a fully connected network)
model = Sequential([Flatten(input_shape=[28, 28]),
Dense(100, activation='relu'),
Dense(10, activation='softmax')
])
model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
flatten (Flatten) (None, 784) 0
_________________________________________________________________
dense (Dense) (None, 100) 78500
_________________________________________________________________
dense_1 (Dense) (None, 10) 1010
=================================================================
Total params: 79,510
Trainable params: 79,510
Non-trainable params: 0
_________________________________________________________________
# Use model.layers[n].name to get the name of the n-th layer (here n=1, the first Dense layer)
model.layers[1].name
'dense'
# Use get_weights() to get each layer's weight matrix W and bias vector b.
weights, biases = model.layers[1].get_weights()
weights
array([[ 0.03753366, -0.07585614, 0.05845281, ..., -0.00696003,
-0.02985662, 0.0026468 ],
[-0.02360117, 0.07903261, -0.00201984, ..., 0.01831853,
-0.05822062, 0.00874755],
[ 0.03159927, -0.00679947, 0.03076784, ..., 0.06593607,
-0.00499721, 0.03378649],
...,
[ 0.01089679, 0.04923365, 0.07235795, ..., 0.01033241,
0.01817431, -0.04198586],
[ 0.03213584, -0.0057021 , 0.00929629, ..., -0.03756753,
0.01735194, -0.01611251],
[ 0.06783222, -0.04055587, -0.06099807, ..., -0.06757091,
-0.01999778, 0.00600851]], dtype=float32)
biases
array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
dtype=float32)
# Before the model is trained, W is randomly initialized while b is zero-initialized. Finally, check
# their shapes: 784 x 100 weights plus 100 biases gives 78,500 parameters, matching the summary above.
print( weights.shape )
print( biases.shape )
(784, 100)
(100,)
# Compile the model
model.compile(loss='sparse_categorical_crossentropy',
optimizer='adam',
metrics=['acc'])
The loss function must match the output activation:
sigmoid output, loss binary_crossentropy (binary classification)
softmax output, loss categorical_crossentropy (multi-class classification with one-hot labels)
linear output, loss mse (regression)
Since our labels here are integer class indices rather than one-hot vectors, we pair the softmax output with sparse_categorical_crossentropy. For the optimizer, using adam or rmsprop with its default learning rate is a safe choice in most cases.
Besides specifying an optimizer by name, model.compile(optimizer='name'), we can instantiate an optimizer object and pass that instead. A few comparisons:
Name: SGD
Object: SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False)
Name: RMSprop
Object: RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0)
Name: Adagrad
Object: Adagrad(lr=0.01, epsilon=None, decay=0.0)
Name: Adam
Object: Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
These optimizer objects live in the keras.optimizers namespace. Compiling with an optimizer object lets you tune its hyperparameters, such as the learning rate lr; compiling by name uses the optimizer's defaults, e.g. Adam's learning rate of 0.001.
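For instance, a minimal sketch of compiling with an explicit Adam object at a non-default learning rate (3e-4 is just an illustrative value; in tf.keras the argument is spelled learning_rate, with lr kept as a legacy alias):
from tensorflow.keras.optimizers import Adam
model.compile(loss='sparse_categorical_crossentropy',
              optimizer=Adam(learning_rate=3e-4),  # tuned instead of the default 0.001
              metrics=['acc'])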
Metrics are not used in the training process itself; they only let us monitor the model's performance as it trains.
Besides Keras's built-in metrics, we can define our own. The mean_pred below is a custom metric (it computes the mean of the predictions).
from tensorflow.keras import backend as K

def mean_pred(y_true, y_pred):
    # Custom metric: the mean predicted value across the batch.
    return K.mean(y_pred)

model.compile(optimizer='sgd',
              loss='binary_crossentropy',
              metrics=['acc', mean_pred])
# Callbacks (stop training early once a target accuracy is reached)
class myCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        # Keras passes the epoch's metrics in `logs`; stop as soon as
        # training accuracy exceeds 90%.
        if logs.get('acc', 0) > 0.9:
            print('\nReached 90% accuracy so cancelling training!')
            self.model.stop_training = True
callback = myCallback()
# Fit the model
model.fit(x_train, y_train, epochs=20, callbacks=[callback])
Epoch 1/20
60000/60000 [==============================] - 5s 81us/sample - loss: 2.5439 - acc: 0.6660
Epoch 2/20
60000/60000 [==============================] - 4s 72us/sample - loss: 0.7489 - acc: 0.7143
Epoch 3/20
60000/60000 [==============================] - 4s 72us/sample - loss: 0.6793 - acc: 0.7381
Epoch 4/20
60000/60000 [==============================] - 4s 71us/sample - loss: 0.5989 - acc: 0.7839
Epoch 5/20
60000/60000 [==============================] - 4s 72us/sample - loss: 0.5315 - acc: 0.8148
Epoch 6/20
60000/60000 [==============================] - 4s 71us/sample - loss: 0.5149 - acc: 0.8244
Epoch 7/20
60000/60000 [==============================] - 4s 72us/sample - loss: 0.4978 - acc: 0.8292
Epoch 8/20
60000/60000 [==============================] - 4s 71us/sample - loss: 0.4953 - acc: 0.8321
Epoch 9/20
60000/60000 [==============================] - 4s 72us/sample - loss: 0.4870 - acc: 0.8366
Epoch 10/20
60000/60000 [==============================] - 4s 73us/sample - loss: 0.4856 - acc: 0.8376
Epoch 11/20
60000/60000 [==============================] - 4s 71us/sample - loss: 0.4868 - acc: 0.8378
Epoch 12/20
60000/60000 [==============================] - 4s 71us/sample - loss: 0.4844 - acc: 0.8399
Epoch 13/20
60000/60000 [==============================] - 4s 71us/sample - loss: 0.4689 - acc: 0.8425
Epoch 14/20
60000/60000 [==============================] - 4s 71us/sample - loss: 0.4794 - acc: 0.8420
Epoch 15/20
60000/60000 [==============================] - 4s 72us/sample - loss: 0.4715 - acc: 0.8451
Epoch 16/20
60000/60000 [==============================] - 4s 72us/sample - loss: 0.4695 - acc: 0.8469
Epoch 17/20
60000/60000 [==============================] - 4s 72us/sample - loss: 0.4604 - acc: 0.8473
Epoch 18/20
60000/60000 [==============================] - 4s 71us/sample - loss: 0.4617 - acc: 0.8481
Epoch 19/20
60000/60000 [==============================] - 4s 72us/sample - loss: 0.4678 - acc: 0.8471
Epoch 20/20
60000/60000 [==============================] - 4s 72us/sample - loss: 0.4570 - acc: 0.8475
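Note that the first epoch's loss (2.54) is unusually high and convergence is sluggish. This is at least partly because the raw pixel values (0-255) were fed to the network unscaled; the CNN section below divides the inputs by 255, which is the safer practice.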
# Make predictions
prob = model.predict( x_test[0:1] )
prob
array([[7.6639465e-23, 1.3957777e-23, 0.0000000e+00, 2.8266480e-21,
0.0000000e+00, 9.7962271e-04, 0.0000000e+00, 8.5630892e-03,
1.0883533e-21, 9.9045730e-01]], dtype=float32)
Predicting on the first test image returns an array of 10 values, the predicted probability of each class. The 10th class (index 9) clearly has the largest probability; verify with argmax:
print(np.argmax(prob))
9
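With the class_names list defined near the top (our own addition, not part of the dataset API), the prediction can be printed as a human-readable label:
print(class_names[np.argmax(prob)])  # 'Ankle boot' (class 9)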
plt.imshow(x_test[0])
Finally, use model.evaluate() to see how the model performs on the whole test set.
model.evaluate( x_test, y_test )
10000/10000 [==============================] - 1s 54us/sample - loss: 0.5990 - acc: 0.8217
[0.5990172575473786, 0.8217]
The training accuracy (0.8475) being noticeably above the test accuracy (0.8217) is a sign of overfitting.
And since the accuracy itself is not great, we should rethink the model: a single fully connected hidden layer is probably too simple here, so let's try a convolutional neural network.
We will use the 2-D convolution layer Conv2D and the 2-D max-pooling layer MaxPooling2D (both already imported at the top).
# Build the model (CNN)
model = Sequential([Conv2D(64, (3,3), activation='relu', input_shape=(28, 28, 1)),
MaxPooling2D(2,2),
Conv2D(64, (3,3), activation='relu'),
MaxPooling2D(2,2),
Flatten(),
Dense(128, activation='relu'),
Dense(10, activation='softmax')])
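To follow the shapes: a 3x3 convolution on the 28x28 input gives 26x26x64 feature maps; 2x2 max-pooling halves that to 13x13x64; the second convolution gives 11x11x64 and the second pooling (floor division) 5x5x64, so Flatten yields a 1,600-dimensional vector feeding the 128-unit Dense layer (1600 x 128 + 128 = 204,928 parameters).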
# Compile the model
model.compile(loss='sparse_categorical_crossentropy',
optimizer='adam',
metrics=['acc'])
# Prepare the data and fit the model
(x_train_full, y_train_full), (x_test, y_test) = fashion_mnist.load_data()
x_train_full = x_train_full.reshape(60000, 28, 28, 1)
# NB: x_test is reshaped but never divided by 255, while the training and
# validation inputs below are scaled to [0, 1]; this mismatch shows up in the
# final evaluate() call at the end of this post.
x_test = x_test.reshape(10000, 28, 28, 1)
x_valid, x_train = x_train_full[:5000]/255.0, x_train_full[5000:] / 255.0
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
history = model.fit(x_train, y_train, epochs=20, validation_data=(x_valid, y_valid))
Train on 55000 samples, validate on 5000 samples
Epoch 1/20
55000/55000 [==============================] - 9s 166us/sample - loss: 0.1063 - acc: 0.9602 - val_loss: 0.3073 - val_acc: 0.9094
Epoch 2/20
55000/55000 [==============================] - 9s 164us/sample - loss: 0.0950 - acc: 0.9645 - val_loss: 0.3091 - val_acc: 0.9158
Epoch 3/20
55000/55000 [==============================] - 9s 162us/sample - loss: 0.0893 - acc: 0.9664 - val_loss: 0.3295 - val_acc: 0.9138
Epoch 4/20
55000/55000 [==============================] - 9s 163us/sample - loss: 0.0783 - acc: 0.9704 - val_loss: 0.3551 - val_acc: 0.9138
Epoch 5/20
55000/55000 [==============================] - 9s 163us/sample - loss: 0.0712 - acc: 0.9734 - val_loss: 0.3801 - val_acc: 0.9070
Epoch 6/20
55000/55000 [==============================] - 9s 162us/sample - loss: 0.0666 - acc: 0.9752 - val_loss: 0.3812 - val_acc: 0.9126
Epoch 7/20
55000/55000 [==============================] - 9s 167us/sample - loss: 0.0599 - acc: 0.9775 - val_loss: 0.3997 - val_acc: 0.9122
Epoch 8/20
55000/55000 [==============================] - 9s 167us/sample - loss: 0.0543 - acc: 0.9802 - val_loss: 0.4552 - val_acc: 0.9068
Epoch 9/20
55000/55000 [==============================] - 9s 165us/sample - loss: 0.0515 - acc: 0.9807 - val_loss: 0.4546 - val_acc: 0.9056
Epoch 10/20
55000/55000 [==============================] - 9s 166us/sample - loss: 0.0483 - acc: 0.9822 - val_loss: 0.4561 - val_acc: 0.9122
Epoch 11/20
55000/55000 [==============================] - 9s 165us/sample - loss: 0.0427 - acc: 0.9842 - val_loss: 0.4877 - val_acc: 0.9092
Epoch 12/20
55000/55000 [==============================] - 9s 167us/sample - loss: 0.0406 - acc: 0.9849 - val_loss: 0.4913 - val_acc: 0.9100
Epoch 13/20
55000/55000 [==============================] - 9s 166us/sample - loss: 0.0392 - acc: 0.9853 - val_loss: 0.5382 - val_acc: 0.9102
Epoch 14/20
55000/55000 [==============================] - 9s 165us/sample - loss: 0.0361 - acc: 0.9866 - val_loss: 0.5899 - val_acc: 0.9116
Epoch 15/20
55000/55000 [==============================] - 9s 167us/sample - loss: 0.0375 - acc: 0.9864 - val_loss: 0.5630 - val_acc: 0.9136
Epoch 16/20
55000/55000 [==============================] - 9s 168us/sample - loss: 0.0301 - acc: 0.9891 - val_loss: 0.5853 - val_acc: 0.9140
Epoch 17/20
55000/55000 [==============================] - 9s 169us/sample - loss: 0.0347 - acc: 0.9875 - val_loss: 0.6378 - val_acc: 0.9054
Epoch 18/20
55000/55000 [==============================] - 9s 169us/sample - loss: 0.0280 - acc: 0.9899 - val_loss: 0.5961 - val_acc: 0.9106
Epoch 19/20
55000/55000 [==============================] - 9s 168us/sample - loss: 0.0306 - acc: 0.9890 - val_loss: 0.6319 - val_acc: 0.9100
Epoch 20/20
55000/55000 [==============================] - 9s 165us/sample - loss: 0.0252 - acc: 0.9908 - val_loss: 0.6697 - val_acc: 0.9078
acc = history.history.get('acc')
val_acc = history.history.get('val_acc')
loss = history.history.get('loss')
val_loss = history.history.get('val_loss')
epochs = range(1, len(acc)+1)
plt.figure(figsize=(8,4), dpi=100)
plt.subplot(1, 2, 1)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(epochs, loss, 'ro', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.legend()
We see that the training accuracy is well above the validation accuracy, and the validation acc and loss are drifting the wrong way rather than improving: the model is overfitting. What can we do? Try Dropout, which randomly zeroes a fraction of activations during training (rate 0.5 below) to discourage co-adaptation.
# Build the model (CNN with Dropout)
model = Sequential([Conv2D(64, (3,3), activation='relu', input_shape=(28, 28, 1)),
MaxPooling2D(2,2),
Conv2D(64, (3,3), activation='relu'),
MaxPooling2D(2,2),
Flatten(),
Dropout(0.5),
Dense(128, activation='relu'),
Dense(10, activation='softmax')])
# Compile the model
model.compile(loss='sparse_categorical_crossentropy',
optimizer='adam',
metrics=['acc'])
history = model.fit(x_train, y_train, epochs=20, validation_data=(x_valid, y_valid))
Train on 55000 samples, validate on 5000 samples
Epoch 1/20
55000/55000 [==============================] - 9s 171us/sample - loss: 0.5157 - acc: 0.8105 - val_loss: 0.3606 - val_acc: 0.8676
Epoch 2/20
55000/55000 [==============================] - 9s 168us/sample - loss: 0.3601 - acc: 0.8664 - val_loss: 0.2841 - val_acc: 0.8968
Epoch 3/20
55000/55000 [==============================] - 9s 168us/sample - loss: 0.3160 - acc: 0.8840 - val_loss: 0.2870 - val_acc: 0.8942
Epoch 4/20
55000/55000 [==============================] - 9s 167us/sample - loss: 0.2900 - acc: 0.8914 - val_loss: 0.2527 - val_acc: 0.9056
Epoch 5/20
55000/55000 [==============================] - 9s 170us/sample - loss: 0.2703 - acc: 0.8976 - val_loss: 0.2413 - val_acc: 0.9088
Epoch 6/20
55000/55000 [==============================] - 10s 174us/sample - loss: 0.2525 - acc: 0.9052 - val_loss: 0.2270 - val_acc: 0.9158
Epoch 7/20
55000/55000 [==============================] - 9s 163us/sample - loss: 0.2375 - acc: 0.9115 - val_loss: 0.2294 - val_acc: 0.9172
Epoch 8/20
55000/55000 [==============================] - 9s 163us/sample - loss: 0.2306 - acc: 0.9107 - val_loss: 0.2197 - val_acc: 0.9186
Epoch 9/20
55000/55000 [==============================] - 9s 164us/sample - loss: 0.2170 - acc: 0.9173 - val_loss: 0.2165 - val_acc: 0.9198
Epoch 10/20
55000/55000 [==============================] - 9s 165us/sample - loss: 0.2110 - acc: 0.9200 - val_loss: 0.2181 - val_acc: 0.9160
Epoch 11/20
55000/55000 [==============================] - 9s 163us/sample - loss: 0.2021 - acc: 0.9235 - val_loss: 0.2230 - val_acc: 0.9222
Epoch 12/20
55000/55000 [==============================] - 9s 165us/sample - loss: 0.1973 - acc: 0.9248 - val_loss: 0.2282 - val_acc: 0.9200
Epoch 13/20
55000/55000 [==============================] - 9s 165us/sample - loss: 0.1899 - acc: 0.9288 - val_loss: 0.2201 - val_acc: 0.9162
Epoch 14/20
55000/55000 [==============================] - 9s 164us/sample - loss: 0.1853 - acc: 0.9291 - val_loss: 0.2140 - val_acc: 0.9236
Epoch 15/20
55000/55000 [==============================] - 9s 164us/sample - loss: 0.1787 - acc: 0.9317 - val_loss: 0.2265 - val_acc: 0.9204
Epoch 16/20
55000/55000 [==============================] - 9s 164us/sample - loss: 0.1756 - acc: 0.9333 - val_loss: 0.2087 - val_acc: 0.9250
Epoch 17/20
55000/55000 [==============================] - 9s 164us/sample - loss: 0.1680 - acc: 0.9357 - val_loss: 0.2250 - val_acc: 0.9170
Epoch 18/20
55000/55000 [==============================] - 9s 164us/sample - loss: 0.1651 - acc: 0.9375 - val_loss: 0.2217 - val_acc: 0.9238
Epoch 19/20
55000/55000 [==============================] - 9s 164us/sample - loss: 0.1588 - acc: 0.9399 - val_loss: 0.2241 - val_acc: 0.9254
Epoch 20/20
55000/55000 [==============================] - 9s 165us/sample - loss: 0.1616 - acc: 0.9383 - val_loss: 0.2200 - val_acc: 0.9208
acc = history.history.get('acc')
val_acc = history.history.get('val_acc')
loss = history.history.get('loss')
val_loss = history.history.get('val_loss')
epochs = range(1, len(acc)+1)
plt.figure(figsize=(8,4), dpi=100)
plt.subplot(1, 2, 1)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(epochs, loss, 'ro', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.legend()
As we can see, training accuracy drops a little, but validation accuracy improves: Dropout effectively suppresses the overfitting.
# Save the model
model.save("my_keras_model.h5")
# To load it back, use the load_model() function in the models namespace:
model = tf.keras.models.load_model("my_keras_model.h5")
history = model.fit(x_train, y_train, epochs=20, validation_data=(x_valid, y_valid))
Train on 55000 samples, validate on 5000 samples
Epoch 1/20
55000/55000 [==============================] - 10s 185us/sample - loss: 0.1527 - acc: 0.9412 - val_loss: 0.2212 - val_acc: 0.9226
Epoch 2/20
55000/55000 [==============================] - 10s 179us/sample - loss: 0.1515 - acc: 0.9425 - val_loss: 0.2161 - val_acc: 0.9268
Epoch 3/20
55000/55000 [==============================] - 10s 178us/sample - loss: 0.1459 - acc: 0.9451 - val_loss: 0.2118 - val_acc: 0.9234
Epoch 4/20
55000/55000 [==============================] - 10s 180us/sample - loss: 0.1446 - acc: 0.9440 - val_loss: 0.2290 - val_acc: 0.9232
Epoch 5/20
55000/55000 [==============================] - 10s 179us/sample - loss: 0.1413 - acc: 0.9461 - val_loss: 0.2303 - val_acc: 0.9186
Epoch 6/20
55000/55000 [==============================] - 10s 176us/sample - loss: 0.1401 - acc: 0.9467 - val_loss: 0.2247 - val_acc: 0.9230
Epoch 7/20
55000/55000 [==============================] - 10s 179us/sample - loss: 0.1375 - acc: 0.9472 - val_loss: 0.2225 - val_acc: 0.9266
Epoch 8/20
55000/55000 [==============================] - 10s 178us/sample - loss: 0.1312 - acc: 0.9489 - val_loss: 0.2249 - val_acc: 0.9248
Epoch 9/20
55000/55000 [==============================] - 10s 177us/sample - loss: 0.1300 - acc: 0.9496 - val_loss: 0.2330 - val_acc: 0.9238
Epoch 10/20
55000/55000 [==============================] - 10s 179us/sample - loss: 0.1294 - acc: 0.9519 - val_loss: 0.2330 - val_acc: 0.9288
Epoch 11/20
55000/55000 [==============================] - 10s 175us/sample - loss: 0.1274 - acc: 0.9515 - val_loss: 0.2265 - val_acc: 0.9262
Epoch 12/20
55000/55000 [==============================] - 10s 180us/sample - loss: 0.1264 - acc: 0.9516 - val_loss: 0.2454 - val_acc: 0.9206
Epoch 13/20
55000/55000 [==============================] - 10s 177us/sample - loss: 0.1231 - acc: 0.9519 - val_loss: 0.2327 - val_acc: 0.9256
Epoch 14/20
55000/55000 [==============================] - 10s 176us/sample - loss: 0.1238 - acc: 0.9531 - val_loss: 0.2210 - val_acc: 0.9262
Epoch 15/20
55000/55000 [==============================] - 10s 181us/sample - loss: 0.1185 - acc: 0.9541 - val_loss: 0.2355 - val_acc: 0.9232
Epoch 16/20
55000/55000 [==============================] - 10s 178us/sample - loss: 0.1186 - acc: 0.9555 - val_loss: 0.2252 - val_acc: 0.9268
Epoch 17/20
55000/55000 [==============================] - 10s 178us/sample - loss: 0.1157 - acc: 0.9553 - val_loss: 0.2346 - val_acc: 0.9260
Epoch 18/20
55000/55000 [==============================] - 10s 177us/sample - loss: 0.1195 - acc: 0.9551 - val_loss: 0.2319 - val_acc: 0.9242
Epoch 19/20
55000/55000 [==============================] - 10s 179us/sample - loss: 0.1128 - acc: 0.9569 - val_loss: 0.2406 - val_acc: 0.9212
Epoch 20/20
55000/55000 [==============================] - 10s 181us/sample - loss: 0.1125 - acc: 0.9573 - val_loss: 0.2504 - val_acc: 0.9276
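In the log above, val_loss starts creeping up again while the training loss keeps falling, so running a fixed 20 extra epochs is wasteful. Instead of the hand-written callback from earlier, tf.keras ships ready-made callbacks for this; a minimal sketch using ModelCheckpoint and EarlyStopping (the patience value is just an illustrative choice):
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
# Keep the best model on disk, and stop once val_loss has not improved
# for 3 consecutive epochs, rolling back to the best weights seen.
checkpoint = ModelCheckpoint('my_keras_model.h5', save_best_only=True)
early_stop = EarlyStopping(patience=3, restore_best_weights=True)
history = model.fit(x_train, y_train, epochs=20,
                    validation_data=(x_valid, y_valid),
                    callbacks=[checkpoint, early_stop])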
The overall Keras workflow
import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Flatten, Dense, Conv2D, MaxPooling2D, Dropout
from tensorflow.keras.datasets import fashion_mnist
%matplotlib inline
# 1. Load the data
(x_train_full, y_train_full), (x_test, y_test) = fashion_mnist.load_data()
x_train_full = x_train_full.reshape(60000, 28, 28, 1)
x_test = x_test.reshape(10000, 28, 28, 1)  # NB: should also be divided by 255 (see the note after evaluate below)
x_valid, x_train = x_train_full[:5000]/255.0, x_train_full[5000:] / 255.0
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
# 2. Build the model
model = Sequential([Conv2D(64, (3,3), activation='relu', input_shape=(28, 28, 1)),
MaxPooling2D(2,2),
Conv2D(64, (3,3), activation='relu'),
MaxPooling2D(2,2),
Flatten(),
Dropout(0.5),
Dense(128, activation='relu'),
Dense(10, activation='softmax')])
# 3. Compile the model
model.compile(loss='sparse_categorical_crossentropy',
optimizer='adam',
metrics=['acc'])
# 4. Fit the model
history = model.fit(x_train, y_train, epochs=20, validation_data=(x_valid, y_valid))
# 5. Evaluate the model
# model.predict(x_test)          # class probabilities
# model.predict_classes(x_test)  # class indices (Sequential models only)
model.evaluate(x_test, y_test)
# 6. Save the model
model.save("my_keras_model.h5")
# 7. Load the model
model = tf.keras.models.load_model("my_keras_model.h5")
Train on 55000 samples, validate on 5000 samples
Epoch 1/20
55000/55000 [==============================] - 10s 176us/sample - loss: 0.5164 - acc: 0.8091 - val_loss: 0.3403 - val_acc: 0.8806
Epoch 2/20
55000/55000 [==============================] - 10s 173us/sample - loss: 0.3575 - acc: 0.8678 - val_loss: 0.3001 - val_acc: 0.8872
Epoch 3/20
55000/55000 [==============================] - 9s 172us/sample - loss: 0.3096 - acc: 0.8844 - val_loss: 0.2651 - val_acc: 0.9026
Epoch 4/20
55000/55000 [==============================] - 9s 166us/sample - loss: 0.2854 - acc: 0.8941 - val_loss: 0.2607 - val_acc: 0.9040
Epoch 5/20
55000/55000 [==============================] - 9s 164us/sample - loss: 0.2641 - acc: 0.9009 - val_loss: 0.2316 - val_acc: 0.9122
Epoch 6/20
55000/55000 [==============================] - 9s 162us/sample - loss: 0.2494 - acc: 0.9055 - val_loss: 0.2283 - val_acc: 0.9156
Epoch 7/20
55000/55000 [==============================] - 9s 164us/sample - loss: 0.2356 - acc: 0.9109 - val_loss: 0.2252 - val_acc: 0.9152
Epoch 8/20
55000/55000 [==============================] - 9s 164us/sample - loss: 0.2228 - acc: 0.9153 - val_loss: 0.2217 - val_acc: 0.9220
Epoch 9/20
55000/55000 [==============================] - 9s 164us/sample - loss: 0.2160 - acc: 0.9177 - val_loss: 0.2171 - val_acc: 0.9206
Epoch 10/20
55000/55000 [==============================] - 9s 164us/sample - loss: 0.2028 - acc: 0.9226 - val_loss: 0.2163 - val_acc: 0.9208
Epoch 11/20
55000/55000 [==============================] - 9s 164us/sample - loss: 0.2014 - acc: 0.9228 - val_loss: 0.2114 - val_acc: 0.9196
Epoch 12/20
55000/55000 [==============================] - 9s 164us/sample - loss: 0.1943 - acc: 0.9271 - val_loss: 0.2094 - val_acc: 0.9244
Epoch 13/20
55000/55000 [==============================] - 9s 164us/sample - loss: 0.1854 - acc: 0.9282 - val_loss: 0.2132 - val_acc: 0.9216
Epoch 14/20
55000/55000 [==============================] - 9s 164us/sample - loss: 0.1811 - acc: 0.9312 - val_loss: 0.2205 - val_acc: 0.9144
Epoch 15/20
55000/55000 [==============================] - 9s 163us/sample - loss: 0.1745 - acc: 0.9323 - val_loss: 0.2197 - val_acc: 0.9212
Epoch 16/20
55000/55000 [==============================] - 9s 165us/sample - loss: 0.1700 - acc: 0.9356 - val_loss: 0.2233 - val_acc: 0.9188
Epoch 17/20
55000/55000 [==============================] - 9s 163us/sample - loss: 0.1652 - acc: 0.9359 - val_loss: 0.2164 - val_acc: 0.9240
Epoch 18/20
55000/55000 [==============================] - 9s 163us/sample - loss: 0.1596 - acc: 0.9390 - val_loss: 0.2099 - val_acc: 0.9224
Epoch 19/20
55000/55000 [==============================] - 9s 164us/sample - loss: 0.1596 - acc: 0.9391 - val_loss: 0.2052 - val_acc: 0.9270
Epoch 20/20
55000/55000 [==============================] - 9s 166us/sample - loss: 0.1586 - acc: 0.9395 - val_loss: 0.2287 - val_acc: 0.9242
10000/10000 [==============================] - 1s 70us/sample - loss: 98.0071 - acc: 0.7226
[98.00711212615967, 0.7226]
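The test loss of 98.0 and accuracy of 0.7226, far below the roughly 0.92 validation accuracy, are not a modeling failure: the network was trained on inputs scaled to [0, 1], but x_test was never divided by 255, so evaluate() runs on unnormalized images. Scaling x_test by 255 as well brings the test metrics back in line with the validation metrics.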
# Visualize the results
acc = history.history.get('acc')
val_acc = history.history.get('val_acc')
loss = history.history.get('loss')
val_loss = history.history.get('val_loss')
epochs = range(1, len(acc)+1)
plt.figure(figsize=(8,4), dpi=100)
plt.subplot(1, 2, 1)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(epochs, loss, 'ro', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.legend()