(Output: the digit 5)
Dataset:
Reference blog:
Goal:
Building the convolutional layers:
CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED
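Before writing any TensorFlow, it helps to trace the spatial dimensions of a 64x64x3 SIGNS image through this stack. A minimal sketch, assuming the filter and pooling sizes used in the code below (SAME padding throughout, so the output size along one dimension is ceil(n / stride)):

import math

def same_out(n, stride):
    # Output size along one spatial dimension for SAME padding
    return math.ceil(n / stride)

h = w = 64
h, w = same_out(h, 1), same_out(w, 1)   # CONV2D 4x4, 8 filters, stride 1  -> 64x64x8
h, w = same_out(h, 8), same_out(w, 8)   # MAXPOOL 8x8, stride 8            -> 8x8x8
h, w = same_out(h, 1), same_out(w, 1)   # CONV2D 2x2, 16 filters, stride 1 -> 8x8x16
h, w = same_out(h, 4), same_out(w, 4)   # MAXPOOL 4x4, stride 4            -> 2x2x16
print(h * w * 16)                       # FLATTEN -> 64 features, then FULLYCONNECTED -> 6 logits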
import math
import time
import numpy as np
import h5py
import matplotlib.pyplot as plt
import tensorflow as tf
# The tf.compat.v1 placeholder/Session API used below requires graph mode under TensorFlow 2.x
tf.compat.v1.disable_eager_execution()

# Convert integer labels to one-hot encoding
def convert_to_one_hot(Y, C):
    Y = np.eye(C)[Y.reshape(-1)].T
    return Y

# Load and preprocess the data
def load_dataset():
    train_dataset = h5py.File('train_signs.h5', "r")
    train_set_x_orig = np.array(train_dataset["train_set_x"][:])  # train set features
    train_set_y_orig = np.array(train_dataset["train_set_y"][:])  # train set labels
    test_dataset = h5py.File('test_signs.h5', "r")
    test_set_x_orig = np.array(test_dataset["test_set_x"][:])  # test set features
    test_set_y_orig = np.array(test_dataset["test_set_y"][:])  # test set labels
    classes = np.array(test_dataset["list_classes"][:])  # the list of classes
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
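As a quick illustration (a toy check, not part of the dataset pipeline), convert_to_one_hot turns a row vector of integer labels into a (C, m) matrix with one column per example:

y_toy = np.array([[1, 0, 3]])
print(convert_to_one_hot(y_toy, 4))
# [[0. 1. 0.]
#  [1. 0. 0.]
#  [0. 0. 0.]
#  [0. 0. 1.]]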
# Load the data
train_x_org, train_y, test_x_org, test_y, classes = load_dataset()
# Normalize pixel values to [0, 1]
train_x_org, test_x_org = train_x_org/255, test_x_org/255
plt.imshow(train_x_org[7])
# Convert labels to one-hot
train_y, test_y = convert_to_one_hot(train_y, 6), convert_to_one_hot(test_y, 6)
print('Shape of the original training set:', train_x_org.shape)
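For reference, the shapes after preprocessing should look roughly as follows (the standard SIGNS dataset has 1080 training and 120 test examples of size 64x64x3; verify against your own copy). Note that the one-hot labels have one column per example, which is why they are transposed before being fed to the model later:

print(train_x_org.shape)  # expected (1080, 64, 64, 3)
print(train_y.shape)      # expected (6, 1080) -- one column per example
print(test_x_org.shape)   # expected (120, 64, 64, 3)
print(test_y.shape)       # expected (6, 120)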
def create_placeholders(n_h, n_w, n_c, C):
    '''
    n_h: height of the input images
    n_w: width of the input images
    n_c: number of channels of the input images
    C: number of classes
    '''
    X = tf.compat.v1.placeholder(tf.float32, [None, n_h, n_w, n_c])
    Y = tf.compat.v1.placeholder(tf.float32, [None, C])
    return X, Y
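A minimal usage sketch (the names X_demo and Y_demo are only for illustration): with 64x64x3 inputs and 6 classes, the leading None dimension lets the same graph accept mini-batches of any size.

X_demo, Y_demo = create_placeholders(64, 64, 3, 6)
print(X_demo.shape)  # (None, 64, 64, 3)
print(Y_demo.shape)  # (None, 6)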
def initialize_parameters():
    '''
    W1: filters of the first convolutional layer
    W2: filters of the second convolutional layer
    '''
    # Use Xavier (Glorot) initialization
    initializer = tf.keras.initializers.glorot_normal()
    W1 = tf.compat.v1.Variable(initializer([4, 4, 3, 8]))
    W2 = tf.compat.v1.Variable(initializer([2, 2, 8, 16]))
    parameters = {
        'W1': W1,
        'W2': W2
    }
    return parameters
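The filter shapes follow tf.nn.conv2d's convention of [filter_height, filter_width, in_channels, out_channels]; a quick check (illustrative only):

parameters = initialize_parameters()
print(parameters['W1'].shape)  # (4, 4, 3, 8)  -- 8 filters of size 4x4 over 3 input channels
print(parameters['W2'].shape)  # (2, 2, 8, 16) -- 16 filters of size 2x2 over the 8 feature maps from layer 1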
def forward_propagation(X, parameters):
    '''
    CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED
    '''
    W1, W2 = parameters['W1'], parameters['W2']
    # First convolution, SAME padding
    Z1 = tf.nn.conv2d(X, W1, strides=[1, 1, 1, 1], padding="SAME")
    # Apply the activation function
    A1 = tf.nn.relu(Z1)
    # Max pooling
    P1 = tf.nn.max_pool(A1, ksize=[1, 8, 8, 1], strides=[1, 8, 8, 1], padding="SAME")
    # Second convolution, SAME padding
    Z2 = tf.nn.conv2d(P1, W2, strides=[1, 1, 1, 1], padding="SAME")
    # Apply the activation function
    A2 = tf.nn.relu(Z2)
    # Max pooling
    P2 = tf.nn.max_pool(A2, ksize=[1, 4, 4, 1], strides=[1, 4, 4, 1], padding="SAME")
    # Flatten the convolutional output
    P = tf.compat.v1.layers.flatten(P2)
    # Final fully connected layer (6 units, no activation: these are the logits)
    Z3 = tf.compat.v1.layers.dense(P, 6)
    return Z3
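Since everything runs in graph mode, a cheap sanity check is to build the graph once and inspect the static shape of the logits (a sketch; the _chk names are only for illustration):

X_chk, Y_chk = create_placeholders(64, 64, 3, 6)
Z3_chk = forward_propagation(X_chk, initialize_parameters())
print(Z3_chk.shape)  # expected (None, 6): one logit per class for each example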
def costfunction(Z3, Y):
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=Z3, labels=Y))
    return cost
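As a tiny numeric illustration of what softmax cross-entropy computes per example (values chosen arbitrarily):

z = np.array([2.0, 1.0, 0.1])      # logits for one example
y = np.array([1.0, 0.0, 0.0])      # one-hot label
p = np.exp(z) / np.sum(np.exp(z))  # softmax probabilities
print(-np.sum(y * np.log(p)))      # about 0.417; the cost above averages this over the batch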
def random_mini_batches(X, Y, mini_batch_size=64):
    """
    Creates a list of random mini-batches from (X, Y)
    Arguments:
    X -- input data, of shape (m, n_H, n_W, n_C)
    Y -- one-hot label matrix, of shape (m, n_y)
    mini_batch_size -- size of the mini-batches, integer
    Returns:
    mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
    """
    m = X.shape[0]  # number of training examples
    mini_batches = []
    # Step 1: Shuffle (X, Y)
    permutation = list(np.random.permutation(m))
    shuffled_X = X[permutation, :, :, :]
    shuffled_Y = Y[permutation, :]
    # Step 2: Partition (shuffled_X, shuffled_Y), minus the end case
    num_complete_minibatches = math.floor(m / mini_batch_size)  # number of mini-batches of size mini_batch_size in the partition
    for k in range(0, num_complete_minibatches):
        mini_batch_X = shuffled_X[k * mini_batch_size : (k + 1) * mini_batch_size, :, :, :]
        mini_batch_Y = shuffled_Y[k * mini_batch_size : (k + 1) * mini_batch_size, :]
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)
    # Handle the end case (last mini-batch < mini_batch_size)
    if m % mini_batch_size != 0:
        mini_batch_X = shuffled_X[num_complete_minibatches * mini_batch_size : m, :, :, :]
        mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size : m, :]
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)
    return mini_batches
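A quick check of random_mini_batches on toy arrays (shapes chosen arbitrarily, only for illustration): 10 examples with a batch size of 4 should give two full batches and one final batch of 2.

X_toy = np.random.rand(10, 64, 64, 3)
Y_toy = np.random.rand(10, 6)
for bx, by in random_mini_batches(X_toy, Y_toy, mini_batch_size=4):
    print(bx.shape, by.shape)  # (4, 64, 64, 3) (4, 6) twice, then (2, 64, 64, 3) (2, 6)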
def conv_model(X_train, Y_train, X_test, Y_test, learning_rate=0.009, epochs=100, mini_batch_size=64):
    tf.random.set_seed(1)
    # Get the input dimensions
    m, n_h0, n_w0, n_c0 = X_train.shape
    # Number of classes
    C = Y_train.shape[1]
    costs = []
    # Create placeholders for the inputs and labels
    X, Y = create_placeholders(n_h0, n_w0, n_c0, C)
    # Initialize the filter variables
    parameters = initialize_parameters()
    # Forward propagation
    Z3 = forward_propagation(X, parameters)
    cost = costfunction(Z3, Y)
    # Create the optimizer (this performs the gradient-descent step)
    optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
    # Initialize all variables
    init = tf.compat.v1.global_variables_initializer()
    with tf.compat.v1.Session() as sess:
        sess.run(init)
        for epoch in range(epochs):
            epoch_cost = 0
            mini_batch_num = m // mini_batch_size
            mini_batchs = random_mini_batches(X_train, Y_train, mini_batch_size)
            for mini in mini_batchs:
                (mini_x, mini_y) = mini
                # Run the optimizer / one gradient-descent step
                _, mini_batch_cost = sess.run([optimizer, cost], feed_dict={X: mini_x, Y: mini_y})
                epoch_cost = epoch_cost + mini_batch_cost / mini_batch_num
            if epoch % 5 == 0:
                costs.append(epoch_cost)
                print("epoch = " + str(epoch) + " epoch_cost = " + str(epoch_cost))
        plt.plot(costs)
        plt.ylabel('cost')
        plt.xlabel('epochs (per 5)')
        plt.show()
        # Fetch the trained parameters from the session
        parameters = sess.run(parameters)
        # Boolean mask of correct predictions
        correct_prediction = tf.equal(tf.argmax(Z3, axis=1), tf.argmax(Y, axis=1))
        accuracy = tf.compat.v1.reduce_mean(tf.cast(correct_prediction, "float"))
        print("Training set accuracy:", accuracy.eval({X: X_train, Y: Y_train}))
        print("Test set accuracy:", accuracy.eval({X: X_test, Y: Y_test}))
    return parameters
start_time = time.perf_counter()
parameters = conv_model(train_x_org, train_y.T, test_x_org, test_y.T, learning_rate=0.007, epochs=100, mini_batch_size=64)
end_time = time.perf_counter()
print("CPU execution time = " + str(end_time - start_time) + " seconds")
epoch = 0 epoch_cost = 1.9171459674835205
epoch = 5 epoch_cost = 1.5569179132580757
epoch = 10 epoch_cost = 1.094716988503933
epoch = 15 epoch_cost = 0.8536306358873844
epoch = 20 epoch_cost = 0.6616563107818365
epoch = 25 epoch_cost = 0.564769646152854
epoch = 30 epoch_cost = 0.4631323553621769
epoch = 35 epoch_cost = 0.4017002824693918
epoch = 40 epoch_cost = 0.3510632552206516
epoch = 45 epoch_cost = 0.31469378992915154
epoch = 50 epoch_cost = 0.2672974271699786
epoch = 55 epoch_cost = 0.25333060417324305
epoch = 60 epoch_cost = 0.23826426221057773
epoch = 65 epoch_cost = 0.18890069890767336
epoch = 70 epoch_cost = 0.18604624597355723
epoch = 75 epoch_cost = 0.2148781386204064
epoch = 80 epoch_cost = 0.1592756942845881
epoch = 85 epoch_cost = 0.13289345195516944
epoch = 90 epoch_cost = 0.16014830861240625
epoch = 95 epoch_cost = 0.1574413264170289
Training set accuracy: 0.9685185
Test set accuracy: 0.89166665
CPU execution time = 58.456597299999885 seconds