The details are not spelled out again here; we learn by example.
tf.reshape(tensor, shape, name=None)
Reshaping a tensor is a common operation, and TensorFlow offers several ways to call it, for example:
1. tf.reshape
tf.reshape(L3, [-1, W4.get_shape().as_list()[0]])
2. object.reshape
mnist.test.images.reshape(-1, 28, 28, 1)
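Both call styles produce the same values: tf.reshape builds a graph op that accepts any tensor-like input, while .reshape above is the NumPy ndarray method (mnist.test.images is an ndarray). A minimal sketch of the equivalence:
import tensorflow as tf
import numpy as np
a = np.arange(12)                          # shape (12,)
b = a.reshape(-1, 3)                       # NumPy method: shape (4, 3)
c = tf.reshape(a, [4, 3])                  # TensorFlow op: Tensor of shape (4, 3)
with tf.Session() as sess:
    print(np.array_equal(b, sess.run(c)))  # True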
Example:
import tensorflow as tf

# tensor 't1' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
# tensor 't1' has shape [9]
t1 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
print('t:', t1)
print(tf.reshape(t1, [3, 3]))
with tf.Session() as sess:
    print(sess.run(tf.reshape(t1, [3, 3])))
print('----------------------')

# tensor 't2' is [[[1, 1], [2, 2]],
#                 [[3, 3], [4, 4]]]
# tensor 't2' has shape [2, 2, 2]
t2 = [[[1, 1], [2, 2]],
      [[3, 3], [4, 4]]]
print('t:', t2)
print(tf.reshape(t2, [2, 4]))
with tf.Session() as sess:
    print(sess.run(tf.reshape(t2, [2, 4])))
print('----------------------')

# tensor 't3' is [[[1, 1, 1], [2, 2, 2]],
#                 [[3, 3, 3], [4, 4, 4]],
#                 [[5, 5, 5], [6, 6, 6]]]
# tensor 't3' has shape [3, 2, 3]
# pass '[-1]' to flatten 't3'
t3 = [[[1, 1, 1],
       [2, 2, 2]],
      [[3, 3, 3],
       [4, 4, 4]],
      [[5, 5, 5],
       [6, 6, 6]]]
print('t:', t3)
print(tf.reshape(t3, [-1]))
with tf.Session() as sess:
    print(sess.run(tf.reshape(t3, [-1])), '\n')
    # -1 can also be used to infer the shape
    # -1 is inferred to be 9:
    print(sess.run(tf.reshape(t3, [2, -1])), '\n')
    # -1 is inferred to be 2:
    print(sess.run(tf.reshape(t3, [-1, 9])), '\n')
    # -1 is inferred to be 3:
    print(sess.run(tf.reshape(t3, [2, -1, 3])), '\n')
    print(sess.run(tf.reshape(t3, [-1, 3, 2, 3])))
Output:
t: [1, 2, 3, 4, 5, 6, 7, 8, 9]
Tensor("Reshape_47:0", shape=(3, 3), dtype=int32)
[[1 2 3]
 [4 5 6]
 [7 8 9]]
----------------------
t: [[[1, 1], [2, 2]], [[3, 3], [4, 4]]]
Tensor("Reshape_49:0", shape=(2, 4), dtype=int32)
[[1 1 2 2]
 [3 3 4 4]]
----------------------
t: [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]], [[5, 5, 5], [6, 6, 6]]]
Tensor("Reshape_51:0", shape=(18,), dtype=int32)
[1 1 1 2 2 2 3 3 3 4 4 4 5 5 5 6 6 6]
[[1 1 1 2 2 2 3 3 3]
 [4 4 4 5 5 5 6 6 6]]
[[1 1 1 2 2 2 3 3 3]
 [4 4 4 5 5 5 6 6 6]]
[[[1 1 1]
  [2 2 2]
  [3 3 3]]
 [[4 4 4]
  [5 5 5]
  [6 6 6]]]
[[[[1 1 1]
   [2 2 2]]
  [[3 3 3]
   [4 4 4]]
  [[5 5 5]
   [6 6 6]]]]
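One caveat worth stating: a reshape must preserve the total element count, and at most one dimension of the target shape may be -1. A minimal sketch of the failure case (reusing the 9-element list from above):
import tensorflow as tf
t1 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
try:
    tf.reshape(t1, [2, 4])  # 9 elements cannot fill a 2x4 shape
except ValueError as e:
    print(e)  # the mismatch is caught at graph-construction time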
import tensorflow as tf
import numpy as np

# Train the model
def train_model():
    # Fabricate some data
    x_data = np.random.rand(100).astype(np.float32)
    print('x_data:', x_data)
    y_data = x_data * 0.1 + 0.2
    print('y_data:', y_data)
    # Define the weights
    W = tf.Variable(tf.random_uniform([1], -20.0, 20.0), dtype=tf.float32, name='w')
    b = tf.Variable(tf.random_uniform([1], -10.0, 10.0), dtype=tf.float32, name='b')
    # Compute the linear output
    y = W * x_data + b
    # Define the loss function
    loss = tf.reduce_mean(tf.square(y - y_data))
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
    # Save the model: max_to_keep=4 keeps only the 4 most recent checkpoints
    saver = tf.train.Saver(max_to_keep=4)
    # Define the session and train the model
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print("------------------------------------------------------")
        print("before the train, the W is %6f, the b is %6f" % (sess.run(W), sess.run(b)))
        for epoch in range(300):
            if epoch % 10 == 0:
                print("------------------------------------------------------")
                print("after epoch %d, the loss is %6f" % (epoch, sess.run(loss)))
                print("the W is %f, the b is %f" % (sess.run(W), sess.run(b)))
                saver.save(sess, "model/my-model", global_step=epoch)
                print("save the model")
            sess.run(train_step)
        print("------------------------------------------------------")

# Load the model
def load_model():
    with tf.Session() as sess:
        # import_meta_graph takes the name of the .meta file
        saver = tf.train.import_meta_graph('model/my-model-290.meta')
        # latest_checkpoint reads the checkpoint file, so only the directory
        # is needed, not the checkpoint file itself
        saver.restore(sess, tf.train.latest_checkpoint("model"))
        # saver.restore(sess, tf.train.latest_checkpoint("model/checkpoint"))
        print(sess.run('w:0'))
        print(sess.run('b:0'))

# Train the model
#train_model()
# Load the model
load_model()
Output:
INFO:tensorflow:Restoring parameters from model/my-model-290
[0.09999993]
[0.20000005]
The output above omits the training run. In practice you should train first and save the model, then load it and evaluate on test data; since the data here is randomly generated, do not read anything into the accuracy.
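As an aside on what saver.save actually writes (a sketch using the paths from the run above): each checkpoint is a set of my-model-N.* files plus a text file named checkpoint that records the most recent prefix, which is why tf.train.latest_checkpoint is given only the directory:
import tensorflow as tf
print(tf.train.latest_checkpoint("model"))  # e.g. model/my-model-290
ckpt = tf.train.get_checkpoint_state("model")
print(ckpt.all_model_checkpoint_paths)      # the up-to-4 checkpoints kept by max_to_keep=4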
A few points to note:
If you only want to save or restore some of the variables, pass a list when constructing the Saver (see the sketch below):
w1 = tf.Variable(tf.random_normal(shape=[2]), name='w1')
w2 = tf.Variable(tf.random_normal(shape=[5]), name='w2')
saver = tf.train.Saver([w1,w2])
Also, fetching a tensor by the string 'w:0' in load_model() above only works because the variable was created with name='w'.
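A minimal sketch of such a partial save (the model_subset/ directory is an assumption and must already exist): only the variables handed to the Saver end up in the checkpoint, so restoring any other variable from it would fail.
import tensorflow as tf
w1 = tf.Variable(tf.random_normal(shape=[2]), name='w1')
w2 = tf.Variable(tf.random_normal(shape=[5]), name='w2')
w3 = tf.Variable(tf.zeros([3]), name='w3')     # not handed to the Saver
saver = tf.train.Saver([w1, w2])
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.save(sess, "model_subset/my-model")  # the checkpoint holds only w1 and w2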
The following defines a simple convolutional neural network: two convolutional layers, two pooling layers, and two fully connected layers. The data loaded is meaningless: it simulates 10 RGB images of size 32x32 with 4 classes (0, 1, 2, 3). The point here is to learn how to save and load a model, so do not worry about where the data comes from or about the accuracy.
import tensorflow as tf
import numpy as np
import os

# Build the training set to load (fabricated data)
def load_data(resultpath):
    datapath = os.path.join(resultpath, "data10_4.npz")
    # If the data already exists, load it
    if os.path.exists(datapath):
        data = np.load(datapath)
        # Note how the arrays are extracted
        X, Y = data["X"], data["Y"]
    else:
        # The data is meaningless: it simulates 10 RGB images of size 32x32,
        # with 4 classes: 0, 1, 2, 3
        # Reshape the 30720 numbers into a 10x32x32x3 tensor
        X = np.array(np.arange(30720)).reshape(10, 32, 32, 3)
        Y = [0, 0, 1, 1, 2, 2, 3, 3, 2, 0]
        X = X.astype('float32')
        Y = np.array(Y)
        # Save the data in .npz format
        np.savez(datapath, X=X, Y=Y)
        print('Saved dataset to dataset.npz')
    # A handy way to print shapes
    print('X_shape:{}\nY_shape:{}'.format(X.shape, Y.shape))
    return X, Y

# Build the convolutional network: two convolutional layers, two pooling
# layers, and two fully connected layers
def define_model(x):
    x_image = tf.reshape(x, [-1, 32, 32, 3])
    print('x_image.shape:', x_image.shape)

    def weight_variable(shape):
        initial = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(initial, name="w")

    def bias_variable(shape):
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial, name="b")

    def conv2d(x, W):
        return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

    def max_pool_2d(x):
        return tf.nn.max_pool(x, ksize=[1, 3, 3, 1], strides=[1, 3, 3, 1], padding='SAME')

    with tf.variable_scope("conv1"):  # [-1,32,32,3]
        weights = weight_variable([3, 3, 3, 32])
        biases = bias_variable([32])
        conv1 = tf.nn.relu(conv2d(x_image, weights) + biases)
        pool1 = max_pool_2d(conv1)  # [-1,11,11,32]
    with tf.variable_scope("conv2"):
        weights = weight_variable([3, 3, 32, 64])
        biases = bias_variable([64])
        conv2 = tf.nn.relu(conv2d(pool1, weights) + biases)
        pool2 = max_pool_2d(conv2)  # [-1,4,4,64]
    with tf.variable_scope("fc1"):
        weights = weight_variable([4 * 4 * 64, 128])  # [-1,1024]
        biases = bias_variable([128])
        fc1_flat = tf.reshape(pool2, [-1, 4 * 4 * 64])
        fc1 = tf.nn.relu(tf.matmul(fc1_flat, weights) + biases)
        fc1_drop = tf.nn.dropout(fc1, 0.5)  # [-1,128]
    with tf.variable_scope("fc2"):
        weights = weight_variable([128, 4])
        biases = bias_variable([4])
        fc2 = tf.matmul(fc1_drop, weights) + biases  # [-1,4]
    return fc2

# Train the model
def train_model():
    # Placeholders for the training data
    x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3], name="x")
    y_ = tf.placeholder('int64', shape=[None], name="y_")
    # Learning rate
    initial_learning_rate = 0.001
    # Define the network structure; the forward pass gives the predicted output
    y_fc2 = define_model(x)
    # One-hot labels for the training set
    y_label = tf.one_hot(y_, 4, name="y_labels")
    # Define the loss function
    loss_temp = tf.losses.softmax_cross_entropy(onehot_labels=y_label, logits=y_fc2)
    cross_entropy_loss = tf.reduce_mean(loss_temp)
    # Optimizer used during training
    train_step = tf.train.AdamOptimizer(learning_rate=initial_learning_rate, beta1=0.9, beta2=0.999,
                                        epsilon=1e-08).minimize(cross_entropy_loss)
    # True where prediction and label agree, False otherwise
    correct_prediction = tf.equal(tf.argmax(y_fc2, 1), tf.argmax(y_label, 1))
    # Cast correct_prediction to tf.float32 before averaging
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # Save the model, keeping at most 4 checkpoints
    saver = tf.train.Saver(max_to_keep=4)
    # Add the prediction to the "predict" collection
    tf.add_to_collection("predict", y_fc2)
    tf.add_to_collection("acc", accuracy)
    # Define the session
    with tf.Session() as sess:
        # Initialize all variables
        sess.run(tf.global_variables_initializer())
        print("------------------------------------------------------")
        # Load the training data; it is fabricated, since the goal is to learn
        # how to save/load a model
        X, Y = load_data("model1/")  # this directory must be created beforehand
        X = np.multiply(X, 1.0 / 255.0)
        for epoch in range(200):
            if epoch % 10 == 0:
                print("------------------------------------------------------")
                train_accuracy = accuracy.eval(feed_dict={x: X, y_: Y})
                train_loss = cross_entropy_loss.eval(feed_dict={x: X, y_: Y})
                print("after epoch %d, the loss is %6f" % (epoch, train_loss))
                # The accuracy here is computed over the whole training set
                print("after epoch %d, the acc is %6f" % (epoch, train_accuracy))
                saver.save(sess, "model1/my-model", global_step=epoch)
                print("save the model")
            train_step.run(feed_dict={x: X, y_: Y})
        print("------------------------------------------------------")

# Load the model
def load_model():
    # Fabricated test data: simulates 2 RGB images of size 32x32
    X = np.array(np.arange(6144, 12288)).reshape(2, 32, 32, 3)
    Y = [3, 1]
    Y = np.array(Y)
    X = X.astype('float32')
    X = np.multiply(X, 1.0 / 255.0)
    with tf.Session() as sess:
        # Load the meta graph and the weights
        saver = tf.train.import_meta_graph('model1/my-model-190.meta')
        saver.restore(sess, tf.train.latest_checkpoint("model1/"))
        # Fetch the weights
        graph = tf.get_default_graph()
        fc2_w = graph.get_tensor_by_name("fc2/w:0")
        fc2_b = graph.get_tensor_by_name("fc2/b:0")
        print("------------------------------------------------------")
        print('fc2_w:', sess.run(fc2_w))
        print("#######################################")
        print('fc2_b:', sess.run(fc2_b))
        print("------------------------------------------------------")
        #input_x = graph.get_operation_by_name("x").outputs[0]
        # Predicted output
        feed_dict = {"x:0": X, "y_:0": Y}
        y = graph.get_tensor_by_name("y_labels:0")
        yy = sess.run(y, feed_dict)
        print('yy:', yy)
        print("the answer is: ", sess.run(tf.argmax(yy, 1)))
        print("------------------------------------------------------")
        pred_y = tf.get_collection("predict")
        print('i am here..1')
        pred = sess.run(pred_y, feed_dict)[0]
        print('pred:', pred, '\n')
        pred = sess.run(tf.argmax(pred, 1))
        print("the predict is: ", pred)
        print("------------------------------------------------------")
        acc = tf.get_collection("acc")
        #acc = graph.get_operation_by_name("acc")
        acc = sess.run(acc, feed_dict)
        #print(acc.eval())
        print("the accuracy is: ", acc)
        print("------------------------------------------------------")

# Train the model
train_model()
# Load the model
load_model()
Note that, following the order above, you should train first and only then load the trained model for testing.
Training output (run on its own):
x_image.shape: (?, 32, 32, 3)
------------------------------------------------------
X_shape:(10, 32, 32, 3)
Y_shape:(10,)
------------------------------------------------------
after epoch 0, the loss is 37.972336
after epoch 0, the acc is 0.200000
save the model
------------------------------------------------------
after epoch 10, the loss is 55.470387
after epoch 10, the acc is 0.100000
save the model
------------------------------------------------------
after epoch 20, the loss is 17.129293
after epoch 20, the acc is 0.200000
save the model
------------------------------------------------------
after epoch 30, the loss is 15.748987
after epoch 30, the acc is 0.300000
save the model
------------------------------------------------------
after epoch 40, the loss is 4.500556
after epoch 40, the acc is 0.300000
save the model
------------------------------------------------------
after epoch 50, the loss is 2.675602
after epoch 50, the acc is 0.200000
save the model
------------------------------------------------------
after epoch 60, the loss is 2.377462
after epoch 60, the acc is 0.500000
save the model
------------------------------------------------------
after epoch 70, the loss is 1.419432
after epoch 70, the acc is 0.500000
save the model
------------------------------------------------------
...
...
...
after epoch 130, the loss is 1.356822
after epoch 130, the acc is 0.500000
save the model
------------------------------------------------------
after epoch 140, the loss is 1.361622
after epoch 140, the acc is 0.200000
save the model
------------------------------------------------------
after epoch 150, the loss is 1.204934
after epoch 150, the acc is 0.300000
save the model
------------------------------------------------------
after epoch 160, the loss is 1.273999
after epoch 160, the acc is 0.300000
save the model
------------------------------------------------------
after epoch 170, the loss is 1.213519
after epoch 170, the acc is 0.400000
save the model
------------------------------------------------------
after epoch 180, the loss is 1.276478
after epoch 180, the acc is 0.300000
save the model
------------------------------------------------------
after epoch 190, the loss is 1.162433
after epoch 190, the acc is 0.300000
save the model
------------------------------------------------------
Test output (run on its own):
INFO:tensorflow:Restoring parameters from model1/my-model-190
------------------------------------------------------
fc2_w: [[ 0.09413899 -0.07282051 0.02397597 0.05508222]
[-0.05514605 -0.03894351 -0.0548727 -0.02125386]
[ 0.06236398 -0.00028329 0.13300249 0.06448492]
[-0.0921673 0.00342558 0.10539673 -0.02442357]
[-0.04699677 0.11520271 -0.04514726 -0.13220425]
...
...
...
[ 0.08583067 -0.06123111 0.10699942 0.03429044]
[-0.05737718 0.0714161 -0.04370898 -0.0397063 ]
[ 0.00849419 -0.04352335 0.01004444 0.03862172]]
#######################################
fc2_b: [0.12246324 0.11658503 0.10220832 0.06499074]
------------------------------------------------------
yy: [[0. 0. 0. 1.]
[0. 1. 0. 0.]]
the answer is: [3 1]
------------------------------------------------------
i am here..1
pred: [[ 0.6232525 0.18511544 0.08325944 -0.4809047 ]
[ 0.12246324 0.11658503 0.10220832 0.06499074]]
the predict is: [0 0]
------------------------------------------------------
the accuracy is: [0.0, 0.0]
------------------------------------------------------
When training a model with gradient descent, a shadow variable is maintained for each weight; as training proceeds, the shadow variable settles near the true weight's value. At prediction time, substituting the shadow values for the true values can then give better results. The moving-average model only helps on top of gradient descent; other optimizers do not show this effect, and there is as yet no good explanation for that. Among the many optimization tricks available, this one is an effective way to improve robustness.
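A minimal numeric sketch of that shadow-variable update (the rule tf.train.ExponentialMovingAverage applies is shadow = decay * shadow + (1 - decay) * variable):
decay = 0.99
shadow, variable = 0.0, 10.0
for step in range(5):
    shadow = decay * shadow + (1 - decay) * variable
    print(step, round(shadow, 4))  # creeps from 0.1 toward 10.0
When a num_updates argument is passed, the decay actually used is min(decay, (1 + num_updates) / (10 + num_updates)), which lets the average move faster early in training.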
# Mind the IDE's current working directory when running these parts; it is best to restart the console once per part so the output stays accurate
# Part1: save and load a neural network model via the tf.train.Saver class
# Mind the current working directory when running this part
import tensorflow as tf
v1 = tf.Variable(tf.constant(1.0, shape=[1]), name="v1")
v2 = tf.Variable(tf.constant(2.0, shape=[1]), name="v2")
result = v1 + v2
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.save(sess, "Model/model.ckpt")
# Part3: to avoid redefining the ops of the computation graph, load the graph that was already persisted
import tensorflow as tf
saver = tf.train.import_meta_graph("Model/model.ckpt.meta")
graph = tf.get_default_graph()
with tf.Session() as sess:
    saver.restore(sess, "./Model/model.ckpt")  # mind how the path is written
    print(sess.run(graph.get_tensor_by_name("add:0")))  # [ 3.]
# Part4: the tf.train.Saver class also supports renaming variables on save and load
import tensorflow as tf
# The declared variable names differ from those in the saved model
u1 = tf.Variable(tf.constant(1.0, shape=[1]), name="other-v1")
u2 = tf.Variable(tf.constant(2.0, shape=[1]), name="other-v2")
result = u1 + u2
# Declaring a plain Saver object here would fail with "variable not found"
# Rename the variables with a dict instead: {"name in the saved model": variable to load into}
# The variable originally named v1 is loaded into variable u1 (named other-v1)
saver = tf.train.Saver({"v1": u1, "v2": u2})
with tf.Session() as sess:
    saver.restore(sess, "./Model/model.ckpt")
    print(sess.run(result))  # [ 3.]
#INFO:tensorflow:Restoring parameters from ./Model/model.ckpt
#[3.]
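For contrast, a hedged sketch (not from the original run) of what the comment above means: with a plain tf.train.Saver(), the restore would be expected to fail, since the checkpoint stores v1/v2 while this graph declares other-v1/other-v2.
# saver = tf.train.Saver()
# saver.restore(sess, "./Model/model.ckpt")
# expected failure: a NotFoundError along the lines of "Key other-v1 not found in checkpoint"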
# Part5: save a moving-average model
import tensorflow as tf
v = tf.Variable(0, dtype=tf.float32, name="v")
for variables in tf.global_variables():
    print(variables.name)  # v:0
print('...........')
ema = tf.train.ExponentialMovingAverage(0.99)
maintain_averages_op = ema.apply(tf.global_variables())
for variables in tf.global_variables():
    print(variables.name)  # v:0
                           # v/ExponentialMovingAverage:0
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tf.assign(v, 10))
    sess.run(maintain_averages_op)
    saver.save(sess, "Model/model_ema.ckpt")
    # shadow = 0.99 * 0 + 0.01 * 10 = 0.1, hence the value below
    print('here..1:', sess.run([v, ema.average(v)]))  # [10.0, 0.099999905]
#v:0
#...........
#v:0
#v/ExponentialMovingAverage:0
#here..1: [10.0, 0.099999905]
# Part6: read a variable's moving average directly via variable renaming
import tensorflow as tf
v = tf.Variable(0, dtype=tf.float32, name="v")
# {"name in the saved model": variable to load into}
saver = tf.train.Saver({"v/ExponentialMovingAverage": v})
with tf.Session() as sess:
    saver.restore(sess, "./Model/model_ema.ckpt")
    print('here..2:', sess.run(v))  # 0.0999999
# INFO:tensorflow:Restoring parameters from ./Model/model_ema.ckpt
# here..2: 0.099999905
# Part7: get the renaming dict from the variables_to_restore() function of tf.train.ExponentialMovingAverage
import tensorflow as tf
v = tf.Variable(0, dtype=tf.float32, name="v")
# Note: the variable name here must match the saved variable's name
ema = tf.train.ExponentialMovingAverage(0.99)
print(ema.variables_to_restore())
# {'v/ExponentialMovingAverage': }
# The v here comes from name="v" of the variable above
saver = tf.train.Saver(ema.variables_to_restore())
with tf.Session() as sess:
    saver.restore(sess, "./Model/model_ema.ckpt")
    print(sess.run(v))  # 0.0999999
#{'v/ExponentialMovingAverage': }
#INFO:tensorflow:Restoring parameters from ./Model/model_ema.ckpt
#0.099999905
# Part8: use convert_variables_to_constants to save the graph's variables and their values as constants in a single file
import tensorflow as tf
from tensorflow.python.framework import graph_util
v1 = tf.Variable(tf.constant(1.0, shape=[1]), name="v1")
v2 = tf.Variable(tf.constant(2.0, shape=[1]), name="v2")
result = v1 + v2
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Export the GraphDef part of the current graph, i.e. the computation from input to output
    graph_def = tf.get_default_graph().as_graph_def()
    # 'add' is the operation name (not a tensor name) of the output node to keep
    output_graph_def = graph_util.convert_variables_to_constants(sess, graph_def, ['add'])
    with tf.gfile.GFile("Model/combined_model.pb", 'wb') as f:
        f.write(output_graph_def.SerializeToString())
#INFO:tensorflow:Froze 2 variables.
#INFO:tensorflow:Converted 2 variables to const ops.
# Part9: load the model that carries the variables and their values
import tensorflow as tf
from tensorflow.python.platform import gfile
with tf.Session() as sess:
    model_filename = "Model/combined_model.pb"
    with gfile.FastGFile(model_filename, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    result = tf.import_graph_def(graph_def, return_elements=["add:0"])
    print(sess.run(result))
#[array([3.], dtype=float32)]
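A usage note on Part9 (a sketch assuming default arguments): tf.import_graph_def prefixes imported node names with import/ by default, so the same tensor can also be fetched by name afterwards:
import tensorflow as tf
from tensorflow.python.platform import gfile
with tf.Session() as sess:
    with gfile.FastGFile("Model/combined_model.pb", 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    tf.import_graph_def(graph_def)  # default name scope is "import"
    g = tf.get_default_graph()
    print(sess.run(g.get_tensor_by_name("import/add:0")))  # [3.]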
The tf.train.Saver class also supports renaming variables on save and load: when declaring the Saver object, pass a dict of the form {"name in the saved model": variable to load into}, e.g. saver = tf.train.Saver({"v1": u1, "v2": u2}). The variable originally named v1 is then loaded into variable u1 (whose name is other-v1).
The point of this is to make moving averages convenient to use. If, while loading a model, the shadow variables are mapped directly onto the variables themselves, then using the trained model requires no extra call to fetch the moving averages. On load, declare the Saver with a dict that maps the moving average straight into the new variable, saver = tf.train.Saver({"v/ExponentialMovingAverage": v}); alternatively, obtain the renaming dict from the variables_to_restore() function of tf.train.ExponentialMovingAverage.
Using an already pre-trained model for your own fine-tuning:
import tensorflow as tf
sess = tf.Session()
saver = tf.train.import_meta_graph('my_test_model-1000.meta')
saver.restore(sess, tf.train.latest_checkpoint('./'))
graph = tf.get_default_graph()
# pre-trained part
fc2 = graph.get_tensor_by_name("fc2/add:0")
fc2 = tf.stop_gradient(fc2)  # stop the gradient computation here
fc2_shape = fc2.get_shape().as_list()
# fine-tuning: add a new head with 6 output classes
new_nums = 6
weights = tf.Variable(tf.truncated_normal([fc2_shape[1], new_nums], stddev=0.1), name="w")
biases = tf.Variable(tf.constant(0.1, shape=[new_nums]), name="b")
fc3 = tf.matmul(fc2, weights) + biases
output2 = tf.nn.softmax(fc3)
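Since everything upstream of fc2 is frozen by tf.stop_gradient, only the new head gets trained. A minimal sketch of a training step for it (the label placeholder y6 and the learning rate are assumptions, not from the original):
y6 = tf.placeholder(tf.int64, shape=[None], name="y6")  # hypothetical label placeholder
loss = tf.losses.softmax_cross_entropy(onehot_labels=tf.one_hot(y6, new_nums), logits=fc3)
# update only the new head; everything else is frozen by stop_gradient anyway
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss, var_list=[weights, biases])
# only the new variables need initializing; the restored ones already hold values
sess.run(tf.variables_initializer([weights, biases]))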