A TensorFlow program is organized into two phases: a construction phase, which builds the computation graph, and an execution phase, which runs it in a session.
# Construction phase: ops are added to the implicit default graph.
a = tf.constant(2)
b = tf.constant(3)
c = tf.add(a, b)  # equivalent to `a + b` on tensors

# Execution phase: a Session evaluates the requested tensor.
with tf.Session() as sess:
    cv = sess.run(c)
# The same computation, but built on an explicitly created graph
# instead of the process-wide default graph.
graph = tf.Graph()
with graph.as_default():
    a = tf.constant(2)
    b = tf.constant(3)
    c = a + b

# The session must be bound to the graph that owns the ops it runs.
with tf.Session(graph=graph) as sess:
    cv = sess.run(c)
# Variables hold mutable state; they must be explicitly initialized before use.
a = tf.Variable(3, name='a')
b = tf.Variable(2, name='b')
s = tf.add(a, b)

init = tf.global_variables_initializer()
# var_list=None saves every saveable object; keep at most 5 recent checkpoints.
saver = tf.train.Saver(var_list=None, max_to_keep=5)

with tf.Session() as sess:
    sess.run(init)
    s_value = sess.run(s)
    # BUG FIX: Saver.save() / Saver.restore() take `save_path`, not `path` —
    # the original keyword raised a TypeError. Both also require a live session.
    saver.save(sess, save_path='./checkpoints/model.ckpt')
    saver.restore(sess, save_path='./checkpoints/model.ckpt')
import os

# BUG FIX: TF_CPP_MIN_LOG_LEVEL is read by the TensorFlow C++ runtime at import
# time, so it must be set *before* tensorflow is imported to suppress logs.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow.compat.v1 as tf
import tensorflow.contrib.layers as tflayer
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets

tf.logging.set_verbosity(tf.logging.ERROR)

# Command-line flags controlling the training run.
tf.app.flags.DEFINE_integer('total_epoch', 2000, 'Total Epoch of Training')
tf.app.flags.DEFINE_integer('batch_size', 128, 'Batch Size of Data')
tf.app.flags.DEFINE_integer('show_internal', 100, 'Show Result Internal')
FLAGS = tf.app.flags.FLAGS

# Download (if needed) and load MNIST with one-hot encoded labels.
mnist = read_data_sets('MNIST_data/', one_hot=True)
# --- Graph construction: softmax regression on flattened 28x28 MNIST images ---
with tf.variable_scope('data'):
    # Inputs are flattened to 784 floats; labels are one-hot over 10 classes.
    x = tf.placeholder(tf.float32, shape=[None, 784])
    labels = tf.placeholder(tf.float32, shape=[None, 10])

with tf.variable_scope('model'):
    # Single linear layer; softmax is applied inside the loss, so no activation.
    logit = tflayer.fully_connected(inputs=x, num_outputs=10, activation_fn=None)

with tf.variable_scope('loss_func'):
    # FIX: `axis` replaces the deprecated `dim` keyword argument.
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logit, labels=labels, axis=-1))

with tf.variable_scope('optimizer'):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.05).minimize(loss)

loss_log = tf.summary.scalar('Loss', loss)

# Accuracy: fraction of samples whose argmax prediction matches the label.
correct_prediction = tf.equal(tf.argmax(logit, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

init = tf.global_variables_initializer()
# --- Execution: train for FLAGS.total_epoch mini-batches, then evaluate ---
with tf.Session() as sess:
    sess.run(init)
    writer = tf.summary.FileWriter('./tmp', graph=sess.graph)

    for epoch in range(FLAGS.total_epoch):
        batch_xs, batch_ys = mnist.train.next_batch(FLAGS.batch_size)
        feed = {x: batch_xs, labels: batch_ys}
        batch_loss, _, batch_acc = sess.run(
            [loss, optimizer, accuracy], feed_dict=feed)

        # Periodically report progress and log the loss summary.
        if epoch % FLAGS.show_internal == 0:
            print("Loss={:.3f}, Acc={:.2%}".format(batch_loss, batch_acc))
            summary = sess.run(loss_log, feed_dict=feed)
            writer.add_summary(summary, epoch)

    # Final evaluation on the held-out test set.
    acc = sess.run(accuracy, {x: mnist.test.images, labels: mnist.test.labels})
    print("Acc={:.2%}".format(acc))
import os

# BUG FIX: TF_CPP_MIN_LOG_LEVEL is read by the TensorFlow C++ runtime at import
# time, so it must be set *before* tensorflow is imported to suppress logs.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow.compat.v1 as tf
import tensorflow.contrib.layers as tflayer
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets

tf.logging.set_verbosity(tf.logging.ERROR)

# Command-line flags controlling the training run.
# NOTE(review): these flag names collide with the previous script's flags if
# both are run in one process (DuplicateFlagError) — they are separate scripts.
tf.app.flags.DEFINE_integer('total_epoch', 2000, 'Total Epoch of Training')
tf.app.flags.DEFINE_integer('batch_size', 128, 'Batch Size of Data')
tf.app.flags.DEFINE_integer('show_internal', 100, 'Show Result Internal')
FLAGS = tf.app.flags.FLAGS

# Download (if needed) and load MNIST with one-hot encoded labels.
mnist = read_data_sets('MNIST_data/', one_hot=True)
def prepare_data():
    """Build the input placeholders for the CNN.

    Returns:
        A 3-tuple of (flat placeholder [None, 784], NHWC reshape
        [-1, 28, 28, 1], one-hot label placeholder [None, 10]).
    """
    with tf.variable_scope('data'):
        flat_input = tf.placeholder(tf.float32, shape=[None, 28 * 28])
        image_input = tf.reshape(flat_input, [-1, 28, 28, 1])
        label_input = tf.placeholder(tf.float32, shape=[None, 10])
    return flat_input, image_input, label_input
def build_model():
    """Build a LeNet-style CNN: two conv/pool stages plus a two-layer head.

    NOTE(review): this reads the module-level tensor `x` produced by
    prepare_data() instead of taking it as a parameter — prepare_data()
    must have been called first.

    Returns:
        The unnormalized class-score tensor of shape [batch, 10].
    """
    with tf.variable_scope('conv1'):
        net = tflayer.conv2d(inputs=x, num_outputs=32, kernel_size=5, stride=1,
                             padding='SAME', activation_fn=tf.nn.relu)
        net = tflayer.max_pool2d(inputs=net, kernel_size=2, stride=2,
                                 padding='SAME')
    with tf.variable_scope('conv2'):
        net = tflayer.conv2d(inputs=net, num_outputs=64, kernel_size=5, stride=1,
                             padding='SAME', activation_fn=tf.nn.relu)
        net = tflayer.max_pool2d(inputs=net, kernel_size=2, stride=2,
                                 padding='SAME')
    with tf.variable_scope('fc'):
        # Two 2x-stride poolings shrink 28x28 to 7x7, with 64 channels.
        net = tf.reshape(net, shape=[-1, 7 * 7 * 64])
        net = tflayer.fully_connected(inputs=net, num_outputs=1000,
                                      activation_fn=tf.nn.relu)
        logit = tflayer.fully_connected(inputs=net, num_outputs=10,
                                        activation_fn=None)
    return logit
def build_optimizer(logit, labels):
    """Attach the loss and a plain-SGD training op to the model output.

    Args:
        logit: unnormalized class scores, shape [batch, 10].
        labels: one-hot targets, shape [batch, 10].

    Returns:
        (loss, optimizer): the mean softmax cross-entropy scalar and the
        GradientDescentOptimizer minimize op (learning rate 0.05).
    """
    with tf.variable_scope('loss_func'):
        # FIX: `axis` replaces the deprecated `dim` keyword argument.
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=logit, labels=labels,
                                                    axis=-1))
    with tf.variable_scope('optimizer'):
        optimizer = tf.train.GradientDescentOptimizer(
            learning_rate=0.05).minimize(loss)
    return loss, optimizer
# Wire the graph together: data -> model -> loss/optimizer -> metrics.
# `x_image` is the flat feed placeholder; `x` is its NHWC reshape used
# internally by build_model() via the module-level name.
x_image, x, labels = prepare_data()
logit = build_model()
loss, optimizer = build_optimizer(logit, labels)
# Scalar summary for TensorBoard.
loss_log = tf.summary.scalar('Loss', loss)
# Accuracy: fraction of samples whose argmax prediction matches the label.
correct_prediction = tf.equal(tf.argmax(logit, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
init = tf.global_variables_initializer()
# --- Execution: train the CNN for FLAGS.total_epoch mini-batches, then test ---
with tf.Session() as sess:
    sess.run(init)
    writer = tf.summary.FileWriter('./tmp', graph=sess.graph)

    for epoch in range(FLAGS.total_epoch):
        batch_xs, batch_ys = mnist.train.next_batch(FLAGS.batch_size)
        feed = {x_image: batch_xs, labels: batch_ys}
        batch_loss, _, batch_acc = sess.run(
            [loss, optimizer, accuracy], feed_dict=feed)

        # Periodically report progress and log the loss summary.
        if epoch % FLAGS.show_internal == 0:
            print("Loss={:.3f}, Acc={:.2%}".format(batch_loss, batch_acc))
            summary = sess.run(loss_log, feed_dict=feed)
            writer.add_summary(summary, epoch)

    # Final evaluation on the held-out test set.
    acc = sess.run(accuracy, {x_image: mnist.test.images, labels: mnist.test.labels})
    print("Acc={:.2%}".format(acc))
# API reference excerpt: signature of tf.feature_column.embedding_column,
# which converts a categorical column into a dense, trainable embedding column.
# NOTE(review): `categorical_column` and `dimension` are undefined at this
# point — this call is illustrative only and would raise NameError if executed.
tf.feature_column.embedding_column(
categorical_column, dimension, combiner='mean', initializer=None,
ckpt_to_load_from=None, tensor_name_in_ckpt=None, max_norm=None, trainable=True,
use_safe_embedding_lookup=True
)