The train and test models coexist in the same graph: the test model reuses the train model's variables (training=False, reuse=True). For summaries, only the train model's summary function is called, and two separate FileWriters write the train and test logs.
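The whole pattern rests on tf.variable_scope's reuse flag: when a scope is re-entered with reuse=True, tf.get_variable returns the already-created variable of that name instead of allocating a new one. A minimal standalone sketch of that sharing (the scope and variable names here are illustrative, not part of the model below):

import tensorflow as tf

with tf.Graph().as_default():  # throwaway graph, separate from the model below
    with tf.variable_scope('demo'):
        v1 = tf.get_variable('w', shape=(3,), initializer=tf.zeros_initializer())
    with tf.variable_scope('demo', reuse=True):
        v2 = tf.get_variable('w')  # no new variable; the existing 'demo/w' is returned
    assert v1 is v2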

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

def add_layer(inputs, in_size, out_size, name='layer', activation=None):
    # Fully connected layer. get_variable (rather than tf.Variable) is what
    # lets the train and test models share these weights via reuse.
    with tf.variable_scope(name):
        Weight = tf.get_variable(name='Weight', shape=(in_size, out_size), dtype=tf.float32,
                                 initializer=tf.random_normal_initializer())
        bias = tf.get_variable(name='bias', shape=(out_size,), dtype=tf.float32,
                               initializer=tf.constant_initializer(0.1))
        Wx_b = tf.matmul(inputs, Weight) + bias
        if activation is not None:
            output = activation(Wx_b)
        else:
            output = Wx_b
    return output
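# Note: combined with the scopes opened in the class below, the hidden layer's
# weight is created under the full name 'nn/predict/hidden/Weight:0'; it is this
# name that reuse=True matches when the test model is built.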

#placeholder
x = tf.placeholder(name='x', shape=(None, 784), dtype=tf.float32)
y_label = tf.placeholder(name='y_label', shape=(None, 10), dtype=tf.float32)

#network structure
class nn(object):
    def __init__(self, x, y_label, training=True, reuse=False):
        # With reuse=True, every get_variable call inside this scope returns the
        # train model's existing weights instead of creating new ones.
        with tf.variable_scope('nn', reuse=reuse):
            with tf.variable_scope('predict'):
                hidden_size = 128
                # Dropout is only active in the train model (training=True).
                y_hidden = tf.layers.dropout(
                    add_layer(x, 784, hidden_size, name='hidden', activation=tf.nn.relu),
                    rate=0.5, training=training)
                y = add_layer(y_hidden, hidden_size, 10, name='output', activation=tf.nn.softmax)
            with tf.variable_scope('loss'):
                # Cross-entropy loss; the small epsilon guards against log(0).
                self.cross_entropy = tf.reduce_mean(
                    -tf.reduce_sum(y_label * tf.log(y + 1e-6), axis=1))
                if training:
                    # Only the train model needs an optimizer.
                    self.train_step = tf.train.GradientDescentOptimizer(0.5).minimize(self.cross_entropy)
            with tf.variable_scope('accuracy'):
                correct = tf.equal(tf.argmax(y, 1), tf.argmax(y_label, 1))
                self.accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

    def get_summary(self):
        # Called on the train model only, per the design note above.
        return tf.summary.scalar('loss', self.cross_entropy)

train_model = nn(x, y_label, training=True, reuse=False)
dev_model = nn(x, y_label, training=False, reuse=True)
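# Because dev_model was built with reuse=True it added no new weights: the
# trainable variables are exactly train_model's four tensors
# (hidden Weight/bias and output Weight/bias).
for v in tf.trainable_variables():
    print(v.name, v.get_shape())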

#run sess
with tf.Session() as sess:
    # Summary: register the loss scalar on the train model only; merge_all then
    # collects every summary created so far (here just that one scalar).
    train_model.get_summary()
    merged = tf.summary.merge_all()
    # Two writers, so the train and test curves land in separate log directories.
    train_writer = tf.summary.FileWriter('logs/train', graph=sess.graph)
    test_writer = tf.summary.FileWriter('logs/test', graph=sess.graph)
    sess.run(tf.global_variables_initializer())
    for step in range(1000):
        batch_x, batch_y = mnist.train.next_batch(100)
        sess.run(train_model.train_step, feed_dict={x: batch_x, y_label: batch_y})
        if step % 50 == 0:
            # The same merged op (the train model's loss, dropout still active)
            # is evaluated on both sets; only the feed_dict and the writer differ.
            train_summary = sess.run(merged, feed_dict={x: mnist.train.images, y_label: mnist.train.labels})
            test_summary = sess.run(merged, feed_dict={x: mnist.test.images, y_label: mnist.test.labels})
            train_writer.add_summary(train_summary, step)
            test_writer.add_summary(test_summary, step)
            # print('train')
            # print(sess.run(dev_model.accuracy, feed_dict={x: mnist.train.images, y_label: mnist.train.labels}))
            # print('test')
            # print(sess.run(dev_model.accuracy, feed_dict={x: mnist.test.images, y_label: mnist.test.labels}))
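    # Flush and close the writers so all buffered events reach disk.
    train_writer.close()
    test_writer.close()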

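To compare the two runs, point TensorBoard at the common parent directory (tensorboard --logdir=logs) and open the URL it prints; the loss scalar written to logs/train and logs/test then shows up as two curves on the same chart, which is exactly why two separate FileWriters are used.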