NoneType error when using tf.summary.merge_all in TensorFlow

I'm using Python 3.5 with TensorFlow 1.4.

The error says that at summary_str = sess.run(summary_op), the fetch summary_op is undefined (a NoneType). But I had clearly added summary_op = tf.summary.merge_all() earlier, and when I stepped through in the debugger, summary_op really was NoneType. The problem seemed bizarre and tormented me for a long time.
Eventually I changed the way data is passed in:
At first I used the placeholder + feed_dict approach, and summary_op kept coming back as undefined (None).
Then I switched to feeding the data in directly through an input queue, and the problem went away (see the code below).

It felt like pure voodoo at first, but the behavior is actually documented: tf.summary.merge_all() returns None when no summary ops (tf.summary.scalar, tf.summary.histogram, etc.) have been registered in the graph, so reaching merge_all() with an empty SUMMARIES collection produces exactly this NoneType error.
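A minimal standalone check (separate from my script) makes this visible — merge_all() only returns a real op after at least one summary has been registered:

import tensorflow as tf

print(tf.summary.merge_all())             # None -- no summaries registered yet
tf.summary.scalar('x', tf.constant(3.0))  # register one summary op
print(tf.summary.merge_all())             # now a real Tensor that sess.run can fetch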

The code is simple; here is the working version:

import tensorflow as tf
import numpy as np
import pandas as pd
import os

lr = 0.001
batch_size = 32
capacity = 2000
MAX_STEP = 100000
logs_train_dir = './logs/train/'  # directory for TensorBoard event files and checkpoints
# fetch one batch of data from an input queue
def get_batch(data, label):
    data = tf.cast(data, tf.float32)
    label = tf.cast(label, tf.float32)
    input_queue = tf.train.slice_input_producer([data, label])  # input_queue is a list of size 2
    data = input_queue[0]
    label = input_queue[1]
    data_batch, label_batch = tf.train.batch([data, label],
                                             batch_size=batch_size,
                                             num_threads=64,
                                             capacity=capacity)
    return data_batch, label_batch


def add_layer(inputs, in_size, out_size, activation_function=None, norm=False):  # norm is currently unused
    with tf.name_scope('layer'):
        with tf.name_scope('weights'):
            Weights = tf.Variable(tf.random_normal([in_size, out_size]), name='W')
        with tf.name_scope('biases'):
            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')
        with tf.name_scope('Wx_plus_b'):
            Wx_plus_b = tf.add(tf.matmul(inputs, Weights), biases)

        if activation_function is None:
            outputs = Wx_plus_b
        else:
            outputs = activation_function(Wx_plus_b)
        return outputs


data = pd.read_csv("data.csv")
label = pd.read_csv("label.csv")

data = np.array(data)
label = np.array(label)
input_size = data.shape[1]  # number of input features
with tf.name_scope('inputs'):
    data_batch,label_batch = get_batch(data, label)

# define placeholder for inputs to network
#with tf.name_scope('inputs'):
#    xs = tf.placeholder(tf.float32, [None, input_size])
#    ys = tf.placeholder(tf.float32, [None, 1])
# add hidden layer
l1 = add_layer(data_batch, input_size, 2 * input_size, activation_function=tf.nn.relu, norm=True)

l2 = add_layer(l1, 2 * input_size, 2 * input_size, activation_function=tf.nn.relu, norm=True)

l3 = add_layer(l2, 2 * input_size, input_size, activation_function=tf.nn.relu, norm=True)

l4 = add_layer(l3, input_size, int(input_size / 2), activation_function=tf.nn.relu, norm=True)

l5 = add_layer(l4, int(input_size / 2), int(input_size / 4), activation_function=tf.nn.relu, norm=True)

# add output layer
prediction = add_layer(l5, int(input_size / 4), 1, activation_function=None)

with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(tf.subtract(label_batch, prediction)),
                                        axis=[1]))
    # register the loss summary; without at least one registered summary,
    # tf.summary.merge_all() returns None
    tf.summary.scalar('loss', loss)
with tf.name_scope('train'):
    train_op = tf.train.AdamOptimizer(lr).minimize(loss)



summary_op = tf.summary.merge_all()  # a real op now that a summary is registered
sess = tf.Session()
init = tf.global_variables_initializer()

writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
saver = tf.train.Saver()

sess.run(init)
# start the queue runners that feed data_batch/label_batch;
# without them the first sess.run blocks forever on the input queue
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for step in np.arange(MAX_STEP):
    _, train_loss = sess.run([train_op, loss])
    if step % 2000 == 0:
        summary_str = sess.run(summary_op)
        writer.add_summary(summary_str, step)
        saver.save(sess, os.path.join(logs_train_dir, 'model.ckpt'), global_step=step)
        print('loss: ', train_loss)
coord.request_stop()
coord.join(threads)

In the code above, the batches flow straight out of the input queue, so sess.run(summary_op) needs no feed_dict; with the scalar summary registered, merge_all() returns a real op and the NoneType error is gone. The logged loss curve can then be viewed with tensorboard --logdir ./logs/train.
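For completeness, the placeholder + feed_dict route also works once a summary op exists; the detail that had bitten me is that sess.run(summary_op) needs the same feed_dict as the training step whenever the summarized tensors depend on placeholders. A minimal self-contained sketch of that variant (the toy random data and names like xs/ys are my assumptions, not the original script):

import numpy as np
import tensorflow as tf

# hypothetical toy stand-ins for data.csv / label.csv
data = np.random.rand(256, 10).astype(np.float32)
label = np.random.rand(256, 1).astype(np.float32)

xs = tf.placeholder(tf.float32, [None, 10])
ys = tf.placeholder(tf.float32, [None, 1])
W = tf.Variable(tf.random_normal([10, 1]))
b = tf.Variable(tf.zeros([1, 1]) + 0.1)
prediction = tf.matmul(xs, W) + b

loss = tf.reduce_mean(tf.square(ys - prediction))
tf.summary.scalar('loss', loss)      # register before merge_all()
train_op = tf.train.AdamOptimizer(0.001).minimize(loss)
summary_op = tf.summary.merge_all()  # a real op now, not None

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter('./logs/feed', sess.graph)
    for step in range(1000):
        i = (step * 32) % (256 - 32)
        feed = {xs: data[i:i + 32], ys: label[i:i + 32]}
        # the summary op depends on the placeholders, so it needs the feed too
        _, summary_str = sess.run([train_op, summary_op], feed_dict=feed)
        if step % 100 == 0:
            writer.add_summary(summary_str, step)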

