# Session usage: ConfigProto options (e.g. log_device_placement) are passed at construction
config = tf.ConfigProto(log_device_placement=True)
with tf.Session(config=config) as sess:
    sess.run(...)
# Interactive session (installs itself as the default session)
tf.InteractiveSession()
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress TF C++ INFO and WARNING logs
node1 = tf.constant(3.0)
node2 = tf.constant(4.0)
total = node1 + node2  # renamed to avoid shadowing the built-in sum
with tf.Session() as sess:
    print(total.eval())   # evaluate via the default session
    print(total.graph)    # the graph the tensor belongs to
    print(total.op)       # the op that produces it
    print(total.name)     # e.g. "add:0"
    print(sess.run(total))
tf.zeros(shape,dtype=tf.float32,name=None)
tf.ones(shape,dtype=tf.float32,name=None)
tf.constant(value,dtype=None,name='Const')
tf.random_normal(shape,mean=0.0,stddev=1.0,dtype=tf.float32,seed=None,name=None)
tf.string_to_number(string_tensor,out_type=tf.float32,name=None)
tf.cast(x,dtype,name=None)
tf.shape(input,name=None)
tf.reshape(tensor,shape,name=None)
tf.concat(values,axis,name='concat')
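A quick sketch exercising the creation and transformation ops above (the shapes are arbitrary choices):
zeros = tf.zeros([2, 3])                       # 2x3 tensor of 0.0
ones = tf.ones([3, 2])                         # 3x2 tensor of 1.0
casted = tf.cast(zeros, tf.int32)              # float32 -> int32
reshaped = tf.reshape(ones, [2, 3])            # 3x2 -> 2x3
joined = tf.concat([zeros, reshaped], axis=0)  # stack along rows -> 4x3
with tf.Session() as sess:
    print(sess.run(tf.shape(joined)))          # [4 3]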
tf.summary.FileWriter('/..',graph=default_graph) # returns a FileWriter that writes an events file to the given directory for TensorBoard to read
tensorboard --logdir='addr' --host=127.0.0.1
# Matrix multiplication
tf.matmul(x,w)
# Element-wise square
tf.square(error)
# Mean
tf.reduce_mean(error)
# Gradient descent API
tf.train.GradientDescentOptimizer(learning_rate)
# learning_rate: the learning rate; returns an optimizer whose minimize() yields the gradient-descent op
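A minimal linear-regression sketch tying the ops above together (the synthetic data and the learning rate 0.1 are made-up choices):
x = tf.random_normal([100, 1])              # fake features
y_true = tf.matmul(x, [[0.7]]) + 0.8        # fake targets with known weight and bias
w = tf.Variable(tf.random_normal([1, 1]))
b = tf.Variable(0.0)
y_pred = tf.matmul(x, w) + b
loss = tf.reduce_mean(tf.square(y_true - y_pred))
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(200):
        sess.run(train_op)
    print(sess.run([w, b]))                 # should approach 0.7 and 0.8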
tf.variable_scope(<scope_name>)
# Creates a variable scope with the given name
# Keeps model code organized and the role of each variable clear
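A short sketch of how variable_scope prefixes names and enables sharing (the scope and variable names are illustrative):
with tf.variable_scope("model"):
    v = tf.get_variable("weights", shape=[2], initializer=tf.zeros_initializer())
print(v.name)  # model/weights:0
with tf.variable_scope("model", reuse=True):
    v2 = tf.get_variable("weights", shape=[2])
print(v is v2)  # True: the same variable is returned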
tf.train.Saver(var_list=None,max_to_keep=5)
saver.save(sess,"./model")
saver.restore(sess,"./model")
# Save format: checkpoint files
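A minimal save/restore sketch (the "./model" path follows the lines above; any writable path works):
w = tf.Variable([1.0, 2.0], name="w")
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.save(sess, "./model")     # writes the checkpoint files
with tf.Session() as sess:
    saver.restore(sess, "./model")  # restored variables need no initializer
    print(sess.run(w))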
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('data_dir', '/tmp/tensorflow/mnist/input_data',
                           """Dataset directory""")
tf.app.flags.DEFINE_integer('max_steps', 2000,
                            """Number of training steps""")
tf.app.flags.DEFINE_string('summary_dir', '/tmp/summary/mnist/convtrain',
                           """Events file directory""")

def main(argv):
    print(FLAGS.data_dir)
    print(FLAGS.max_steps)
    print(FLAGS.summary_dir)
    print(argv)

if __name__ == "__main__":
    tf.app.run()
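The flags can then be overridden on the command line (assuming the script above is saved as, say, flags_demo.py):
python flags_demo.py --max_steps=500 --data_dir=/tmp/mnist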
# Create a queue with capacity 3
Q = tf.FIFOQueue(3, dtypes=tf.float32)
# Enqueue the initial data
init = Q.enqueue_many(([0.1, 0.2, 0.3],))
# Define the ops: dequeue, +1, enqueue (note each of these returns an op)
out_q = Q.dequeue()
data = out_q + 1
en_q = Q.enqueue(data)
with tf.Session() as sess:
    # Initialize the queue by feeding in the data
    sess.run(init)
    # Run the dequeue-add-enqueue cycle twice
    for i in range(2):
        sess.run(en_q)
    # Drain the queue
    for i in range(3):
        print(sess.run(Q.dequeue()))
Asynchronous queue execution
# The main thread keeps dequeuing while other threads increment a counter and enqueue
# If the main thread exits while the queue threads are still running, an exception is raised
# So before the main thread finishes, the queue threads must be shut down, or it will block waiting
Q = tf.FIFOQueue(1000, dtypes=tf.float32)
# Define the ops
var = tf.Variable(0.0)
increment_op = tf.assign_add(var, tf.constant(1.0))
en_op = Q.enqueue(increment_op)
# Create a queue runner; one thread is started per op in enqueue_ops
qr = tf.train.QueueRunner(Q, enqueue_ops=[increment_op, en_op] * 3)
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    # Create a thread coordinator
    coord = tf.train.Coordinator()
    # Start the threads that run the enqueue ops
    threads_list = qr.create_threads(sess, coord=coord, start=True)
    print(len(threads_list), "----------")
    # Main thread dequeues the data
    for i in range(20):
        print(sess.run(Q.dequeue()))
    # Ask the other threads to stop
    coord.request_stop()
    # Wait for the threads to finish
    coord.join(threads_list)
# Reading CSV files
# 1. Build the filename queue
# 2. Build a reader and read the contents
# 3. Decode the contents
# 4. Read one record at a time; batch the records if needed
import tensorflow as tf
import os

def readcsv_decode(filelist):
    """
    Read and parse the file contents
    :param filelist: list of file names
    :return: None
    """
    # Join the directory and the file names
    flist = [os.path.join("./csvdata/", file) for file in filelist]
    # Build the filename queue
    file_queue = tf.train.string_input_producer(flist, shuffle=False)
    # Build a reader and read the file contents line by line
    reader = tf.TextLineReader()
    key, value = reader.read(file_queue)
    record_defaults = [["null"], ["null"]]  # e.g. [[0], [0]] for integer columns
    # Decode each line into its column values
    example, label = tf.decode_csv(value, record_defaults=record_defaults)
    # Batch the records with tf.train.batch
    example_batch, label_batch = tf.train.batch([example, label], batch_size=9, num_threads=1, capacity=9)
    with tf.Session() as sess:
        # Thread coordinator
        coord = tf.train.Coordinator()
        # Start the worker threads
        threads = tf.train.start_queue_runners(sess, coord=coord)
        # This approach is not advisable:
        # for i in range(9):
        #     print(sess.run([example, label]))
        # Print the batched data
        print(sess.run([example_batch, label_batch]))
        coord.request_stop()
        coord.join(threads)
    return None

if __name__ == "__main__":
    filename_list = os.listdir("./csvdata")
    readcsv_decode(filename_list)
def readpic_decode(file_list):
    """
    Batch-read images and convert them to tensors
    :param file_list: list of file paths
    :return: None
    """
    # Build the filename queue
    file_queue = tf.train.string_input_producer(file_list)
    # Whole-file reader; read the image data
    reader = tf.WholeFileReader()
    key, value = reader.read(file_queue)
    # Decode into a tensor
    image_first = tf.image.decode_jpeg(value)
    print(image_first)
    # Resize to a fixed height and width; the channel count need not be given
    image = tf.image.resize_images(image_first, [256, 256])
    # Set the static shape of the image
    image.set_shape([256, 256, 3])
    print(image)
    # Batch the images; tf.train.batch needs tensors with fully known shapes
    image_batch = tf.train.batch([image], batch_size=100, num_threads=1, capacity=100)
    tf.summary.image("pic", image_batch)
    with tf.Session() as sess:
        merged = tf.summary.merge_all()
        filewriter = tf.summary.FileWriter("/tmp/summary/dog/", graph=sess.graph)
        # Thread coordinator
        coord = tf.train.Coordinator()
        # Start the threads
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        print(sess.run(image_batch))
        summary = sess.run(merged)
        filewriter.add_summary(summary)
        # Stop and reclaim the threads
        coord.request_stop()
        coord.join(threads)
    return None

if __name__ == "__main__":
    # Get the file names
    filename = os.listdir("./dog/")
    # Join the directory and the file names
    file_list = [os.path.join("./dog/", file) for file in filename]
    # Call the reader
    readpic_decode(file_list)
# Serialize the events file
tf.summary.FileWriter('/tmp/summary/test/', graph=default_graph)
Run in a terminal:
tensorboard --logdir=path/to/log-directory
import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
    with tf.name_scope("name1") as scope:
        a = tf.Variable([1.0, 2.0], name="a")
    with tf.name_scope("name2") as scope:
        b = tf.Variable(tf.zeros([20]), name="b")
        c = tf.Variable(tf.ones([20]), name="c")
    with tf.name_scope("cal") as scope:
        d = tf.concat([b, c], 0)
        e = tf.add(a, 5)
with tf.Session(graph=graph) as sess:
    tf.global_variables_initializer().run()
    # merged = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter('/tmp/summary/test/', graph=sess.graph)
    sess.run([d, e])
Collecting summaries
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_label, logits=y))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_label, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar("loss",cross_entropy)
tf.summary.scalar("accuracy", accuracy)
tf.summary.histogram("W",W)
# Merge all summaries
merged = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(FLAGS.summary_dir, graph=sess.graph)
# Run the merged summary op
summary = sess.run(merged)
# Write it out (i is the training step)
summary_writer.add_summary(summary, i)
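A self-contained sketch of the collect/merge/run/write cycle above (the decaying variable stands in for a real loss, and the log directory is an arbitrary choice):
import tensorflow as tf

loss = tf.Variable(10.0)
step = tf.assign_sub(loss, 0.5)  # dummy "training" step that lowers the loss
tf.summary.scalar("loss", loss)
merged = tf.summary.merge_all()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter("/tmp/summary/demo/", graph=sess.graph)
    for i in range(20):
        sess.run(step)
        writer.add_summary(sess.run(merged), i)
    writer.close()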
cluster = tf.train.ClusterSpec({
    "worker": ["worker0.example.com:2222",   # /job:worker/task:0
               "worker1.example.com:2222",   # /job:worker/task:1
               "worker2.example.com:2222"],  # /job:worker/task:2
    "ps": ["ps0.example.com:2222",           # /job:ps/task:0
           "ps1.example.com:2222"]})         # /job:ps/task:1
# First task
cluster = tf.train.ClusterSpec({"worker": ["localhost:2222", "localhost:2223"]})
server = tf.train.Server(cluster, job_name="worker", task_index=0)
# Second task
cluster = tf.train.ClusterSpec({"worker": ["localhost:2222", "localhost:2223"]})
server = tf.train.Server(cluster, job_name="worker", task_index=1)
with tf.device("/job:ps/task:0"):
    weights = tf.Variable(...)
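A minimal single-machine sketch tying the pieces together; in practice each task runs in its own process, but starting both servers here keeps the example self-contained (the ports and the toy op are made up):
import tensorflow as tf

cluster = tf.train.ClusterSpec({"worker": ["localhost:2222"],
                                "ps": ["localhost:2223"]})
worker = tf.train.Server(cluster, job_name="worker", task_index=0)
ps = tf.train.Server(cluster, job_name="ps", task_index=0)
with tf.device("/job:ps/task:0"):
    weights = tf.Variable([1.0, 2.0])
with tf.device("/job:worker/task:0"):
    doubled = weights * 2.0
with tf.Session(worker.target) as sess:  # connect to the worker's in-process server
    sess.run(tf.global_variables_initializer())
    print(sess.run(doubled))  # [2. 4.]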