The fourth small TensorFlow example

It demonstrates the use of tf.greater and tf.where.

import tensorflow as tf
from numpy.random import RandomState

batch_size = 8

x = tf.placeholder(tf.float32, shape=(None,2), name = 'x-input')
y_ = tf.placeholder(tf.float32, shape=(None,1), name = 'y-input')

w1 = tf.Variable(tf.random_normal([2,1], stddev=1, seed=1))
y = tf.matmul(x, w1)

# Define the cost of predicting too much (loss_more) and of predicting too little (loss_less);
# a numeric check of this weighting follows the training loop
loss_less = 10
loss_more = 1

# tf.greater takes two tensors, compares them element-wise, and returns a boolean tensor; in short, an element-wise comparison
# tf.where takes the condition as its first argument: where the condition is True it selects the value from the second argument,
# otherwise the value from the third (tf.where has replaced the older tf.select). A standalone demo of these two ops follows the training loop.
loss = tf.reduce_sum(tf.where(tf.greater(y, y_), (y - y_) * loss_more, (y_ - y) * loss_less))
train_step = tf.train.AdamOptimizer(0.001).minimize(loss)

rdm = RandomState(1)
dataset_size = 128

X = rdm.rand(dataset_size,2)
# Labels: x1 + x2 plus uniform noise in [-0.05, 0.05)
Y = [[x1 + x2 + rdm.rand()/10.0 - 0.05] for (x1, x2) in X]

with tf.Session() as sess:
	init_op = tf.global_variables_initializer()  # tf.initialize_all_variables() is deprecated
	sess.run(init_op)
	STEPS = 5000
	for i in range(STEPS):
		start = (i * batch_size) % dataset_size
		end = min(start + batch_size, dataset_size)
		sess.run(train_step,feed_dict={x: X[start:end], y_: Y[start:end]})
		print(sess.run(w1))
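
For reference, here is a minimal standalone sketch of what tf.greater and tf.where do on concrete values (the tensor values are made up purely for illustration, using the same TensorFlow 1.x API as above):

import tensorflow as tf

# Two small example tensors (arbitrary values, for illustration only)
v1 = tf.constant([1.0, 2.0, 3.0, 4.0])
v2 = tf.constant([4.0, 3.0, 2.0, 1.0])

with tf.Session() as sess:
	# Element-wise comparison: True wherever v1 > v2
	print(sess.run(tf.greater(v1, v2)))                     # [False False  True  True]
	# Take the value from v1 where the condition is True, otherwise from v2
	print(sess.run(tf.where(tf.greater(v1, v2), v1, v2)))   # [4. 3. 3. 4.]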

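And a quick numeric check of the asymmetric loss itself (the prediction and label values below are hand-picked assumptions for illustration): when the prediction is above the label the error is weighted by loss_more = 1, and when it is below the label it is weighted by loss_less = 10, so under-prediction is penalized ten times more heavily.

import tensorflow as tf

loss_less = 10
loss_more = 1

# Hand-picked labels and predictions: the first prediction overshoots by 0.2, the second undershoots by 0.2
y_pred = tf.constant([[1.2], [0.8]])
y_true = tf.constant([[1.0], [1.0]])

loss_demo = tf.reduce_sum(tf.where(tf.greater(y_pred, y_true),
                                   (y_pred - y_true) * loss_more,
                                   (y_true - y_pred) * loss_less))

with tf.Session() as sess:
	# Overshoot: 0.2 * 1 = 0.2, undershoot: 0.2 * 10 = 2.0, total ≈ 2.2
	print(sess.run(loss_demo))

Because under-prediction is the more expensive mistake under this loss, training tends to push the learned w1 slightly above [1, 1]: the model prefers to over-predict.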