# Custom loss functions in TensorFlow 1.x: train a linear model with an
# asymmetric loss that prices under- and over-prediction differently, then
# compare against the same model trained with plain mean squared error.
import tensorflow as tf
from numpy.random import RandomState

batch_size = 8

# Two input features, one regression target.
x = tf.placeholder(tf.float32, shape=(None, 2), name="x-input")
y_ = tf.placeholder(tf.float32, shape=(None, 1), name="y-input")

# A single linear layer without bias: y = x @ w1.
w1 = tf.Variable(tf.random_normal([2, 1], stddev=1, seed=1))
y = tf.matmul(x, w1)
# Asymmetric loss: each unit of under-prediction (y < y_) costs loss_less=10,
# each unit of over-prediction (y > y_) costs loss_more=1, nudging the model
# to err on the high side.
loss_less = 10
loss_more = 1
loss = tf.reduce_sum(tf.where(tf.greater(y, y_),
                              (y - y_) * loss_more,
                              (y_ - y) * loss_less))
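# Quick NumPy cross-check of the piecewise loss above (an illustrative sketch
# added here, not part of the training graph): over-predicting by 0.2 costs
# 0.2 * loss_more = 0.2, while under-predicting by 0.2 costs 0.2 * loss_less = 2.0.
import numpy as np
_diff = np.array([1.2, 0.8]) - np.array([1.0, 1.0])  # one over, one under
assert np.isclose(np.sum(np.where(_diff > 0, _diff * loss_more, -_diff * loss_less)), 2.2)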
train_step = tf.train.AdamOptimizer(0.001).minimize(loss)
# 128 synthetic samples: the target is x1 + x2 plus uniform noise in
# [-0.05, 0.05), so the "true" weights are [1, 1].
rdm = RandomState(1)
X = rdm.rand(128, 2)
Y = [[x1 + x2 + (rdm.rand() / 10.0 - 0.05)] for (x1, x2) in X]
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    STEPS = 5000
    for i in range(STEPS):
        # Cycle through the 128 samples in batches of 8.
        start = (i * batch_size) % 128
        end = start + batch_size
        sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})
        if i % 1000 == 0:
            print("After %d training step(s), w1 is: " % i)
            print(sess.run(w1), "\n")
    print("[loss_less=10 loss_more=1] Final w1 is: \n", sess.run(w1))
'''
After 0 training step(s), w1 is:
[[-0.81031823]
[ 1.4855988 ]]
After 1000 training step(s), w1 is:
[[ 0.01247113]
[ 2.13854504]]
After 2000 training step(s), w1 is:
[[ 0.45567426]
[ 2.17060685]]
After 3000 training step(s), w1 is:
[[ 0.69968736]
[ 1.84653103]]
After 4000 training step(s), w1 is:
[[ 0.89886677]
[ 1.29736042]]
[loss_less=10 loss_more=1] Final w1 is:
[[ 1.01934707]
[ 1.04280913]]
'''
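# Because under-prediction costs 10x more than over-prediction here, the
# learned weights settle slightly above the true [1, 1].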
# Swap the penalties: now over-prediction (loss_more=10) is the expensive
# mistake, so the model should err on the low side instead.
loss_less = 1
loss_more = 10
loss = tf.reduce_sum(tf.where(tf.greater(y, y_),
                              (y - y_) * loss_more,
                              (y_ - y) * loss_less))
train_step = tf.train.AdamOptimizer(0.001).minimize(loss)
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    STEPS = 5000
    for i in range(STEPS):
        start = (i * batch_size) % 128
        end = start + batch_size
        sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})
        if i % 1000 == 0:
            print("After %d training step(s), w1 is: " % i)
            print(sess.run(w1), "\n")
    print("[loss_less=1 loss_more=10] Final w1 is: \n", sess.run(w1))
'''
After 0 training step(s), w1 is:
[[-0.81231821]
[ 1.48359871]]
After 1000 training step(s), w1 is:
[[ 0.18643527]
[ 1.07393336]]
After 2000 training step(s), w1 is:
[[ 0.95444274]
[ 0.98088616]]
After 3000 training step(s), w1 is:
[[ 0.95574027]
[ 0.9806633 ]]
After 4000 training step(s), w1 is:
[[ 0.95466018]
[ 0.98135227]]
[loss_less=1 loss_more=10] Final w1 is:
[[ 0.95525807]
[ 0.9813394 ]]
'''
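# With the penalties swapped, over-prediction is the expensive mistake and
# the learned weights settle slightly below the true [1, 1].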
# Baseline: symmetric mean squared error. The signature is
# tf.losses.mean_squared_error(labels, predictions), so the labels (y_) come
# first; MSE is numerically symmetric, but the argument order still matters
# for clarity.
loss = tf.losses.mean_squared_error(labels=y_, predictions=y)
train_step = tf.train.AdamOptimizer(0.001).minimize(loss)
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    STEPS = 5000
    for i in range(STEPS):
        start = (i * batch_size) % 128
        end = start + batch_size
        sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})
        if i % 1000 == 0:
            print("After %d training step(s), w1 is: " % i)
            print(sess.run(w1), "\n")
    print("[losses.mean_squared_error] Final w1 is: \n", sess.run(w1))
'''
After 0 training step(s), w1 is:
[[-0.81031823]
[ 1.4855988 ]]
After 1000 training step(s), w1 is:
[[-0.13337614]
[ 1.81309223]]
After 2000 training step(s), w1 is:
[[ 0.32190299]
[ 1.52463484]]
After 3000 training step(s), w1 is:
[[ 0.67850214]
[ 1.25297272]]
After 4000 training step(s), w1 is:
[[ 0.89473999]
[ 1.08598232]]
[losses.mean_squared_error] Final w1 is:
[[ 0.97437561]
[ 1.0243336 ]]
'''
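# Summary: the symmetric MSE baseline lands near the true weights [1, 1],
# while each asymmetric loss biases the model toward the cheaper mistake:
# above [1, 1] when under-prediction is costly, below it when over-prediction is.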