linear regression using TF

The learning rate is set to 0.001. With 0.1 or 0.01 the training result turns into nan, so the step size has to be kept small. The reason is the large input value 35: the curvature of the squared-error cost in W scales with mean(x^2), which is about 210 for this data, so plain gradient descent is only stable for learning rates below roughly 2/210 ≈ 0.01; anything larger overshoots until the loss blows up. (Rescaling the inputs is an alternative to shrinking the step; see the sketch after the code.)

import tensorflow as tf

def main(args):
  # Model parameters and placeholder inputs.
  W = tf.Variable([1], dtype=tf.float32)
  b = tf.Variable([0], dtype=tf.float32)
  x = tf.placeholder(tf.float32)
  y = tf.placeholder(tf.float32)

  # Hypothesis h = W*x + b and mean squared error cost.
  h = W * x + b
  squared_deltas = tf.square(h - y)
  cost = 0.5 * tf.reduce_mean(squared_deltas)

  # Plain gradient descent with learning rate 0.001.
  optimizer = tf.train.GradientDescentOptimizer(0.001)
  train = optimizer.minimize(cost)

  init = tf.global_variables_initializer()
  sess = tf.Session()

  sess.run(init)
  for i in range(10000):
    sess.run(train, {x: [1, 2, 3, 4, -3, 35], y: [0, -1, -2, -3, 4, -34]})
  curr_W, curr_b, curr_loss = sess.run([W, b, cost],
                                       {x: [1, 2, 3, 4, -3, 35], y: [0, -1, -2, -3, 4, -34]})
  print("W, b, cost learned: ", curr_W, curr_b, curr_loss)

if __name__ == "__main__":
  tf.app.run()
