Regression

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt


# Nonlinear regression: fit y = x^2 from noisy samples
x_data = np.linspace(-0.5, 0.5, 200)[:, np.newaxis]  # shape (200, 1)
print(x_data.shape)
noise = np.random.normal(0, 0.02, x_data.shape)  # Gaussian noise, std 0.02
y_data = np.square(x_data) + noise

# Define placeholders used to feed the data
x = tf.placeholder(tf.float32, [None, 1])
y = tf.placeholder(tf.float32, [None, 1])

# Architecture: 1 input unit, 10 hidden units, 1 output unit.
# The 10 tanh hidden units act as nonlinear basis functions for fitting the curve.
# Build the hidden layer
Weight_L1 = tf.Variable(tf.random_normal([1, 10]))
bias_L1 = tf.Variable(tf.zeros([1, 10]) + 0.2)  # biases initialized to 0.2
Wx_plus_b_L1 = tf.matmul(x, Weight_L1) + bias_L1
L1 = tf.nn.tanh(Wx_plus_b_L1)
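# L1 has shape [None, 10]: each scalar input is mapped to 10 tanh features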

# Output layer
Weight_L2 = tf.Variable(tf.random_normal([10, 1]))
bias_L2 = tf.Variable(tf.zeros([1, 1]) + 0.2)
Wx_plus_b_L2 = tf.matmul(L1, Weight_L2) + bias_L2
prediction = tf.nn.tanh(Wx_plus_b_L2)
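# End of model definition: prediction = tanh(tanh(x*W1 + b1)*W2 + b2), shape [None, 1]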

# Cost function: mean squared error
loss = tf.reduce_mean(tf.square(y - prediction))

# Training step: plain gradient descent on the loss, learning rate 0.5
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(2000):
        sess.run(train_step, feed_dict={x: x_data, y: y_data})

    prediction_value = sess.run(prediction, feed_dict={x: x_data})  # y is not needed for inference
    print(prediction_value)
    plt.figure()
    plt.scatter(x_data, y_data)
    plt.plot(x_data, prediction_value, 'r-')
    plt.show()
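
The listing above targets the TensorFlow 1.x graph API (tf.placeholder, tf.Session), which no longer exists in TensorFlow 2. As a rough sketch of how the same two-layer tanh network could be expressed with the Keras API (the layer sizes, activations, and learning rate simply mirror the code above; this is an illustrative equivalent, not part of the original example):

import numpy as np
import tensorflow as tf

x_data = np.linspace(-0.5, 0.5, 200)[:, np.newaxis].astype(np.float32)
y_data = np.square(x_data) + np.random.normal(0, 0.02, x_data.shape).astype(np.float32)

# Same architecture: Dense(1 -> 10, tanh) followed by Dense(10 -> 1, tanh)
model = tf.keras.Sequential([
    tf.keras.layers.Dense(10, activation='tanh', input_shape=(1,)),
    tf.keras.layers.Dense(1, activation='tanh'),
])

# Plain SGD with the same learning rate and a mean-squared-error loss
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.5), loss='mse')

# batch_size=200 reproduces the full-batch gradient descent of the original loop
model.fit(x_data, y_data, epochs=2000, batch_size=200, verbose=0)

prediction_value = model.predict(x_data)

The plotting code at the end of the original listing can be reused unchanged with this prediction_value.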
