A Simple TensorFlow Linear Regression Example

The script below uses gradient descent to fit a linear model y_predict = k * x + b to 100 points sampled from the line y = 0.1 * x + 0.2, so the trained values of k and b should approach 0.1 and 0.2.

import tensorflow as tf
import numpy as np

# Use numpy to generate 100 random x values in [0, 1); the targets lie exactly on the line y = 0.1 * x + 0.2
x = np.random.rand(100)
y = x * 0.1 + 0.2

# Build a linear model with parameters k (slope) and b (intercept)
k = tf.Variable(0.)
b = tf.Variable(0.)

y_predict = k * x + b

# Define the cost function (mean squared error)
loss = tf.reduce_mean(tf.square(y_predict - y))
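# (The loss is the average of (k * x_i + b - y_i)^2 over all 100 points.)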

# Define a gradient descent optimizer with learning rate 0.1
optimizer = tf.train.GradientDescentOptimizer(0.1)

# Minimize the cost function
train = optimizer.minimize(loss)
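# minimize(loss) builds a training op that, each time it is run, computes the
# gradients d(loss)/dk and d(loss)/db and applies the updates
# k <- k - 0.1 * d(loss)/dk and b <- b - 0.1 * d(loss)/db.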

# Op that initializes all global variables
init = tf.global_variables_initializer()

# Create a session and run the graph
with tf.Session() as sess:
    sess.run(init)
    
    # Run 201 training steps (step = 0 to 200)
    for step in range(201):
        sess.run(train)
        
        # Print the current k and b every 20 steps
        if step % 20 == 0:
            print(step, sess.run([k, b]))

# Output

0 [0.02921415, 0.05083104]
20 [0.10880345, 0.19447665]
40 [0.10694137, 0.19598201]
60 [0.10536063, 0.19689776]
80 [0.10413962, 0.19760437]
100 [0.10319672, 0.19815004]
120 [0.10246859, 0.1985714]
140 [0.10190632, 0.1988968]
160 [0.10147212, 0.19914807]
180 [0.1011368, 0.19934213]
200 [0.10087787, 0.19949196]
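The code above uses the TensorFlow 1.x graph-and-session API (tf.train.GradientDescentOptimizer, tf.Session), which is not available in the default TensorFlow 2.x namespace. As a rough TensorFlow 2.x equivalent, the following minimal sketch fits the same line with eager execution and tf.GradientTape (assuming TensorFlow 2.x and tf.keras.optimizers.SGD are available); it is an alternative for reference, not part of the original script.

import tensorflow as tf
import numpy as np

# Same synthetic data: 100 points on the line y = 0.1 * x + 0.2
x = np.random.rand(100).astype(np.float32)
y = x * 0.1 + 0.2

k = tf.Variable(0.0)
b = tf.Variable(0.0)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)

for step in range(201):
    with tf.GradientTape() as tape:
        y_predict = k * x + b
        loss = tf.reduce_mean(tf.square(y_predict - y))
    # Compute d(loss)/dk and d(loss)/db, then apply the gradient descent update
    grads = tape.gradient(loss, [k, b])
    optimizer.apply_gradients(zip(grads, [k, b]))
    if step % 20 == 0:
        print(step, [k.numpy(), b.numpy()])

As with the original script, the printed values should converge toward k ≈ 0.1 and b ≈ 0.2.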

 
