Implementing Linear Regression with TensorFlow -- Study Notes

 

Reference code: linear_regression.py

import tensorflow as tf
import numpy
import matplotlib.pyplot as plt
rng=numpy.random

learning_rate=0.01
training_epochs=1000
display_step=50

train_X=numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,
                         7.042,10.791,5.313,7.997,5.654,9.27,3.1])
train_Y=numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,
                         2.827,3.465,1.65,2.904,2.42,2.94,1.3])
n_sample=train_X.shape[0]
# print(n_sample)
X=tf.placeholder("float")
# tf.placeholder(
#     dtype,       data type; commonly a numeric type such as tf.float32 or tf.float64
#     shape=None,  tensor shape; defaults to None (any shape). It can also be multi-dimensional,
#                  e.g. [2, 3], or [None, 3] where None means that dimension may vary (e.g. the batch size)
#     name=None    name of the op
# )
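# Illustrative example (not in the original notes): a placeholder with an explicit dtype and shape,
#   x_batch = tf.placeholder(tf.float32, shape=[None, 3], name="x_batch")
# accepts any number of rows, each with 3 features.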
Y=tf.placeholder("float")
W=tf.Variable(rng.randn(),name="weight") # randn() draws from the standard normal distribution (a scalar here, since no shape is given)
# Create a Variable; its initial value determines its type and shape:
# w = tf.Variable(<initial_value>, name=<name>)
b=tf.Variable(rng.randn(),name="bias")
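# Illustrative example (not in the original notes): a Variable gets an explicit shape from its initial value,
#   w2 = tf.Variable(tf.zeros([3, 1]), name="w2")   # a 3x1 weight vector initialized to zeros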

pred=tf.add(tf.multiply(X,W),b)
# tf.multiply() multiplies two tensors element-wise (with broadcasting)
# Example for tf.add:
# a=tf.constant([[1,1],[2,2],[3,3]],dtype=tf.float32)
# b=tf.constant([1,-1],dtype=tf.float32)
# print(sess.run(tf.add(a, b)))
# add (b is broadcast across the rows of a):
# [[ 2. 0.]
#  [ 3. 1.]
#  [ 4. 2.]]
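# Illustrative (not in the original notes): the same a and b with tf.multiply,
# print(sess.run(tf.multiply(a, b)))
# multiply (element-wise, b broadcast row by row):
# [[ 1. -1.]
#  [ 2. -2.]
#  [ 3. -3.]]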

cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_sample)
# tf.pow(x, y) computes x raised to the power y, element-wise
# tf.reduce_sum sums the elements of a tensor, reducing its dimensions
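# The line above is the usual half mean-squared-error (sketch):
#   cost = (1 / (2 * n_sample)) * sum_i (pred_i - Y_i)^2
# Illustrative reduce_sum behaviour (not in the original notes):
#   tf.reduce_sum([[1, 2], [3, 4]])          -> 10
#   tf.reduce_sum([[1, 2], [3, 4]], axis=0)  -> [4, 6]
#   tf.reduce_sum([[1, 2], [3, 4]], axis=1)  -> [3, 7]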

optimizer=tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# tf.train.GradientDescentOptimizer() applies gradient descent: it moves the parameters in the
# direction opposite to the gradient, i.e. the direction in which the total loss decreases.
# (Feeding one sample at a time below makes this effectively stochastic gradient descent.)
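# Update rule applied by minimize() at each step (sketch):
#   W <- W - learning_rate * d(cost)/dW
#   b <- b - learning_rate * d(cost)/db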

init=tf.global_variables_initializer()
# an op that initializes all not-yet-initialized variables

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(training_epochs):
        for (x,y) in zip(train_X,train_Y):
            sess.run(optimizer,feed_dict={X:x,Y:y}) # feed_dict supplies values for the tensors created with tf.placeholder
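            # Illustrative (not in the original notes): any fetched op can be fed the same way,
            # e.g. sess.run(pred, feed_dict={X: 5.0}) evaluates pred at X=5 with the current W and b.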
        if (epoch+1)%display_step==0:
            c=sess.run(cost,feed_dict={X:train_X,Y:train_Y})
            print("Epoch:","%04d"%(epoch+1),"costj=","{:.9f}".format(c),
                  "w=",sess.run(W),"b=",sess.run(b))
    print("Optimization Finished!")
    training_cost=sess.run(cost,feed_dict={X:train_X,Y:train_Y})
    print("Training cost=",training_cost,"W=",sess.run(W),"b=",sess.run(b),'\n')

    plt.plot(train_X,train_Y,'ro',label='Original data')
    plt.plot(train_X,sess.run(W)*train_X+sess.run(b),label='Fitted line')
    plt.legend()
    plt.show()
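
As a sanity check (not part of the original notes), the same slope and intercept can be computed in
closed form with NumPy's least-squares solver; the W and b printed by the gradient-descent loop
above should come out close to these values:

import numpy

train_X=numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,
                       7.042,10.791,5.313,7.997,5.654,9.27,3.1])
train_Y=numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,
                       2.827,3.465,1.65,2.904,2.42,2.94,1.3])

# Solve min ||A @ [W, b] - train_Y||^2 where A = [train_X, 1]
A=numpy.vstack([train_X,numpy.ones_like(train_X)]).T
W_ls,b_ls=numpy.linalg.lstsq(A,train_Y,rcond=None)[0]
print("closed-form W=",W_ls,"b=",b_ls)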
