DL6-多元线性回归:波士顿房价预测问题TensorFlow(1.x)实战

1、数据读取

# 导入相关库
%matplotlib notebook
# import tensorflow as tf     V2.0用这个
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd  # 数据读取
from sklearn.utils import shuffle  #样本洗牌

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
tf.reset_default_graph()
#读取数据文件
df = pd.read_csv("data/boston.csv",header = 0)

#显示数据摘要描述信息
print(df.describe())
             CRIM         ZN       INDUS         CHAS         NOX          RM  \
count  506.000000  506.000000  506.000000  506.000000  506.000000  506.000000   
mean     3.613524   11.363636   11.136779    0.069170    0.554695    6.284634   
std      8.601545   23.322453    6.860353    0.253994    0.115878    0.702617   
min      0.006320    0.000000    0.460000    0.000000    0.385000    3.561000   
25%      0.082045    0.000000    5.190000    0.000000    0.449000    5.885500   
50%      0.256510    0.000000    9.690000    0.000000    0.538000    6.208500   
75%      3.677082   12.500000   18.100000    0.000000    0.624000    6.623500   
max     88.976200  100.000000   27.740000    1.000000    0.871000    8.780000   

              AGE         DIS         RAD         TAX     PTRATIO       LSTAT  \
count  506.000000  506.000000  506.000000  506.000000  506.000000  506.000000   
mean    68.574901    3.795043    9.549407  408.237154   18.455534   12.653063   
std     28.148861    2.105710    8.707259  168.537116    2.164946    7.141062   
min      2.900000    1.129600    1.000000  187.000000   12.600000    1.730000   
25%     45.025000    2.100175    4.000000  279.000000   17.400000    6.950000   
50%     77.500000    3.207450    5.000000  330.000000   19.050000   11.360000   
75%     94.075000    5.188425   24.000000  666.000000   20.200000   16.955000   
max    100.000000   12.126500   24.000000  711.000000   22.000000   37.970000   

             MEDV  
count  506.000000  
mean    22.532806  
std      9.197104  
min      5.000000  
25%     17.025000  
50%     21.200000  
75%     25.000000  
max     50.000000  
# 显示所有数据
print(df)
        CRIM   ZN   INDUS   CHAS    NOX     RM   AGE     DIS  RAD  TAX  \
0    0.00632  18.0    2.31     0  0.538  6.575  65.2  4.0900    1  296   
1    0.02731   0.0    7.07     0  0.469  6.421  78.9  4.9671    2  242   
2    0.02729   0.0    7.07     0  0.469  7.185  61.1  4.9671    2  242   
3    0.03237   0.0    2.18     0  0.458  6.998  45.8  6.0622    3  222   
4    0.06905   0.0    2.18     0  0.458  7.147  54.2  6.0622    3  222   
..       ...   ...     ...   ...    ...    ...   ...     ...  ...  ...   
501  0.06263   0.0   11.93     0  0.573  6.593  69.1  2.4786    1  273   
502  0.04527   0.0   11.93     0  0.573  6.120  76.7  2.2875    1  273   
503  0.06076   0.0   11.93     0  0.573  6.976  91.0  2.1675    1  273   
504  0.10959   0.0   11.93     0  0.573  6.794  89.3  2.3889    1  273   
505  0.04741   0.0   11.93     0  0.573  6.030  80.8  2.5050    1  273   

     PTRATIO  LSTAT  MEDV  
0       15.3   4.98  24.0  
1       17.8   9.14  21.6  
2       17.8   4.03  34.7  
3       18.7   2.94  33.4  
4       18.7   5.33  36.2  
..       ...    ...   ...  
501     21.0   9.67  22.4  
502     21.0   9.08  20.6  
503     21.0   5.64  23.9  
504     21.0   6.48  22.0  
505     21.0   7.88  11.9  

[506 rows x 13 columns]

2、准备建模

(1)准备工作–线性代数

import numpy as np
# 标量只是一个单一的数字
scalar_value = 18
print(scalar_value)
18
# 把标量转换为数组
sclar_np = np.array(scalar_value)

# 数组的shape不为空
print(sclar_np,sclar_np.shape)
18 ()
# 向量是一个有序的数字数组

# 这是一个列表
vector_value = [1,2,3]

# 转换为np中的数组array
vector_np = np.array(vector_value)

# shape显示为 一维数组, 其实这既不能算行向量,也不能算列向量:即不能区分(无法表示)行向量、列向量
print(vector_np, vector_np.shape)
[1 2 3] (3,)
# 矩阵是一个有序二维数组,它有两个索引。第一个指向该行,第二个指向该列

matrix_list = [[1,2,3],[4,5,6]]
matrix_np = np.array(matrix_list)

print("matrix_list = ",matrix_list,\
"\nmatrix_np =\n", matrix_np, \
"\nmatrix_np.shape = ", matrix_np.shape)
matrix_list =  [[1, 2, 3], [4, 5, 6]] 
matrix_np =
 [[1 2 3]
 [4 5 6]] 
matrix_np.shape =  (2, 3)
# 行向量矩阵表示
vector_row = np.array([[1,2,3]])
print(vector_row, "shape = ", vector_row.shape)
# 1行3列
[[1 2 3]] shape =  (1, 3)
# 列向量矩阵表示
vector_column = np.array([[4],[5],[6]])
print(vector_column, "shape = ", vector_column.shape)
# 3行1列  
[[4]
 [5]
 [6]] shape =  (3, 1)
# 矩阵与标量运算

matrix_a = np.array([[1,2,3],[4,5,6]])
print(matrix_a,"\nshape = ", matrix_a.shape)
[[1 2 3]
 [4 5 6]] 
shape =  (2, 3)
# 矩阵 * 标量
matrix_b = matrix_a * 2
print(matrix_b,"s\nhape = ", matrix_b.shape)
[[ 2  4  6]
 [ 8 10 12]] s
hape =  (2, 3)
# 矩阵 + 标量
matrix_c =matrix_a + 2
print(matrix_c,"\nshape = ", matrix_c.shape)
[[3 4 5]
 [6 7 8]] 
shape =  (2, 3)
# 矩阵 ± 矩阵
# 要求矩阵的形状相同:对应位置相加减

matrix_a = np.array([[1,2,3],
                     [4,5,6]])
matrix_b = np.array([[-1,-2,-3],
                     [-4,-5,-6]])
print(matrix_a + matrix_b)
print(matrix_a - matrix_b)
[[0 0 0]
 [0 0 0]]
[[ 2  4  6]
 [ 8 10 12]]
# 矩阵 * 矩阵(点乘)
# 要求矩阵的形状相同:对应位置相乘
matrix_a = np.array([[1,2,3],
                     [4,5,6]])
matrix_b = np.array([[-1,-2,-3],
                     [-4,-5,-6]])

print(matrix_a * matrix_b)
#等价表述如下:
print(np.multiply(matrix_a,matrix_b))
[[ -1  -4  -9]
 [-16 -25 -36]]
[[ -1  -4  -9]
 [-16 -25 -36]]
# 矩阵 ※ 矩阵(叉乘)
# 要求第一个矩阵的列=第二个矩阵的行

matrix_a = np.array([[1,2,3],
                     [4,5,6]])#2行3列
matrix_b = np.array([[1,2,3,4],
                     [1,2,1,2],
                     [-1,0,1,1]])#3行4列

print(np.matmul(matrix_a,matrix_b))#输出为2行4列
[[ 0  6  8 11]
 [ 3 18 23 32]]
# 矩阵转置
matrix_a = np.array([[1,2,3],
                     [4,5,6]])#2行3列
print(matrix_a,"\nshape = ", matrix_a.shape,'\n',matrix_a.T,'\n.Tshape=',matrix_a.T.shape)
[[1 2 3]
 [4 5 6]] 
shape =  (2, 3) 
 [[1 4]
 [2 5]
 [3 6]] 
.Tshape= (3, 2)
# 也可以通过reshape函数
matrix_b = np.array([[1,2,3],
                     [4,5,6]])#2行3列
matrix_c = matrix_b.reshape(6,1)
print(matrix_b,"\nshape = ", matrix_b.shape,'\n',matrix_c,'\n.Tshape=',matrix_c.shape)
[[1 2 3]
 [4 5 6]] 
shape =  (2, 3) 
 [[1]
 [2]
 [3]
 [4]
 [5]
 [6]] 
.Tshape= (6, 1)
matrix_c = matrix_b.reshape(1,6)
print(matrix_b,"\nshape = ", matrix_b.shape,'\n',matrix_c,'\n.Tshape=',matrix_c.shape)
[[1 2 3]
 [4 5 6]] 
shape =  (2, 3) 
 [[1 2 3 4 5 6]] 
.Tshape= (1, 6)

(2)数据准备

# 获取df的值
df = df.values   #只获取含数字的项,表头是字母不会包含进来
# 把 df 转换为 np 数组格式
df = np.array(df)
print(df)
[[6.3200e-03 1.8000e+01 2.3100e+00 ... 1.5300e+01 4.9800e+00 2.4000e+01]
 [2.7310e-02 0.0000e+00 7.0700e+00 ... 1.7800e+01 9.1400e+00 2.1600e+01]
 [2.7290e-02 0.0000e+00 7.0700e+00 ... 1.7800e+01 4.0300e+00 3.4700e+01]
 ...
 [6.0760e-02 0.0000e+00 1.1930e+01 ... 2.1000e+01 5.6400e+00 2.3900e+01]
 [1.0959e-01 0.0000e+00 1.1930e+01 ... 2.1000e+01 6.4800e+00 2.2000e+01]
 [4.7410e-02 0.0000e+00 1.1930e+01 ... 2.1000e+01 7.8800e+00 1.1900e+01]]
# x_data 为前12列特征数据
x_data = df[:,:12]  #行数:所有;列数:第0至11行,不包含第12行

# y_data 为最后1列标签数据
y_data = df[:,12]
print(x_data,"\n shape=",x_data.shape)
[[6.3200e-03 1.8000e+01 2.3100e+00 ... 2.9600e+02 1.5300e+01 4.9800e+00]
 [2.7310e-02 0.0000e+00 7.0700e+00 ... 2.4200e+02 1.7800e+01 9.1400e+00]
 [2.7290e-02 0.0000e+00 7.0700e+00 ... 2.4200e+02 1.7800e+01 4.0300e+00]
 ...
 [6.0760e-02 0.0000e+00 1.1930e+01 ... 2.7300e+02 2.1000e+01 5.6400e+00]
 [1.0959e-01 0.0000e+00 1.1930e+01 ... 2.7300e+02 2.1000e+01 6.4800e+00]
 [4.7410e-02 0.0000e+00 1.1930e+01 ... 2.7300e+02 2.1000e+01 7.8800e+00]] 
 shape= (506, 12)
print(y_data,"\n shape=",y_data.shape)
[24.  21.6 34.7 33.4 36.2 28.7 22.9 27.1 16.5 18.9 15.  18.9 21.7 20.4
 18.2 19.9 23.1 17.5 20.2 18.2 13.6 19.6 15.2 14.5 15.6 13.9 16.6 14.8
 18.4 21.  12.7 14.5 13.2 13.1 13.5 18.9 20.  21.  24.7 30.8 34.9 26.6
 25.3 24.7 21.2 19.3 20.  16.6 14.4 19.4 19.7 20.5 25.  23.4 18.9 35.4
 24.7 31.6 23.3 19.6 18.7 16.  22.2 25.  33.  23.5 19.4 22.  17.4 20.9
 24.2 21.7 22.8 23.4 24.1 21.4 20.  20.8 21.2 20.3 28.  23.9 24.8 22.9
 23.9 26.6 22.5 22.2 23.6 28.7 22.6 22.  22.9 25.  20.6 28.4 21.4 38.7
 43.8 33.2 27.5 26.5 18.6 19.3 20.1 19.5 19.5 20.4 19.8 19.4 21.7 22.8
 18.8 18.7 18.5 18.3 21.2 19.2 20.4 19.3 22.  20.3 20.5 17.3 18.8 21.4
 15.7 16.2 18.  14.3 19.2 19.6 23.  18.4 15.6 18.1 17.4 17.1 13.3 17.8
 14.  14.4 13.4 15.6 11.8 13.8 15.6 14.6 17.8 15.4 21.5 19.6 15.3 19.4
 17.  15.6 13.1 41.3 24.3 23.3 27.  50.  50.  50.  22.7 25.  50.  23.8
 23.8 22.3 17.4 19.1 23.1 23.6 22.6 29.4 23.2 24.6 29.9 37.2 39.8 36.2
 37.9 32.5 26.4 29.6 50.  32.  29.8 34.9 37.  30.5 36.4 31.1 29.1 50.
 33.3 30.3 34.6 34.9 32.9 24.1 42.3 48.5 50.  22.6 24.4 22.5 24.4 20.
 21.7 19.3 22.4 28.1 23.7 25.  23.3 28.7 21.5 23.  26.7 21.7 27.5 30.1
 44.8 50.  37.6 31.6 46.7 31.5 24.3 31.7 41.7 48.3 29.  24.  25.1 31.5
 23.7 23.3 22.  20.1 22.2 23.7 17.6 18.5 24.3 20.5 24.5 26.2 24.4 24.8
 29.6 42.8 21.9 20.9 44.  50.  36.  30.1 33.8 43.1 48.8 31.  36.5 22.8
 30.7 50.  43.5 20.7 21.1 25.2 24.4 35.2 32.4 32.  33.2 33.1 29.1 35.1
 45.4 35.4 46.  50.  32.2 22.  20.1 23.2 22.3 24.8 28.5 37.3 27.9 23.9
 21.7 28.6 27.1 20.3 22.5 29.  24.8 22.  26.4 33.1 36.1 28.4 33.4 28.2
 22.8 20.3 16.1 22.1 19.4 21.6 23.8 16.2 17.8 19.8 23.1 21.  23.8 23.1
 20.4 18.5 25.  24.6 23.  22.2 19.3 22.6 19.8 17.1 19.4 22.2 20.7 21.1
 19.5 18.5 20.6 19.  18.7 32.7 16.5 23.9 31.2 17.5 17.2 23.1 24.5 26.6
 22.9 24.1 18.6 30.1 18.2 20.6 17.8 21.7 22.7 22.6 25.  19.9 20.8 16.8
 21.9 27.5 21.9 23.1 50.  50.  50.  50.  50.  13.8 13.8 15.  13.9 13.3
 13.1 10.2 10.4 10.9 11.3 12.3  8.8  7.2 10.5  7.4 10.2 11.5 15.1 23.2
  9.7 13.8 12.7 13.1 12.5  8.5  5.   6.3  5.6  7.2 12.1  8.3  8.5  5.
 11.9 27.9 17.2 27.5 15.  17.2 17.9 16.3  7.   7.2  7.5 10.4  8.8  8.4
 16.7 14.2 20.8 13.4 11.7  8.3 10.2 10.9 11.   9.5 14.5 14.1 16.1 14.3
 11.7 13.4  9.6  8.7  8.4 12.8 10.5 17.1 18.4 15.4 10.8 11.8 14.9 12.6
 14.1 13.  13.4 15.2 16.1 17.8 14.9 14.1 12.7 13.5 14.9 20.  16.4 17.7
 19.5 20.2 21.4 19.9 19.  19.1 19.1 20.1 19.9 19.6 23.2 29.8 13.8 13.3
 16.7 12.  14.6 21.4 23.  23.7 25.  21.8 20.6 21.2 19.1 20.6 15.2  7.
  8.1 13.6 20.1 21.8 24.5 23.1 19.7 18.3 21.2 17.5 16.8 22.4 20.6 23.9
 22.  11.9] 
 shape= (506,)

(3)模型定义

# 定义 特征数据和标签数据的占位符
x = tf.placeholder(tf.float32,[None,12],name = "X")   
y = tf.placeholder(tf.float32,[None,1],name = "Y")   
# shape中 None 表示行的数量未知,在实际训练时决定一次代入多少行样本,从一个样本的随机SDG到批量SDG都可以
# 定义模型函数
with tf.name_scope("Model"):
    
    # w 初始值为shape = (12,1)的随机数
    w = tf.Variable(tf.random_normal([12,1],stddev=0.01),name = "W")
    
    # b 初始化值为1.0
    b = tf.Variable(1.0,name = "b")
    
    # w和x矩阵相乘,用matmul
    def model(x,w,b):
        return tf.matmul(x,w) + b
    
    # 预测计算操作,前向计算节点
    pred = model(x,w,b)

(4)模型训练

# 设置训练参数(超参数)

# 迭代轮次
train_epochs = 50

# 学习率
learning_rate = 0.01
# 定义均方差损失函数

with tf.name_scope("LossFunction"):
    loss_function = tf.reduce_mean(tf.pow(y - pred,2)) 
# 创建梯度下降优化器

optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_function)
# 声明会话

sess = tf.Session()
init = tf.global_variables_initializer()
# 启动会话
sess.run(init)
# 迭代训练

for epoch in range(train_epochs):
    loss_sum = 0
    for xs,ys in zip(x_data,y_data):
        # 数据变形,使其与spaceholder形状一致
        xs = xs.reshape(1,12)
        ys = ys.reshape(1,1) 
        
        _,loss = sess.run([optimizer,loss_function],feed_dict={x:xs,y:ys})
        #将xs与ys利用占位符喂给x和y,然后得到梯度优化后的损失函数,此时w和b的值存在_里,但是我们并不需要知道具体的值,故用一个符号简单代替即可
        
        loss_sum = loss_sum + loss
        
    # 打乱数据顺序,防止记住顺序
    xvalues, yvalues = shuffle(x_data,y_data)
    
    b0temp = b.eval(session=sess)
    w0temp = w.eval(session=sess)
    loss_average = loss_sum / len(y_data)
    
print("epoch=",epoch + 1,"loss = ",loss_average,"b = ",b0temp,"w = ",w0temp)
epoch= 50 loss =  nan b =  nan w =  [[nan]
 [nan]
 [nan]
 [nan]
 [nan]
 [nan]
 [nan]
 [nan]
 [nan]
 [nan]
 [nan]
 [nan]]

出现问题:单变量与多变量的梯度下降有别!

归一化:特征值/(特征值最大值-特征值最小值)

3、版本2:进行归一化处理

# 导入相关库
%matplotlib notebook
# import tensorflow as tf     V2.0用这个
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd  # 数据读取
from sklearn.utils import shuffle  #样本洗牌

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
tf.reset_default_graph()
#读取数据文件
df = pd.read_csv("data/boston.csv",header = 0)

#显示数据摘要描述信息
print(df.describe())
             CRIM         ZN       INDUS         CHAS         NOX          RM  \
count  506.000000  506.000000  506.000000  506.000000  506.000000  506.000000   
mean     3.613524   11.363636   11.136779    0.069170    0.554695    6.284634   
std      8.601545   23.322453    6.860353    0.253994    0.115878    0.702617   
min      0.006320    0.000000    0.460000    0.000000    0.385000    3.561000   
25%      0.082045    0.000000    5.190000    0.000000    0.449000    5.885500   
50%      0.256510    0.000000    9.690000    0.000000    0.538000    6.208500   
75%      3.677082   12.500000   18.100000    0.000000    0.624000    6.623500   
max     88.976200  100.000000   27.740000    1.000000    0.871000    8.780000   

              AGE         DIS         RAD         TAX     PTRATIO       LSTAT  \
count  506.000000  506.000000  506.000000  506.000000  506.000000  506.000000   
mean    68.574901    3.795043    9.549407  408.237154   18.455534   12.653063   
std     28.148861    2.105710    8.707259  168.537116    2.164946    7.141062   
min      2.900000    1.129600    1.000000  187.000000   12.600000    1.730000   
25%     45.025000    2.100175    4.000000  279.000000   17.400000    6.950000   
50%     77.500000    3.207450    5.000000  330.000000   19.050000   11.360000   
75%     94.075000    5.188425   24.000000  666.000000   20.200000   16.955000   
max    100.000000   12.126500   24.000000  711.000000   22.000000   37.970000   

             MEDV  
count  506.000000  
mean    22.532806  
std      9.197104  
min      5.000000  
25%     17.025000  
50%     21.200000  
75%     25.000000  
max     50.000000  
# 获取df的值
df = df.values   #只获取含数字的项,表头是字母不会包含进来
# 把 df 转换为 np 数组格式
df = np.array(df)
print(df)
[[6.3200e-03 1.8000e+01 2.3100e+00 ... 1.5300e+01 4.9800e+00 2.4000e+01]
 [2.7310e-02 0.0000e+00 7.0700e+00 ... 1.7800e+01 9.1400e+00 2.1600e+01]
 [2.7290e-02 0.0000e+00 7.0700e+00 ... 1.7800e+01 4.0300e+00 3.4700e+01]
 ...
 [6.0760e-02 0.0000e+00 1.1930e+01 ... 2.1000e+01 5.6400e+00 2.3900e+01]
 [1.0959e-01 0.0000e+00 1.1930e+01 ... 2.1000e+01 6.4800e+00 2.2000e+01]
 [4.7410e-02 0.0000e+00 1.1930e+01 ... 2.1000e+01 7.8800e+00 1.1900e+01]]

特征数据归一化

# 对特征数据【0到11】列做(0-1)归一化

# 对第i列的所有数据进行归一化
for i in range(12):
    df[:,i] = df[:,i] / (df[:,i].max() - df[:,i].min())
# x_data 为归一化后的前12列特征数据
x_data = df[:,:12]

# y_data 为最后一列标签数据:无需归一化
y_data = df[:,12]
print(x_data,"\n shape=",x_data.shape)
print(y_data,"\n shape=",y_data.shape)
[[7.10352762e-05 1.80000000e-01 8.46774194e-02 ... 5.64885496e-01
  1.62765957e+00 1.37417219e-01]
 [3.06957815e-04 0.00000000e+00 2.59164223e-01 ... 4.61832061e-01
  1.89361702e+00 2.52207506e-01]
 [3.06733020e-04 0.00000000e+00 2.59164223e-01 ... 4.61832061e-01
  1.89361702e+00 1.11203091e-01]
 ...
 [6.82927750e-04 0.00000000e+00 4.37316716e-01 ... 5.20992366e-01
  2.23404255e+00 1.55629139e-01]
 [1.23176518e-03 0.00000000e+00 4.37316716e-01 ... 5.20992366e-01
  2.23404255e+00 1.78807947e-01]
 [5.32876969e-04 0.00000000e+00 4.37316716e-01 ... 5.20992366e-01
  2.23404255e+00 2.17439294e-01]] 
 shape= (506, 12)
[24.  21.6 34.7 33.4 36.2 28.7 22.9 27.1 16.5 18.9 15.  18.9 21.7 20.4
 18.2 19.9 23.1 17.5 20.2 18.2 13.6 19.6 15.2 14.5 15.6 13.9 16.6 14.8
 18.4 21.  12.7 14.5 13.2 13.1 13.5 18.9 20.  21.  24.7 30.8 34.9 26.6
 25.3 24.7 21.2 19.3 20.  16.6 14.4 19.4 19.7 20.5 25.  23.4 18.9 35.4
 24.7 31.6 23.3 19.6 18.7 16.  22.2 25.  33.  23.5 19.4 22.  17.4 20.9
 24.2 21.7 22.8 23.4 24.1 21.4 20.  20.8 21.2 20.3 28.  23.9 24.8 22.9
 23.9 26.6 22.5 22.2 23.6 28.7 22.6 22.  22.9 25.  20.6 28.4 21.4 38.7
 43.8 33.2 27.5 26.5 18.6 19.3 20.1 19.5 19.5 20.4 19.8 19.4 21.7 22.8
 18.8 18.7 18.5 18.3 21.2 19.2 20.4 19.3 22.  20.3 20.5 17.3 18.8 21.4
 15.7 16.2 18.  14.3 19.2 19.6 23.  18.4 15.6 18.1 17.4 17.1 13.3 17.8
 14.  14.4 13.4 15.6 11.8 13.8 15.6 14.6 17.8 15.4 21.5 19.6 15.3 19.4
 17.  15.6 13.1 41.3 24.3 23.3 27.  50.  50.  50.  22.7 25.  50.  23.8
 23.8 22.3 17.4 19.1 23.1 23.6 22.6 29.4 23.2 24.6 29.9 37.2 39.8 36.2
 37.9 32.5 26.4 29.6 50.  32.  29.8 34.9 37.  30.5 36.4 31.1 29.1 50.
 33.3 30.3 34.6 34.9 32.9 24.1 42.3 48.5 50.  22.6 24.4 22.5 24.4 20.
 21.7 19.3 22.4 28.1 23.7 25.  23.3 28.7 21.5 23.  26.7 21.7 27.5 30.1
 44.8 50.  37.6 31.6 46.7 31.5 24.3 31.7 41.7 48.3 29.  24.  25.1 31.5
 23.7 23.3 22.  20.1 22.2 23.7 17.6 18.5 24.3 20.5 24.5 26.2 24.4 24.8
 29.6 42.8 21.9 20.9 44.  50.  36.  30.1 33.8 43.1 48.8 31.  36.5 22.8
 30.7 50.  43.5 20.7 21.1 25.2 24.4 35.2 32.4 32.  33.2 33.1 29.1 35.1
 45.4 35.4 46.  50.  32.2 22.  20.1 23.2 22.3 24.8 28.5 37.3 27.9 23.9
 21.7 28.6 27.1 20.3 22.5 29.  24.8 22.  26.4 33.1 36.1 28.4 33.4 28.2
 22.8 20.3 16.1 22.1 19.4 21.6 23.8 16.2 17.8 19.8 23.1 21.  23.8 23.1
 20.4 18.5 25.  24.6 23.  22.2 19.3 22.6 19.8 17.1 19.4 22.2 20.7 21.1
 19.5 18.5 20.6 19.  18.7 32.7 16.5 23.9 31.2 17.5 17.2 23.1 24.5 26.6
 22.9 24.1 18.6 30.1 18.2 20.6 17.8 21.7 22.7 22.6 25.  19.9 20.8 16.8
 21.9 27.5 21.9 23.1 50.  50.  50.  50.  50.  13.8 13.8 15.  13.9 13.3
 13.1 10.2 10.4 10.9 11.3 12.3  8.8  7.2 10.5  7.4 10.2 11.5 15.1 23.2
  9.7 13.8 12.7 13.1 12.5  8.5  5.   6.3  5.6  7.2 12.1  8.3  8.5  5.
 11.9 27.9 17.2 27.5 15.  17.2 17.9 16.3  7.   7.2  7.5 10.4  8.8  8.4
 16.7 14.2 20.8 13.4 11.7  8.3 10.2 10.9 11.   9.5 14.5 14.1 16.1 14.3
 11.7 13.4  9.6  8.7  8.4 12.8 10.5 17.1 18.4 15.4 10.8 11.8 14.9 12.6
 14.1 13.  13.4 15.2 16.1 17.8 14.9 14.1 12.7 13.5 14.9 20.  16.4 17.7
 19.5 20.2 21.4 19.9 19.  19.1 19.1 20.1 19.9 19.6 23.2 29.8 13.8 13.3
 16.7 12.  14.6 21.4 23.  23.7 25.  21.8 20.6 21.2 19.1 20.6 15.2  7.
  8.1 13.6 20.1 21.8 24.5 23.1 19.7 18.3 21.2 17.5 16.8 22.4 20.6 23.9
 22.  11.9] 
 shape= (506,)
# 定义 特征数据和标签数据的占位符
x = tf.placeholder(tf.float32,[None,12],name = "X")   
y = tf.placeholder(tf.float32,[None,1],name = "Y")   


# 定义模型函数
with tf.name_scope("Model"):
    
    # w 初始值为shape = (12,1)的随机数
    w = tf.Variable(tf.random_normal([12,1],stddev=0.01),name = "W")
    
    # b 初始化值为1.0
    b = tf.Variable(1.0,name = "b")
    
    # w和x矩阵相乘,用matmul
    def model(x,w,b):
        return tf.matmul(x,w) + b
    
    # 预测计算操作,前向计算节点
    pred = model(x,w,b)



# 设置训练参数(超参数)

# 迭代轮次
train_epochs = 50

# 学习率
learning_rate = 0.01


# 定义均方差损失函数
with tf.name_scope("LossFunction"):
    loss_function = tf.reduce_mean(tf.pow(y - pred,2)) 
    
    
# 创建梯度下降优化器

optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_function)


# 声明会话

sess = tf.Session()
init = tf.global_variables_initializer()
# 启动会话
sess.run(init)
# 迭代训练

for epoch in range(train_epochs):
    loss_sum = 0
    for xs,ys in zip(x_data,y_data):
        # 数据变形,使其与spaceholder形状一致
        xs = xs.reshape(1,12)
        ys = ys.reshape(1,1) 
        
        _,loss = sess.run([optimizer,loss_function],feed_dict={x:xs,y:ys})
        #将xs与ys利用占位符喂给x和y,然后得到梯度优化后的损失函数,此时w和b的值存在_里,但是我们并不需要知道具体的值,故用一个符号简单代替即可
        
        loss_sum = loss_sum + loss
        
    # 打乱数据顺序,防止记住顺序
    xvalues, yvalues = shuffle(x_data,y_data)
    
    b0temp = b.eval(session=sess)
    w0temp = w.eval(session=sess)
    loss_average = loss_sum / len(y_data)
    
    print("epoch=",epoch + 1,"loss = ",loss_average,"b = ",b0temp,"w = ",w0temp)
epoch= 1 loss =  44.337393957753434 b =  3.6102986 w =  [[-0.61777246]
 [ 1.3829758 ]
 [-0.7958472 ]
 [ 0.5048962 ]
 [ 2.5224879 ]
 [ 7.1773787 ]
 [-0.05069519]
 [ 0.8032707 ]
 [ 0.3868867 ]
 [ 0.33171538]
 [ 2.305457  ]
 [-4.3866787 ]]
epoch= 2 loss =  32.036438212210605 b =  3.9981961 w =  [[-1.1673872 ]
 [ 1.963905  ]
 [-1.5239497 ]
 [ 0.856303  ]
 [ 2.896329  ]
 [10.614244  ]
 [-0.81843406]
 [ 0.36249873]
 [ 0.6352789 ]
 [-0.25952753]
 [ 1.1442716 ]
 [-8.10359   ]]
epoch= 3 loss =  27.34001518933999 b =  4.278993 w =  [[ -1.6652527 ]
 [  2.1797404 ]
 [ -1.9723119 ]
 [  1.063931  ]
 [  3.1755688 ]
 [ 13.210045  ]
 [ -1.1952971 ]
 [ -0.17106298]
 [  0.94141656]
 [ -0.76077545]
 [  0.21417993]
 [-10.960338  ]]
epoch= 4 loss =  24.672504632380633 b =  4.500205 w =  [[ -2.1225903 ]
 [  2.2053273 ]
 [ -2.2477956 ]
 [  1.1574029 ]
 [  3.3579588 ]
 [ 15.199795  ]
 [ -1.3577286 ]
 [ -0.71559364]
 [  1.2651606 ]
 [ -1.1928074 ]
 [ -0.518474  ]
 [-13.182013  ]]
epoch= 5 loss =  23.104833186311204 b =  4.6888976 w =  [[ -2.546973 ]
 [  2.1386263]
 [ -2.4139462]
 [  1.1708477]
 [  3.4531305]
 [ 16.742895 ]
 [ -1.4043621]
 [ -1.2321597]
 [  1.5853091]
 [ -1.5699434]
 [ -1.0881089]
 [-14.9242115]]
epoch= 6 loss =  22.160490104904515 b =  4.860485 w =  [[ -2.9436767]
 [  2.0337877]
 [ -2.509238 ]
 [  1.1334271]
 [  3.4749079]
 [ 17.950644 ]
 [ -1.3902718]
 [ -1.704704 ]
 [  1.8913547]
 [ -1.9023851]
 [ -1.5261304]
 [-16.297922 ]]
epoch= 7 loss =  21.580403804739078 b =  5.023739 w =  [[ -3.3164914]
 [  1.9199821]
 [ -2.5574613]
 [  1.0670242]
 [  3.437573 ]
 [ 18.902704 ]
 [ -1.346074 ]
 [ -2.1290264]
 [  2.1787562]
 [ -2.1976423]
 [ -1.85953  ]
 [-17.384504 ]]
epoch= 8 loss =  21.21740849930612 b =  5.183575 w =  [[ -3.6682456 ]
 [  1.8121917 ]
 [ -2.5736852 ]
 [  0.98677564]
 [  3.3541694 ]
 [ 19.657541  ]
 [ -1.2886808 ]
 [ -2.5067797 ]
 [  2.4461915 ]
 [ -2.4614594 ]
 [ -2.1107812 ]
 [-18.244934  ]]
epoch= 9 loss =  20.98522323375682 b =  5.3427134 w =  [[ -4.0011134]
 [  1.7173829]
 [ -2.5677123]
 [  0.9024809]
 [  3.2359138]
 [ 20.258835 ]
 [ -1.227302 ]
 [ -2.8422096]
 [  2.6940217]
 [ -2.6983678]
 [ -2.2981663]
 [-18.92591  ]]
epoch= 10 loss =  20.832208754840092 b =  5.5026116 w =  [[ -4.316819 ]
 [  1.6380858]
 [ -2.5460882]
 [  0.8200648]
 [  3.0920887]
 [ 20.739676 ]
 [ -1.1668166]
 [ -3.140446 ]
 [  2.9234095]
 [ -2.9120276]
 [ -2.436327 ]
 [-19.463686 ]]
epoch= 11 loss =  20.727108475022284 b =  5.663972 w =  [[ -4.616779 ]
 [  1.5744346]
 [ -2.5133312]
 [  0.7427979]
 [  2.9301863]
 [ 21.125591 ]
 [ -1.1097097]
 [ -3.4066348]
 [  3.1358395]
 [ -3.1054864]
 [ -2.5369139]
 [-19.886688 ]]
epoch= 12 loss =  20.65087399468323 b =  5.827078 w =  [[ -4.9021707 ]
 [  1.5253503 ]
 [ -2.472628  ]
 [  0.67222476]
 [  2.7561388 ]
 [ 21.436354  ]
 [ -1.0571206 ]
 [ -3.6455274 ]
 [  3.3328485 ]
 [ -3.2813058 ]
 [ -2.609074  ]
 [-20.217522  ]]
epoch= 13 loss =  20.591912843748347 b =  5.9919696 w =  [[ -5.1740084]
 [  1.4892125]
 [ -2.4262955]
 [  0.608814 ]
 [  2.5745807]
 [ 21.687426 ]
 [ -1.0094529]
 [ -3.8613236]
 [  3.5158975]
 [ -3.4416852]
 [ -2.6599605]
 [-20.474157 ]]
epoch= 14 loss =  20.543204317078576 b =  6.1585617 w =  [[ -5.4331717 ]
 [  1.4642256 ]
 [ -2.3760529 ]
 [  0.5524116 ]
 [  2.3890753 ]
 [ 21.890976  ]
 [ -0.96671915]
 [ -4.057625  ]
 [  3.6863337 ]
 [ -3.5885103 ]
 [ -2.6951325 ]
 [-20.671051  ]]
epoch= 15 loss =  20.500552776940673 b =  6.3266907 w =  [[ -5.680442  ]
 [  1.448629  ]
 [ -2.323208  ]
 [  0.50252676]
 [  2.2023227 ]
 [ 22.056625  ]
 [ -0.92872787]
 [ -4.2374883 ]
 [  3.84534   ]
 [ -3.7234159 ]
 [ -2.7188869 ]
 [-20.819813  ]]
epoch= 16 loss =  20.46148518907175 b =  6.496175 w =  [[ -5.9165115 ]
 [  1.4407969 ]
 [ -2.268759  ]
 [  0.45851797]
 [  2.0163589 ]
 [ 22.191914  ]
 [ -0.8951785 ]
 [ -4.4034514 ]
 [  3.9939713 ]
 [ -3.8478165 ]
 [ -2.7344937 ]
 [-20.92986   ]]
epoch= 17 loss =  20.424589754659305 b =  6.6668124 w =  [[ -6.1420093]
 [  1.4392806]
 [ -2.2134895]
 [  0.4197028]
 [  1.8326569]
 [ 22.302906 ]
 [ -0.865742 ]
 [ -4.5576286]
 [  4.1331363]
 [ -3.9629526]
 [ -2.7444816]
 [-21.008703 ]]
epoch= 18 loss =  20.389084714326096 b =  6.8383956 w =  [[ -6.3574996 ]
 [  1.4428155 ]
 [ -2.158008  ]
 [  0.38542095]
 [  1.6522747 ]
 [ 22.394419  ]
 [ -0.8400744 ]
 [ -4.701762  ]
 [  4.2636395 ]
 [ -4.069903  ]
 [ -2.7507432 ]
 [-21.062569  ]]
epoch= 19 loss =  20.354564370924116 b =  7.010729 w =  [[ -6.5635095 ]
 [  1.4503263 ]
 [ -2.102796  ]
 [  0.35506266]
 [  1.4759431 ]
 [ 22.470253  ]
 [ -0.81783974]
 [ -4.837295  ]
 [  4.3861775 ]
 [ -4.1695976 ]
 [ -2.7547047 ]
 [-21.096367  ]]
epoch= 20 loss =  20.32083476652019 b =  7.183645 w =  [[ -6.7605214 ]
 [  1.4609047 ]
 [ -2.0482311 ]
 [  0.32808915]
 [  1.3041507 ]
 [ 22.533424  ]
 [ -0.79871434]
 [ -4.9653964 ]
 [  4.5013595 ]
 [ -4.262861  ]
 [ -2.7574115 ]
 [-21.114166  ]]
epoch= 21 loss =  20.28783740242552 b =  7.3569617 w =  [[ -6.948975  ]
 [  1.4737933 ]
 [ -1.9946077 ]
 [  0.3040303 ]
 [  1.1371793 ]
 [ 22.586391  ]
 [ -0.78240204]
 [ -5.0870357 ]
 [  4.6097302 ]
 [ -4.350404  ]
 [ -2.759646  ]
 [-21.119238  ]]
epoch= 22 loss =  20.255557866654083 b =  7.5305247 w =  [[ -7.1292915 ]
 [  1.4883684 ]
 [ -1.9421594 ]
 [  0.28248435]
 [  0.9751797 ]
 [ 22.631052  ]
 [ -0.7686177 ]
 [ -5.2030044 ]
 [  4.7117853 ]
 [ -4.4328394 ]
 [ -2.761948  ]
 [-21.114258  ]]
epoch= 23 loss =  20.224017936810313 b =  7.704196 w =  [[ -7.301845  ]
 [  1.5041173 ]
 [ -1.8910623 ]
 [  0.2631091 ]
 [  0.81818926]
 [ 22.668938  ]
 [ -0.75710267]
 [ -5.3139567 ]
 [  4.8079605 ]
 [ -4.510705  ]
 [ -2.7647078 ]
 [-21.10137   ]]
epoch= 24 loss =  20.193252790668627 b =  7.877838 w =  [[ -7.467009  ]
 [  1.5206252 ]
 [ -1.8414457 ]
 [  0.24561568]
 [  0.6661698 ]
 [ 22.70127   ]
 [ -0.74761546]
 [ -5.420425  ]
 [  4.898644  ]
 [ -4.5844703 ]
 [ -2.7681818 ]
 [-21.082333  ]]
epoch= 25 loss =  20.163283503926376 b =  8.051342 w =  [[ -7.6251125 ]
 [  1.5375584 ]
 [ -1.7934122 ]
 [  0.22976038]
 [  0.51901996]
 [ 22.729036  ]
 [ -0.7399396 ]
 [ -5.5228634 ]
 [  4.9841967 ]
 [ -4.654546  ]
 [ -2.7725394 ]
 [-21.058584  ]]
epoch= 26 loss =  20.134130553488504 b =  8.224596 w =  [[ -7.776479  ]
 [  1.5546496 ]
 [ -1.7470256 ]
 [  0.21534127]
 [  0.3766105 ]
 [ 22.752962  ]
 [ -0.73386884]
 [ -5.6216326 ]
 [  5.06495   ]
 [ -4.7212744 ]
 [ -2.777862  ]
 [-21.031239  ]]
epoch= 27 loss =  20.105817040083704 b =  8.3974905 w =  [[ -7.92141   ]
 [  1.5716902 ]
 [ -1.7023295 ]
 [  0.20218605]
 [  0.23877427]
 [ 22.773718  ]
 [ -0.729224  ]
 [ -5.717039  ]
 [  5.141205  ]
 [ -4.784975  ]
 [ -2.784202  ]
 [-21.001255  ]]
epoch= 28 loss =  20.078329738080154 b =  8.56994 w =  [[ -8.06019   ]
 [  1.5885159 ]
 [ -1.659346  ]
 [  0.1901504 ]
 [  0.10533985]
 [ 22.791744  ]
 [ -0.7258338 ]
 [ -5.809349  ]
 [  5.213251  ]
 [ -4.8459077 ]
 [ -2.7915494 ]
 [-20.969383  ]]
epoch= 29 loss =  20.05167671847701 b =  8.741887 w =  [[ -8.193069  ]
 [  1.6050023 ]
 [ -1.6180803 ]
 [  0.17911133]
 [ -0.02388395]
 [ 22.807428  ]
 [ -0.7235491 ]
 [ -5.8987784 ]
 [  5.281348  ]
 [ -4.9043016 ]
 [ -2.7998886 ]
 [-20.93623   ]]
epoch= 30 loss =  20.02585880453415 b =  8.913233 w =  [[ -8.320327  ]
 [  1.6210549 ]
 [ -1.5785275 ]
 [  0.16896608]
 [ -0.14908668]
 [ 22.82112   ]
 [ -0.7222309 ]
 [ -5.9855075 ]
 [  5.345736  ]
 [ -4.9603696 ]
 [ -2.809181  ]
 [-20.902306  ]]
epoch= 31 loss =  20.00084181768566 b =  9.08393 w =  [[ -8.442204  ]
 [  1.6366044 ]
 [ -1.5406682 ]
 [  0.15962708]
 [ -0.2704553 ]
 [ 22.833027  ]
 [ -0.72175264]
 [ -6.0697007 ]
 [  5.4066467 ]
 [ -5.0142846 ]
 [ -2.8193712 ]
 [-20.867983  ]]
epoch= 32 loss =  19.976618240610218 b =  9.253911 w =  [[ -8.558926  ]
 [  1.651602  ]
 [ -1.504473  ]
 [  0.15101798]
 [ -0.38817817]
 [ 22.843365  ]
 [ -0.7220032 ]
 [ -6.151498  ]
 [  5.464299  ]
 [ -5.0662036 ]
 [ -2.8304014 ]
 [-20.833578  ]]
epoch= 33 loss =  19.95316042354514 b =  9.423108 w =  [[ -8.670715  ]
 [  1.6660159 ]
 [ -1.4699101 ]
 [  0.14307168]
 [ -0.50243527]
 [ 22.852308  ]
 [ -0.72288346]
 [ -6.2310157 ]
 [  5.518886  ]
 [ -5.1162667 ]
 [ -2.8422055 ]
 [-20.799343  ]]
epoch= 34 loss =  19.930450114194123 b =  9.591489 w =  [[ -8.777779  ]
 [  1.679826  ]
 [ -1.4369352 ]
 [  0.13573325]
 [ -0.6133945 ]
 [ 22.85996   ]
 [ -0.7242962 ]
 [ -6.3083735 ]
 [  5.5705957 ]
 [ -5.164591  ]
 [ -2.854715  ]
 [-20.765516  ]]
epoch= 35 loss =  19.90846017309892 b =  9.759006 w =  [[ -8.880324  ]
 [  1.6930271 ]
 [ -1.4055077 ]
 [  0.12895079]
 [ -0.7212243 ]
 [ 22.866415  ]
 [ -0.7261652 ]
 [ -6.3836584 ]
 [  5.6196127 ]
 [ -5.211291  ]
 [ -2.867862  ]
 [-20.73217   ]]
epoch= 36 loss =  19.887165716818934 b =  9.925617 w =  [[ -8.978544  ]
 [  1.7056178 ]
 [ -1.3755795 ]
 [  0.12268099]
 [ -0.8260821 ]
 [ 22.871796  ]
 [ -0.7284178 ]
 [ -6.45696   ]
 [  5.666094  ]
 [ -5.2564545 ]
 [ -2.881594  ]
 [-20.699532  ]]
epoch= 37 loss =  19.866540418133514 b =  10.091272 w =  [[ -9.072603  ]
 [  1.7176012 ]
 [ -1.3471004 ]
 [  0.11688287]
 [ -0.92811525]
 [ 22.87616   ]
 [ -0.730987  ]
 [ -6.5283713 ]
 [  5.7102056 ]
 [ -5.300173  ]
 [ -2.8958457 ]
 [-20.667597  ]]
epoch= 38 loss =  19.84655585635008 b =  10.255973 w =  [[ -9.16269  ]
 [  1.7289914]
 [ -1.3200164]
 [  0.1115229]
 [ -1.02746  ]
 [ 22.87953  ]
 [ -0.7338145]
 [ -6.5979457]
 [  5.752091 ]
 [ -5.342516 ]
 [ -2.910552 ]
 [-20.636497 ]]
epoch= 39 loss =  19.827194230022545 b =  10.419676 w =  [[ -9.248955  ]
 [  1.7398028 ]
 [ -1.2942772 ]
 [  0.10656872]
 [ -1.1242509 ]
 [ 22.881989  ]
 [ -0.7368505 ]
 [ -6.6657662 ]
 [  5.7918835 ]
 [ -5.383562  ]
 [ -2.9256687 ]
 [-20.606283  ]]
epoch= 40 loss =  19.80842495173308 b =  10.582332 w =  [[ -9.331573  ]
 [  1.7500488 ]
 [ -1.2698294 ]
 [  0.1019915 ]
 [ -1.218611  ]
 [ 22.883596  ]
 [ -0.74004924]
 [ -6.7318883 ]
 [  5.82971   ]
 [ -5.4233704 ]
 [ -2.9411438 ]
 [-20.576973  ]]
epoch= 41 loss =  19.79022036244805 b =  10.743951 w =  [[ -9.410695  ]
 [  1.7597514 ]
 [ -1.2466215 ]
 [  0.09776533]
 [ -1.3106596 ]
 [ 22.884367  ]
 [ -0.74337256]
 [ -6.7963815 ]
 [  5.865695  ]
 [ -5.461994  ]
 [ -2.9569368 ]
 [-20.548565  ]]
epoch= 42 loss =  19.772557086552826 b =  10.90449 w =  [[ -9.486461  ]
 [  1.7689276 ]
 [ -1.224602  ]
 [  0.09386475]
 [ -1.4005071 ]
 [ 22.884365  ]
 [ -0.74678785]
 [ -6.8592973 ]
 [  5.8999496 ]
 [ -5.499495  ]
 [ -2.973007  ]
 [-20.521082  ]]
epoch= 43 loss =  19.755417098352222 b =  11.063955 w =  [[ -9.559009  ]
 [  1.7776009 ]
 [ -1.2037163 ]
 [  0.09026939]
 [ -1.4882517 ]
 [ 22.883583  ]
 [ -0.75026137]
 [ -6.9206896 ]
 [  5.9325867 ]
 [ -5.535919  ]
 [ -2.989303  ]
 [-20.494566  ]]
epoch= 44 loss =  19.738778096573792 b =  11.222289 w =  [[ -9.628478  ]
 [  1.785791  ]
 [ -1.1839144 ]
 [  0.08695884]
 [ -1.5739897 ]
 [ 22.882086  ]
 [ -0.7537668 ]
 [ -6.980616  ]
 [  5.963705  ]
 [ -5.571313  ]
 [ -3.0057926 ]
 [-20.468973  ]]
epoch= 45 loss =  19.722612199989562 b =  11.3795185 w =  [[ -9.694994  ]
 [  1.7935212 ]
 [ -1.1651485 ]
 [  0.08391371]
 [ -1.6578104 ]
 [ 22.879871  ]
 [ -0.7572823 ]
 [ -7.039118  ]
 [  5.9933887 ]
 [ -5.605718  ]
 [ -3.0224457 ]
 [-20.444311  ]]
epoch= 46 loss =  19.706914213606513 b =  11.535623 w =  [[ -9.75869   ]
 [  1.8008089 ]
 [ -1.1473718 ]
 [  0.081117  ]
 [ -1.7398065 ]
 [ 22.877012  ]
 [ -0.76079136]
 [ -7.096251  ]
 [  6.0217304 ]
 [ -5.6391726 ]
 [ -3.0392458 ]
 [-20.420582  ]]
epoch= 47 loss =  19.69165743188988 b =  11.69058 w =  [[ -9.819683  ]
 [  1.8076748 ]
 [ -1.130533  ]
 [  0.07855233]
 [ -1.8200492 ]
 [ 22.87351   ]
 [ -0.76426905]
 [ -7.152051  ]
 [  6.0488124 ]
 [ -5.6717143 ]
 [ -3.0561526 ]
 [-20.397795  ]]
epoch= 48 loss =  19.67681932402663 b =  11.844392 w =  [[ -9.87808   ]
 [  1.814139  ]
 [ -1.1145889 ]
 [  0.07620447]
 [ -1.8986154 ]
 [ 22.869387  ]
 [ -0.7677054 ]
 [ -7.2065682 ]
 [  6.0747123 ]
 [ -5.703382  ]
 [ -3.073144  ]
 [-20.37588   ]]
epoch= 49 loss =  19.662384319130293 b =  11.997031 w =  [[ -9.933989  ]
 [  1.8202229 ]
 [ -1.0994942 ]
 [  0.07405989]
 [ -1.9755716 ]
 [ 22.864666  ]
 [ -0.77108556]
 [ -7.25984   ]
 [  6.0995016 ]
 [ -5.7342043 ]
 [ -3.0901952 ]
 [-20.354834  ]]
epoch= 50 loss =  19.648338488437822 b =  12.148509 w =  [[ -9.987511 ]
 [  1.8259448]
 [ -1.085209 ]
 [  0.0721045]
 [ -2.0509896]
 [ 22.859385 ]
 [ -0.7744012]
 [ -7.3119183]
 [  6.1232414]
 [ -5.764211 ]
 [ -3.1072927]
 [-20.33465  ]]

4、模型应用

模型一般应该用来预测新的样本的值

本例506 条数据都用来训练了,暂时没有新的数据

# 指定某一条看看效果

n = 348
x_test = x_data[n]
x_test = x_test.reshape(1,12)

predict = sess.run(pred,feed_dict={x:x_test})
print("预测值:%f"%predict)

target = y_data[n]
print("标签值:%f"%target)
预测值:23.972691
标签值:24.500000
# 随机指定某一条数据

n = np.random.randint(506)
print(n)
x_test = x_data[n]
x_test = x_test.reshape(1,12)

predict = sess.run(pred,feed_dict={x:x_test})
print("预测值:%f"%predict)

target = y_data[n]
print("标签值:%f"%target)
448
预测值:14.384734
标签值:14.100000

版本3:可视化训练过程中的损失值

loss_list = []

# 迭代训练

for epoch in range(train_epochs):
    loss_sum = 0
    for xs,ys in zip(x_data,y_data):
        # 数据变形,使其与spaceholder形状一致
        xs = xs.reshape(1,12)
        ys = ys.reshape(1,1) 
        
        _,loss = sess.run([optimizer,loss_function],feed_dict={x:xs,y:ys})
        #将xs与ys利用占位符喂给x和y,然后得到梯度优化后的损失函数,此时w和b的值存在_里,但是我们并不需要知道具体的值,故用一个符号简单代替即可
        
        loss_sum = loss_sum + loss
        
    # 打乱数据顺序,防止记住顺序
    xvalues, yvalues = shuffle(x_data,y_data)
    
    b0temp = b.eval(session=sess)
    w0temp = w.eval(session=sess)
    loss_average = loss_sum / len(y_data)
    
    loss_list.append(loss_average)
print("epoch=",epoch + 1,"loss = ",loss_average,"b = ",b0temp,"w = ",w0temp)
plt.plot(loss_list)
epoch= 50 loss =  19.233434925812812 b =  18.298111 w =  [[-11.041447  ]
 [  1.8939558 ]
 [ -0.8549625 ]
 [  0.07317752]
 [ -4.6470685 ]
 [ 22.261425  ]
 [ -0.8367696 ]
 [ -8.972372  ]
 [  6.701183  ]
 [ -6.657288  ]
 [ -3.8620994 ]
 [-19.961855  ]]




[]

训练次数不是越多越好,有可能出现过拟合,即只对训练集表现出较好的预测能力,但是对新数据的预测能力会下降

版本4:加上TensorBoard

# 声明会话

sess = tf.Session()
init = tf.global_variables_initializer()
# 启动会话
sess.run(init)
# 为TensorBoard可视化准备数据
# 设置日志存储目录
logdir = 'd:/log'
# 创建一个操作,用于记录损失值loss,后面在TensorBoard中SCALARS栏可见
sum_loss_op = tf.summary.scalar("loss",loss_function)

# 把所有需要记录摘要日志文件的合并,方便一次性写入
merged = tf.summary.merge_all()
# 启动会话
sess.run(init)
# 创建摘要文件写入器(FileWriter),将计算图写入摘要文件,后面在TensorBoard中GRAPHS栏可见
writer = tf.summary.FileWriter(logdir,sess.graph)
# 迭代训练

for epoch in range(train_epochs):
    loss_sum = 0.0
    for xs,ys in zip(x_data,y_data):
        # 数据变形,使其与spaceholder形状一致
        xs = xs.reshape(1,12)
        ys = ys.reshape(1,1) 
        
        _,summary_str,loss = sess.run([optimizer,sum_loss_op,loss_function],feed_dict={x:xs,y:ys})
        #将xs与ys利用占位符喂给x和y,然后得到梯度优化后的损失函数,此时w和b的值存在_里,但是我们并不需要知道具体的值,故用一个符号简单代替即可
        writer.add_summary(summary_str,epoch)
        
        loss_sum = loss_sum + loss
        
    # 打乱数据顺序,防止记住顺序
    xvalues, yvalues = shuffle(x_data,y_data)
    
    b0temp = b.eval(session=sess)
    w0temp = w.eval(session=sess)
    loss_average = loss_sum / len(y_data)
    
    print("epoch=",epoch + 1,"loss = ",loss_average,"b = ",b0temp,"w = ",w0temp)
epoch= 1 loss =  44.346984727579645 b =  3.6101098 w =  [[-0.6028991 ]
 [ 1.3716252 ]
 [-0.7876034 ]
 [ 0.49574542]
 [ 2.5262067 ]
 [ 7.176039  ]
 [-0.05296665]
 [ 0.8115916 ]
 [ 0.3888658 ]
 [ 0.3218941 ]
 [ 2.3052237 ]
 [-4.386189  ]]
epoch= 2 loss =  32.04108791965448 b =  3.9981675 w =  [[-1.1532013 ]
 [ 1.9551501 ]
 [-1.5167841 ]
 [ 0.85052747]
 [ 2.8998208 ]
 [10.613817  ]
 [-0.8205696 ]
 [ 0.37133965]
 [ 0.6370229 ]
 [-0.2686866 ]
 [ 1.1436037 ]
 [-8.103704  ]]
epoch= 3 loss =  27.34222933747573 b =  4.279024 w =  [[ -1.6516982 ]
 [  2.1728437 ]
 [ -1.9659317 ]
 [  1.0603381 ]
 [  3.178882  ]
 [ 13.210131  ]
 [ -1.1971354 ]
 [ -0.16200757]
 [  0.9431066 ]
 [ -0.7692719 ]
 [  0.21318811]
 [-10.960832  ]]
epoch= 4 loss =  24.673796940525087 b =  4.50023 w =  [[ -2.1096234]
 [  2.1997828]
 [ -2.242004 ]
 [  1.1552098]
 [  3.3611088]
 [ 15.200149 ]
 [ -1.3592125]
 [ -0.706517 ]
 [  1.2668993]
 [ -1.2006631]
 [ -0.5196944]
 [-13.182718 ]]
epoch= 5 loss =  23.105802523715468 b =  4.688873 w =  [[ -2.5345595]
 [  2.1340857]
 [ -2.4086115]
 [  1.1695445]
 [  3.456134 ]
 [ 16.743366 ]
 [ -1.4054959]
 [ -1.2231828]
 [  1.5871502]
 [ -1.5771949]
 [ -1.089483 ]
 [-14.925047 ]]
epoch= 6 loss =  22.16134066072484 b =  4.8603954 w =  [[ -2.9317865]
 [  2.0300012]
 [ -2.5042703]
 [  1.1326815]
 [  3.4777637]
 [ 17.95115  ]
 [ -1.3910842]
 [ -1.6959045]
 [  1.8933216]
 [ -1.9090716]
 [ -1.5275977]
 [-16.298841 ]]
epoch= 7 loss =  21.58121336979395 b =  5.023566 w =  [[ -3.3051083]
 [  1.9167767]
 [ -2.5527997]
 [  1.0666223]
 [  3.4402902]
 [ 18.903196 ]
 [ -1.3466065]
 [ -2.1204534]
 [  2.1808507]
 [ -2.2038078]
 [ -1.8610458]
 [-17.385468 ]]
epoch= 8 loss =  21.218201108343276 b =  5.183318 w =  [[ -3.657341 ]
 [  1.8094352]
 [ -2.5692813]
 [  0.9865797]
 [  3.3567495]
 [ 19.657984 ]
 [ -1.2889739]
 [ -2.4984608]
 [  2.4484055]
 [ -2.4671469]
 [ -2.1123066]
 [-18.245943 ]]
epoch= 9 loss =  20.985988155955273 b =  5.3423796 w =  [[ -3.9906657]
 [  1.7149824]
 [ -2.5635278]
 [  0.9024041]
 [  3.2383654]
 [ 20.259172 ]
 [ -1.2273903]
 [ -2.8341599]
 [  2.6963465]
 [ -2.703608 ]
 [ -2.299666 ]
 [-18.926945 ]]
epoch= 10 loss =  20.83295870112948 b =  5.5021973 w =  [[ -4.306814  ]
 [  1.6359707 ]
 [ -2.542102  ]
 [  0.82005054]
 [  3.0944092 ]
 [ 20.73996   ]
 [ -1.1667447 ]
 [ -3.1326795 ]
 [  2.92583   ]
 [ -2.916861  ]
 [ -2.4378006 ]
 [-19.464714  ]]
epoch= 11 loss =  20.727827887255383 b =  5.6634765 w =  [[ -4.607194 ]
 [  1.5725503]
 [ -2.5095208]
 [  0.7428122]
 [  2.9323804]
 [ 21.125816 ]
 [ -1.1095084]
 [ -3.3991563]
 [  3.1383364]
 [ -3.1099505]
 [ -2.5383441]
 [-19.887714 ]]
epoch= 12 loss =  20.651557802794766 b =  5.8265033 w =  [[ -4.892995  ]
 [  1.523658  ]
 [ -2.4689786 ]
 [  0.67224526]
 [  2.7582135 ]
 [ 21.436516  ]
 [ -1.056813  ]
 [ -3.638334  ]
 [  3.3354006 ]
 [ -3.2854328 ]
 [ -2.6104448 ]
 [-20.218552  ]]
epoch= 13 loss =  20.592560407845724 b =  5.991331 w =  [[ -5.16522  ]
 [  1.4876795]
 [ -2.4227934]
 [  0.6088305]
 [  2.5765417]
 [ 21.687523 ]
 [ -1.0090615]
 [ -3.8544097]
 [  3.5184896]
 [ -3.445503 ]
 [ -2.6612668]
 [-20.475204 ]]
epoch= 14 loss =  20.543824973743366 b =  6.1578693 w =  [[ -5.424757  ]
 [  1.4628285 ]
 [ -2.3726912 ]
 [  0.5524165 ]
 [  2.3909328 ]
 [ 21.891     ]
 [ -0.96625865]
 [ -4.0509844 ]
 [  3.6889513 ]
 [ -3.5920413 ]
 [ -2.696368  ]
 [-20.672127  ]]
epoch= 15 loss =  20.501146773610966 b =  6.32595 w =  [[ -5.672386 ]
 [  1.44735  ]
 [ -2.319976 ]
 [  0.5025167]
 [  2.20409  ]
 [ 22.056557 ]
 [ -0.9282105]
 [ -4.2311087]
 [  3.8479712]
 [ -3.7266824]
 [ -2.7200403]
 [-20.820915 ]]
epoch= 16 loss =  20.462054043941045 b =  6.4953823 w =  [[ -5.9088006 ]
 [  1.4396188 ]
 [ -2.265652  ]
 [  0.45849186]
 [  2.0180266 ]
 [ 22.191826  ]
 [ -0.8946232 ]
 [ -4.397331  ]
 [  3.9965947 ]
 [ -3.8508472 ]
 [ -2.735589  ]
 [-20.930954  ]]
epoch= 17 loss =  20.425134463581962 b =  6.6659684 w =  [[ -6.134625  ]
 [  1.4381878 ]
 [ -2.2105005 ]
 [  0.41966116]
 [  1.8342335 ]
 [ 22.302794  ]
 [ -0.8651574 ]
 [ -4.5517592 ]
 [  4.1357436 ]
 [ -3.965766  ]
 [ -2.7455106 ]
 [-21.009815  ]]
epoch= 18 loss =  20.389595499823777 b =  6.837511 w =  [[ -6.3504333 ]
 [  1.4418002 ]
 [ -2.155134  ]
 [  0.38536286]
 [  1.6537652 ]
 [ 22.394276  ]
 [ -0.83946913]
 [ -4.696138  ]
 [  4.2662187 ]
 [ -4.07251   ]
 [ -2.7517076 ]
 [-21.063667  ]]
epoch= 19 loss =  20.355049912910186 b =  7.0098114 w =  [[ -6.556744  ]
 [  1.4493804 ]
 [ -2.1000302 ]
 [  0.35499093]
 [  1.4773606 ]
 [ 22.470066  ]
 [ -0.8172184 ]
 [ -4.8319025 ]
 [  4.3887243 ]
 [ -4.172024  ]
 [ -2.7556016 ]
 [-21.097462  ]]
epoch= 20 loss =  20.321303464425828 b =  7.18269 w =  [[ -6.754046  ]
 [  1.4600201 ]
 [ -2.045567  ]
 [  0.32800516]
 [  1.305491  ]
 [ 22.533237  ]
 [ -0.79808664]
 [ -4.960228  ]
 [  4.5038643 ]
 [ -4.265119  ]
 [ -2.7582548 ]
 [-21.115255  ]]
epoch= 21 loss =  20.288277413960977 b =  7.3559747 w =  [[ -6.94278  ]
 [  1.4729656]
 [ -1.992043 ]
 [  0.3039353]
 [  1.1384501]
 [ 22.586176 ]
 [ -0.7817691]
 [ -5.0820823]
 [  4.612189 ]
 [ -4.352504 ]
 [ -2.760428 ]
 [-21.120298 ]]
epoch= 22 loss =  20.255976657753536 b =  7.529512 w =  [[ -7.123358  ]
 [  1.4875929 ]
 [ -1.9396926 ]
 [  0.28238067]
 [  0.9763845 ]
 [ 22.630833  ]
 [ -0.767987  ]
 [ -5.1982617 ]
 [  4.714188  ]
 [ -4.434795  ]
 [ -2.7626817 ]
 [-21.115284  ]]
epoch= 23 loss =  20.22442039234261 b =  7.7031617 w =  [[ -7.296164 ]
 [  1.5033908]
 [ -1.8886874]
 [  0.2629979]
 [  0.8193346]
 [ 22.668709 ]
 [ -0.7564766]
 [ -5.30941  ]
 [  4.8103   ]
 [ -4.512533 ]
 [ -2.7653892]
 [-21.102367 ]]
epoch= 24 loss =  20.193632152690782 b =  7.8767877 w =  [[ -7.4615693 ]
 [  1.5199423 ]
 [ -1.8391632 ]
 [  0.24549845]
 [  0.66725415]
 [ 22.701052  ]
 [ -0.74700016]
 [ -5.4160767 ]
 [  4.9009175 ]
 [ -4.5861807 ]
 [ -2.7688181 ]
 [-21.083317  ]]
epoch= 25 loss =  20.16363997791389 b =  8.05026 w =  [[ -7.6199126 ]
 [  1.5369147 ]
 [ -1.791215  ]
 [  0.22963867]
 [  0.5200528 ]
 [ 22.728811  ]
 [ -0.73933184]
 [ -5.5186944 ]
 [  4.986406  ]
 [ -4.6561403 ]
 [ -2.7731261 ]
 [-21.059525  ]]
epoch= 26 loss =  20.134472406776553 b =  8.223491 w =  [[ -7.7715034 ]
 [  1.5540417 ]
 [ -1.7449114 ]
 [  0.21521527]
 [  0.37759066]
 [ 22.75276   ]
 [ -0.7332736 ]
 [ -5.6176376 ]
 [  5.067092  ]
 [ -4.7227654 ]
 [ -2.7784088 ]
 [-21.03216   ]]
epoch= 27 loss =  20.106140428734335 b =  8.396376 w =  [[ -7.91665   ]
 [  1.5711161 ]
 [ -1.7002957 ]
 [  0.20205604]
 [  0.23970994]
 [ 22.773497  ]
 [ -0.7286397 ]
 [ -5.7132163 ]
 [  5.143287  ]
 [ -4.7863665 ]
 [ -2.7847047 ]
 [-21.002142  ]]
epoch= 28 loss =  20.078638766050148 b =  8.5688305 w =  [[ -8.055634  ]
 [  1.5879754 ]
 [ -1.6573887 ]
 [  0.19001842]
 [  0.10623693]
 [ 22.791494  ]
 [ -0.72526073]
 [ -5.8056846 ]
 [  5.215267  ]
 [ -4.8472004 ]
 [ -2.7920082 ]
 [-20.970247  ]]
epoch= 29 loss =  20.05197920028582 b =  8.740764 w =  [[ -8.188714  ]
 [  1.6044916 ]
 [ -1.6162013 ]
 [  0.17897943]
 [ -0.02302845]
 [ 22.807207  ]
 [ -0.72299093]
 [ -5.8952627 ]
 [  5.2832932 ]
 [ -4.9055157 ]
 [ -2.8003173 ]
 [-20.937078  ]]
epoch= 30 loss =  20.026142806785725 b =  8.91211 w =  [[ -8.3161545 ]
 [  1.6205711 ]
 [ -1.5767202 ]
 [  0.16883452]
 [ -0.14826903]
 [ 22.820906  ]
 [ -0.72168666]
 [ -5.9821444 ]
 [  5.347619  ]
 [ -4.961507  ]
 [ -2.80958   ]
 [-20.90313   ]]
epoch= 31 loss =  20.00111694905495 b =  9.0828 w =  [[ -8.438215  ]
 [  1.636145  ]
 [ -1.5389304 ]
 [  0.15949494]
 [ -0.26967722]
 [ 22.832846  ]
 [ -0.72122526]
 [ -6.0664797 ]
 [  5.4084606 ]
 [ -5.015347  ]
 [ -2.819747  ]
 [-20.8688    ]]
epoch= 32 loss =  19.976876023260523 b =  9.252759 w =  [[ -8.555112  ]
 [  1.6511654 ]
 [ -1.5028042 ]
 [  0.15088564]
 [ -0.38743532]
 [ 22.843216  ]
 [ -0.7214917 ]
 [ -6.148412  ]
 [  5.4660387 ]
 [ -5.067201  ]
 [ -2.8307493 ]
 [-20.834362  ]]
epoch= 33 loss =  19.953411039013904 b =  9.421962 w =  [[ -8.667068  ]
 [  1.665602  ]
 [ -1.4683019 ]
 [  0.14294076]
 [ -0.50171745]
 [ 22.852142  ]
 [ -0.72238165]
 [ -6.2280664 ]
 [  5.520563  ]
 [ -5.117201  ]
 [ -2.8425179 ]
 [-20.800135  ]]
epoch= 34 loss =  19.930691390567286 b =  9.590336 w =  [[ -8.774292  ]
 [  1.6794354 ]
 [ -1.4353894 ]
 [  0.13560352]
 [ -0.61270636]
 [ 22.859802  ]
 [ -0.7238109 ]
 [ -6.305546  ]
 [  5.572204  ]
 [ -5.1654673 ]
 [ -2.8550014 ]
 [-20.766256  ]]
epoch= 35 loss =  19.908689436425135 b =  9.757842 w =  [[ -8.87699   ]
 [  1.6926562 ]
 [ -1.404023  ]
 [  0.12882215]
 [ -0.7205627 ]
 [ 22.866287  ]
 [ -0.7256954 ]
 [ -6.3809533 ]
 [  5.621156  ]
 [ -5.21211   ]
 [ -2.8681278 ]
 [-20.732916  ]]
epoch= 36 loss =  19.887386695063512 b =  9.924463 w =  [[ -8.97536   ]
 [  1.7052654 ]
 [ -1.3741522 ]
 [  0.12255323]
 [ -0.82544595]
 [ 22.871664  ]
 [ -0.7279629 ]
 [ -6.4543743 ]
 [  5.667572  ]
 [ -5.2572203 ]
 [ -2.881837  ]
 [-20.700218  ]]
epoch= 37 loss =  19.86675028833604 b =  10.090131 w =  [[ -9.069558  ]
 [  1.7172685 ]
 [ -1.3457288 ]
 [  0.116758  ]
 [ -0.927498  ]
 [ 22.876017  ]
 [ -0.73054504]
 [ -6.5258846 ]
 [  5.7116237 ]
 [ -5.300886  ]
 [ -2.8960645 ]
 [-20.668264  ]]
epoch= 38 loss =  19.846758902883902 b =  10.254826 w =  [[ -9.159769  ]
 [  1.7286767 ]
 [ -1.3187001 ]
 [  0.11140099]
 [ -1.026865  ]
 [ 22.879416  ]
 [ -0.73338926]
 [ -6.595566  ]
 [  5.7534475 ]
 [ -5.3431883 ]
 [ -2.9107597 ]
 [-20.63712   ]]
epoch= 39 loss =  19.82738176727769 b =  10.418527 w =  [[ -9.246163  ]
 [  1.7395029 ]
 [ -1.293014  ]
 [  0.10644902]
 [ -1.1236793 ]
 [ 22.881903  ]
 [ -0.736444  ]
 [ -6.663486  ]
 [  5.793184  ]
 [ -5.3841906 ]
 [ -2.925864  ]
 [-20.60686   ]]
epoch= 40 loss =  19.80859844872877 b =  10.581193 w =  [[ -9.328893  ]
 [  1.7497646 ]
 [ -1.2686185 ]
 [  0.10187446]
 [ -1.2180638 ]
 [ 22.883526  ]
 [ -0.73966265]
 [ -6.729703  ]
 [  5.8309593 ]
 [ -5.423962  ]
 [ -2.9413295 ]
 [-20.577477  ]]
epoch= 41 loss =  19.790382144316983 b =  10.742806 w =  [[ -9.408131  ]
 [  1.759481  ]
 [ -1.2454603 ]
 [  0.09765093]
 [ -1.310128  ]
 [ 22.88431   ]
 [ -0.74300146]
 [ -6.794283  ]
 [  5.866894  ]
 [ -5.4625535 ]
 [ -2.9571    ]
 [-20.549078  ]]
epoch= 42 loss =  19.7727154651905 b =  10.903362 w =  [[ -9.484003  ]
 [  1.7686719 ]
 [ -1.2234856 ]
 [  0.09375397]
 [ -1.3999892 ]
 [ 22.88429   ]
 [ -0.74643093]
 [ -6.857287  ]
 [  5.9011016 ]
 [ -5.5000234 ]
 [ -2.973147  ]
 [-20.521568  ]]
epoch= 43 loss =  19.755573752456257 b =  11.062811 w =  [[ -9.556662  ]
 [  1.7773588 ]
 [ -1.2026455 ]
 [  0.0901618 ]
 [ -1.4877502 ]
 [ 22.883541  ]
 [ -0.74991935]
 [ -6.9187627 ]
 [  5.933685  ]
 [ -5.536415  ]
 [ -2.9894311 ]
 [-20.495026  ]]
epoch= 44 loss =  19.73892638734157 b =  11.221154 w =  [[ -9.626234 ]
 [  1.7855614]
 [ -1.1828885]
 [  0.0868539]
 [ -1.5735025]
 [ 22.882063 ]
 [ -0.7534398]
 [ -6.9787636]
 [  5.9647517]
 [ -5.571777 ]
 [ -3.0059128]
 [-20.469427 ]]
epoch= 45 loss =  19.72275971981826 b =  11.378388 w =  [[ -9.692853  ]
 [  1.7933004 ]
 [ -1.1641629 ]
 [  0.08381241]
 [ -1.6573385 ]
 [ 22.879875  ]
 [ -0.7569692 ]
 [ -7.037346  ]
 [  5.994391  ]
 [ -5.6061606 ]
 [ -3.0225582 ]
 [-20.44474   ]]
epoch= 46 loss =  19.70705129104678 b =  11.534488 w =  [[ -9.756643  ]
 [  1.8005998 ]
 [ -1.1464226 ]
 [  0.08101843]
 [ -1.7393415 ]
 [ 22.877008  ]
 [ -0.76048803]
 [ -7.094551  ]
 [  6.0226994 ]
 [ -5.6395864 ]
 [ -3.0393372 ]
 [-20.421003  ]]
epoch= 47 loss =  19.691786841337766 b =  11.68945 w =  [[ -9.817729  ]
 [  1.8074781 ]
 [ -1.1296227 ]
 [  0.07845551]
 [ -1.8195949 ]
 [ 22.87351   ]
 [ -0.7639785 ]
 [ -7.1504245 ]
 [  6.049739  ]
 [ -5.6721025 ]
 [ -3.0562296 ]
 [-20.398193  ]]
epoch= 48 loss =  19.676944714699722 b =  11.843252 w =  [[ -9.876206  ]
 [  1.8139527 ]
 [ -1.1137139 ]
 [  0.07610977]
 [ -1.898168  ]
 [ 22.86938   ]
 [ -0.76742446]
 [ -7.2050123 ]
 [  6.075606  ]
 [ -5.7037435 ]
 [ -3.073202  ]
 [-20.376253  ]]
epoch= 49 loss =  19.662505444781683 b =  11.99591 w =  [[ -9.932195  ]
 [  1.8200471 ]
 [ -1.0986578 ]
 [  0.07396667]
 [ -1.9751372 ]
 [ 22.864668  ]
 [ -0.77081823]
 [ -7.2583547 ]
 [  6.1003575 ]
 [ -5.7345405 ]
 [ -3.0902467 ]
 [-20.35518   ]]
epoch= 50 loss =  19.64845156447943 b =  12.147389 w =  [[ -9.985801  ]
 [  1.8257802 ]
 [ -1.0844069 ]
 [  0.07201444]
 [ -2.0505586 ]
 [ 22.85937   ]
 [ -0.77414376]
 [ -7.3104887 ]
 [  6.1240644 ]
 [ -5.7645226 ]
 [ -3.1073253 ]
 [-20.334978  ]]
![loss曲线](https://img-blog.csdnimg.cn/20200513163255140.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzM2MDQ1MDkz,size_16,color_FFFFFF,t_70)
![图](https://img-blog.csdnimg.cn/20200513163337549.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzM2MDQ1MDkz,size_16,color_FFFFFF,t_70)

你可能感兴趣的:(学习总结,深度学习)