The most common loss function for linear regression is MSE (mean squared error):

loss = tf.reduce_mean(tf.square(y - y_))
optimizer = tf.train.GradientDescentOptimizer(0.5)
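To make the quantity concrete, here is a minimal NumPy sketch of the same computation (the pred and target arrays are made up for illustration):

import numpy as np
pred = np.array([2.0, 1.0, 3.0])    # hypothetical model outputs
target = np.array([1.0, 1.0, 3.0])  # hypothetical ground truth
mse = np.mean(np.square(pred - target))  # same quantity as tf.reduce_mean(tf.square(y - y_))
print(mse)  # (2-1)^2 / 3 = 0.333...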
from __future__ import print_function
from __future__ import absolute_import
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import numpy as np
import tensorflow as tf
def load_data():
    datafile = 'data/ex1data2.txt'
    # Read the data file; unpack=True returns one row per column:
    # [[x1 x1 x1 x1 x1],
    #  [x2 x2 x2 x2 x2],
    #  [y  y  y  y  y ]]
    cols = np.loadtxt(datafile, delimiter=',', usecols=(0, 1, 2), unpack=True)
    print(cols[-1:].shape)  # (1, 47)
    print(cols[-1].shape)   # (47,)
    # Transpose so each row is one example:
    # X, y: [[x1 x2],
    #        [x1 x2],
    #        [x1 x2]]
    X = np.transpose(np.array(cols[:-1]))
    y = np.transpose(np.array(cols[-1:]))
    print(X.shape)  # (47, 2)
    print(y.shape)  # (47, 1)
    stored_feature_means, stored_feature_stds = [], []  # store the means and standard deviations
    Xnorm = X.copy()
    for icol in range(Xnorm.shape[1]):
        stored_feature_means.append(np.mean(Xnorm[:, icol]))
        stored_feature_stds.append(np.std(Xnorm[:, icol]))
        # Skip the first column
        # Question: why would the first column be skipped when no column of ones was added?
        # if not icol: continue
        # Faster not to recompute the mean and std; just use the stored values
        Xnorm[:, icol] = (Xnorm[:, icol] - stored_feature_means[-1]) / stored_feature_stds[-1]
    return Xnorm, y
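# The per-column loop above can also be written in vectorized form. A minimal
# alternative sketch (normalize_features is a hypothetical helper, not used below):
def normalize_features(X):
    means = X.mean(axis=0)  # per-feature means, shape (n_features,)
    stds = X.std(axis=0)    # per-feature standard deviations
    return (X - means) / stds, means, stds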
train_X, train_y = load_data()
# placeholder
X = tf.placeholder("float", [None, 2])
# model
b = tf.Variable(tf.zeros([1]))
W = tf.Variable(tf.zeros([2, 1]))
y = tf.matmul(X, W) + b
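# shape check: X is [None, 2] and W is [2, 1], so tf.matmul(X, W) is [None, 1];
# the bias b of shape [1] is broadcast across all rows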
# minimize the mean squared error (MSE), the most common loss function for linear regression
y_ = tf.placeholder("float", [None, 1])
loss = tf.reduce_mean(tf.square(y - y_))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)
# initialize variables
init = tf.global_variables_initializer()
# launch the graph
sess = tf.Session()
sess.run(init)
# equivalently: sess.run(tf.global_variables_initializer())
for step in range(201):
    sess.run(train, feed_dict={X: train_X, y_: train_y})
print(sess.run(W), sess.run(b))
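As a quick sanity check (a small addition reusing the session above), the final training MSE can be evaluated with the same feed:

print(sess.run(loss, feed_dict={X: train_X, y_: train_y}))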
import tensorflow as tf
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Generate phony data with NumPy: 100 points in total
x_data = np.float32(np.random.rand(2, 100))  # random inputs
y_data = np.dot([0.100, 0.200], x_data) + 0.300
print(x_data.shape)#(2, 100)
print(y_data.shape)#(100,)
# build a linear model
b = tf.Variable(tf.zeros([1]))
W = tf.Variable(tf.random_uniform([1, 2], -1.0, 1.0))
y = tf.matmul(W, x_data) + b
# minimize the mean squared error
loss = tf.reduce_mean(tf.square(y - y_data))
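# note: y has shape (1, 100) while y_data has shape (100,); broadcasting aligns
# them to (1, 100) before the elementwise subtraction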
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)
# initialize variables
init = tf.global_variables_initializer()
# launch the graph
sess = tf.Session()
sess.run(init)
# fit the plane
for step in range(0, 201):
    sess.run(train)
    if step % 20 == 0:
        print(step, sess.run(W), sess.run(b))  # W has shape [1, 2]; b has shape [1]
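Since the data were generated from the plane W = [0.100, 0.200], b = 0.300, the fitted parameters can be checked against the ground truth. A minimal sketch (the tolerance is illustrative):

print(np.allclose(sess.run(W), [[0.100, 0.200]], atol=1e-2))  # True once converged
print(np.allclose(sess.run(b), [0.300], atol=1e-2))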