Building a Linear Regression Model with TensorFlow

# import tensorflow as tf
# import numpy as np
# greeting = tf.constant('Hello Google Tensorflow!')
# # Launch a session
# sess = tf.Session()
# # Use the session to run the greeting op
# result = sess.run(greeting)
# print(result)
# sess.close()
#
# # Use TensorFlow to compute a simple linear function
# matrix1 = tf.constant([[3.,3.]]) # row vector
# matrix2 = tf.constant([[2.],[2.]]) # column vector
# product = tf.matmul(matrix1,matrix2)
# linear = tf.add(product,tf.constant(2.0))
#
# with tf.Session() as sess:
#     result = sess.run(linear)
#     print(result)
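
The commented-out warm-up above first prints a greeting constant, then evaluates the 1x2 by 2x1 matrix product [[3., 3.]] x [[2.], [2.]] = [[12.]] and adds 2, giving [[14.]]. It is written against the TensorFlow 1.x session API. On TensorFlow 2.x, where tf.Session is no longer available at the top level, a minimal sketch of the same warm-up (assuming the v1 compatibility layer; the original post targets 1.x) looks like this:

import tensorflow as tf

# Assumption: TensorFlow 2.x, falling back to graph/session mode via tf.compat.v1
tf.compat.v1.disable_eager_execution()

greeting = tf.constant('Hello Google Tensorflow!')
matrix1 = tf.constant([[3., 3.]])    # 1x2 row vector
matrix2 = tf.constant([[2.], [2.]])  # 2x1 column vector
linear = tf.matmul(matrix1, matrix2) + tf.constant(2.0)  # [[3*2 + 3*2 + 2]] = [[14.]]

with tf.compat.v1.Session() as sess:
    print(sess.run(greeting))  # b'Hello Google Tensorflow!'
    print(sess.run(linear))    # [[14.]]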

import tensorflow as tf
import numpy as np
import pandas as pd
train = pd.read_csv('breast-cancer-train.csv')
test = pd.read_csv('breast-cancer-test.csv')
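# The training features are transposed to shape (2, n_train) so the model below can compute y = W * X with W of shape (1, 2)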
X_train = np.float32(train[['Clump Thickness','Cell Size']].T)
y_train = np.float32(train['Type'].T)
X_test = np.float32(test[['Clump Thickness','Cell Size']])
y_test = np.float32(test['Type'].T)
b = tf.Variable(tf.zeros([1]))
W = tf.Variable(tf.random_uniform([1,2],-1.0,1.0))

# Define the linear function y = W * X_train + b
y = tf.matmul(W,X_train) + b
# Mean squared error on the training set
loss = tf.reduce_mean(tf.square(y-y_train))

# Estimate the parameters W and b by gradient descent with learning rate 0.01
optimizer = tf.train.GradientDescentOptimizer(0.01)
train_op = optimizer.minimize(loss)  # renamed from `train` so it no longer shadows the training DataFrame

init = tf.global_variables_initializer()  # initialize_all_variables() is deprecated
sess = tf.Session()
sess.run(init)

for step in range(1000):
    sess.run(train_op)
    if step % 200 == 0:
        print(step, sess.run(W), sess.run(b))
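
# Evaluation sketch: score the held-out test split with the learned W and b,
# using the same 0.5 threshold as the decision boundary plotted below.
W_val, b_val = sess.run(W), sess.run(b)
test_scores = np.matmul(W_val, X_test.T) + b_val             # shape (1, n_test)
test_pred = (test_scores > 0.5).astype(np.float32).ravel()   # predicted Type: 0 or 1
print('test accuracy:', np.mean(test_pred == y_test))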

# Test samples, split by class for plotting
test_negative = test.loc[test['Type']==0][['Clump Thickness','Cell Size']]
test_positive = test.loc[test['Type']==1][['Clump Thickness','Cell Size']]

import matplotlib.pyplot as plt
plt.scatter(test_negative['Clump Thickness'],test_negative['Cell Size'],marker='o',s=200,c='red')
plt.scatter(test_positive['Clump Thickness'],test_positive['Cell Size'],marker='x',s=150,c='black')
plt.xlabel('Clump Thickness')
plt.ylabel('Cell Size')
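# Decision boundary: the points (x, y) where W[0,0]*x + W[0,1]*y + b = 0.5, solved for y below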
lx = np.arange(0,12)
ly = (0.5 - sess.run(b) - lx * sess.run(W)[0][0]) / sess.run(W)[0][1]
plt.plot(lx,ly,color='green')
plt.show()
