# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf

# Generate 100 training points on the line y = 0.1x + 0.55
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.55

# Create the TensorFlow structure: one weight and one bias to learn
Weights = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
biases = tf.Variable(tf.zeros([1]))
y = Weights * x_data + biases

loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for i in range(250):
    sess.run(train)
    if i % 20 == 0:
        print(i, sess.run(Weights), sess.run(biases))
sess.close()
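As a sanity check on what gradient descent converges to, the same line can be recovered in closed form with ordinary least squares. The sketch below uses only NumPy and is not part of the original example:

import numpy as np

x = np.random.rand(100).astype(np.float32)
y = x * 0.1 + 0.55

# Least squares over the design matrix [x, 1] solves for [w, b] directly
A = np.stack([x, np.ones_like(x)], axis=1)
w, b = np.linalg.lstsq(A, y, rcond=None)[0]
print(w, b)  # expect values very close to 0.1 and 0.55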
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

x = tf.placeholder("float", [None, 784])
w = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, w) + b)    # model prediction
y_ = tf.placeholder("float", [None, 10])  # ground-truth labels

cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)

# Evaluation ops, built once outside the training loop (building them inside
# the loop would keep adding nodes to the graph). tf.argmax gives the index of
# the largest entry along a dimension; since the labels are one-hot vectors,
# that index is the position of the 1. tf.equal returns booleans, so tf.cast
# converts them to floats and tf.reduce_mean averages them into an accuracy.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for i in range(10000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
    if i % 1000 == 0:
        # Evaluate accuracy on the MNIST test set
        print("Step " + str(i) + ":",
              sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
Step 0: 0.4124
Step 1000: 0.917
Step 2000: 0.9198
Step 3000: 0.9206
Step 4000: 0.9228
Step 5000: 0.9198
Step 6000: 0.9119
Step 7000: 0.9199
Step 8000: 0.9232
Step 9000: 0.9218
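One caveat about the hand-written loss: -tf.reduce_sum(y_ * tf.log(y)) produces NaN as soon as any softmax output underflows to 0. A more robust variant feeds the raw logits to tf.nn.softmax_cross_entropy_with_logits, which fuses the softmax and the log in a numerically stable way. This is a sketch against the same placeholders as above, not the original tutorial code:

# Same x, w, b, y_ as above; keep the logits un-softmaxed
logits = tf.matmul(x, w) + b
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))
# reduce_mean (instead of reduce_sum) makes the loss independent of batch
# size, so the learning rate likely needs retuning; 0.5 here is an assumption
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)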
import numpy as np
import tensorflow as tf

def add_layer(inputs, in_size, out_size, activation_function=None):
    """Add a fully connected layer and return its output tensor."""
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs

# 300x1 training set: the input layer has a single neuron
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])

# Hidden layer with 10 neurons
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
# Output layer with no activation function
prediction = add_layer(l1, 10, 1, activation_function=None)

loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), axis=[1]))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for i in range(3000):
        sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
        if i % 100 == 0:
            print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
0.898771
0.00805174
0.00672437
0.00613227
0.00571695
0.00533702
0.00494347
0.00450889
0.00418779
0.00394219
0.00373838
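For comparison, the hand-rolled add_layer can also be expressed with tf.layers.dense, which creates the weight and bias variables internally. The following is an equivalent sketch of the same network under TF 1.x, not part of the original code:

import numpy as np
import tensorflow as tf

x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
y_data = np.square(x_data) - 0.5 + np.random.normal(0, 0.05, x_data.shape)

xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])
l1 = tf.layers.dense(xs, 10, activation=tf.nn.relu)  # hidden layer, 10 units
prediction = tf.layers.dense(l1, 1)                  # linear output layer
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), axis=[1]))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(3000):
        sess.run(train_step, feed_dict={xs: x_data, ys: y_data})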