#6. Session: run just the part of the graph you want to compute
import tensorflow as tf
import numpy as np
matrix1=tf.constant([[3,3]])#1x2 row vector
matrix2=tf.constant([[2],#2x1 column vector
                     [2]])
product=tf.matmul(matrix1,matrix2)#matrix multiplication, like np.dot(); result is 12
sess=tf.Session()
result=sess.run(product)#Session.run computes just the product step
print(result)
sess.close()
#Or replace "sess=tf.Session()" and "sess.close()" with:
#with tf.Session() as sess:
#    result = sess.run(product)
#    print(result)  # -> [[12]]
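#Quick sanity check: the same product in plain NumPy (a minimal sketch; np is already imported above, and np_check is my own name):
np_check=np.dot(np.array([[3,3]]),np.array([[2],[2]]))#same 1x2 by 2x1 multiplication
print(np_check)#-> [[12]], matching sess.run(product)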
#7. Variables
import tensorflow as tf
state=tf.Variable(0,name='counter')
one=tf.constant(1)
new_value=tf.add(state,one)
update=tf.assign(state,new_value)
init=tf.initialize_all_variables()#required whenever Variables are used: this op initializes them
with tf.Session() as sess:
    sess.run(init)#run the init op to activate the variables
    for _ in range(3):
        sess.run(update)
        print(sess.run(state))#-> 1 2 3
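#Note: tf.initialize_all_variables() is deprecated; a minimal sketch of the newer TF1
#equivalent, tf.global_variables_initializer(), with tf.assign_add replacing the
#add+assign pair above (the variable name counter2 is my own, for illustration):
counter2=tf.Variable(0,name='counter2')
increment=tf.assign_add(counter2,one)#shorthand for assign(counter2, counter2+one)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(3):
        print(sess.run(increment))#-> 1 2 3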
#8. Feeds: passing values in (from outside)
import tensorflow as tf
input1=tf.placeholder(tf.float32)#holds no value now; when something depending on it is run later, external data must be fed into the placeholder
input2=tf.placeholder(tf.float32)
output=tf.multiply(input1,input2)#element-wise multiplication (note: tf.multiply, not the old tf.mul; matrix multiplication is tf.matmul)
with tf.Session() as sess:
    print(sess.run(output,feed_dict={input1:[7.],input2:[2.]}))#output depends on two placeholders, so both must be fed -> [ 14.]
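#Placeholders can also be given explicit shapes and fed whole matrices; a minimal
#sketch (the [None,2]/[2,1] shapes and the names a, b are my own, for illustration):
a=tf.placeholder(tf.float32,[None,2])#any number of rows, 2 columns
b=tf.placeholder(tf.float32,[2,1])
mat_out=tf.matmul(a,b)
with tf.Session() as sess:
    print(sess.run(mat_out,feed_dict={a:[[3.,3.]],b:[[2.],[2.]]}))#-> [[12.]]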
#10-11. Adding layers with def add_layer(): building a neural network
import tensorflow as tf
import numpy as np
def add_layer(inputs,in_size,out_size,activation_function=None):
    Weights=tf.Variable(tf.random_normal([in_size,out_size]))#in_size x out_size matrix
    biases=tf.Variable(tf.zeros([1,out_size])+0.1)#1 x out_size row; +0.1 so biases don't start at 0
    Wx_plus_b=tf.matmul(inputs,Weights)+biases
    if activation_function is None:#no activation, i.e. a linear layer
        outputs=Wx_plus_b
    else:
        outputs=activation_function(Wx_plus_b)
    return outputs
x_data=np.linspace(-1,1,300)[:,np.newaxis]
noise=np.random.normal(0,0.05,x_data.shape)#add noise so the data looks more realistic
y_data=np.square(x_data)-0.5+noise
#one hidden layer with 10 neurons, one output layer
xs=tf.placeholder(tf.float32,[None,1])#the 1 matches x_data's single feature
ys=tf.placeholder(tf.float32,[None,1])
l1=add_layer(xs,1,10,activation_function=tf.nn.relu)#hidden layer: 1 input feature, 10 neurons
prediction=add_layer(l1,10,1,activation_function=None)#output layer: l1 size 10, y_data size 1; None means linear
loss=tf.reduce_mean(tf.reduce_sum(tf.square(ys-prediction),reduction_indices=[1]))#mean squared error over the batch
train_step=tf.train.GradientDescentOptimizer(0.1).minimize(loss)#learning rate 0.1 (<1); each step reduces the error
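#How the loss above is computed, on a toy 2-example batch (numbers are my own, for illustration):
#  errors ys-prediction = [[0.1],[0.3]] -> squared = [[0.01],[0.09]]
#  reduce_sum over axis 1 (reduction_indices=[1]) -> [0.01, 0.09]
#  reduce_mean -> (0.01+0.09)/2 = 0.05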
init=tf.initialize_all_variables()#initialize all variables
sess=tf.Session()
sess.run(init)
for i in range(1000):#train for 1000 steps
    sess.run(train_step,feed_dict={xs:x_data,ys:y_data})
    if i % 50==0:
        print(sess.run(loss,feed_dict={xs:x_data,ys:y_data}))#the error should keep shrinking; loss is also evaluated through the placeholders
#Output (the error does keep shrinking):
#0.625046   0.0103892  0.00880997 0.00816188 0.0077936
#0.0075142  0.00726819 0.00701968 0.00677203 0.00653569
#0.00632172 0.00609444 0.00587196 0.00567781 0.00551067
#0.00533964 0.00513579 0.00497731 0.00483084 0.00470275
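#After training, the same placeholders accept new inputs; a minimal sketch of
#evaluating the fitted network (the test points are my own, for illustration):
x_test=np.array([[0.],[0.5]])
print(sess.run(prediction,feed_dict={xs:x_test}))#roughly [[-0.5],[-0.25]] if the fit to y=x^2-0.5 is good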
#12. Visualizing the results (building on the previous example)
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
def add_layer(inputs,in_size,out_size,activation_function=None):
    Weights=tf.Variable(tf.random_normal([in_size,out_size]))#in_size x out_size matrix
    biases=tf.Variable(tf.zeros([1,out_size])+0.1)#1 x out_size row; +0.1 so biases don't start at 0
    Wx_plus_b=tf.matmul(inputs,Weights)+biases
    if activation_function is None:#no activation, i.e. a linear layer
        outputs=Wx_plus_b
    else:
        outputs=activation_function(Wx_plus_b)
    return outputs
x_data=np.linspace(-1,1,300)[:,np.newaxis]
noise=np.random.normal(0,0.05,x_data.shape)#add noise so the data looks more realistic
y_data=np.square(x_data)-0.5+noise
#one hidden layer with 10 neurons, one output layer
xs=tf.placeholder(tf.float32,[None,1])#the 1 matches x_data's single feature
ys=tf.placeholder(tf.float32,[None,1])
l1=add_layer(xs,1,10,activation_function=tf.nn.relu)#hidden layer: 1 input feature, 10 neurons
prediction=add_layer(l1,10,1,activation_function=None)#output layer: l1 size 10, y_data size 1; None means linear
loss=tf.reduce_mean(tf.reduce_sum(tf.square(ys-prediction),reduction_indices=[1]))#mean squared error over the batch
train_step=tf.train.GradientDescentOptimizer(0.1).minimize(loss)#learning rate 0.1 (<1); each step reduces the error
init=tf.initialize_all_variables()#initialize all variables
sess=tf.Session()
sess.run(init)
fig=plt.figure()#create a figure
ax=fig.add_subplot(1,1,1)#one subplot in a 1x1 grid
ax.scatter(x_data,y_data)
plt.ion()#interactive mode, so plt.show() no longer blocks
plt.show()
for i in range(1000):#train for 1000 steps
    sess.run(train_step,feed_dict={xs:x_data,ys:y_data})
    if i % 50==0:
        #print(sess.run(loss,feed_dict={xs:x_data,ys:y_data}))#the error should keep shrinking; loss is also evaluated through the placeholders
        try:
            ax.lines.remove(lines[0])#erase the previous fit line (otherwise old lines pile up)
        except Exception:
            pass
        prediction_value=sess.run(prediction,feed_dict={xs:x_data})
        lines=ax.plot(x_data,prediction_value,"r-",lw=5)#line width 5
        plt.pause(0.1)#pause for 0.1 s
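#Optional addition (not in the original notes): after the loop, turn interactive
#mode off so the final figure stays open instead of closing immediately:
plt.ioff()
plt.show()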
#14. TensorBoard: a great visualization helper
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
def add_layer(inputs,in_size,out_size,activation_function=None):
    with tf.name_scope("layer"):
        with tf.name_scope("Weights"):
            Weights=tf.Variable(tf.random_normal([in_size,out_size]),name='W')#in_size x out_size matrix
        with tf.name_scope("biases"):
            biases=tf.Variable(tf.zeros([1,out_size])+0.1,name="b")#1 x out_size row; +0.1 so biases don't start at 0
        with tf.name_scope("Wx_plus_b"):
            Wx_plus_b=tf.add(tf.matmul(inputs,Weights),biases)
        if activation_function is None:#no activation, i.e. a linear layer
            outputs=Wx_plus_b
        else:
            outputs=activation_function(Wx_plus_b)
        return outputs
x_data=np.linspace(-1,1,300)[:,np.newaxis]
noise=np.random.normal(0,0.05,x_data.shape)#add noise so the data looks more realistic
y_data=np.square(x_data)-0.5+noise
#one hidden layer with 10 neurons, one output layer
with tf.name_scope("inputs"):
    xs=tf.placeholder(tf.float32,[None,1],name="x_input")#the 1 matches x_data's single feature
    ys=tf.placeholder(tf.float32,[None,1],name="y_input")
l1=add_layer(xs,1,10,activation_function=tf.nn.relu)#hidden layer: 1 input feature, 10 neurons
prediction=add_layer(l1,10,1,activation_function=None)#output layer: l1 size 10, y_data size 1; None means linear
with tf.name_scope("loss"):
    loss=tf.reduce_mean(tf.reduce_sum(tf.square(ys-prediction),reduction_indices=[1]))#mean squared error over the batch
with tf.name_scope("train"):
    train_step=tf.train.GradientDescentOptimizer(0.1).minimize(loss)#learning rate 0.1 (<1); each step reduces the error
init=tf.initialize_all_variables()#initialize all variables
sess=tf.Session()
writer=tf.summary.FileWriter("D:/",sess.graph)#write the graph to disk; view it in a browser by launching TensorBoard from the terminal
sess.run(init)
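#To view the graph, run TensorBoard from a terminal against the log directory used
#above, then open the URL it prints (typically http://localhost:6006) in a browser:
#  tensorboard --logdir=D:/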
#Reference: http://blog.csdn.net/flying_sfeng/article/details/69943260