(1)一次线性函数拟合
代码下载：http://download.csdn.net/download/seven_year_promise/10126396
import tensorflow as tf
import numpy as np

# (1) Fit the linear function y = 0.1*x + 0.3 with gradient descent.

# Create training data: 100 random points on the target line.
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3

""" structure """
# Trainable parameters: weight drawn uniformly from [-1, 1), bias starts at 0.
Weights = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
biases = tf.Variable(tf.zeros([1]))
y = Weights * x_data + biases

# Mean squared error between prediction and target.
Loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(Loss)

# tf.initialize_all_variables() is deprecated; use the modern initializer
# (consistent with snippet (6) below, which already does).
init = tf.global_variables_initializer()
""" end """

# Context manager guarantees the session is released.
with tf.Session() as sess:
    sess.run(init)  # activate the structure
    for step in range(201):
        sess.run(train)
        if step % 20 == 0:
            # Weights -> 0.1 and biases -> 0.3 as training converges.
            print(step, sess.run(Weights), sess.run(biases))
(2)变量学习
代码下载：http://download.csdn.net/download/seven_year_promise/10126397
import tensorflow as tf

# (2) Variable state: a counter incremented through tf.assign.

state = tf.Variable(0, name='counter')
# Python-2 `print state.name` is a SyntaxError under Python 3; use print().
print(state.name)

one = tf.constant(1)
new_value = tf.add(state, one)
# Running `update` writes new_value back into `state`.
update = tf.assign(state, new_value)

# Deprecated tf.initialize_all_variables() replaced with the modern API.
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for _ in range(3):
        print(sess.run(update))   # value returned by the assign op: 1, 2, 3
        print(sess.run(state))    # confirms the variable really changed
    # Prints the Variable object itself, not its value.
    print(state)
(3)会话session
import tensorflow as tf

# (3) Sessions: two equivalent ways of executing a graph.

matrix_1 = tf.constant([[3, 3]])
matrix_2 = tf.constant([[2],
                        [2]])
# (1x2) . (2x1) -> [[12]]
product = tf.matmul(matrix_1, matrix_2)

# method1: explicit session; must be closed manually.
# Python-2 `print result` is a SyntaxError under Python 3; use print().
sess = tf.Session()
result = sess.run(product)
print(result)
sess.close()

# method2: context manager closes the session automatically.
with tf.Session() as sess:
    result2 = sess.run(product)
    print(result2)
(4)placeholder
import tensorflow as tf

# (4) placeholder: values are supplied at run time through feed_dict.

input_1 = tf.placeholder(tf.float32)
input_2 = tf.placeholder(tf.float32)
output = tf.multiply(input_1, input_2)

with tf.Session() as sess:
    # Python-2 `print sess.run(...)` is a SyntaxError under Python 3.
    # Feeds 7 * 2 and prints [14.].
    print(sess.run(output, feed_dict={input_1: [7.], input_2: [2.]}))
(5)生成一个自己的网络结构
代码下载：http://download.csdn.net/download/seven_year_promise/10126399
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
def add_layer(inputs, in_size, out_size, activation_function=None):
    """Build one fully connected layer and return its output tensor.

    Args:
        inputs: 2-D tensor of shape (batch, in_size).
        in_size: number of input features.
        out_size: number of output units.
        activation_function: optional callable applied to the affine
            output; when None the raw linear result is returned.

    Returns:
        Tensor of shape (batch, out_size).
    """
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    # Apply the nonlinearity only when one was supplied.
    if activation_function is None:
        return Wx_plus_b
    return activation_function(Wx_plus_b)
# data for training: y = x^2 - 0.5 plus Gaussian noise
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

# data holders: leading None lets any batch size be fed
xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])

# network: one hidden ReLU layer of 10 units, then a linear output layer
l_1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
prediction = add_layer(l_1, 10, 1, activation_function=None)

# loss: batch mean of the per-sample squared error
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                    reduction_indices=[1]))

# train method
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

# initialization: tf.initialize_all_variables() is deprecated
init = tf.global_variables_initializer()

# Session via context manager so resources are always released
with tf.Session() as sess:
    sess.run(init)

    # visualization: scatter of the data plus a live prediction curve
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.scatter(x_data, y_data)
    plt.ion()
    plt.show()

    # Explicit sentinel instead of the original `try/except Exception: pass`,
    # which only existed to paper over the NameError on the first iteration
    # and would also have hidden any real plotting error.
    lines = None
    for i in range(1000):
        sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
        if i % 50 == 0:
            print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
            # Remove the previous prediction line before drawing a new one.
            if lines is not None:
                lines[0].remove()
            prediction_value = sess.run(prediction, feed_dict={xs: x_data})
            lines = ax.plot(x_data, prediction_value, 'r-', lw=5)
            plt.pause(0.8)
(6)tensorboard
代码下载：http://download.csdn.net/download/seven_year_promise/10126400
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
def add_layer(inputs, in_size, out_size, n_layer, activation_function=None):
    """Build one fully connected layer with TensorBoard summaries.

    Args:
        inputs: 2-D tensor of shape (batch, in_size).
        in_size: number of input features.
        out_size: number of output units.
        n_layer: integer index used to label this layer in TensorBoard.
        activation_function: optional callable applied to the affine
            output; when None the raw linear result is returned.

    Returns:
        Tensor of shape (batch, out_size).
    """
    layer_name = 'layer%s' % n_layer
    # BUG FIX: the original passed the literal string 'layer_name' to
    # tf.name_scope, so every layer collapsed into one shared scope in
    # the TensorBoard graph instead of getting its own 'layer1'/'layer2'.
    with tf.name_scope(layer_name):
        with tf.name_scope('Weights'):
            Weights = tf.Variable(tf.random_normal([in_size, out_size]),
                                  name='W')
            tf.summary.histogram(layer_name + '/weights', Weights)
        with tf.name_scope('biases'):
            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')
            tf.summary.histogram(layer_name + '/biases', biases)
        # BUG FIX: this scope was mislabelled 'Weights' in the original,
        # duplicating the weight scope's name.
        with tf.name_scope('Wx_plus_b'):
            Wx_plus_b = tf.add(tf.matmul(inputs, Weights), biases)
        if activation_function is None:
            outputs = Wx_plus_b
        else:
            outputs = activation_function(Wx_plus_b)
        tf.summary.histogram(layer_name + '/outputs', outputs)
        return outputs
# data for training: y = x^2 - 0.5 plus Gaussian noise
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

# data holders grouped under one 'inputs' scope for a tidy graph view
with tf.name_scope('inputs'):
    xs = tf.placeholder(tf.float32, [None, 1], name='x_input')
    ys = tf.placeholder(tf.float32, [None, 1], name='y_input')

# network: hidden ReLU layer (10 units) and a linear output layer
l_1 = add_layer(xs, 1, 10, n_layer=1, activation_function=tf.nn.relu)
prediction = add_layer(l_1, 10, 1, n_layer=2, activation_function=None)

# loss appears as a scalar curve in TensorBoard
with tf.name_scope('Loss'):
    loss = tf.reduce_mean(
        tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]),
        name='L')
    tf.summary.scalar('loss', loss)

# train method
with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

# Session via context manager; dead commented-out code removed.
with tf.Session() as sess:
    merged = tf.summary.merge_all()
    writer = tf.summary.FileWriter("logs/", sess.graph)
    sess.run(tf.global_variables_initializer())
    for i in range(1000):
        sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
        if i % 50 == 0:
            # record every merged summary each 50 steps
            result = sess.run(merged, feed_dict={xs: x_data, ys: y_data})
            writer.add_summary(result, i)
    # Close the writer so all pending events are flushed to logs/.
    writer.close()