TensorFlow Hands-On Study Notes (with detailed implementation code)

Build a neural network to fit: y = 0.1x + 0.3

import tensorflow as tf
import numpy as np
# create data
x_data = np.random.rand(10000).astype(np.float32)
y_data = 0.1*x_data + 0.3

# create tensorflow structure start
Weights = tf.Variable(tf.random_uniform([1], -1.0, 1.0))  # 1-D weight, initialized uniformly in [-1, 1)
biases = tf.Variable(tf.zeros([1]))                        # bias, initialized to 0
y = Weights*x_data + biases                                # prediction from the current weight and bias
loss = tf.reduce_mean(tf.square(y - y_data))               # mean squared error
optimizer = tf.train.GradientDescentOptimizer(0.5)         # plain gradient descent optimizer, learning rate 0.5
train = optimizer.minimize(loss)                           # training op that minimizes the loss

init = tf.global_variables_initializer()  # once the structure is built, create the init op (initialize_all_variables is deprecated)
# create tensorflow structure end


sess = tf.Session()  
sess.run(init)  # essential: actually run the variable initialization

for step in range(280):
    sess.run(train)
    if step%20 == 0:
        print(step,sess.run(Weights),sess.run(biases))
0 [0.24367195] [0.30459303]
20 [0.12827748] [0.28488767]
40 [0.10727955] [0.2961096]
60 [0.10187402] [0.29899848]
80 [0.10048245] [0.2997422]
100 [0.10012418] [0.29993364]
120 [0.10003198] [0.29998294]
140 [0.10000823] [0.2999956]
160 [0.1000021] [0.29999888]
180 [0.10000056] [0.2999997]
200 [0.10000014] [0.29999995]
220 [0.1000001] [0.29999995]
240 [0.1000001] [0.29999995]
260 [0.1000001] [0.29999995]
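
A small follow-up: once training finishes, the learned parameters can be pulled out of the session and applied to new inputs. A minimal sketch, assuming sess, Weights and biases from the block above are still alive (x_new is just an illustrative array):

w_val, b_val = sess.run([Weights, biases])            # fetch both variables in one run call
x_new = np.array([0.0, 0.5, 1.0], dtype=np.float32)   # some new inputs
print(w_val * x_new + b_val)                          # should be close to 0.1*x_new + 0.3
sess.close()                                          # release the session's resources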

Sessions

1. Two ways to open a Session

import tensorflow as tf
matrix_1 = tf.constant([[3,3]])
matrix_2 = tf.constant([[2],[2]])
product = tf.matmul(matrix_1, matrix_2) # matrix multiply np.dot(m1,m2)

# method 1
sess = tf.Session()
result = sess.run(product)
print(result)
sess.close()

[[12]]
# method 2
with tf.Session() as sess: # the session is closed automatically when the with-block exits
    result2 = sess.run(product)
    print(result2)
    
[[12]]
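
sess.run can also evaluate several tensors in one call by passing a list of fetches. A minimal sketch, reusing matrix_1, matrix_2 and product from above:

with tf.Session() as sess:
    prod, m1, m2 = sess.run([product, matrix_1, matrix_2])  # one run call, multiple fetches
    print(prod, m1, m2)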

Variables in TensorFlow

If you define a tf.Variable, you must create an initialization op and run it in a session before the variable can be used.

state = tf.Variable(0,name = "counter")
# print(state.name)
one = tf.constant(1)                  # a constant
new_value = tf.add(state, one)        # state + 1
update = tf.assign(state, new_value)  # op that writes new_value back into state
# initialize all variables; required whenever Variables are defined
init = tf.global_variables_initializer()  # initialize_all_variables() is deprecated
with tf.Session() as sess:
    sess.run(init)
    for i in range(3):
        sess.run(update)
        print(sess.run(state))
1
2
3
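
The add + assign pair above can also be written with tf.assign_add, which folds the two ops into one. A minimal sketch (counter2 is just an illustrative name):

counter2 = tf.Variable(0, name="counter2")
update2 = tf.assign_add(counter2, 1)   # increments the variable in place and returns the new value
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for _ in range(3):
        print(sess.run(update2))       # prints 1, 2, 3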

Feeding values with placeholders

input_1 = tf.placeholder(tf.float32)  # value is supplied at run time
input_2 = tf.placeholder(tf.float32)
output = tf.multiply(input_1, input_2)
with tf.Session() as sess:
    # placeholders must be fed through feed_dict when the graph is run
    print(sess.run(output, feed_dict={input_1: [7.], input_2: [2.]}))
[14.]
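
Placeholders can also be given an explicit shape, which makes shape mismatches fail early. A minimal sketch (the names here are only illustrative) that feeds a small 2-D batch:

a = tf.placeholder(tf.float32, [None, 2])   # any number of rows, 2 columns each
row_sum = tf.reduce_sum(a, axis=1)          # sum over each row
with tf.Session() as sess:
    print(sess.run(row_sum, feed_dict={a: [[1., 2.], [3., 4.]]}))  # -> [3. 7.]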

Adding a layer and choosing an activation function

def add_layer(inputs, in_size, out_size, activation_function=None):
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))  # weight matrix (capitalized by convention), in_size rows x out_size columns
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)           # biases are best not started at zero; 1 row x out_size columns
    Wx_plus_b = tf.matmul(inputs, Weights) + biases               # linear transform plus bias
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs
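
A quick way to sanity-check add_layer before wiring up a full network is to call it on a shaped placeholder and inspect the static shapes of its outputs. A minimal sketch, assuming the add_layer defined above (x_in is an illustrative name):

x_in = tf.placeholder(tf.float32, [None, 1])                    # placeholder for 1-D features
hidden = add_layer(x_in, 1, 10, activation_function=tf.nn.relu)
out = add_layer(hidden, 10, 1, activation_function=None)
print(hidden.get_shape(), out.get_shape())                      # (?, 10) (?, 1)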

Building the neural network

Visualizing the fit


import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# create the data
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]  # input features, shape (300, 1)
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise         # ground-truth targets: quadratic plus noise

xs = tf.placeholder(tf.float32, [None, 1])   # None means any number of examples can be fed
ys = tf.placeholder(tf.float32, [None, 1])
# add the hidden layer
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)   # 1 input feature, 10 hidden units
# add the output layer
prediction = add_layer(l1, 10, 1, activation_function=None)
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys-prediction), reduction_indices=[1]))  # mean squared error over the batch

train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)  # gradient descent with learning rate 0.1

# initialize all variables
init = tf.global_variables_initializer()
sess = tf.Session()

sess.run(init)  # run the initialization

fig = plt.figure()  # create a figure
ax = fig.add_subplot(1, 1, 1)
# scatter the real data
ax.scatter(x_data, y_data)
plt.ion()   # interactive mode, so show() does not block
plt.show()
for i in range(1000):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        # print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
        try:
            ax.lines.remove(lines[0])  # remove the previous fitted line so the plot stays readable
        except Exception:
            pass
        prediction_value = sess.run(prediction, feed_dict={xs: x_data})
        lines = ax.plot(x_data, prediction_value, 'r-', lw=5)  # plot the current prediction as a red curve
        plt.pause(0.1)

[Figure: scatter of the training data with the red prediction curve drawn during training (output_12_0.png)]
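
The convergence can also be tracked numerically rather than visually. A minimal sketch, assuming sess, train_step, loss, xs, ys, x_data and y_data from the block above are still in scope; it continues training for another 1000 steps, records the loss every 50 steps, then plots the curve:

loss_history = []
for i in range(1000):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        loss_history.append(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
plt.ioff()                      # turn interactive mode back off
plt.figure()
plt.plot(loss_history)          # the loss should drop quickly and then flatten out
plt.xlabel("checkpoint (every 50 steps)")
plt.ylabel("loss")
plt.show()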

Optimizers

1. class tf.train.GradientDescentOptimizer - plain gradient descent

2. class tf.train.AdadeltaOptimizer - the Adadelta optimizer (see the sketch below for how optimizers are swapped)
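
Swapping optimizers only changes the line that builds train_step; the rest of the graph stays the same. A minimal sketch, assuming a loss tensor is already defined (the learning rates are only illustrative; pick one of the lines):

train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)   # plain gradient descent, as used above
train_step = tf.train.AdadeltaOptimizer(0.01).minimize(loss)         # Adadelta
train_step = tf.train.AdamOptimizer(0.001).minimize(loss)            # Adam, another common choice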

Visualizing the network with TensorBoard to aid understanding

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
def add_layer(inputs, in_size, out_size, activation_function=None):
    with tf.name_scope("layer"):  # outer scope for the whole layer
        with tf.name_scope("weights"):  # sub-scope for each component
            Weights = tf.Variable(tf.random_normal([in_size, out_size]))  # weight matrix, in_size rows x out_size columns
        with tf.name_scope("biases"):
            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)  # biases are best not started at zero; 1 row x out_size columns
        with tf.name_scope("Wx_plus_b"):
            Wx_plus_b = tf.matmul(inputs, Weights) + biases  # linear transform plus bias
        if activation_function is None:
            outputs = Wx_plus_b
        else:
            outputs = activation_function(Wx_plus_b)
        return outputs
# create the data
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]  # input features
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise         # ground-truth targets

with tf.name_scope("inputs"):
    xs = tf.placeholder(tf.float32, [None, 1], name="x_input")   # None means any number of examples can be fed
    ys = tf.placeholder(tf.float32, [None, 1], name="y_input")
# add the hidden layer
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)   # 1 input feature, 10 hidden units
# add the output layer
prediction = add_layer(l1, 10, 1, activation_function=None)
with tf.name_scope("loss"):
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys-prediction), reduction_indices=[1]))  # mean squared error
with tf.name_scope("train"):
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)  # gradient descent with learning rate 0.1

# initialize all variables
init =tf.global_variables_initializer()
sess = tf.Session()

writer = tf.summary.FileWriter("F:/A大数据学习/logs/", sess.graph)  # once the whole graph is defined, write it to the log directory

sess.run(init)  # run the initialization

# run this in a command prompt (not inside Python) to view the logged graph:
tensorboard --logdir="F:/A大数据学习/logs/"
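
Besides the graph itself, TensorBoard can also show scalar curves such as the loss. A minimal sketch, assuming loss, train_step, xs, ys, x_data, y_data, sess and writer defined above; it adds a scalar summary and logs it every 50 training steps:

tf.summary.scalar("loss", loss)          # record the loss as a scalar summary
merged = tf.summary.merge_all()          # merge all summaries into one op
for i in range(1000):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        summary = sess.run(merged, feed_dict={xs: x_data, ys: y_data})
        writer.add_summary(summary, i)   # shows up in TensorBoard's SCALARS tab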
