(Part 2) Building Your Own Neural Network with TensorFlow (莫烦 Python Tutorial)

These notes are based on the video course "Building Your Own Neural Network with TensorFlow" (莫烦 Python Tutorial):

https://www.bilibili.com/video/av16001891?from=search&seid=6106673353000958940

14 TensorBoard Visualization

import tensorflow as tf

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # hide TensorFlow INFO and WARNING log messages


def add_layer(inputs, in_size, out_size, activation_function=None):
    # group all ops of this layer under one collapsible node in the graph view
    # (the scope is named 'layer' so it does not collide with the 'inputs'
    #  scope used for the placeholders below)
    with tf.name_scope('layer'):
        with tf.name_scope('Weights'):
            Weights = tf.Variable(tf.random_normal([in_size, out_size]), name='w')
        with tf.name_scope('biases'):
            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')
        with tf.name_scope('Wx_plus_b'):
            Wx_plus_b = tf.matmul(inputs, Weights) + biases
        if activation_function is None:
            outputs = Wx_plus_b
        else:
            outputs = activation_function(Wx_plus_b)
        return outputs


# the input placeholders, grouped under one 'inputs' node in the graph
with tf.name_scope('inputs'):
    xs = tf.placeholder(tf.float32, [None, 1], name='x_input')
    ys = tf.placeholder(tf.float32, [None, 1], name='y_input')

l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
prediction = add_layer(l1, 10, 1, activation_function=None)

with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                        reduction_indices=[1]))

with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

init = tf.global_variables_initializer()
sess = tf.Session()

# write the graph to an events file so TensorBoard can load it in the browser
writer = tf.summary.FileWriter("logs/", sess.graph)

sess.run(init)

"""
如果想看图的话,这些代码就够了。
"""

Debugging in the terminal

lijuncheng@lijunchengdeMacBook-Pro ~/Code/TensorFlow $ cd /Users/lijuncheng/Code/TensorFlow ; env "PYTHONIOENCODING=UTF-8" "PYTHONUNBUFFERED=1" /Users/lijuncheng/anaconda3/bin/python3 /Users/lijuncheng/.vscode/extensions/ms-python.python-2018.3.1/pythonFiles/PythonTools/visualstudio_py_launcher_nodebug.py /Users/lijuncheng/Code/TensorFlow 64316 34806ad9-833a-4524-8cd6-18ca4aa74f14 RedirectOutput,RedirectOutput /Users/lijuncheng/Code/TensorFlow/python6.py
[1]    23515 terminated  env "PYTHONIOENCODING=UTF-8" "PYTHONUNBUFFERED=1"    64316
lijuncheng@lijunchengdeMacBook-Pro ~/Code/TensorFlow $ ls
Hello_TF.ipynb logs           python1.py     python2.py     python3.py     python4.py     python5.py     python6.py
lijuncheng@lijunchengdeMacBook-Pro ~/Code/TensorFlow $ cd logs
lijuncheng@lijunchengdeMacBook-Pro ~/Code/TensorFlow/logs $ ls
events.out.tfevents.1523452508.lijunchengdeMacBook-Pro.local
lijuncheng@lijunchengdeMacBook-Pro ~/Code/TensorFlow/logs $ cd ..
lijuncheng@lijunchengdeMacBook-Pro ~/Code/TensorFlow $ tensorboard --logdir='logs'
/Users/lijuncheng/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:34: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
  from ._conv import register_converters as _register_converters
Starting TensorBoard b'47' at http://0.0.0.0:6006
(Press CTRL+C to quit)
WARNING:tensorflow:path ../external/data/plugin/text/runs not found, sending 404
WARNING:tensorflow:path ../external/data/plugin/text/runs not found, sending 404
WARNING:tensorflow:path ../external/data/plugin/text/runs not found, sending 404
WARNING:tensorflow:path ../external/data/plugin/text/runs not found, sending 404

Note ⚠️: TensorBoard must be started from the parent directory of the logs directory:

tensorboard --logdir='logs'
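
The relative path 'logs' only resolves correctly from that directory. If you would rather launch TensorBoard from somewhere else, passing an absolute logdir should work just as well (same CLI, just a different path):

tensorboard --logdir=/Users/lijuncheng/Code/TensorFlow/logs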

15 TensorBoard Visualization 2

Goal: visualize what happens inside the neural network during training.
HISTOGRAMS: the distributions of weights, biases and layer outputs over the whole course of training.
EVENTS: anything else you want to display, e.g. scalar values such as the loss (newer TensorBoard versions call this tab SCALARS).

import tensorflow as tf
import numpy as np

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'


def add_layer(inputs, in_size, out_size, n_layer, activation_function=None):
    layer_name = 'layer%s' % n_layer
    with tf.name_scope(layer_name):
        with tf.name_scope('Weights'):
            Weights = tf.Variable(tf.random_normal([in_size, out_size]), name='w')
            # note: the old API was tf.histogram_summary(layer_name + '/weights', Weights)
            tf.summary.histogram(layer_name+'/weights', Weights)
        with tf.name_scope('biases'):
            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')
            tf.summary.histogram(layer_name+'/biases', biases)
        with tf.name_scope('Wx_plus_b'):
            Wx_plus_b = tf.matmul(inputs, Weights) + biases
        if activation_function is None:
            outputs = Wx_plus_b
        else:
            outputs = activation_function(Wx_plus_b)
        tf.summary.histogram(layer_name+'/outputs', outputs)
        return outputs


# Make up some real data
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

with tf.name_scope('inputs'):
    xs = tf.placeholder(tf.float32, [None, 1], name='x_input')
    ys = tf.placeholder(tf.float32, [None, 1], name='y_input')

l1 = add_layer(xs, 1, 10, n_layer=1, activation_function=tf.nn.relu)
prediction = add_layer(l1, 10, 1, n_layer=2, activation_function=None)

with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                        reduction_indices=[1]))
    # scalar summaries such as the loss show up in the EVENTS (SCALARS) tab
    tf.summary.scalar('loss', loss)
    # the loss should keep decreasing, which shows the network is actually learning

with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

init = tf.global_variables_initializer()
sess = tf.Session()

# merge all summaries into one op and write them, together with the graph, via a FileWriter
# old API: merged = tf.merge_all_summaries()
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter("logs/", sess.graph)

sess.run(init)

for i in range(1000):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        # record the summaries every 50 steps
        result = sess.run(merged, feed_dict={xs: x_data, ys: y_data})
        writer.add_summary(result, i)
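
One small addition worth making here (a sketch, assuming the TF 1.x FileWriter and Session APIs): flush and close the writer after the loop so that all pending summaries are actually written to disk before the process exits.

# make sure every recorded summary reaches the events file, then release resources
writer.flush()
writer.close()
sess.close()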

lijuncheng@lijunchengdeMacBook-Pro ~/Code/TensorFlow $ tensorboard --logdir='logs'
/Users/lijuncheng/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:34: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
  from ._conv import register_converters as _register_converters
ERROR:tensorflow:TensorBoard attempted to bind to port 6006, but it was already in use
TensorBoard attempted to bind to port 6006, but it was already in use
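
The error above most likely means the TensorBoard instance started in section 14 is still running and holding port 6006. Either stop that process (Ctrl+C in its terminal) or start the new one on another port; --port is a standard TensorBoard flag:

tensorboard --logdir=logs --port=6007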

16 Classification

Classification problem: handwritten-digit recognition (MNIST).
Each image is 28 * 28 = 784 pixels (see the shape sketch below).
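
To make the 784 concrete: each 28 * 28 grayscale image is flattened into a vector of length 784, and each label is a one-hot vector of length 10. A minimal numpy sketch with made-up data, just to show the shapes the placeholders below expect:

import numpy as np

image = np.random.rand(28, 28)   # a fake 28x28 grayscale image
flat = image.reshape(-1)         # flattened to shape (784,), one row of xs
label = np.zeros(10)
label[3] = 1.0                   # one-hot encoding of the digit 3, one row of ys
print(flat.shape, label)         # (784,) [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]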

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# number 1 to 10 data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# load the MNIST data (downloaded automatically on the first run)


def add_layer(inputs, in_size, out_size, activation_function=None):
    # add one more layer and return the output of this layer
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs


def compute_accuracy(v_xs, v_ys):
    global prediction  # prediction is defined at module level below
    y_pre = sess.run(prediction, feed_dict={xs: v_xs})  # predicted probabilities for v_xs

    # compare the predictions with the true labels;
    # each prediction is a probability distribution over the digits 0-9
    correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))

    # accuracy = fraction of correct predictions
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys})

    return result


# define placeholder for inputs to network
xs = tf.placeholder(tf.float32, [None, 784])  # 28*28
# each image is represented by 28*28 = 784 pixel values
ys = tf.placeholder(tf.float32, [None, 10])
# each example has 10 outputs: a one-hot encoding of the digits 0-9

# add output layer
prediction = add_layre(xs, 784, 10, activation_function=tf.nn.softmax)
# softmax is the usual choice for multi-class classification outputs

# the error between prediction and real data
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys*tf.log(prediction), reduction_indices=[1]))

# for classification, the network is trained with softmax outputs and a cross-entropy loss

# train with gradient descent
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

sess = tf.Session()

# important step: initialize all variables
sess.run(tf.global_variables_initializer())

# mnist.train is the training set, mnist.test is the test set
for step in range(1000):
    # draw a random batch of x and y samples from the training set
    batch_xs, batch_ys = mnist.train.next_batch(100)  # 100 examples per batch
    # train on 100 examples at a time instead of the whole dataset at once
    sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys})
    # evaluate accuracy on the test set every 50 steps
    if step % 50 == 0:
        print(compute_accuracy(mnist.test.images, mnist.test.labels))
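
To see what the softmax output and the cross-entropy loss actually compute, here is a small numpy sketch with made-up numbers (not part of the tutorial code), for a single example with three classes:

import numpy as np

logits = np.array([2.0, 1.0, 0.1])               # raw scores for 3 classes
probs = np.exp(logits) / np.sum(np.exp(logits))  # softmax: probabilities that sum to 1
y_true = np.array([1.0, 0.0, 0.0])               # one-hot true label
loss = -np.sum(y_true * np.log(probs))           # same formula as ys * tf.log(prediction)
print(probs, loss)                               # ~[0.659 0.242 0.099] 0.417
print(np.argmax(probs) == np.argmax(y_true))     # this is what compute_accuracy checks per example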

lijuncheng@lijunchengdeMacBook-Pro ~/Code/TensorFlow $ cd /Users/lijuncheng/Code/TensorFlow ; env "PYTHONIOENCODING=UTF-8" "PYTHONUNBUFFERED=1" /Users/lijuncheng/anaconda3/bin/python3 /Users/lijuncheng/.vscode/extensions/ms-python.python-2018.3.1/pythonFiles/PythonTools/visualstudio_py_launcher_nodebug.py /Users/lijuncheng/Code/TensorFlow 53917 34806ad9-833a-4524-8cd6-18ca4aa74f14 RedirectOutput,RedirectOutput /Users/lijuncheng/Code/TensorFlow/python9.py
/Users/lijuncheng/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:34: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
  from ._conv import register_converters as _register_converters
Successfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.
Extracting MNIST_data/train-images-idx3-ubyte.gz
Successfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.
Extracting MNIST_data/train-labels-idx1-ubyte.gz
Successfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Successfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz
2018-04-23 14:48:39.418978: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.1 instructions, but these are available on your machine and could speed up CPU computations.
2018-04-23 14:48:39.419004: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.2 instructions, but these are available on your machine and could speed up CPU computations.
2018-04-23 14:48:39.419012: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX instructions, but these are available on your machine and could speed up CPU computations.
2018-04-23 14:48:39.419019: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX2 instructions, but these are available on your machine and could speed up CPU computations.
2018-04-23 14:48:39.419029: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use FMA instructions, but these are available on your machine and could speed up CPU computations.
0.146
0.6547
0.7439
0.7815
0.8059
0.8196
0.831
0.8387
0.8361
0.8519
0.8538
0.8587
0.8614
0.8654
0.8591
0.8657
0.8718
0.8726
0.873
0.875
[1]    65354 terminated  env "PYTHONIOENCODING=UTF-8" "PYTHONUNBUFFERED=1"    53917

My respect and thanks to 莫烦 (Morvan)!

莫烦's GitHub: https://github.com/MorvanZhou/Tensorflow-Tutorial/tree/master/tutorial-contents
