TensorFlow study notes: a practice implementation of AlexNet

Version 0
Date: 2017/8/25, 23:49
Known gaps: no benchmark code and no fully connected layers yet


# -*- coding: utf-8 -*-
"""
Created on Fri Aug 25 21:00:36 2017

@author: DidiLv
"""

from datetime import datetime
import math
import time
import tensorflow as tf

# batch size and number of batches
batch_size = 32
num_batches = 100

# Print the "structure" of the network: the name and output shape of each conv or pooling layer
'''
Note:
t.op.name: given a tensor, returns its name
t.get_shape().as_list(): given a tensor, returns its shape as a list
'''
def print_activations(t):
    print(t.op.name, ' ', t.get_shape().as_list())
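# For illustration: with batch_size = 32 and 224x224x3 input images (the input
# size used in the AlexNet paper), the first two print_activations calls below
# would print something like:
#   conv1  [32, 56, 56, 64]
#   pool1  [32, 27, 27, 64]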

# AlexNet network structure
'''
inference function:
    input: images
    output: pool5 and the parameters of AlexNet
So the function will be quite long!
'''
def inference(images):
    parameters = []

# scope: conv1    
    with tf.name_scope('conv1') as scope:
        # kernel: note the paper uses 48 kernels per GPU (96 in total, split across 2 GPUs);
        # this single-tower version uses 64
        kernel = tf.Variable(tf.truncated_normal(shape = [11,11,3,64],
                                                 dtype = tf.float32, stddev=0.1), name = 'weights')

        conv = tf.nn.conv2d(images, kernel, strides = [1,4,4,1], padding='SAME')

        biases = tf.Variable(tf.constant(0.0, shape = [64], dtype = tf.float32), 
                           trainable=True, name='biases')
        # tf.nn.bias_add is a special case of tf.add: biases must be 1-D and
        # match the last dimension of conv, then broadcast over the other dims.
        # tf.add supports more general broadcasting.
        bias = tf.nn.bias_add(conv, biases)

        conv1 = tf.nn.relu(bias,name = scope)

        parameters += [kernel,biases]
    print_activations(conv1)
# scope: LRN
    '''
    Args:
      input: A `Tensor`. Must be one of the following types: `float32`, `half`.
        4-D.
      depth_radius: An optional `int`. Defaults to `5`.
        0-D.  Half-width of the 1-D normalization window.
      bias: An optional `float`. Defaults to `1`.
        An offset (usually positive to avoid dividing by 0).
      alpha: An optional `float`. Defaults to `1`.
        A scale factor, usually positive.
      beta: An optional `float`. Defaults to `0.5`. An exponent.
      name: A name for the operation (optional).

    Returns:
      A `Tensor`. Has the same type as `input`.

    strides, ksize format: [batch, height, width, channels]
    '''
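    # For reference, LRN as defined in the AlexNet paper (Sec. 3.3):
    #   b[i] = a[i] / (k + alpha * sum_{j = i-n/2 .. i+n/2} a[j]^2) ** beta
    # where the sum runs over n neighboring channels at the same spatial
    # position; tf.nn.lrn's depth_radius corresponds to n/2 and bias to k.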
# The paper uses [n=5, k=2.0, alpha=1e-4, beta=0.75]; not sure why the values below differ, but they run fine
    lrn1 = tf.nn.lrn(conv1, depth_radius = 4, bias = 1.0, alpha = 0.001/9, beta = 0.75, name = 'lrn1')
    pool1 = tf.nn.max_pool(lrn1, ksize = [1,3,3,1], strides = [1,2,2,1],
                           padding = 'VALID', name = 'pool1')
    print_activations(pool1)

# Second convolutional layer
    with tf.name_scope('conv2') as scope:
        # note: 192 output channels
        kernel = tf.Variable(tf.truncated_normal(shape = [5,5,64,192], dtype = tf.float32, stddev = 0.1),
                             name = 'weights') # still named 'weights', but its full name conv2/weights distinguishes it from conv1's
        conv = tf.nn.conv2d(pool1, kernel, strides=[1,1,1,1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[192], dtype=tf.float32), trainable=True, name = 'biases')
        bias = tf.add(conv, biases) # using tf.add here, the more general form
        conv2 = tf.nn.relu(bias, name=scope)
        parameters += [kernel,biases]
    print_activations(conv2)

    lrn2 = tf.nn.lrn(conv2, depth_radius = 4, bias = 1.0, alpha = 0.001/9, beta = 0.75, name = 'lrn2')
    pool2 = tf.nn.max_pool(lrn2, ksize=[1,3,3,1], strides=[1,2,2,1],
                           padding='VALID', name='pool2')
    print_activations(pool2)

# Third convolutional layer
    with tf.name_scope('conv3') as scope:
        # note: 192 input channels, 384 output channels
        kernel = tf.Variable(tf.truncated_normal(shape = [3,3,192,384], dtype = tf.float32, stddev = 0.1),
                             name = 'weights') # full name conv3/weights, distinct from the earlier layers
        conv = tf.nn.conv2d(pool2, kernel, strides=[1,1,1,1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[384], dtype=tf.float32), trainable=True, name = 'biases')
        bias = tf.nn.bias_add(conv, biases) # back to bias_add here
        conv3 = tf.nn.relu(bias, name=scope)
        parameters += [kernel,biases]
    print_activations(conv3)

# Note: there are no more LRN layers from here on
# Fourth convolutional layer
    with tf.name_scope('conv4') as scope:
        kernel = tf.Variable(tf.truncated_normal(shape = [3,3,384,256], dtype = tf.float32, stddev = 0.1),
                             name = 'weights') # full name conv4/weights
        conv = tf.nn.conv2d(conv3, kernel, strides=[1,1,1,1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32), trainable=True, name = 'biases')
        bias = tf.nn.bias_add(conv, biases) # bias_add again
        conv4 = tf.nn.relu(bias, name=scope)
        parameters += [kernel,biases]
    print_activations(conv4)
# Fifth convolutional layer
    with tf.name_scope('conv5') as scope:
        kernel = tf.Variable(tf.truncated_normal(shape = [3,3,256,256], dtype = tf.float32, stddev = 0.1),
                             name = 'weights') # full name conv5/weights
        conv = tf.nn.conv2d(conv4, kernel, strides=[1,1,1,1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32), trainable=True, name = 'biases')
        bias = tf.nn.bias_add(conv, biases) # bias_add again
        conv5 = tf.nn.relu(bias, name=scope)
        parameters += [kernel,biases]
    print_activations(conv5)   

    pool5 = tf.nn.max_pool(conv5, ksize=[1,3,3,1], strides=[1,2,2,1], padding='VALID', name='pool5')
    print_activations(pool5)


    return pool5, parameters    
# Fully connected layers: not implemented yet
pass
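# A minimal sketch of the missing fully connected layers (fc6-fc8), following
# the paper's 4096-4096-1000 layout. The function name, variable names, and
# the num_classes default are illustrative assumptions, not part of the
# original code.
def fully_connected(pool5, parameters, num_classes=1000):
    # Flatten pool5 (shape [batch, 6, 6, 256] for 224x224 inputs) to a matrix
    flattened = tf.reshape(pool5, [batch_size, -1])
    dim = flattened.get_shape()[1].value  # 6*6*256 = 9216
    with tf.name_scope('fc6') as scope:
        weights = tf.Variable(tf.truncated_normal([dim, 4096], dtype=tf.float32, stddev=0.1), name='weights')
        biases = tf.Variable(tf.constant(0.0, shape=[4096], dtype=tf.float32), trainable=True, name='biases')
        fc6 = tf.nn.relu(tf.matmul(flattened, weights) + biases, name=scope)
        parameters += [weights, biases]
    with tf.name_scope('fc7') as scope:
        weights = tf.Variable(tf.truncated_normal([4096, 4096], dtype=tf.float32, stddev=0.1), name='weights')
        biases = tf.Variable(tf.constant(0.0, shape=[4096], dtype=tf.float32), trainable=True, name='biases')
        fc7 = tf.nn.relu(tf.matmul(fc6, weights) + biases, name=scope)
        parameters += [weights, biases]
    with tf.name_scope('fc8') as scope:
        weights = tf.Variable(tf.truncated_normal([4096, num_classes], dtype=tf.float32, stddev=0.1), name='weights')
        biases = tf.Variable(tf.constant(0.0, shape=[num_classes], dtype=tf.float32), trainable=True, name='biases')
        logits = tf.add(tf.matmul(fc7, weights), biases, name=scope)  # no ReLU on the output layer
        parameters += [weights, biases]
    return logits, parameters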
# Benchmark / evaluation code: not implemented yet
pass
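# A minimal sketch of the missing benchmark code, in the style of the standard
# TensorFlow AlexNet benchmark: feed random images through the network and
# time the forward pass. time_tensorflow_run and run_benchmark are assumed
# names for illustration.
def time_tensorflow_run(session, target, info_string):
    num_steps_burn_in = 10  # warm-up iterations, excluded from timing
    total_duration = 0.0
    total_duration_squared = 0.0
    for i in range(num_batches + num_steps_burn_in):
        start_time = time.time()
        session.run(target)
        duration = time.time() - start_time
        if i >= num_steps_burn_in:
            if not i % 10:
                print('%s: step %d, duration = %.3f' %
                      (datetime.now(), i - num_steps_burn_in, duration))
            total_duration += duration
            total_duration_squared += duration * duration
    mn = total_duration / num_batches
    sd = math.sqrt(total_duration_squared / num_batches - mn * mn)
    print('%s: %s across %d steps, %.3f +/- %.3f sec / batch' %
          (datetime.now(), info_string, num_batches, mn, sd))

def run_benchmark():
    with tf.Graph().as_default():
        # Random inputs are enough for a speed benchmark; no dataset is needed
        image_size = 224
        images = tf.Variable(tf.random_normal([batch_size, image_size, image_size, 3],
                                              dtype=tf.float32, stddev=0.1))
        pool5, parameters = inference(images)
        init = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init)
        time_tensorflow_run(sess, pool5, "Forward")

run_benchmark()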
