Implementing a Simple CNN with TensorFlow on Windows

TensorFlow is usually installed on a Linux server, which is convenient for model training. But for learning TensorFlow or debugging locally, developing on Windows is often more comfortable. Below is how to set up TensorFlow on Windows and use it to implement a simple CNN.

1. Installing TensorFlow on Windows
1) Install Python first
As of TensorFlow 1.2, Windows support requires 64-bit Python 3.5 or later; download a suitable version from the official Python site. I installed Python 3.5.2.
2) The simplest way to install TensorFlow is to run the following in cmd:
python -m pip install tensorflow
and wait for the installation to finish.
3) Start Python and run import tensorflow as tf; if no error is raised, the installation succeeded.
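
As an additional sanity check, you can print the installed version and run a trivial graph. A minimal sketch in the TF 1.x style used throughout this post:

import tensorflow as tf
print(tf.__version__)
hello = tf.constant('Hello, TensorFlow!')
with tf.Session() as sess:
    print(sess.run(hello))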

2. A Simple CNN Implementation
The complete implementation is shown below:

#coding=utf-8

import tensorflow as tf

#import data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data/',one_hot=True)

# model parameters
iters = 1000
batch_size = 128
train_keep_prob = 0.5   # dropout keep probability used during training
learning_rate = 0.001

# input
x = tf.placeholder("float", [None,784])
y = tf.placeholder("float", [None,10])
# keep_prob as a placeholder so dropout can be switched off (1.0) at test time
keep_prob = tf.placeholder("float")

#model variable
weights = {
    # convolution layer 1: 5x5 kernel; with 'SAME' padding, 28*28*1 -> 28*28*32
    'conv_w1' : tf.Variable(tf.random_normal([5,5,1,32])),
    'conv_b1' : tf.Variable(tf.random_normal([32])),
    # convolution layer 2: 14*14*32 -> 14*14*64; after the second 2x2 pooling the output is 7*7*64
    'conv_w2' : tf.Variable(tf.random_normal([5,5,32,64])),
    'conv_b2' : tf.Variable(tf.random_normal([64])),

    # Counting the output layer, there are only two fully connected layers here; since the
    # dataset is small, adding more fully connected layers made the network hard to train.
    #full connection layer 1
    'conn_w1' : tf.Variable(tf.random_normal([7*7*64,512])),
    'conn_b1' : tf.Variable(tf.random_normal([512])),
    #full connection layer 2
    'conn_w2' : tf.Variable(tf.random_normal([512,10])),
    'conn_b2' : tf.Variable(tf.random_normal([10])),
    #full connection layer 3
#     'conn_w3' : tf.Variable(tf.random_normal([512,128])),
#     'conn_b3' : tf.Variable(tf.random_normal([128])),
    #output layer 4
#     'conn_w4' : tf.Variable(tf.random_normal([128,10])),
#     'conn_b4' : tf.Variable(tf.random_normal([10])),

    }
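
# Note: tf.random_normal defaults to stddev=1.0; in practice a smaller standard
# deviation (e.g. tf.random_normal([5,5,1,32], stddev=0.1)) tends to make a
# network like this train more stably. The defaults are kept here as in the original.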

def conv2d(x, w, b):
    '''Convolution followed by ReLU.

    x is a 4-D input tensor: [batch, height, width, channels].
    The filter w is also 4-D: [height, width, in_channels, out_channels].
    strides gives the sliding step along each of the input dimensions.'''
    xw = tf.nn.conv2d(x, w, strides=[1,1,1,1], padding='SAME')
    xwb = tf.nn.bias_add(xw, b)
    return tf.nn.relu(xwb)

def max_pool(x, k=2):
    '''Max pooling over k x k windows; k=2 halves each spatial dimension.'''
    return tf.nn.max_pool(x, ksize=[1,k,k,1], strides=[1,k,k,1], padding='SAME')
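
# Shape flow for MNIST (28x28 grayscale) through the two functions above:
#   input:            [batch, 28, 28, 1]
#   conv2d ('SAME'):  [batch, 28, 28, 32]   spatial size unchanged
#   max_pool (k=2):   [batch, 14, 14, 32]   each spatial dimension halved
#   conv2d ('SAME'):  [batch, 14, 14, 64]
#   max_pool (k=2):   [batch, 7, 7, 64]     flattened to 7*7*64 = 3136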

def conv_network(x, weights, keep_prob):
    '''Forward pass of the whole convolutional network.'''
    # reshape the flat 784-dim input into a 4-D image tensor
    x = tf.reshape(x, [-1,28,28,1])
    # first convolution layer
    conv1 = conv2d(x, weights['conv_w1'], weights['conv_b1'])
    pool1 = max_pool(conv1, 2)
    # second convolution layer
    conv2 = conv2d(pool1, weights['conv_w2'], weights['conv_b2'])
    pool2 = max_pool(conv2, 2)
    # flatten the pooled output and feed it into the fully connected layer
    conn1 = tf.reshape(pool2, [-1,7*7*64])
    conn1 = tf.nn.relu(tf.add(tf.matmul(conn1, weights['conn_w1']), weights['conn_b1']))
    conn1 = tf.nn.dropout(conn1, keep_prob=keep_prob)

#     conn2 = tf.nn.relu(tf.add(tf.matmul(conn1,weights['conn_w2']),weights['conn_b2']))
#     conn2 = tf.nn.dropout(conn2, keep_prob=keep_prob)
#
#     conn3 = tf.nn.relu(tf.add(tf.matmul(conn2,weights['conn_w3']),weights['conn_b3']))
#     conn3 = tf.nn.dropout(conn3, keep_prob=keep_prob)

    out = tf.add(tf.matmul(conn1,weights['conn_w2']),weights['conn_b2'])

    return out

pred = conv_network(x, weights, keep_prob)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred))
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss) 
init = tf.global_variables_initializer()

with tf.Session() as session:
    session.run(init)
    for step in range(iters):
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        _, cost = session.run([optimizer, loss],
                              feed_dict={x: batch_x, y: batch_y, keep_prob: train_keep_prob})

        print("iter:", step, "cost:", cost)
    # evaluate on the full test set with dropout disabled
    accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)), 'float'))
    accuracy = accuracy.eval(feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0})
    print("accuracy:", accuracy)
