# Image classification with a CNN
# Project directory: D:\ST\Python_work\program\AI-CNN-Tensorflow-master
# Input: 14 * 256 Chinese-character images in BMP format; a single script of ~186 lines
'''
A single file, main.py; one full run takes a little over 3000 seconds.
AI-CNN-Tensorflow-master is a collection of handwritten Chinese-character images:
within each folder the character is the same and only the handwriting style differs;
different folders contain different characters.
Implements a CNN using TensorFlow for picture classification.
train_dir = "C:\TRAIN"
saver.restore(sess, "./model.ckpt")
'''
import tensorflow as tf
from PIL import Image
import numpy as np
import os
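# Compatibility note (added): the script relies on TensorFlow 1.x APIs such as tf.placeholder
# and tf.InteractiveSession. Under TensorFlow 2.x it would roughly need the compat layer:
# import tensorflow.compat.v1 as tf
# tf.disable_v2_behavior()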
batch_size = 16          # images per batch
batch_num = 224          # 224 batches * 16 images = 3584 = 14 * 256 samples
test_batch_start = 200   # batches 200..223 are held out for evaluation
Image_width = 28
Image_height = 28
N_classes = 14           # one class per character
learning_rate = 0.001
is_testing = False       # True: restore ./model.ckpt and only evaluate
sess = tf.InteractiveSession()
# Initialize weights from a truncated normal distribution
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
# Initialize biases to a small positive constant
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
# Convolution layer: stride 1, SAME padding keeps the spatial size
def conv2d(x, w):
    return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')
# Pooling layer: 2x2 max pooling halves the spatial size
def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
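# Shape sketch (illustration only, not executed): conv2d with SAME padding keeps the 28x28
# spatial size, and each max_pool_2x2 halves it, so two conv/pool stages give
# 28x28 -> 14x14 -> 7x7, which is where the 7 * 7 * 64 flatten size below comes from.
# example = tf.zeros([1, 28, 28, 1])
# print(max_pool_2x2(conv2d(example, weight_variable([5, 5, 1, 32]))).shape)   # (1, 14, 14, 32)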
# Build the model: placeholders for the flattened image and the one-hot label
x = tf.placeholder(tf.float32, shape=[None, Image_height * Image_width])
y_ = tf.placeholder(tf.float32, shape=[None, N_classes])
x_image = tf.reshape(x, [-1, 28, 28, 1])
# first layer
w_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1) # 28->14
# second layer: 32 -> 64 feature maps
w_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2) # 14->7
# densely connected layer
w_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)
# dropout
keep_prob = tf.placeholder("float")
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
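# Note (added): keep_prob is fed as 0.5 during training, so half of the fully connected
# activations are dropped, and as 1.0 during evaluation, so the full network is used;
# see the feed_dict values in the training and testing branches below.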
# readout layer: fully connected, 1024 -> 14
w_fc2 = weight_variable([1024, 14])
b_fc2 = bias_variable([14])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, w_fc2) + b_fc2)        # class probabilities, used for accuracy
scores = tf.nn.xw_plus_b(h_fc1_drop, w_fc2, b_fc2, name="scores")   # unscaled logits, used for the loss
# train and evaluate the model
# softmax_cross_entropy_with_logits returns a per-example loss vector;
# minimize() below treats the sum of this vector as the loss
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=scores, labels=y_)
#cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_*tf.log(y_conv), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
#train_step = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.9, beta2=0.99).minimize(cross_entropy)
# Op for measuring classification accuracy
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
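# Accuracy sketch (added): with one-hot labels, tf.argmax(y_, 1) recovers the class index
# (e.g. argmax of [0, 0, 1, 0, ...] is 2), so correct_prediction is a boolean vector and
# its float mean is the fraction of correctly classified images in the feed.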
train_dir = "D:\\ST\\Python_work\\program\\AI-CNN-Tensorflow-master\\TRAIN"
# Load the whole dataset into memory with PIL and build one-hot labels (no input queue is used)
def read_decode():
    imagepaths, labels = list(), list()
    label = 0
    list2 = os.listdir(train_dir)                          # the 14 per-character sub-folders
    for i in range(0, len(list2)):
        path = os.path.join(train_dir, list2[i])
        list_child = os.listdir(path)                      # image files inside this character's folder
        for j in range(0, len(list_child)):
            label_s = []
            path_whole = os.path.join(path, list_child[j]) # full path of one image
            imagepaths.append(path_whole)
            for num in range(N_classes):                   # build a one-hot label for this image
                if num == label:
                    label_s.append(1)
                else:
                    label_s.append(0)
            labels.append(label_s)
        label += 1
    img = []
    for i in range(len(imagepaths)):                       # binarize: white background (255) -> 0, ink -> 1
        pixel = []
        image = Image.open(imagepaths[i])
        for w in range(Image_width):
            for h in range(Image_height):
                if image.getpixel((h, w)) == 255:
                    pixel.append(0)
                else:
                    pixel.append(1)
        img.append(pixel)
    return img, labels
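# Usage sketch (added, assumes the 14 * 256 BMP dataset described above): read_decode
# returns two parallel Python lists; each img[i] is a flat list of 784 binary pixels and
# each labels[i] is a 14-element one-hot list.
# img, labels = read_decode()
# print(len(img), len(img[0]), len(labels[0]))   # expected: 3584 784 14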
tf.global_variables_initializer().run()
X, label = read_decode()
index = [n for n in range(0, N_classes*256)]
np.random.shuffle(index)
image_batch = [[0 for x in range(batch_size)] for y in range(batch_num)]
label_batch = [[0 for x in range(batch_size)] for y in range(batch_num)]
for j in range(batch_num):
    for z in range(batch_size):
        image_batch[j][z] = X[index[j * batch_size + z]]
        label_batch[j][z] = label[index[j * batch_size + z]]
test_image_batch = []
test_label_batch = []
#validation_image_batch = []
#validation_label_batch = []
for i in range(test_batch_start, batch_num):
    for j in range(batch_size):
        test_image_batch.append(image_batch[i][j])
        test_label_batch.append(label_batch[i][j])
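# Split note (added): batches 0..199 (3200 images) are used for training below, while
# batches 200..223 (24 * 16 = 384 images) form the held-out set whose accuracy is printed
# as "training accuracy"; the two sets are disjoint because every shuffled index is used once.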
saver = tf.train.Saver()
# for i in range(9,11):
# for j in range(batch_size):
# validation_image_batch.append(image_batch[i][j])
# validation_label_batch.append(label_batch[i][j])
if is_testing:
    saver.restore(sess, "./model.ckpt")                # restore the saved TensorFlow checkpoint (.ckpt)
    train_accuracy = accuracy.eval(feed_dict={x: test_image_batch, y_: test_label_batch, keep_prob: 1})
    print("-->training accuracy %.4f" % (train_accuracy))
else:
    for i in range(1, 200):                            # train for 199 passes over batches 0..199
        for j in range(test_batch_start):
            train_step.run(feed_dict={x: image_batch[j], y_: label_batch[j], keep_prob: 0.5})
        # train_accuracy = accuracy.eval(feed_dict={x: validation_image_batch, y_: validation_label_batch, keep_prob: 1})
        # print("-->step %d, training accuracy %.4f" % (i, train_accuracy))
        # if train_accuracy > 0.985:
        train_accuracy = accuracy.eval(feed_dict={x: test_image_batch, y_: test_label_batch, keep_prob: 1})
        print("-->step %d, training accuracy %.4f" % (i, train_accuracy))
        if train_accuracy > 0.993:                     # save the model only when accuracy exceeds 0.993
            saver.save(sess, "./model.ckpt")
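# Inference sketch (added, not part of the original run): a single BMP could be classified
# after training by binarizing it the same way read_decode does and evaluating the logits;
# "sample.bmp" is a hypothetical file name.
# def classify_one(path):
#     image = Image.open(path)
#     pixel = []
#     for w in range(Image_width):
#         for h in range(Image_height):
#             pixel.append(0 if image.getpixel((h, w)) == 255 else 1)
#     saver.restore(sess, "./model.ckpt")
#     return int(np.argmax(scores.eval(feed_dict={x: [pixel], keep_prob: 1.0})))
# print(classify_one("sample.bmp"))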
'''
Output
2018-07-22 18:38:33.990284: I C:\tf_jenkins\home\workspace\rel-win\M\windows\PY\35\tensorflow\core\platform\cpu_feature_guard.cc:137] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX AVX2
-->step 1, training accuracy 0.8672
-->step 2, training accuracy 0.9583
-->step 3, training accuracy 0.9740
-->step 4, training accuracy 0.9766
-->step 5, training accuracy 0.9792
-->step 6, training accuracy 0.9818
-->step 7, training accuracy 0.9792
-->step 8, training accuracy 0.9844
-->step 9, training accuracy 0.9844
-->step 10, training accuracy 0.9740
-->step 11, training accuracy 0.9844
-->step 12, training accuracy 0.9844
-->step 13, training accuracy 0.9818
-->step 14, training accuracy 0.9870
-->step 15, training accuracy 0.9922
-->step 16, training accuracy 0.9844
-->step 17, training accuracy 0.9922
-->step 18, training accuracy 0.9766
-->step 19, training accuracy 0.9870
-->step 20, training accuracy 0.9896
-->step 21, training accuracy 0.9896
-->step 22, training accuracy 0.9896
-->step 23, training accuracy 0.9922
-->step 24, training accuracy 0.9896
-->step 25, training accuracy 0.9922
-->step 26, training accuracy 0.9896
-->step 27, training accuracy 0.9870
-->step 28, training accuracy 0.9896
-->step 29, training accuracy 0.9896
-->step 30, training accuracy 0.9896
-->step 31, training accuracy 0.9870
-->step 32, training accuracy 0.9896
-->step 33, training accuracy 0.9870
-->step 34, training accuracy 0.9792
-->step 35, training accuracy 0.9870
-->step 36, training accuracy 0.9896
-->step 37, training accuracy 0.9870
-->step 38, training accuracy 0.9870
-->step 39, training accuracy 0.9922
-->step 40, training accuracy 0.9870
-->step 41, training accuracy 0.9896
-->step 42, training accuracy 0.9896
-->step 43, training accuracy 0.9896
-->step 44, training accuracy 0.9818
-->step 45, training accuracy 0.9922
-->step 46, training accuracy 0.9896
-->step 47, training accuracy 0.9922
-->step 48, training accuracy 0.9896
-->step 49, training accuracy 0.9922
-->step 50, training accuracy 0.9896
-->step 51, training accuracy 0.9948
-->step 52, training accuracy 0.9896
-->step 53, training accuracy 0.9922
-->step 54, training accuracy 0.9922
-->step 55, training accuracy 0.9922
-->step 56, training accuracy 0.9922
-->step 57, training accuracy 0.9922
-->step 58, training accuracy 0.9870
-->step 59, training accuracy 0.9922
-->step 60, training accuracy 0.9844
-->step 61, training accuracy 0.9922
-->step 62, training accuracy 0.9922
-->step 63, training accuracy 0.9896
-->step 64, training accuracy 0.9922
-->step 65, training accuracy 0.9922
-->step 66, training accuracy 0.9922
-->step 67, training accuracy 0.9922
-->step 68, training accuracy 0.9922
-->step 69, training accuracy 0.9922
-->step 70, training accuracy 0.9896
-->step 71, training accuracy 0.9922
-->step 72, training accuracy 0.9922
-->step 73, training accuracy 0.9922
-->step 74, training accuracy 0.9922
-->step 75, training accuracy 0.9922
-->step 76, training accuracy 0.9922
-->step 77, training accuracy 0.9922
-->step 78, training accuracy 0.9922
-->step 79, training accuracy 0.9922
-->step 80, training accuracy 0.9922
-->step 81, training accuracy 0.9922
-->step 82, training accuracy 0.9896
-->step 83, training accuracy 0.9922
-->step 84, training accuracy 0.9922
-->step 85, training accuracy 0.9922
-->step 86, training accuracy 0.9896
-->step 87, training accuracy 0.9922
-->step 88, training accuracy 0.9922
-->step 89, training accuracy 0.9922
-->step 90, training accuracy 0.9922
-->step 91, training accuracy 0.9922
-->step 92, training accuracy 0.9922
-->step 93, training accuracy 0.9922
-->step 94, training accuracy 0.9922
-->step 95, training accuracy 0.9922
-->step 96, training accuracy 0.9922
-->step 97, training accuracy 0.9948
-->step 98, training accuracy 0.9922
-->step 99, training accuracy 0.9922
-->step 100, training accuracy 0.9922
-->step 101, training accuracy 0.9896
-->step 102, training accuracy 0.9922
-->step 103, training accuracy 0.9922
-->step 104, training accuracy 0.9896
-->step 105, training accuracy 0.9766
-->step 106, training accuracy 0.9948
-->step 107, training accuracy 0.9948
-->step 108, training accuracy 0.9948
-->step 109, training accuracy 0.9922
-->step 110, training accuracy 0.9896
-->step 111, training accuracy 0.9922
-->step 112, training accuracy 0.9922
-->step 113, training accuracy 0.9922
-->step 114, training accuracy 0.9922
-->step 115, training accuracy 0.9922
-->step 116, training accuracy 0.9896
-->step 117, training accuracy 0.9922
-->step 118, training accuracy 0.9922
-->step 119, training accuracy 0.9922
-->step 120, training accuracy 0.9922
-->step 121, training accuracy 0.9922
-->step 122, training accuracy 0.9922
-->step 123, training accuracy 0.9922
-->step 124, training accuracy 0.9922
-->step 125, training accuracy 0.9922
-->step 126, training accuracy 0.9922
-->step 127, training accuracy 0.9922
-->step 128, training accuracy 0.9922
-->step 129, training accuracy 0.9922
-->step 130, training accuracy 0.9922
-->step 131, training accuracy 0.9922
-->step 132, training accuracy 0.9922
-->step 133, training accuracy 0.9896
-->step 134, training accuracy 0.9896
-->step 135, training accuracy 0.9896
-->step 136, training accuracy 0.9896
-->step 137, training accuracy 0.9922
-->step 138, training accuracy 0.9896
-->step 139, training accuracy 0.9922
-->step 140, training accuracy 0.9922
-->step 141, training accuracy 0.9922
-->step 142, training accuracy 0.9922
-->step 143, training accuracy 0.9922
-->step 144, training accuracy 0.9870
-->step 145, training accuracy 0.9896
-->step 146, training accuracy 0.9922
-->step 147, training accuracy 0.9922
-->step 148, training accuracy 0.9922
-->step 149, training accuracy 0.9922
-->step 150, training accuracy 0.9948
-->step 151, training accuracy 0.9922
-->step 152, training accuracy 0.9922
-->step 153, training accuracy 0.9922
-->step 154, training accuracy 0.9896
-->step 155, training accuracy 0.9922
-->step 156, training accuracy 0.9922
-->step 157, training accuracy 0.9922
-->step 158, training accuracy 0.9922
-->step 159, training accuracy 0.9948
-->step 160, training accuracy 0.9922
-->step 161, training accuracy 0.9922
-->step 162, training accuracy 0.9922
-->step 163, training accuracy 0.9922
-->step 164, training accuracy 0.9896
-->step 165, training accuracy 0.9922
-->step 166, training accuracy 0.9922
-->step 167, training accuracy 0.9922
-->step 168, training accuracy 0.9922
-->step 169, training accuracy 0.9922
-->step 170, training accuracy 0.9948
-->step 171, training accuracy 0.9922
-->step 172, training accuracy 0.9896
-->step 173, training accuracy 0.9922
-->step 174, training accuracy 0.9922
-->step 175, training accuracy 0.9896
-->step 176, training accuracy 0.9922
-->step 177, training accuracy 0.9922
-->step 178, training accuracy 0.9922
-->step 179, training accuracy 0.9922
-->step 180, training accuracy 0.9922
-->step 181, training accuracy 0.9922
-->step 182, training accuracy 0.9922
-->step 183, training accuracy 0.9922
-->step 184, training accuracy 0.9896
-->step 185, training accuracy 0.9922
-->step 186, training accuracy 0.9922
-->step 187, training accuracy 0.9922
-->step 188, training accuracy 0.9896
-->step 189, training accuracy 0.9922
-->step 190, training accuracy 0.9922
-->step 191, training accuracy 0.9922
-->step 192, training accuracy 0.9948
-->step 193, training accuracy 0.9922
-->step 194, training accuracy 0.9922
-->step 195, training accuracy 0.9922
-->step 196, training accuracy 0.9922
-->step 197, training accuracy 0.9922
-->step 198, training accuracy 0.9922
-->step 199, training accuracy 0.9896
[Finished in 3085.6s]
'''