TensorFlow image classification demo

# -*- coding: utf-8 -*-
"""
Created on Fri Jun 15 10:01:56 2018

@author: itdp03
"""

import tensorflow as tf  
import glob  
from itertools import groupby  
from collections import defaultdict  
from PIL import Image  
import os  
  

IMAGE_WIDTH = 256  
IMAGE_HEIGHT = 256
IMAGE_CHANNEL = 1 
  
sess = tf.InteractiveSession()  
  
# Find all files matching the pattern and return their paths as a list.
image_filenames = glob.glob("./Image/*/*.jpg")  # collect every image path

# Initialize the training and testing datasets as dicts of lists; defaultdict supplies a default value (an empty list) for keys that do not exist yet.
training_dataset = defaultdict(list)
testing_dataset = defaultdict(list)
  
# Split the breed name out of each file path. image_filename_with_breed is a list of
# (breed, path) tuples such as ('n02085620-Chihuahua', './imagenet-dogs/n02085620-Chihuahua/n02085620_10131.jpg').
# Note: splitting on "\\" assumes the Windows-style path separators returned by glob here; on Linux/macOS use os.sep.
image_filename_with_breed = list(map(lambda filename: (filename.split("\\")[-2], filename), image_filenames))

## Group the images by breed, which is the 0th element of each tuple above.
# groupby returns an iterator whose elements look like ('n02085620-Chihuahua', <grouper>):
# the first element is the breed and the second yields that breed's files; they correspond to
# dog_breed and breed_images in the loop below. groupby only groups consecutive items, which
# is satisfied here because glob lists each breed directory in turn.
for dog_breed, breed_images in groupby(image_filename_with_breed,  
                                       lambda x: x[0]):  
  
    # enumerate walks through breed_images and yields the index together with each element;
    # for example, the final values of i and breed_image are 168 and
    # ('n02116738-African_hunting_dog', './imagenet-dogs/n02116738-African_hunting_dog/n02116738_9924.jpg').
    for i, breed_image in enumerate(breed_images):  
  
        # Because breed_images is iterated breed by breed, roughly 20% of each breed goes
        # into the test set and roughly 80% into the training set.
        # testing_dataset and training_dataset are dicts; a typical entry of testing_dataset is
        # 'n02085620-Chihuahua': ['./imagenet-dogs/n02085620-Chihuahua/n02085620_10074.jpg',
        #                         './imagenet-dogs/n02085620-Chihuahua/n02085620_11140.jpg', ...]
        if i % 5 == 0:  
            testing_dataset[dog_breed].append(breed_image[1])  
        else:  
            training_dataset[dog_breed].append(breed_image[1])  
  
    # Check that each breed's test set holds at least 18% of that breed's images.
    breed_training_count = len(training_dataset[dog_breed])  
    breed_testing_count = len(testing_dataset[dog_breed])  
  
    # round(x, n): round the float x to n decimal places.
    assert round(breed_testing_count /  
                 (breed_training_count + breed_testing_count),  
                 2) > 0.18, "Not enough testing images."  
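
# Optional sanity check (a small sketch): print how many breeds and images ended up in each split.
#print(len(training_dataset), "breeds,", sum(len(v) for v in training_dataset.values()), "training images")
#print(len(testing_dataset), "breeds,", sum(len(v) for v in testing_dataset.values()), "testing images")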
  
"""
Fill a TFRecords file with the images found in `dataset` and include their category. 
Parameters 
---------- 
dataset : dict(list) 
  Dictionary with each key being a label for the list of image filenames of its value. 
record_location : str 
  Location to store the TFRecord output. 
"""
def write_records_file(dataset, record_location):
    if not os.path.exists(record_location):  
        print("目录 %s 不存在,自动创建中..." % (record_location))  
        os.makedirs(record_location)  
    writer = None  
  
    # current_index is used to break the output into a new TFRecord file every 100 images,
    # which keeps individual files small and avoids slowing down the writes.
    current_index = 0
    # Iterate over every breed in the dataset
    for breed, images_filenames in dataset.items():  
        # Iterate over each image file of this breed
        for image_filename in images_filenames:  
            if current_index % 100 == 0:  
                if writer:  
                    writer.close()  
                # Build the TFRecord file name with keyword arguments of str.format,
                # e.g. ./training-image/training-image-100.tfrecords
                record_filename = "{record_location}-{current_index}.tfrecords".format(
                    record_location=record_location,
                    current_index=current_index)
  
                writer = tf.python_io.TFRecordWriter(record_filename)  
            current_index += 1  
  
            
            # Reading the image with Image.open is roughly 10x faster than tf.read_file
            # in the author's tests, so PIL is used here.
            image = Image.open(image_filename)
            #test--hzz
            #grayscale_image = tf.image.rgb_to_grayscale(image)

            image = image.resize((IMAGE_WIDTH, IMAGE_HEIGHT))
            image_bytes = image.tobytes()  # serialize the pixel data to raw bytes
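            # Note (assumption about the input data): the reader script later reshapes these
            # bytes to [IMAGE_WIDTH, IMAGE_HEIGHT, 1], so the source JPEGs are assumed to be
            # single-channel; if they are RGB, convert them before resizing,
            # e.g. image = image.convert("L")  (Pillow's grayscale mode).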
            # Instead of using the label as a string, it'd be more efficient to turn it into either an  
            # integer index or a one-hot encoded rank one tensor.  
            
            # Encode the breed string as UTF-8 bytes so it can be stored in a BytesList
            image_label = breed.encode("utf-8")
            image_name = image_filename.split("\\")[-1].encode("utf-8")
            print(image_label)
            ## Create an Example protocol buffer. Each entry in feature={...}, e.g.
            # 'label': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image_label])),
            # stores one attribute of the example.
            example = tf.train.Example(  
                features=tf.train.Features(feature={  
                    'label':  
                    tf.train.Feature(bytes_list=tf.train.BytesList(  
                        value=[image_label])),
                    'name':                         
                    tf.train.Feature(bytes_list=tf.train.BytesList(  
                        value=[image_name])),
                    'image':  
                    tf.train.Feature(bytes_list=tf.train.BytesList(  
                        value=[image_bytes]))  
                }))  
            # SerializeToString() serializes the example into a binary string
            writer.write(example.SerializeToString())  
    writer.close()  
  
# Write the training and test data to TFRecord files under ./training-image/ and ./testing-image/ respectively.

write_records_file(training_dataset, "./training-image/training-image")
write_records_file(testing_dataset, "./testing-image/testing-image")
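
Before moving on, it is worth reading one record back to confirm the files were written as expected. The snippet below is a minimal sketch (it assumes the TF 1.x API used throughout this post and the ./training-image prefix chosen above); it parses the first serialized example and checks that the stored image holds 256*256*1 bytes:

import glob
import tensorflow as tf

record_files = glob.glob("./training-image/*.tfrecords")
example = tf.train.Example()
# Take the first serialized record from the first file and parse it back into an Example.
example.ParseFromString(next(tf.python_io.tf_record_iterator(record_files[0])))
print(example.features.feature['label'].bytes_list.value[0])       # the breed label bytes
print(len(example.features.feature['image'].bytes_list.value[0]))  # 256*256*1 = 65536 for grayscale input

The second script below reads these TFRecords back, builds a small CNN, and evaluates it on the test set.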

# -*- coding: utf-8 -*-

#from tensorflow.examples.tutorials.mnist import input_data
#mnist = input_data.read_data_sets(r'D:\project\tensorflow\MNIST_data', one_hot=True)
import tensorflow as tf
import glob
import numpy as np
import matplotlib.pyplot as plt
import os
IMG_WIDTH = 256
IMG_HEIGHT = 256
IMG_CHANNEL = 1
LABEL_COUNT = 2
BATCH_SIZE = 500

sess = tf.InteractiveSession()

x = tf.placeholder(tf.float32, shape=[BATCH_SIZE,IMG_WIDTH,IMG_HEIGHT,IMG_CHANNEL])
y_ = tf.placeholder(tf.float32, shape=[BATCH_SIZE, LABEL_COUNT])
#----------------------------------------------------
def read_tfrecord(serialized, batch_size):
    # parse_single_example parses one Example protocol buffer from the serialized record into tensors.
    # Each TFRecord file holds many images, but parse_single_example extracts a single example at a time;
    # it only parses the record and does not decode the image.
    features = tf.parse_single_example(
        serialized,  
        features={
            'label': tf.FixedLenFeature([], tf.string),  
            'name': tf.FixedLenFeature([], tf.string),  
            'image': tf.FixedLenFeature([], tf.string),  
        })  
  
    # Decode the raw image bytes as uint8 (all channel values lie in 0~255), then reshape
    record_image = tf.decode_raw(features['image'], tf.uint8)
    image = tf.reshape(record_image, [IMG_WIDTH,IMG_HEIGHT,IMG_CHANNEL])
    # Cast the label and file name to string tensors
    label_name = tf.cast(features['label'], tf.string)
    file_name = tf.cast(features['name'], tf.string)
    # Size of the buffer queue used to build batches; the rule of thumb is
    # capacity = min_after_dequeue + 3 * batch_size. A min_after_dequeue of 0
    # keeps the queue small but provides very little shuffling.
    min_after_dequeue = 0
    capacity = min_after_dequeue + 3 * batch_size
  
    # Produce the image, label-name, and file-name batches

    image_batch, label_name_batch,file_name_batch = tf.train.shuffle_batch( 
        [image, label_name, file_name],
        batch_size=batch_size,
        capacity=capacity,
        min_after_dequeue=min_after_dequeue)
    return image_batch, label_name_batch, file_name_batch
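# Note (assumption about the data): decode_raw + reshape above requires every stored image to
# contain exactly IMG_WIDTH*IMG_HEIGHT*IMG_CHANNEL bytes, i.e. the 256x256 single-channel
# images written by the first script; records written from RGB images would fail to reshape here.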

def convert_image(image_batch):
    # Convert uint8 pixel values to float32 in [0, 1]
    return tf.image.convert_image_dtype(image_batch, tf.float32)

def find_label_index(all_label_names, label_name_batch):
    # Map each label string in the batch to its integer index within all_label_names
    return tf.map_fn(
        lambda l: tf.where(tf.equal(all_label_names, l))[0, 0:1][0],
        label_name_batch,
        dtype=tf.int64)
    
def get_one_hot_labels(label_index_batch):
    labels = tf.expand_dims(label_index_batch, 1)
    # Expand dims from (BATCH_SIZE,) to (BATCH_SIZE, 1); the contents are unchanged, e.g. [[5],[4],[3],[8],[7]]
    print(labels)
    indices = tf.expand_dims(tf.range(0, BATCH_SIZE, 1), 1)
    # Row indices as a (BATCH_SIZE, 1) matrix, e.g. [[0],[1],[2],[3],[4]]

    concated = tf.concat([tf.to_int32(indices), tf.to_int32(labels)], 1)
    # Concatenate indices and labels along axis 1 into a (BATCH_SIZE, 2) matrix of
    # (row, label_index) pairs, e.g. [[0,5],[1,4],[2,3],[3,8],[4,7]]
    onehot_labels = tf.sparse_to_dense(
            concated, tf.stack([BATCH_SIZE, LABEL_COUNT]), 1.0, 0.0)
    return onehot_labels
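# The same one-hot encoding can be produced with a single call in the TF 1.x API;
# a minimal alternative sketch (not used above) would be:
#   onehot_labels = tf.one_hot(label_index_batch, depth=LABEL_COUNT, on_value=1.0, off_value=0.0)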
        
def weight_variable(shape):
    # Weights initialized from a truncated normal with stddev 0.1
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    # Biases initialized to a small positive constant
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    # 2-D convolution with stride 1 and SAME padding (output keeps the spatial size)
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    # 2x2 max pooling with stride 2 (halves the spatial size)
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
#
#---------------------------model---------------------------
#
#--- conv layer 1
CONV1_NODE_COUNT = 16
W_conv1 = weight_variable([5, 5, IMG_CHANNEL, CONV1_NODE_COUNT])
b_conv1 = bias_variable([CONV1_NODE_COUNT])
x_image = tf.reshape(x, [-1,IMG_WIDTH,IMG_HEIGHT,IMG_CHANNEL])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
#--- pooling layer 1
h_pool1 = max_pool_2x2(h_conv1)
#--- conv layer 2
CONV2_NODE_COUNT = CONV1_NODE_COUNT*2
W_conv2 = weight_variable([5, 5, CONV1_NODE_COUNT, CONV2_NODE_COUNT])
b_conv2 = bias_variable([CONV2_NODE_COUNT])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
#--- pooling layer 2
h_pool2 = max_pool_2x2(h_conv2)

#--- conv layer 3
CONV3_NODE_COUNT = CONV1_NODE_COUNT*4
W_conv3 = weight_variable([5, 5, CONV2_NODE_COUNT, CONV3_NODE_COUNT])
b_conv3 = bias_variable([CONV3_NODE_COUNT])
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)
#--- pooling layer 3
h_pool3 = max_pool_2x2(h_conv3)

#--- conv layer 4 (commented out)
#CONV4_NODE_COUNT = CONV1_NODE_COUNT*8
#W_conv4 = weight_variable([5, 5, CONV3_NODE_COUNT, CONV4_NODE_COUNT])
#b_conv4 = bias_variable([CONV4_NODE_COUNT])
#h_conv4 = tf.nn.relu(conv2d(h_pool3, W_conv4) + b_conv4)
##--- pooling layer 4
#h_pool4 = max_pool_2x2(h_conv4)

#-- flatten layer
# Note: the input width/height must remain integers after the three 2x2 poolings: 256/2/2/2 = 32
width_after_pool = int(IMG_WIDTH/2/2/2)
height_after_pool = int(IMG_HEIGHT/2/2/2)
h_flat = tf.reshape(h_pool3, [BATCH_SIZE, -1])
print("------------------")
print(h_flat.get_shape())
#--- fully connected layer 1
# flattened size = width_after_pool * height_after_pool * CONV3_NODE_COUNT = 32*32*64 = 65536

W_fc1 = weight_variable([width_after_pool * height_after_pool * CONV3_NODE_COUNT, 1024])
b_fc1 = bias_variable([1024])
h_fc1 = tf.nn.relu(tf.matmul(h_flat, W_fc1) + b_fc1)
#--- fully connected layer 2
W_fc2 = weight_variable([1024, 512])
b_fc2 = bias_variable([512])
h_fc2 = tf.nn.relu(tf.matmul(h_fc1, W_fc2) + b_fc2)

#--- output layer (dropout + linear projection to LABEL_COUNT logits)
keep_prob = tf.placeholder(tf.float32)
h_drop = tf.nn.dropout(h_fc2, keep_prob)
W_fc3 = weight_variable([512, LABEL_COUNT])
b_fc3 = bias_variable([LABEL_COUNT])
y_conv = tf.matmul(h_drop, W_fc3) + b_fc3

cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
#
#---------------------------model end---------------------------
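# For reference, the tensor shapes through the model above (derived from the layer definitions,
# with SAME padding and 2x2 stride-2 pooling):
#   input        [BATCH_SIZE, 256, 256,  1]
#   conv1/pool1  [BATCH_SIZE, 128, 128, 16]
#   conv2/pool2  [BATCH_SIZE,  64,  64, 32]
#   conv3/pool3  [BATCH_SIZE,  32,  32, 64]
#   flatten      [BATCH_SIZE, 65536]
#   fc1 / fc2    [BATCH_SIZE, 1024] / [BATCH_SIZE, 512]
#   output       [BATCH_SIZE, LABEL_COUNT]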
saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)



#glob_path = glob.glob(r"D:\project\tensorflow\mnistImg\*")
glob_path = glob.glob(r".\Image\*")
all_label_names = list(map(lambda c: c.split("\\")[-1], glob_path))  #n02085620-Chihuahua


filename_queue_train = tf.train.string_input_producer(
        tf.train.match_filenames_once(r".\training-image\*.tfrecords"))  
train_reader = tf.TFRecordReader()
_, serialized_train = train_reader.read(filename_queue_train)
train_image_batch, train_label_name_batch,train_file_name_batch = read_tfrecord(  
        serialized_train, BATCH_SIZE)

train_image_step = convert_image(train_image_batch)
train_label_index = find_label_index(all_label_names,train_label_name_batch)
train_label_step = get_one_hot_labels(train_label_index)

#test data prepare
filename_queue_test = tf.train.string_input_producer(
        tf.train.match_filenames_once(r".\testing-image\*.tfrecords"))
test_reader = tf.TFRecordReader()
_, serialized_test = test_reader.read(filename_queue_test)
test_image_batch, test_label_name_batch,test_file_name_batch = read_tfrecord(  
        serialized_test, BATCH_SIZE)
test_image_step = convert_image(test_image_batch)
test_label_index = find_label_index(all_label_names,test_label_name_batch)
test_label_step = get_one_hot_labels(test_label_index)
#------------init-----------
init_op = tf.group(tf.global_variables_initializer(),  
                   tf.local_variables_initializer())  
sess.run(init_op)  
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
#------------init end-----------
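# Note: start_queue_runners must run before evaluating any of the batch tensors above,
# otherwise sess.run would block forever waiting on the input queues.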

#sess.run(tf.global_variables_initializer())
#sess.run(tf.local_variables_initializer())
#--------train-----------
#for i in range(600):
#  train_images,train_labels,train_label_ii,label_names,file_names = sess.run(
#          [train_image_step,
#           train_label_step,
#           train_label_index,
#           train_label_name_batch,
#           train_file_name_batch])
# 
#
#  if i%10 == 0:
#    train_accuracy,loss_value = sess.run(
#            [accuracy,cross_entropy],
#            feed_dict={x:train_images, y_: train_labels, keep_prob: 1.0})
#    print("loss------------------------",loss_value)
#    print("step %d, training accuracy %g"%(i, train_accuracy))
#    print("------------------------")
#    print(sess.run(tf.cast(tf.argmax(y_conv,1),tf.int32),
#                   feed_dict={x: train_images, y_: train_labels, keep_prob: 1.0}))
#    print(sess.run(tf.cast(tf.argmax(y_,1),tf.int32),
#                   feed_dict={x: train_images, y_: train_labels, keep_prob: 1.0}))
#  train_step.run(feed_dict={x: train_images, y_: train_labels, keep_prob: 0.5})
#
#saver.save(sess, './model/model.ckpt')
#if not os.path.exists(' ./model/model.meta'):
#    saver.export_meta_graph("./model/model.meta")
#saver = tf.train.import_meta_graph('./model/model.meta')
# Restore a previously trained model from ./model/model.ckpt (the training loop above is
# left commented out, so this script only runs evaluation).
saver.restore(sess, './model/model.ckpt')
for i in range(50):
    test_images,test_labels = sess.run([test_image_step,test_label_step])
    print("test accuracy %g"%accuracy.eval(feed_dict={
        x: test_images, y_: test_labels, keep_prob: 1.0}))

coord.request_stop()  
# Wait for all threads to exit
coord.join(threads)  
sess.close() 
