Training and prediction on the CIFAR-10 dataset with a TFLearn CNN

What this post covers, adapted from the original TFLearn example:
1. Preprocess the CIFAR-10 dataset, splitting the original five training batches into a training set and a validation set;
2. Build a CNN with TFLearn, using three convolution + max-pooling blocks;
3. Evaluate the trained network on the original test set;
4. Predict the class of an arbitrary image;
5. Run the model on Alibaba Cloud PAI, storing it in the cloud.

Preprocessing & training code:

# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import

import tensorflow as tf

from six.moves import urllib
import tarfile

from tensorflow.python.lib.io import file_io
import os
import sys
import numpy as np
import pickle
import argparse
import scipy

from sklearn.preprocessing import LabelBinarizer


FLAGS = None


# For local files you can read a batch directly with open():
# ```
# def load_cfar10_batch(cifar10_dataset_folder_path, batch_id):
#     """
#     Load a batch of the dataset
#     """
#     with open(cifar10_dataset_folder_path + '/data_batch_' + str(batch_id), mode='rb') as file:
#         batch = pickle.load(file, encoding='latin1')
#
#     features = batch['data'].reshape((-1, 3, 32, 32)).transpose(0, 2, 3, 1)
#     labels = batch['labels']
#
#     return features, labels
# ```
#
# Note: features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
# Each raw row stores 1024 (32*32) red values, then 1024 green, then 1024 blue,
# so we first reshape to (3, 32, 32) and then transpose to (32, 32, 3).
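#
# A quick toy check of that layout conversion (hypothetical 2x2 image, not
# real CIFAR-10 data):
# ```
# import numpy as np
# row = np.arange(12)                              # [R0..R3, G0..G3, B0..B3]
# img = row.reshape((3, 2, 2)).transpose(1, 2, 0)
# print(img[0, 0])                                 # -> [0 4 8], the (R, G, B) of pixel (0, 0)
# ```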

def load_batch(fpath):
    # Read the pickled batch through file_io so the same code works on OSS
    # paths; binary_mode=True because pickle expects bytes, not str.
    raw = file_io.read_file_to_string(fpath, binary_mode=True)
    if sys.version_info > (3, 0):
        # Python 3: the CIFAR-10 batches were pickled with Python 2
        batch = pickle.loads(raw, encoding='latin1')
    else:
        # Python 2
        batch = pickle.loads(raw)
    features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
    labels = batch['labels']
    return features, labels


def normalize(x):
    """
    Normalize sample image data to the range [0, 1]
    : x: List of image data.  The image shape is (32, 32, 3)
    : return: Numpy array of normalized data
    """
    x = (x - np.min(x)) / (np.max(x) - np.min(x))
    return x
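
# For raw CIFAR-10 batches the pixels are uint8 values in [0, 255], so this
# min-max scaling is effectively x / 255 whenever a batch contains at least
# one 0 and one 255 value (true in practice for image batches).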

# from sklearn.preprocessing import LabelBinarizer
enc = LabelBinarizer()
enc.fit(range(10))

def one_hot_encode(x):
    """
    One hot encode a list of sample labels. Return a one-hot encoded vector for each label.
    : x: List of sample Labels
    : return: Numpy array of one-hot encoded labels
    """
    return enc.transform(x)
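
# e.g. one_hot_encode([0, 9]) ->
# [[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
#  [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]]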


def load_data(dirname):
    X_train = []
    Y_train = []

    for i in range(1, 6):
        fpath = os.path.join(dirname, 'data_batch_' + str(i))
        data, labels = load_batch(fpath)
        if i == 1:
            X_train = data
            Y_train = labels
        else:
            X_train = np.concatenate([X_train, data], axis=0)
            Y_train = np.concatenate([Y_train, labels], axis=0)

    fpath = os.path.join(dirname, 'test_batch')
    X_test, Y_test = load_batch(fpath)

    # Hold out the last 10% of the training data as a validation set
    validation_count = int(len(X_train) * 0.1)
    X_valid = X_train[-validation_count:]
    Y_valid = Y_train[-validation_count:]


    X_train = normalize(X_train[:-validation_count])
    Y_train = one_hot_encode(Y_train[:-validation_count])
    X_valid = normalize(X_valid)
    Y_valid = one_hot_encode(Y_valid)
    X_test = normalize(X_test)
    Y_test = one_hot_encode(Y_test)



    return (X_train, Y_train), (X_valid, Y_valid), (X_test, Y_test)
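
# With the standard CIFAR-10 batches (50,000 training / 10,000 test images),
# the 10% split above yields 45,000 training, 5,000 validation and 10,000
# test samples, each of shape (32, 32, 3).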


#reporthook from stackoverflow #13881092
def reporthook(blocknum, blocksize, totalsize):
    readsofar = blocknum * blocksize
    if totalsize > 0:
        percent = readsofar * 1e2 / totalsize
        s = "\r%5.1f%% %*d / %d" % (
            percent, len(str(totalsize)), readsofar, totalsize)
        sys.stderr.write(s)
        if readsofar >= totalsize: # near the end
            sys.stderr.write("\n")
    else: # total size is unknown
        sys.stderr.write("read %d\n" % (readsofar,))


import tflearn
from tflearn.data_utils import shuffle, to_categorical
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.estimator import regression
from tflearn.data_preprocessing import ImagePreprocessing
from tflearn.data_augmentation import ImageAugmentation


def main(_):
    dirname = os.path.join(FLAGS.buckets, "")
    (X, Y), (X_valid, Y_valid), (X_test, Y_test) = load_data(dirname)
    print("load data done")

    X, Y = shuffle(X, Y)

    # Real-time data preprocessing
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    # Real-time data augmentation
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)

    # Convolutional network building
    network = input_data(shape=[None, 32, 32, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    network = conv_2d(network, 32, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 3)
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 10, activation='softmax')
    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
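
    # Shape check, assuming TFLearn defaults ('same' padding, conv stride 1,
    # pool stride = pool size): 32x32 -> 16x16 -> 6x6 -> 3x3 feature maps,
    # so the last conv block feeds 3*3*64 = 576 values into the 512-unit
    # fully connected layer.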

    # Train using classifier
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(X, Y, n_epoch=50, shuffle=True, validation_set=(X_valid, Y_valid),
              show_metric=True, batch_size=96, run_id='cifar10_cnn')
    model_path = os.path.join(FLAGS.checkpointDir, "model.tfl")
    print(model_path)
    model.save(model_path)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Input data (OSS bucket) path
    parser.add_argument('--buckets', type=str, default='',
                        help='input data path')
    # Checkpoint / model output path
    parser.add_argument('--checkpointDir', type=str, default='',
                        help='output model path')
    FLAGS, _ = parser.parse_known_args()
    tf.app.run(main=main)
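
On PAI the --buckets and --checkpointDir arguments are filled in by the platform with OSS paths. For a local smoke test you can point both at ordinary directories, e.g. `python train.py --buckets ./cifar-10-batches-py/ --checkpointDir ./model/` (train.py being a hypothetical name for the script above).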

Restore the model and evaluate it on the test set:


# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import

import tensorflow as tf

from six.moves import urllib
import tarfile

from tensorflow.python.lib.io import file_io
import os
import sys
import numpy as np
import pickle
import argparse
import scipy

from sklearn.preprocessing import LabelBinarizer


FLAGS = None

def load_batch(fpath):
    # Read the pickled batch through file_io so the same code works on OSS
    # paths; binary_mode=True because pickle expects bytes, not str.
    raw = file_io.read_file_to_string(fpath, binary_mode=True)
    if sys.version_info > (3, 0):
        # Python 3: the CIFAR-10 batches were pickled with Python 2
        batch = pickle.loads(raw, encoding='latin1')
    else:
        # Python 2
        batch = pickle.loads(raw)
    features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
    labels = batch['labels']
    return features, labels


def normalize(x):
    """
    Normalize sample image data to the range [0, 1]
    : x: List of image data.  The image shape is (32, 32, 3)
    : return: Numpy array of normalized data
    """
    x = (x - np.min(x)) / (np.max(x) - np.min(x))
    return x

# from sklearn.preprocessing import LabelBinarizer
enc = LabelBinarizer()
enc.fit(range(10))

def one_hot_encode(x):
    """
    One hot encode a list of sample labels. Return a one-hot encoded vector for each label.
    : x: List of sample Labels
    : return: Numpy array of one-hot encoded labels
    """
    return enc.transform(x)


def load_data(dirname):   
    fpath = os.path.join(dirname, 'test_batch')
    X_test, Y_test = load_batch(fpath)

    X_test = normalize(X_test)
    Y_test = one_hot_encode(Y_test)

    return (X_test, Y_test)


#reporthook from stackoverflow #13881092
def reporthook(blocknum, blocksize, totalsize):
    readsofar = blocknum * blocksize
    if totalsize > 0:
        percent = readsofar * 1e2 / totalsize
        s = "\r%5.1f%% %*d / %d" % (
            percent, len(str(totalsize)), readsofar, totalsize)
        sys.stderr.write(s)
        if readsofar >= totalsize: # near the end
            sys.stderr.write("\n")
    else: # total size is unknown
        sys.stderr.write("read %d\n" % (readsofar,))


import tflearn
from tflearn.data_utils import shuffle, to_categorical
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.estimator import regression
from tflearn.data_preprocessing import ImagePreprocessing
from tflearn.data_augmentation import ImageAugmentation

def main(_):
    dirname = os.path.join(FLAGS.buckets, "")
    (X_test, Y_test) = load_data(dirname)
    print("load data done")

    # Real-time data preprocessing
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    # Real-time data augmentation
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
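
    # Note: the preprocessing/augmentation setup here only needs to match the
    # training graph so that model.load() can restore the weights; TFLearn
    # applies ImageAugmentation during training only, not at evaluation time.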

    # Convolutional network building
    network = input_data(shape=[None, 32, 32, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    network = conv_2d(network, 32, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 3)
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 10, activation='softmax')
    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)

    # Test using model
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model_path = os.path.join(FLAGS.checkpointDir, "model.tfl")
    print(model_path)
    model.load(model_path)

    result = model.evaluate(X_test, Y_test)
    print(result)
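    # DNN.evaluate returns a list of metric scores, here a single accuracy value.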


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Input data (OSS bucket) path
    parser.add_argument('--buckets', type=str, default='',
                        help='input data path')
    # Checkpoint / model output path
    parser.add_argument('--checkpointDir', type=str, default='',
                        help='output model path')
    FLAGS, _ = parser.parse_known_args()
    tf.app.run(main=main)

Load any single image and run a prediction:

# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import

import tensorflow as tf

from six.moves import urllib
import tarfile

import tflearn
from tflearn.data_utils import shuffle, to_categorical
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.estimator import regression
from tflearn.data_preprocessing import ImagePreprocessing
from tflearn.data_augmentation import ImageAugmentation

from tensorflow.python.lib.io import file_io
import os
import sys
import numpy as np
import pickle
import argparse
import scipy.misc
import scipy.ndimage
FLAGS = None


# (helper functions identical to the test script above, omitted ...)

def main(_):
    # (image preprocessing, augmentation and network definition identical to
    # the test script above, omitted ...)
    # Build the model and restore the trained weights; no training here
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model_path = os.path.join(FLAGS.checkpointDir, "model.tfl")
    print(model_path)
    model.load(model_path)



    # Copy the image from the input bucket to a local file so scipy can read it
    predict_pic = os.path.join(FLAGS.buckets, "bird_bullocks_oriole.jpg")
    img_obj = file_io.read_file_to_string(predict_pic, binary_mode=True)
    with file_io.FileIO("bird_bullocks_oriole.jpg", mode='wb') as f:
        f.write(img_obj)

    img = scipy.ndimage.imread("bird_bullocks_oriole.jpg", mode="RGB")

    # Scale it to 32x32
    img = scipy.misc.imresize(img, (32, 32), interp="bicubic").astype(np.float32, casting='unsafe')

    # Predict: model.predict returns softmax probabilities, one row per image
    prediction = model.predict([img])
    print(prediction[0])
    class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
                   'dog', 'frog', 'horse', 'ship', 'truck']
    print("This is a %s" % class_names[np.argmax(prediction[0])])


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Input data (OSS bucket) path
    parser.add_argument('--buckets', type=str, default='',
                        help='input data path')
    # Checkpoint / model output path
    parser.add_argument('--checkpointDir', type=str, default='',
                        help='output model path')
    FLAGS, _ = parser.parse_known_args()
    tf.app.run(main=main)
