使用Keras进行图像分类

Keras深度学习框架可以用来了解深度学习可以用来做什么,下面介绍一些使用Keras来做基础的图像分类的内容,欢迎各位交流。

参考资料:https://morvanzhou.github.io/tutorials/machine-learning/keras/2-3-CNN/

我使用的版本:Python2.7,numpy1.13.1,Theano0.9.0,Keras2.0.6,h5py2.5.0,opencv2.4.13,WIN7系统。

 

要做图像分类,首先需要有数据集,需要将下载到的图像数据集转化为Keras可以识别的numpy矩阵。需要得到X_train,X_test,y_train,y_test,其中X_train和X_test分别是一个4维矩阵,第一维代表有几幅图像,后三维代表图像数据,y_train和y_test是一维列表,表示对应的图像属于哪一类。

 

可以下载到的图像数据集一般分为两种,一种是由若干文件夹组成,每个文件夹的名字是该类别的名字,每个文件夹中包含若干图像,这种数据集需要自己划分训练集和测试集;另一种由train文件夹和test文件夹组成,每个文件夹中有一些文件夹,其名字是类别的名字,其中有很多的图像,这种则固定了训练集和测试集。有时候数据集中会有文件来说明图像的名字和对应的标注,但是对于图像分类来说,不需要这些标注也可以提取出需要的numpy矩阵。

 

这里使用简单的网络对Caltech101数据集进行分类,这里暂时不考虑去除背景类,经过简单的改动后也可对cifar10数据集进行分类。如果需要更高的准确率,需要修改所用的网络。

提取的方法如下:(get_data和get_2data函数分别对应上面说的两种数据集。)

def eachFile(filepath):                 # Collect the entry names of a directory into a list
    """Return a list of the entry names directly under *filepath*.

    Byte names are decoded with GBK so Chinese file names display
    correctly (on Python 2/Windows os.listdir returns byte strings);
    names that are already text are passed through unchanged.
    """
    out = []
    for entry in os.listdir(filepath):
        # Only byte strings need decoding; text names pass through as-is.
        if isinstance(entry, bytes):
            entry = entry.decode('gbk')
        out.append(entry)
    return out

def get_data(data_name,train_percentage=0.7,resize=True,data_format=None):   # Load image data from class sub-directories
    """Build (X_train, y_train), (X_test, y_test) arrays from a dataset laid
    out as one sub-directory per class.

    The first ``train_percentage`` of each class's images go to the training
    set and the rest to the test set.  The resulting arrays are cached to a
    pickle file so later runs can skip the slow directory walk.

    Relies on the module-level globals Width, Height, pic_dir_out and
    pic_dir_data being set before the call (see main()).
    """
    file_name = os.path.join(pic_dir_out, data_name + str(Width) + "X" + str(Height) + ".pkl")
    if os.path.exists(file_name):           # Reuse the cached arrays when present
        with open(file_name, "rb") as f:    # 'with' closes the handle even on unpickling errors
            (X_train, y_train), (X_test, y_test) = cPickle.load(f)
        return (X_train, y_train), (X_test, y_test)
    data_format = conv_utils.normalize_data_format(data_format)
    pic_dir_set = eachFile(pic_dir_data)
    X_train = []
    y_train = []
    X_test = []
    y_test = []
    label = 0
    for pic_dir in pic_dir_set:
        print(pic_dir_data + pic_dir)
        if not os.path.isdir(os.path.join(pic_dir_data, pic_dir)):
            continue
        pic_set = eachFile(os.path.join(pic_dir_data, pic_dir))
        pic_index = 0
        train_count = int(len(pic_set) * train_percentage)
        for pic_name in pic_set:
            if not os.path.isfile(os.path.join(pic_dir_data, pic_dir, pic_name)):
                continue
            img = cv2.imread(os.path.join(pic_dir_data, pic_dir, pic_name))
            if img is None:                 # skip files OpenCV cannot decode
                continue
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            if resize:
                img = cv2.resize(img, (Width, Height))
            # NOTE(review): cv2.resize yields a (Height, Width) array; the
            # reshape below assumes Width == Height -- confirm if they differ.
            if data_format == 'channels_last':
                img = img.reshape(-1, Width, Height, 1)
            elif data_format == 'channels_first':
                img = img.reshape(-1, 1, Width, Height)
            if pic_index < train_count:
                X_train.append(img)
                y_train.append(label)
            else:
                X_test.append(img)
                y_test.append(label)
            pic_index += 1
        if len(pic_set) != 0:               # '!=' replaces the deprecated '<>' operator
            label += 1
    X_train = np.concatenate(X_train, axis=0)
    X_test = np.concatenate(X_test, axis=0)
    y_train = np.array(y_train)
    y_test = np.array(y_test)
    with open(file_name, "wb") as f:        # cache the arrays for the next run
        cPickle.dump([(X_train, y_train), (X_test, y_test)], f)
    return (X_train, y_train), (X_test, y_test)

def get_2data(data_name,resize=True,data_format=None):   # Used when the dataset is already split into train and test directories
    """Build (X_train, y_train), (X_test, y_test) from a dataset laid out as
    train/ and test/ directories, each containing one sub-directory per class.

    Images are routed to the train or test arrays depending on whether
    'train' or 'test' appears in the top-level directory name.  The arrays
    are cached to a pickle file so later runs can skip the directory walk.

    Relies on the module-level globals Width, Height, pic_dir_out and
    pic_dir_data being set before the call (see main()).
    """
    file_name = os.path.join(pic_dir_out, data_name + str(Width) + "X" + str(Height) + ".pkl")
    if os.path.exists(file_name):           # Reuse the cached arrays when present
        with open(file_name, "rb") as f:    # 'with' closes the handle even on unpickling errors
            (X_train, y_train), (X_test, y_test) = cPickle.load(f)
        return (X_train, y_train), (X_test, y_test)
    data_format = conv_utils.normalize_data_format(data_format)
    all_dir_set = eachFile(pic_dir_data)
    X_train = []
    y_train = []
    X_test = []
    y_test = []

    for all_dir in all_dir_set:
        if not os.path.isdir(os.path.join(pic_dir_data, all_dir)):
            continue
        label = 0                           # labels restart for each of train/ and test/
        pic_dir_set = eachFile(os.path.join(pic_dir_data, all_dir))
        for pic_dir in pic_dir_set:
            print(pic_dir_data + pic_dir)
            if not os.path.isdir(os.path.join(pic_dir_data, all_dir, pic_dir)):
                continue
            pic_set = eachFile(os.path.join(pic_dir_data, all_dir, pic_dir))
            for pic_name in pic_set:
                if not os.path.isfile(os.path.join(pic_dir_data, all_dir, pic_dir, pic_name)):
                    continue
                img = cv2.imread(os.path.join(pic_dir_data, all_dir, pic_dir, pic_name))
                if img is None:             # skip files OpenCV cannot decode
                    continue
                img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                if resize:
                    img = cv2.resize(img, (Width, Height))
                # NOTE(review): cv2.resize yields a (Height, Width) array; the
                # reshape below assumes Width == Height -- confirm if they differ.
                if data_format == 'channels_last':
                    img = img.reshape(-1, Width, Height, 1)
                elif data_format == 'channels_first':
                    img = img.reshape(-1, 1, Width, Height)
                if 'train' in all_dir:
                    X_train.append(img)
                    y_train.append(label)
                elif 'test' in all_dir:
                    X_test.append(img)
                    y_test.append(label)
            if len(pic_set) != 0:           # '!=' replaces the deprecated '<>' operator
                label += 1
    X_train = np.concatenate(X_train, axis=0)
    X_test = np.concatenate(X_test, axis=0)
    y_train = np.array(y_train)
    y_test = np.array(y_test)
    with open(file_name, "wb") as f:        # cache the arrays for the next run
        cPickle.dump([(X_train, y_train), (X_test, y_test)], f)
    return (X_train, y_train), (X_test, y_test)

其中的一些参数值为

    Width = 32                           # every image is resized to Width x Height
    Height = 32
    num_classes = 102                    # Caltech101: 101 classes plus the background class
    pic_dir_out = 'E:/pic_cnn/pic_out/'  
    pic_dir_data = 'E:/pic_cnn/pic_dataset/Caltech101/'  

如果每次都要遍历这些文件夹,获得numpy矩阵,还是比较慢的,通过文件存取的方式,可以将提取到的矩阵存成文件,之后运行的时候就可以较快的运行。


接下来需要对数据做预处理,先将图像数值转换到0到1之间,如果不这样做准确率会下降。np_utils.to_categorical的用途是,假设图像分为10类,得到的y_train和y_test就是0到9的数字组成的列表,需要将它做一个变换,例如其中的数字5,表示第6类,变化之后为[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],第6位取值为1。原因是之后得到的对每幅图像的预测结果,也是一个10维的列表,例如[0, 0, 0, 0.1, 0, 0.8, 0, 0.1, 0, 0],其中的最大值如果和实际值是在同一位,说明预测准确。

    X_train = X_train/255.              # preprocessing: scale pixel values into [0, 1]
    X_test = X_test/255.
    print X_train.shape
    print X_test.shape
    y_train = np_utils.to_categorical(y_train, num_classes)   # one-hot encode the integer labels
    y_test = np_utils.to_categorical(y_test, num_classes)

之后就可以使用Keras构建一些简单的CNN结构。

所设计的CNN结构代码如下:

    model = Sequential()                # build the CNN layer by layer
    model.add(Convolution2D(
        input_shape=(Width, Height, 1),
        #input_shape=(1, Width, Height),
        filters=8,                      # 8 kernels -> 8 feature maps
        kernel_size=3,
        strides=1,
        padding='same',     
        data_format='channels_last',
    ))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(
        pool_size=2,                    # 2x2 pooling with stride 2 halves width and height
        strides=2,
        data_format='channels_last',
    ))
    model.add(Convolution2D(16, 3, strides=1, padding='same', data_format='channels_last'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(2, 2, data_format='channels_last'))
    model.add(Dropout(0.5))             # randomly drop connections to reduce overfitting
    model.add(Flatten())                # flatten the feature maps into a 1-D vector
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    
    model.add(Dense(num_classes, activation='softmax'))   # one probability per class
    
    model.compile(optimizer=Adam(),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

Convolution2D层(卷积层)相当于用卷积核去扫描原图像,得到一些新的图像。其中的参数filters表示卷积核的数量,也就是得到的新图像的数量,kernel_size是卷积核的大小,strides是每次扫描移动几个像素,padding表示是否通过在图像周围加一圈0,使得生成的卷积图像大小与原图像相同,'same'表示加一圈0,默认'valid'表示不加,data_format表示图像的通道位于3维中的前面还是后面。另外,第一个层需要注明input_shape,表示输入图像的大小。

Activation层(激励函数)是一个函数,将前面传递过来的值做一个变换,这个函数需要有导数,常用的有relu,softmax。relu是当x小于0时,y等于0,当x大于0时,y等于x。

MaxPooling2D层(池化层)将前面得到的卷积图像,用一个小方格来扫描,每个方格中只记录它的最大值,扫描结束之后会产生新的小一些的图像。pool_size表示方格的大小,strides表示每次移动的长度,如果都为2则会使图像的长和宽都除以2。

Flatten层是将图像展平成一维的列表。

Dense层(全连接层)可以使参数的数量发生变化,参数units表示该层有多少个神经元,可以改变输出结果的维度。

Dropout层表示对其相邻的两层训练参数时,会随机的丢弃一定百分比的神经元的连接,减少过拟合的现象。

softmax激励函数可以将输出的结果转化为0到1之间的浮点数,同一个列表中所有数值的和为1,可以当作是分为该类的概率。

 

结构设计好之后,需要通过compile函数定义一些优化参数的方式。

optimizer表示梯度下降是选用哪种优化器来优化参数,loss表示损失值的计算使用哪种方式,metrics表示对测试数据evaluate时,性能评估的方法。

 

然后就可以使用训练数据进行训练了。训练过程如下:

    print('\nTraining ------------')    # resume weights from a file, train, then save to a new file
    cm = 0
    cm_str = '' if cm==0 else str(cm)
    cm2_str = '' if (cm+1)==0 else str(cm+1)  
    if cm >= 1:                         # cm >= 1 means continue from a previous run's weights
        model.load_weights(os.path.join(pic_dir_out,'cnn_model_Caltech101_'+cm_str+'.h5'))
        #model.load_weights(os.path.join(pic_dir_out,'cnn_model_Cifar10_'+cm_str+'.h5'))    
    model.fit(X_train, y_train, epochs=10, batch_size=128,)   # the actual training run
    model.save_weights(os.path.join(pic_dir_out,'cnn_model_Caltech101_'+cm2_str+'.h5'))

epochs参数表示总共进行多少轮训练,batch_size表示每次梯度更新会用到多少组数据。这里增加了一些小的操作的用途是,每次训练完网络的参数后保存成文件,递增修改cm的值后再运行可以先读取上次训练的参数,然后再接着训练。我在运行代码的过程中发现,该程序消耗的内存会不断增加,使得epochs的值不能取一个非常大的值,所以只能多次运行才能得到收敛的结果。暂时不清楚有没有办法减小内存的消耗。

 

最后是对测试数据进行预测,并评估结果。这里得到模型最终的损失值和准确率,以及top-N的准确率,和每个类别的准确率。

    print('\nTesting ------------')     # evaluate on the test set; also report the extra metrics
    loss, accuracy = model.evaluate(X_test, y_test)
    print('\n')
    print('test loss: ', loss)
    print('test accuracy: ', accuracy)
    
    class_name_list = get_name_list(pic_dir_data)   # list of class names, one per sub-directory
    pred = model.predict(X_test, batch_size=128)    # compute per-class top-N accuracy
    N = 5
    pred_list = []
    for row in pred:
        pred_list.append(row.argsort()[-N:][::-1])  # indices of the N highest-scoring classes
    pred_array = np.array(pred_list)
    test_arg = np.argmax(y_test,axis=1)             # true class index of each test image
    class_count = [0 for _ in xrange(num_classes)]  # number of test images per class
    class_acc = [0 for _ in xrange(num_classes)]    # top-N hits per class
    for i in xrange(len(test_arg)):
        class_count[test_arg[i]] += 1
        if test_arg[i] in pred_array[i]:            # hit when the true class is among the top N
            class_acc[test_arg[i]] += 1
    print('top-'+str(N)+' all acc:',str(sum(class_acc))+'/'+str(len(test_arg)),sum(class_acc)/float(len(test_arg)))
    for i in xrange(num_classes):
        print (i, class_name_list[i], 'acc: '+str(class_acc[i])+'/'+str(class_count[i]))

完整代码如下:

import cv2
import numpy as np

from keras.utils import np_utils, conv_utils
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D, Flatten, Dropout, Dense, Activation
from keras.optimizers import Adam

import os
import cPickle

def get_name_list(filepath):                # Get the name of every class (one sub-directory per class)
    """Return the names of the sub-directories of *filepath*.

    Each sub-directory of the dataset root is one class, so the result is
    the list of class names.  Byte names are decoded with GBK so Chinese
    directory names display correctly (Python 2/Windows os.listdir returns
    byte strings); text names pass through unchanged.
    """
    out = []
    for entry in os.listdir(filepath):
        if os.path.isdir(os.path.join(filepath, entry)):
            if isinstance(entry, bytes):
                entry = entry.decode('gbk')
            out.append(entry)
    return out
    
def eachFile(filepath):                 # Collect the entry names of a directory into a list
    """Return a list of the entry names directly under *filepath*.

    Byte names are decoded with GBK so Chinese file names display
    correctly (on Python 2/Windows os.listdir returns byte strings);
    names that are already text are passed through unchanged.
    """
    out = []
    for entry in os.listdir(filepath):
        # Only byte strings need decoding; text names pass through as-is.
        if isinstance(entry, bytes):
            entry = entry.decode('gbk')
        out.append(entry)
    return out

def get_data(data_name,train_percentage=0.7,resize=True,data_format=None):   # Load image data from class sub-directories
    """Build (X_train, y_train), (X_test, y_test) arrays from a dataset laid
    out as one sub-directory per class.

    The first ``train_percentage`` of each class's images go to the training
    set and the rest to the test set.  The resulting arrays are cached to a
    pickle file so later runs can skip the slow directory walk.

    Relies on the module-level globals Width, Height, pic_dir_out and
    pic_dir_data being set before the call (see main()).
    """
    file_name = os.path.join(pic_dir_out, data_name + str(Width) + "X" + str(Height) + ".pkl")
    if os.path.exists(file_name):           # Reuse the cached arrays when present
        with open(file_name, "rb") as f:    # 'with' closes the handle even on unpickling errors
            (X_train, y_train), (X_test, y_test) = cPickle.load(f)
        return (X_train, y_train), (X_test, y_test)
    data_format = conv_utils.normalize_data_format(data_format)
    pic_dir_set = eachFile(pic_dir_data)
    X_train = []
    y_train = []
    X_test = []
    y_test = []
    label = 0
    for pic_dir in pic_dir_set:
        print(pic_dir_data + pic_dir)
        if not os.path.isdir(os.path.join(pic_dir_data, pic_dir)):
            continue
        pic_set = eachFile(os.path.join(pic_dir_data, pic_dir))
        pic_index = 0
        train_count = int(len(pic_set) * train_percentage)
        for pic_name in pic_set:
            if not os.path.isfile(os.path.join(pic_dir_data, pic_dir, pic_name)):
                continue
            img = cv2.imread(os.path.join(pic_dir_data, pic_dir, pic_name))
            if img is None:                 # skip files OpenCV cannot decode
                continue
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            if resize:
                img = cv2.resize(img, (Width, Height))
            # NOTE(review): cv2.resize yields a (Height, Width) array; the
            # reshape below assumes Width == Height -- confirm if they differ.
            if data_format == 'channels_last':
                img = img.reshape(-1, Width, Height, 1)
            elif data_format == 'channels_first':
                img = img.reshape(-1, 1, Width, Height)
            if pic_index < train_count:
                X_train.append(img)
                y_train.append(label)
            else:
                X_test.append(img)
                y_test.append(label)
            pic_index += 1
        if len(pic_set) != 0:               # '!=' replaces the deprecated '<>' operator
            label += 1
    X_train = np.concatenate(X_train, axis=0)
    X_test = np.concatenate(X_test, axis=0)
    y_train = np.array(y_train)
    y_test = np.array(y_test)
    with open(file_name, "wb") as f:        # cache the arrays for the next run
        cPickle.dump([(X_train, y_train), (X_test, y_test)], f)
    return (X_train, y_train), (X_test, y_test)

def get_2data(data_name,resize=True,data_format=None):   # Used when the dataset is already split into train and test directories
    """Build (X_train, y_train), (X_test, y_test) from a dataset laid out as
    train/ and test/ directories, each containing one sub-directory per class.

    Images are routed to the train or test arrays depending on whether
    'train' or 'test' appears in the top-level directory name.  The arrays
    are cached to a pickle file so later runs can skip the directory walk.

    Relies on the module-level globals Width, Height, pic_dir_out and
    pic_dir_data being set before the call (see main()).
    """
    file_name = os.path.join(pic_dir_out, data_name + str(Width) + "X" + str(Height) + ".pkl")
    if os.path.exists(file_name):           # Reuse the cached arrays when present
        with open(file_name, "rb") as f:    # 'with' closes the handle even on unpickling errors
            (X_train, y_train), (X_test, y_test) = cPickle.load(f)
        return (X_train, y_train), (X_test, y_test)
    data_format = conv_utils.normalize_data_format(data_format)
    all_dir_set = eachFile(pic_dir_data)
    X_train = []
    y_train = []
    X_test = []
    y_test = []

    for all_dir in all_dir_set:
        if not os.path.isdir(os.path.join(pic_dir_data, all_dir)):
            continue
        label = 0                           # labels restart for each of train/ and test/
        pic_dir_set = eachFile(os.path.join(pic_dir_data, all_dir))
        for pic_dir in pic_dir_set:
            print(pic_dir_data + pic_dir)
            if not os.path.isdir(os.path.join(pic_dir_data, all_dir, pic_dir)):
                continue
            pic_set = eachFile(os.path.join(pic_dir_data, all_dir, pic_dir))
            for pic_name in pic_set:
                if not os.path.isfile(os.path.join(pic_dir_data, all_dir, pic_dir, pic_name)):
                    continue
                img = cv2.imread(os.path.join(pic_dir_data, all_dir, pic_dir, pic_name))
                if img is None:             # skip files OpenCV cannot decode
                    continue
                img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                if resize:
                    img = cv2.resize(img, (Width, Height))
                # NOTE(review): cv2.resize yields a (Height, Width) array; the
                # reshape below assumes Width == Height -- confirm if they differ.
                if data_format == 'channels_last':
                    img = img.reshape(-1, Width, Height, 1)
                elif data_format == 'channels_first':
                    img = img.reshape(-1, 1, Width, Height)
                if 'train' in all_dir:
                    X_train.append(img)
                    y_train.append(label)
                elif 'test' in all_dir:
                    X_test.append(img)
                    y_test.append(label)
            if len(pic_set) != 0:           # '!=' replaces the deprecated '<>' operator
                label += 1
    X_train = np.concatenate(X_train, axis=0)
    X_test = np.concatenate(X_test, axis=0)
    y_train = np.array(y_train)
    y_test = np.array(y_test)
    with open(file_name, "wb") as f:        # cache the arrays for the next run
        cPickle.dump([(X_train, y_train), (X_test, y_test)], f)
    return (X_train, y_train), (X_test, y_test)

def main():
    """Load the Caltech101 images, train a small CNN and report accuracy."""
    global Width, Height, pic_dir_out, pic_dir_data
    Width = 32
    Height = 32
    num_classes = 102                   # 102 for Caltech101, 10 for cifar10
    pic_dir_out = 'E:/pic_cnn/pic_out/'  
    pic_dir_data = 'E:/pic_cnn/pic_dataset/Caltech101/'  
    (X_train, y_train), (X_test, y_test) = get_data("Caltech101_gray_data_",0.7,data_format='channels_last')
    #pic_dir_data = 'E:/pic_cnn/pic_dataset/cifar10/'
    #(X_train, y_train), (X_test, y_test) = get_2data("Cifar10_gray_data_",resize=False,data_format='channels_last')
    
    X_train = X_train/255.              # preprocessing: scale pixel values into [0, 1]
    X_test = X_test/255.
    print X_train.shape
    print X_test.shape
    y_train = np_utils.to_categorical(y_train, num_classes)   # one-hot encode the integer labels
    y_test = np_utils.to_categorical(y_test, num_classes)
    
    model = Sequential()                # build the CNN layer by layer
    model.add(Convolution2D(
        input_shape=(Width, Height, 1),
        #input_shape=(1, Width, Height),
        filters=8,
        kernel_size=3,
        strides=1,
        padding='same',     
        data_format='channels_last',
    ))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(
        pool_size=2,
        strides=2,
        data_format='channels_last',
    ))
    model.add(Convolution2D(16, 3, strides=1, padding='same', data_format='channels_last'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(2, 2, data_format='channels_last'))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    
    model.add(Dense(num_classes, activation='softmax'))
    
    model.compile(optimizer=Adam(),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
        
    print('\nTraining ------------')    # resume weights from a file, train, then save to a new file
    cm = 0                              # bump this value to continue training across runs
    cm_str = '' if cm==0 else str(cm)
    cm2_str = '' if (cm+1)==0 else str(cm+1)  
    if cm >= 1:
        model.load_weights(os.path.join(pic_dir_out,'cnn_model_Caltech101_'+cm_str+'.h5'))
        #model.load_weights(os.path.join(pic_dir_out,'cnn_model_Cifar10_'+cm_str+'.h5'))    
    model.fit(X_train, y_train, epochs=10, batch_size=128,)   # the actual training run
    model.save_weights(os.path.join(pic_dir_out,'cnn_model_Caltech101_'+cm2_str+'.h5'))
     
    print('\nTesting ------------')     # evaluate on the test set; also report the extra metrics
    loss, accuracy = model.evaluate(X_test, y_test)
    print('\n')
    print('test loss: ', loss)
    print('test accuracy: ', accuracy)
    
    class_name_list = get_name_list(pic_dir_data)    # compute per-class top-N accuracy
    #class_name_list = get_name_list(os.path.join(pic_dir_data,'train'))
    pred = model.predict(X_test, batch_size=128)
    N = 5
    pred_list = []
    for row in pred:
        pred_list.append(row.argsort()[-N:][::-1])  # indices of the N highest-scoring classes
    pred_array = np.array(pred_list)
    test_arg = np.argmax(y_test,axis=1)
    class_count = [0 for _ in xrange(num_classes)]
    class_acc = [0 for _ in xrange(num_classes)]
    for i in xrange(len(test_arg)):
        class_count[test_arg[i]] += 1
        if test_arg[i] in pred_array[i]:
            class_acc[test_arg[i]] += 1
    print('top-'+str(N)+' all acc:',str(sum(class_acc))+'/'+str(len(test_arg)),sum(class_acc)/float(len(test_arg)))
    for i in xrange(num_classes):
        print (i, class_name_list[i], 'acc: '+str(class_acc[i])+'/'+str(class_count[i]))
    
if __name__ == '__main__':
    main()

运行结果如下:

(6353, 32, 32, 1)
(2792, 32, 32, 1)

Training ------------
Epoch 1/10
6353/6353 [==============================] - 8s - loss: 4.2459 - acc: 0.1152     
Epoch 2/10
6353/6353 [==============================] - 8s - loss: 3.8954 - acc: 0.1942     
Epoch 3/10
6353/6353 [==============================] - 8s - loss: 3.6121 - acc: 0.2500     
Epoch 4/10
6353/6353 [==============================] - 8s - loss: 3.3974 - acc: 0.2811     
Epoch 5/10
6353/6353 [==============================] - 8s - loss: 3.2033 - acc: 0.3101     
Epoch 6/10
6353/6353 [==============================] - 9s - loss: 3.0413 - acc: 0.3343     
Epoch 7/10
6353/6353 [==============================] - 9s - loss: 2.9090 - acc: 0.3559     
Epoch 8/10
6353/6353 [==============================] - 9s - loss: 2.7931 - acc: 0.3760     
Epoch 9/10
6353/6353 [==============================] - 9s - loss: 2.7039 - acc: 0.3897     
Epoch 10/10
6353/6353 [==============================] - 9s - loss: 2.6152 - acc: 0.4003     

Testing ------------
2720/2792 [============================>.] - ETA: 0s

('test loss: ', 2.5188725370177227)
('test accuracy: ', 0.42836676217765041)
('top-5 all acc:', '1754/2792', 0.6282234957020058)
(0, u'0.accordion', 'acc: 15/17')
(1, u'1.airplanes', 'acc: 238/240')
(2, u'10.brain', 'acc: 7/30')
(3, u'100.wrench', 'acc: 5/12')
(4, u'101.yin_yang', 'acc: 15/18')
(5, u'11.brontosaurus', 'acc: 7/13')
(6, u'12.buddha', 'acc: 9/26')
(7, u'13.butterfly', 'acc: 6/28')
(8, u'14.camera', 'acc: 5/15')
(9, u'15.cannon', 'acc: 0/13')
(10, u'16.car_side', 'acc: 37/37')
(11, u'17.ceiling_fan', 'acc: 1/15')
(12, u'18.cellphone', 'acc: 16/18')
(13, u'19.chair', 'acc: 4/19')
(14, u'2.anchor', 'acc: 2/13')
(15, u'20.chandelier', 'acc: 27/33')
(16, u'21.cougar_body', 'acc: 0/15')
(17, u'22.cougar_face', 'acc: 8/21')
(18, u'23.crab', 'acc: 4/22')
(19, u'24.crayfish', 'acc: 3/21')
(20, u'25.crocodile', 'acc: 0/15')
(21, u'26.crocodile_head', 'acc: 1/16')
(22, u'27.cup', 'acc: 3/18')
(23, u'28.dalmatian', 'acc: 14/21')
(24, u'29.dollar_bill', 'acc: 14/16')
(25, u'3.ant', 'acc: 0/13')
(26, u'30.dolphin', 'acc: 5/20')
(27, u'31.dragonfly', 'acc: 12/21')
(28, u'32.electric_guitar', 'acc: 15/23')
(29, u'33.elephant', 'acc: 14/20')
(30, u'34.emu', 'acc: 0/16')
(31, u'35.euphonium', 'acc: 8/20')
(32, u'36.ewer', 'acc: 7/26')
(33, u'37.Faces', 'acc: 127/131')
(34, u'38.Faces_easy', 'acc: 127/131')
(35, u'39.ferry', 'acc: 10/21')
(36, u'4.BACKGROUND_Google', 'acc: 133/141')
(37, u'40.flamingo', 'acc: 9/21')
(38, u'41.flamingo_head', 'acc: 0/14')
(39, u'42.garfield', 'acc: 6/11')
(40, u'43.gerenuk', 'acc: 0/11')
(41, u'44.gramophone', 'acc: 4/16')
(42, u'45.grand_piano', 'acc: 24/30')
(43, u'46.hawksbill', 'acc: 17/30')
(44, u'47.headphone', 'acc: 3/13')
(45, u'48.hedgehog', 'acc: 4/17')
(46, u'49.helicopter', 'acc: 17/27')
(47, u'5.barrel', 'acc: 4/15')
(48, u'50.ibis', 'acc: 10/24')
(49, u'51.inline_skate', 'acc: 5/10')
(50, u'52.joshua_tree', 'acc: 11/20')
(51, u'53.kangaroo', 'acc: 15/26')
(52, u'54.ketch', 'acc: 26/35')
(53, u'55.lamp', 'acc: 8/19')
(54, u'56.laptop', 'acc: 12/25')
(55, u'57.Leopards', 'acc: 58/60')
(56, u'58.llama', 'acc: 9/24')
(57, u'59.lobster', 'acc: 0/13')
(58, u'6.bass', 'acc: 1/17')
(59, u'60.lotus', 'acc: 12/20')
(60, u'61.mandolin', 'acc: 2/13')
(61, u'62.mayfly', 'acc: 1/12')
(62, u'63.menorah', 'acc: 19/27')
(63, u'64.metronome', 'acc: 6/10')
(64, u'65.minaret', 'acc: 21/23')
(65, u'66.Motorbikes', 'acc: 237/240')
(66, u'67.nautilus', 'acc: 3/17')
(67, u'68.octopus', 'acc: 0/11')
(68, u'69.okapi', 'acc: 6/12')
(69, u'7.beaver', 'acc: 3/14')
(70, u'70.pagoda', 'acc: 15/15')
(71, u'71.panda', 'acc: 2/12')
(72, u'72.pigeon', 'acc: 4/14')
(73, u'73.pizza', 'acc: 4/16')
(74, u'74.platypus', 'acc: 1/11')
(75, u'75.pyramid', 'acc: 8/18')
(76, u'76.revolver', 'acc: 19/25')
(77, u'77.rhino', 'acc: 3/18')
(78, u'78.rooster', 'acc: 11/15')
(79, u'79.saxophone', 'acc: 0/12')
(80, u'8.binocular', 'acc: 6/10')
(81, u'80.schooner', 'acc: 14/19')
(82, u'81.scissors', 'acc: 4/12')
(83, u'82.scorpion', 'acc: 2/26')
(84, u'83.sea_horse', 'acc: 1/18')
(85, u'84.snoopy', 'acc: 3/11')
(86, u'85.soccer_ball', 'acc: 10/20')
(87, u'86.stapler', 'acc: 6/14')
(88, u'87.starfish', 'acc: 9/26')
(89, u'88.stegosaurus', 'acc: 4/18')
(90, u'89.stop_sign', 'acc: 9/20')
(91, u'9.bonsai', 'acc: 26/39')
(92, u'90.strawberry', 'acc: 3/11')
(93, u'91.sunflower', 'acc: 8/26')
(94, u'92.tick', 'acc: 9/15')
(95, u'93.trilobite', 'acc: 26/26')
(96, u'94.umbrella', 'acc: 13/23')
(97, u'95.watch', 'acc: 62/72')
(98, u'96.water_lilly', 'acc: 1/12')
(99, u'97.wheelchair', 'acc: 11/18')
(100, u'98.wild_cat', 'acc: 0/11')
(101, u'99.windsor_chair', 'acc: 8/17')

你可能感兴趣的:(图像分类)