The CIFAR-10 dataset used here was downloaded from the Kaggle platform (papers introducing new models often benchmark on the CIFAR datasets, though usually CIFAR-100, a 100-class classification task). We build a model locally to solve this 10-class problem, generate a CSV file of predictions on the test set, and upload it to Kaggle for validation, which returns a score for the test predictions.
The code here uses a basic convolutional neural network (conv + bn + max_pooling).
import matplotlib as mpl  # plotting library
import matplotlib.pyplot as plt
# the following line lets plots render inline in the notebook
%matplotlib inline
import numpy as np
import sklearn  # machine-learning algorithms library
import pandas as pd  # data-handling library
import os
import sys
import time
import tensorflow as tf
from tensorflow import keras  # use the keras bundled with tensorflow
#import keras  # standalone keras
print(tf.__version__)
print(sys.version_info)
for module in mpl, np, sklearn, pd, tf, keras:
    print(module.__name__, module.__version__)
2.0.0
sys.version_info(major=3, minor=6, micro=9, releaselevel='final', serial=0)
matplotlib 3.1.2
numpy 1.18.0
sklearn 0.21.3
pandas 0.25.3
tensorflow 2.0.0
tensorflow_core.keras 2.2.4-tf
# let GPU memory grow on demand instead of grabbing all VRAM up front
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
tf.config.experimental.set_memory_growth(physical_devices[0], True)
class_names = [
    'airplane',
    'automobile',
    'bird',
    'cat',
    'deer',
    'dog',
    'frog',
    'horse',
    'ship',
    'truck',
]
train_dir = "/home/galaxy/DeepLearning/DATASETS/cifar-10/train"
train_labels_file = "/home/galaxy/DeepLearning/DATASETS/cifar-10/trainLabels.csv"
test_dir = "/home/galaxy/DeepLearning/DATASETS/cifar-10/test"
test_csv_file = '/home/galaxy/DeepLearning/DATASETS/cifar-10/sampleSubmission.csv'
print(os.path.exists(train_dir))
print(os.path.exists(test_dir))
print(os.path.exists(train_labels_file))
# read the csv file to inspect its contents
#labels = pd.read_csv(train_labels_file, header=0)
#print(labels)
# The train folder contains all images directly (no per-class subfolders); each image id is paired with its
# label in the csv file, so we need to map every image file to its label ourselves.
def parse_csv_file(filepath, folder):
    results = []
    with open(filepath, 'r') as f:
        lines = f.readlines()[1:]  # skip the first line, the "id,label" header
        for line in lines:
            image_id, label_str = line.strip('\n').split(',')  # drop the newline, split on ','
            image_full_path = os.path.join(folder, image_id + '.png')
            results.append((image_full_path, label_str))
    return results
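As a side note, the same (filepath, label) pairs could be produced with pandas instead of manual line parsing; a minimal sketch (a hypothetical variant, equivalent in output to parse_csv_file, not what the rest of this post runs):
def parse_csv_file_pd(filepath, folder):
    # hypothetical pandas-based variant of parse_csv_file
    df = pd.read_csv(filepath)  # columns: id, label
    return [(os.path.join(folder, '%s.png' % image_id), label)
            for image_id, label in zip(df['id'], df['label'])]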
import pprint
train_labels_info = parse_csv_file(train_labels_file, train_dir)
test_labels_info = parse_csv_file(test_csv_file, test_dir)
pprint.pprint(train_labels_info[0:5])
pprint.pprint(test_labels_info[0:5])
print(len(train_labels_info), len(test_labels_info))
True
True
True
[('/home/galaxy/DeepLearning/DATASETS/cifar-10/train/1.png', 'frog'),
('/home/galaxy/DeepLearning/DATASETS/cifar-10/train/2.png', 'truck'),
('/home/galaxy/DeepLearning/DATASETS/cifar-10/train/3.png', 'truck'),
('/home/galaxy/DeepLearning/DATASETS/cifar-10/train/4.png', 'deer'),
('/home/galaxy/DeepLearning/DATASETS/cifar-10/train/5.png', 'automobile')]
[('/home/galaxy/DeepLearning/DATASETS/cifar-10/test/1.png', 'cat'),
('/home/galaxy/DeepLearning/DATASETS/cifar-10/test/2.png', 'cat'),
('/home/galaxy/DeepLearning/DATASETS/cifar-10/test/3.png', 'cat'),
('/home/galaxy/DeepLearning/DATASETS/cifar-10/test/4.png', 'cat'),
('/home/galaxy/DeepLearning/DATASETS/cifar-10/test/5.png', 'cat')]
50000 300000
#train_df = pd.DataFrame(train_labels_info)  # a DataFrame is a tabular data structure
# split train_df into a training set and a validation set
train_df = pd.DataFrame(train_labels_info[0:45000])
valid_df = pd.DataFrame(train_labels_info[45000:])
test_df = pd.DataFrame(test_labels_info)
# set the DataFrame column names
train_df.columns = ['filepath', 'class']
valid_df.columns = ['filepath', 'class']
test_df.columns = ['filepath', 'class']
print(train_df.head())
print(valid_df.head())
print(test_df.head())
filepath class
0 /home/galaxy/DeepLearning/DATASETS/cifar-10/tr... frog
1 /home/galaxy/DeepLearning/DATASETS/cifar-10/tr... truck
2 /home/galaxy/DeepLearning/DATASETS/cifar-10/tr... truck
3 /home/galaxy/DeepLearning/DATASETS/cifar-10/tr... deer
4 /home/galaxy/DeepLearning/DATASETS/cifar-10/tr... automobile
filepath class
0 /home/galaxy/DeepLearning/DATASETS/cifar-10/tr... horse
1 /home/galaxy/DeepLearning/DATASETS/cifar-10/tr... automobile
2 /home/galaxy/DeepLearning/DATASETS/cifar-10/tr... deer
3 /home/galaxy/DeepLearning/DATASETS/cifar-10/tr... automobile
4 /home/galaxy/DeepLearning/DATASETS/cifar-10/tr... airplane
filepath class
0 /home/galaxy/DeepLearning/DATASETS/cifar-10/te... cat
1 /home/galaxy/DeepLearning/DATASETS/cifar-10/te... cat
2 /home/galaxy/DeepLearning/DATASETS/cifar-10/te... cat
3 /home/galaxy/DeepLearning/DATASETS/cifar-10/te... cat
4 /home/galaxy/DeepLearning/DATASETS/cifar-10/te... cat
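The slice above simply takes the first 45000 rows as training data, which works because trainLabels.csv lists the images in no particular class order. A shuffled, stratified split is a safer default; a sketch using sklearn (an alternative, not what produced the outputs above):
from sklearn.model_selection import train_test_split

full_df = pd.DataFrame(train_labels_info, columns=['filepath', 'class'])
train_df, valid_df = train_test_split(full_df,
                                      test_size=5000,  # same 45000/5000 split as above
                                      random_state=7,
                                      stratify=full_df['class'])  # keep class proportions equal in both splits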
##################################
# The images here are not organized into per-class subfolders; everything sits in one folder,
# and a DataFrame maps each image file to its label, so we use flow_from_dataframe
# (rather than flow_from_directory).
##################################
#resnet50 expects 224x224 input images
#height = 224
#width = 224
height = 32  # height images will be resized to
width = 32   # width images will be resized to
channels = 3  # number of image channels
batch_size = 32
num_classes = 10
##########------------ training data ------------##########
# Initialize a generator for the training data.
# It processes the image data: normalization plus augmentation such as rotation and flipping.
train_datagen = keras.preprocessing.image.ImageDataGenerator(
    #preprocessing_function = keras.applications.resnet50.preprocess_input,  # resnet50's own preprocessing function, normalizes images to -1~1
    # if the preprocessing_function line above is used, rescale is not needed
    rescale = 1./255,  # scaling factor; divide by 255 because pixel values range from 0 to 255
    rotation_range = 40,  # random rotation range in degrees (-40 ~ 40)
    width_shift_range = 0.2,  # a value < 1 is the fraction of total width to shift by; a value >= 1 is a shift in pixels
    height_shift_range = 0.2,  # same convention, applied vertically
    shear_range = 0.2,  # shear intensity
    zoom_range = 0.2,  # zoom intensity
    horizontal_flip = True,  # random horizontal flips
    fill_mode = 'nearest',  # how newly exposed pixels are filled in
)
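It can be worth eyeballing what these augmentations actually do before training. A minimal sketch (an illustrative addition, not part of the original pipeline; it assumes train_df from above and uses the first training image):
img = keras.preprocessing.image.load_img(train_df['filepath'][0], target_size=(height, width))
x = np.expand_dims(keras.preprocessing.image.img_to_array(img), axis=0)  # shape (1, 32, 32, 3)
aug_iter = train_datagen.flow(x, batch_size=1)  # yields randomly augmented copies of the image
fig, axes = plt.subplots(1, 4, figsize=(8, 2))
for ax in axes:
    ax.imshow(next(aug_iter)[0])  # rescale has already mapped values into [0, 1]
    ax.axis('off')
plt.show()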
# Now read the images listed in the DataFrame and apply the augmentation configured above.
train_generator = train_datagen.flow_from_dataframe(train_df,
                                                    directory=train_dir,
                                                    x_col='filepath',
                                                    y_col='class',
                                                    classes=class_names,
                                                    target_size=(height, width),
                                                    batch_size=batch_size,
                                                    seed=7,
                                                    shuffle=True,
                                                    class_mode='sparse')
'''
train_generator = train_datagen.flow_from_directory(train_dir,
                                                    target_size = (height, width),  # size images in the directory are resized to
                                                    batch_size = batch_size,
                                                    seed = 7,  # random seed for shuffling/transformations; any number will do
                                                    shuffle = True,  # False -> sort data alphanumerically; True -> shuffle it
                                                    class_mode = "categorical",  # determines the format of the returned label arrays
                                                    #classes = ...  # describes the mapping from folder names to output labels
                                                    )
'''
# classes: optional list of subfolder names, e.g. ['dogs', 'cats']; defaults to None. If omitted, the class list is
# inferred from the subfolder names/structure under directory; each subfolder becomes one class (classes are mapped
# to label indices in alphabetical order). The generator's .class_indices attribute gives the folder-name-to-index dict.
# Use the generator's .class_indices attribute to get the default label mapping, i.e. the name-to-class-index dict.
print(train_generator.class_indices)
##########------------ validation data ------------##########
# Initialize a generator for the validation data.
# The validation set should NOT be augmented; keep only the rescaling, otherwise the value
# distributions of the training and validation sets would differ.
valid_datagen = keras.preprocessing.image.ImageDataGenerator(
    #preprocessing_function = keras.applications.resnet50.preprocess_input,  # resnet50's preprocessing amounts to normalization, so rescale would be unnecessary
    rescale = 1./255,  # scaling factor; divide by 255 because pixel values range from 0 to 255
)
# Read the validation images listed in the DataFrame (rescaling only, no augmentation).
valid_generator = valid_datagen.flow_from_dataframe(valid_df,
                                                    directory=train_dir,
                                                    x_col='filepath',
                                                    y_col='class',
                                                    classes=class_names,
                                                    target_size=(height, width),
                                                    batch_size=batch_size,
                                                    seed=7,
                                                    shuffle=True,
                                                    class_mode='sparse')
'''
valid_generator = valid_datagen.flow_from_directory(valid_dir,
                                                    target_size = (height, width),  # size images in the directory are resized to
                                                    batch_size = batch_size,
                                                    seed = 7,  # random seed; any number will do
                                                    shuffle = False,  # no training here, so no need to shuffle
                                                    class_mode = "categorical",  # determines the format of the returned label arrays
                                                    )
'''
# .class_indices gives the default label mapping, i.e. the name-to-class-index dict
print(valid_generator.class_indices)
train_num = train_generator.samples
valid_num = valid_generator.samples
print(train_num, valid_num)
Found 45000 validated image filenames belonging to 10 classes.
{'airplane': 0, 'automobile': 1, 'bird': 2, 'cat': 3, 'deer': 4, 'dog': 5, 'frog': 6, 'horse': 7, 'ship': 8, 'truck': 9}
Found 5000 validated image filenames belonging to 10 classes.
{'airplane': 0, 'automobile': 1, 'bird': 2, 'cat': 3, 'deer': 4, 'dog': 5, 'frog': 6, 'horse': 7, 'ship': 8, 'truck': 9}
45000 5000
for i in range(1):
    x, y = train_generator.next()
    print(x.shape, y.shape)
    print(y)
# Because class_mode is set to 'sparse', the labels come back as a 1D vector of integer class indices
# (e.g. 2), not 2D one-hot vectors (one-hot would encode 2 as [0, 0, 1, ...]).
(32, 32, 32, 3) (32,)
[2. 1. 4. 4. 4. 4. 6. 5. 2. 8. 4. 6. 6. 3. 7. 1. 7. 2. 8. 8. 3. 0. 5. 3.
9. 1. 4. 5. 6. 7. 9. 2.]
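If 2D one-hot labels were ever needed (e.g. to pair with categorical_crossentropy later), the sparse integer labels can be converted; a minimal sketch using the batch y fetched above:
y_onehot = keras.utils.to_categorical(y, num_classes=num_classes)
print(y_onehot.shape)  # (32, 10): each integer label becomes a one-hot row
print(y_onehot[0])     # e.g. label 2.0 -> [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.]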
# transfer learning with resnet50
'''
#1. Here the whole ResNet50 is treated as a single layer, and only the final Dense layer is trainable.
resnet50_fine_tune = keras.models.Sequential([
    keras.applications.ResNet50(include_top=False,  # include_top: whether to keep the top fully-connected layers; False because we define our own softmax
                                pooling='avg',  # 'avg' -> global average pooling, 'max' -> global max pooling
                                weights='imagenet',  # None -> random init (no pretrained weights); 'imagenet' -> load pretrained weights
                                ),
    keras.layers.Dense(num_classes, activation='softmax'),
])
resnet50_fine_tune.layers[0].trainable = False  # freeze the ResNet50 layer's parameters, since weights='imagenet'

#2. Here the last few layers of ResNet50 are also trainable; the model summary would show a much larger Trainable params count.
resnet50 = keras.applications.ResNet50(include_top=False, pooling='avg', weights='imagenet')
for layer in resnet50.layers[0:-5]:  # freeze every layer except the last five
    layer.trainable = False
resnet50_fine_tune = keras.models.Sequential([
    resnet50,
    keras.layers.Dense(num_classes, activation='softmax'),
])
'''
model = keras.models.Sequential([
    keras.layers.Conv2D(filters=128, kernel_size=3, padding="same", activation="relu", input_shape=(width, height, channels)),
    keras.layers.BatchNormalization(),
    keras.layers.Conv2D(filters=128, kernel_size=3, padding="same", activation="relu"),
    keras.layers.BatchNormalization(),
    keras.layers.MaxPool2D(pool_size=2),
    keras.layers.Conv2D(filters=256, kernel_size=3, padding="same", activation="relu"),
    keras.layers.BatchNormalization(),
    keras.layers.Conv2D(filters=256, kernel_size=3, padding="same", activation="relu"),
    keras.layers.BatchNormalization(),
    keras.layers.MaxPool2D(pool_size=2),
    keras.layers.Conv2D(filters=512, kernel_size=3, padding="same", activation="relu"),
    keras.layers.BatchNormalization(),
    keras.layers.Conv2D(filters=512, kernel_size=3, padding="same", activation="relu"),
    keras.layers.BatchNormalization(),
    keras.layers.MaxPool2D(pool_size=2),
    keras.layers.Flatten(),
    keras.layers.Dense(512, activation="relu"),
    keras.layers.Dense(num_classes, activation="softmax"),
])
# The choice between sparse_categorical_crossentropy and categorical_crossentropy depends on the label format set above:
# if y is a 2D one-hot encoding, use categorical_crossentropy;
# if y is a 1D integer label, use sparse_categorical_crossentropy.
# (See the earlier "tensorflow2 -- fashion_mnist classification" post for details.)
# metrics selects accuracy as the evaluation metric
model.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 128) 3584
_________________________________________________________________
batch_normalization (BatchNo (None, 32, 32, 128) 512
_________________________________________________________________
conv2d_1 (Conv2D) (None, 32, 32, 128) 147584
_________________________________________________________________
batch_normalization_1 (Batch (None, 32, 32, 128) 512
...
max_pooling2d_2 (MaxPooling2 (None, 4, 4, 512) 0
_________________________________________________________________
flatten (Flatten) (None, 8192) 0
_________________________________________________________________
dense (Dense) (None, 512) 4194816
_________________________________________________________________
dense_1 (Dense) (None, 10) 5130
=================================================================
Total params: 8,783,498
Trainable params: 8,779,914
Non-trainable params: 3,584
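The equivalence of the two losses on matching label formats is easy to verify numerically; a small sketch with made-up values:
y_sparse = np.array([2, 0])                                     # 1D integer labels
y_onehot = keras.utils.to_categorical(y_sparse, num_classes=3)  # the same labels, one-hot encoded
y_pred = np.array([[0.1, 0.2, 0.7],
                   [0.8, 0.1, 0.1]])
print(keras.losses.sparse_categorical_crossentropy(y_sparse, y_pred).numpy())
print(keras.losses.categorical_crossentropy(y_onehot, y_pred).numpy())  # identical values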
import shutil
callback_dir = "./callback_cifar-10"
if os.path.exists(callback_dir):
    shutil.rmtree(callback_dir)
os.mkdir(callback_dir)
output_model_file = os.path.join(callback_dir, "cifar10_model.h5")  # the .h5 model file created inside the callback directory
callbacks = [
    keras.callbacks.TensorBoard(callback_dir),
    keras.callbacks.ModelCheckpoint(output_model_file, save_best_only=True),
    keras.callbacks.EarlyStopping(patience=5, min_delta=1e-3),
]
epochs = 20  # with fine-tuning, good results do not require many epochs
# fit_generator is used because the data comes from an ImageDataGenerator
history = model.fit_generator(train_generator,  # steps_per_epoch: number of steps per epoch (each step feeds one batch)
                              steps_per_epoch = train_num // batch_size,
                              epochs = epochs,
                              validation_data = valid_generator,
                              validation_steps = valid_num // batch_size,
                              callbacks = callbacks,
                              )
'''
history = resnet50_fine_tune.fit_generator(train_generator,  # steps_per_epoch: number of steps per epoch (each step feeds one batch)
                                           steps_per_epoch = train_num // batch_size,
                                           epochs = epochs,
                                           validation_data = valid_generator,
                                           validation_steps = valid_num // batch_size,
                                           callbacks = callbacks,
                                           )
'''
# If the printed val_accuracy stays flat instead of gradually increasing, the selu activation may be the cause; try switching the activation to relu.
Epoch 1/20
1406/1406 [==============================] - 110s 79ms/step - loss: 2.2732 - accuracy: 0.2828 - val_loss: 1.8249 - val_accuracy: 0.3488
Epoch 2/20
1406/1406 [==============================] - 115s 82ms/step - loss: 1.5933 - accuracy: 0.4090 - val_loss: 1.6994 - val_accuracy: 0.4183
Epoch 3/20
1406/1406 [==============================] - 118s 84ms/step - loss: 1.3849 - accuracy: 0.4980 - val_loss: 1.6454 - val_accuracy: 0.4351
Epoch 4/20
1406/1406 [==============================] - 118s 84ms/step - loss: 1.2231 - accuracy: 0.5648 - val_loss: 1.1681 - val_accuracy: 0.6034
...
1406/1406 [==============================] - 107s 76ms/step - loss: 0.5279 - accuracy: 0.8201 - val_loss: 0.6024 - val_accuracy: 0.8271
Epoch 19/20
1406/1406 [==============================] - 107s 76ms/step - loss: 0.5139 - accuracy: 0.8255 - val_loss: 0.5964 - val_accuracy: 0.8165
Epoch 20/20
1406/1406 [==============================] - 107s 76ms/step - loss: 0.4912 - accuracy: 0.8327 - val_loss: 0.5760 - val_accuracy: 0.8251
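matplotlib was imported at the top precisely for this kind of check; a minimal sketch that plots the per-epoch curves stored in history (a plot_learning_curves helper in the style of the earlier posts in this series):
def plot_learning_curves(history):
    pd.DataFrame(history.history).plot(figsize=(8, 5))  # loss/accuracy/val_loss/val_accuracy per epoch
    plt.grid(True)
    plt.gca().set_ylim(0, 2)
    plt.show()

plot_learning_curves(history)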
test_datagen = keras.preprocessing.image.ImageDataGenerator(
    #preprocessing_function = keras.applications.resnet50.preprocess_input,  # resnet50's preprocessing amounts to normalization, so rescale would be unnecessary
    rescale = 1./255,  # scaling factor; divide by 255 because pixel values range from 0 to 255
)
# Read the test images listed in the DataFrame (rescaling only; shuffle=False keeps predictions in file order).
test_generator = test_datagen.flow_from_dataframe(test_df,
                                                  directory=test_dir,
                                                  x_col='filepath',
                                                  y_col='class',
                                                  classes=class_names,
                                                  target_size=(height, width),
                                                  batch_size=batch_size,
                                                  seed=7,
                                                  shuffle=False,
                                                  class_mode='sparse')
test_num = test_generator.samples
print(test_num)
Found 300000 validated image filenames belonging to 10 classes.
300000
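One note before predicting: model currently holds the last-epoch weights, while ModelCheckpoint(save_best_only=True) saved the weights with the best val_loss to output_model_file. Optionally, that checkpoint can be reloaded first (a sketch):
# optional: restore the best checkpoint saved during training before predicting
model = keras.models.load_model(output_model_file)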
test_predict = model.predict_generator(test_generator,
                                       workers=10,  # degree of parallelism
                                       use_multiprocessing=True,  # True -> use 10 processes; False -> use 10 threads
                                       )
# print(test_predict)
print(test_predict.shape)  # test_predict is a 300000 x 10 matrix
print(test_predict[0:5])  # print the first 5 rows; each row is one sample's predicted probability distribution over the 10 classes
(300000, 10)
[[9.54611506e-03 1.80074260e-01 2.62391381e-03 5.42382039e-02
1.09696174e-02 3.63456691e-03 7.73755368e-03 8.62432702e-04
4.05505439e-03 7.26258218e-01]
[9.30483162e-01 4.34813369e-03 5.24184527e-03 3.60551313e-03
9.16590355e-03 2.50156707e-04 1.25574612e-03 2.26421675e-04
3.19052041e-02 1.35178575e-02]
[7.67244501e-07 9.99046147e-01 2.93465563e-09 1.32095090e-06
1.09673610e-10 1.53255339e-10 2.51321115e-08 2.06194639e-09
5.29228110e-08 9.51812486e-04]
[1.32097994e-04 5.31877231e-06 2.55758619e-06 3.46006732e-06
1.49765199e-07 8.87896690e-08 6.87345619e-07 2.77201764e-07
9.99815166e-01 4.02240330e-05]
[9.64022636e-01 3.00924119e-04 4.65835538e-03 1.33373905e-02
1.23353524e-03 5.36854519e-03 2.72046030e-03 9.04678775e-04
6.65733730e-03 7.96015956e-04]]
# Take the index of the highest probability to get each sample's predicted class.
# axis for a 2D array: axis=0 takes the max down each column; axis=1 takes the max across each row,
# so axis=1 yields one argmax per sample here.
test_predict_class_index = np.argmax(test_predict, axis=1)
print(test_predict_class_index[0:5])
[9 0 1 8 0]
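A tiny demo of the axis semantics with made-up values:
m = np.array([[0.1, 0.7, 0.2],
              [0.9, 0.05, 0.05]])
print(np.argmax(m, axis=1))  # [1 0] -> one argmax per row, i.e. per sample
print(np.argmax(m, axis=0))  # [1 0 0] -> one argmax per column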
test_predict_class = [class_names[t] for t in test_predict_class_index]
print(test_predict_class[0:5])
#print('This is a ', test_predict_class[0])
['truck', 'airplane', 'automobile', 'ship', 'airplane']
# Generate the csv file to upload to Kaggle.
def generate_submissions(filename, predict_class):
    with open(filename, 'w') as f:
        f.write('id,label\n')
        for i in range(len(predict_class)):
            f.write('%d,%s\n' % (i + 1, predict_class[i]))

output_file = '/home/galaxy/DeepLearning/DATASETS/cifar-10/submission.csv'
generate_submissions(output_file, test_predict_class)
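Equivalently, pandas could write the submission file directly (a sketch; produces the same id,label layout):
submission_df = pd.DataFrame({'id': np.arange(1, len(test_predict_class) + 1),
                              'label': test_predict_class})
submission_df.to_csv(output_file, index=False)  # writes the 'id,label' header, then one row per test image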