The IDE and Python setup used here are PyCharm + Anaconda. For how to install and configure the two, just look it up yourself.
1. Install the Anaconda3******.exe file
2. Install the pycharm*******.exe file
3. In the Start menu at the bottom-left of the screen, find the installed Anaconda folder and open the Anaconda Prompt terminal inside it
4. Create an environment
5. In the opened terminal, enter conda create -n python35 python=3.5 to create the environment. PS: I like Python 3.5
6. Once created, activate the environment with activate python35; if that fails, look up a fix yourself
7. pip-install the libraries below; if you are not sure how, for example: pip install tensorflow (the latest version is fine). A quick import check is sketched after the list.
tensorflow
six >=1.8.0
Augmentor
numpy
opencv-python
pandas
matplotlib
Pillow
xlwt
xlrd
sklearn (the pip package name is scikit-learn)
tensorboardX
torchvision==0.2.2
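Once everything is installed, a quick way to confirm the environment works is to import each package once and print the TensorFlow version. This is just a sanity-check snippet of mine, not part of the original setup:
# run inside the activated python35 environment
import tensorflow, six, numpy, cv2, pandas, matplotlib, PIL, xlwt, xlrd, sklearn, tensorboardX, torchvision, Augmentor
print(tensorflow.__version__)  # if every import succeeds, the environment is ready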
Label the images any way you like: just assign each class a fixed RGB pixel value. Tools such as labelme or Photoshop both work. For this article the label images must be of the same type as the original images, i.e. 3-channel RGB images.
Example: with Photoshop, label the eyes as [66, 0, 88].
Original images and label images must have one-to-one matching file names and be placed in separate folders.
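Before training, it is worth checking that a label image really contains only the RGB values you assigned. The little snippet below is my own check, not part of the original code, and the path is a placeholder:
import numpy as np
import cv2 as cv

label = cv.imread(r"*\0train\labels\your_label.png")[:, :, ::-1]  # read one label image and convert BGR -> RGB
colors = np.unique(label.reshape(-1, 3), axis=0)                  # every distinct RGB value in the image
print(colors)  # should list only the colors you assigned, e.g. [0, 0, 0] and [66, 0, 88]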
Now straight to the code!!!
Import the necessary dependencies
"""
Author: W_maoxian
Begin Date: 20201009
End Date: 20201010
"""
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
import cv2 as cv
import os
import collections
from densenet import DenseNet
import matplotlib.pyplot as plt
import datetime
Read images with OpenCV
"""
Author: W_maoxian
Begin Date: 20201009
End Date: 20201010
"""
def load_image_from_directory(images_dir, img_size, isLabel=False, classes=None, isOneHot=False, suffix='.png', dtype=np.float32):
"""
从数据集目录中加载图像数组
:param images_dir: 数据集目录,原图或标签,该文件夹下直接是图像,三通道,24位深度
:param img_size: 网络要求的图像大小
:param isLabel: 是否为标签, 测试集的标签由于只是显示作用,不参与训练,因此保持默认False即可
:param classes: 类,与isLabel相匹配,isLabel=True时,必须赋值
:param suffix: 图像后缀
:param dtype: 图像数据类型
:return: 返回图像数组,[图像个数,高,宽,通道数]
"""
    images_path = []
    for fname in os.listdir(images_dir):
        if fname.endswith(suffix) and not fname.startswith('.'):
            images_path.append(os.path.join(images_dir, fname))  # append the absolute image path to images_path
    images_path = sorted(images_path)  # sort the paths
    images = []  # empty list for the images
    for i, path in enumerate(images_path):
        img = cv.imdecode(np.fromfile(path, dtype=np.uint8), cv.IMREAD_COLOR)  # read the image; also works for paths with non-ASCII characters
        # img = cv.imread(path)  # read the image; no non-ASCII characters allowed in the path
        if img.shape[:2] != img_size:  # use != rather than "is not": compare values, not identity
            img = cv.resize(img, dsize=img_size, interpolation=cv.INTER_NEAREST)  # resize when the size does not match
        img = img[:, :, ::-1]  # swap channels: BGR -> RGB
        if isLabel:
            if isOneHot:
                newImg = np.zeros(img.shape[:2] + (len(classes),), dtype=dtype)  # empty array that holds the one-hot label
                for j, value in enumerate(classes.values()):
                    newImg[np.bitwise_and(np.bitwise_and(img[:, :, 0] == value[0], img[:, :, 1] == value[1]), img[:, :, 2] == value[2]), j] = 1  # convert the label to one-hot form
                img = newImg
            else:
                for j, value in enumerate(classes.values()):
                    img[np.bitwise_and(np.bitwise_and(img[:, :, 0] == value[0], img[:, :, 1] == value[1]), img[:, :, 2] == value[2])] = j  # replace label pixels with the class index
                img = np.expand_dims(img[:, :, 0], axis=-1)  # keep a single channel so the label shape is [h, w, 1]
        images.append(img)  # collect the image
    images = np.array(images, dtype=dtype)  # convert to an array of the requested dtype
    return images, images_path
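A quick usage sketch of this loader (the directory paths are placeholders and the two-class dict is only for illustration; the real classes dict appears later in main):
classes = collections.OrderedDict([('background', [0, 0, 0]), ('obj4', [66, 0, 88])])
imgs, paths = load_image_from_directory(images_dir=r"*\0train\imgs", img_size=(512, 512), suffix='.png')
labels, _ = load_image_from_directory(images_dir=r"*\0train\labels", img_size=(512, 512),
                                      isLabel=True, isOneHot=True, classes=classes, suffix='.png')
print(imgs.shape)    # (num_images, 512, 512, 3)
print(labels.shape)  # (num_images, 512, 512, 2), one channel per class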
Plot the accuracy and loss curves
"""
Author: W_maoxian
Begin Date: 20201009
End Date: 20201010
"""
def draw_curve_picture(history, save_path, time):
"""
:param history: 模型参数数据
:param save_path: 曲线图像保存路径
:param time: 当前程序运行时间
:return: None
"""
# 保存并绘制loss,acc
acc = history.history['accuracy'] # 获取训练精度数组
val_acc = history.history['val_accuracy'] # 获取验证精度数组
loss = history.history['loss'] # 获取训练损失数组
val_loss = history.history['val_loss'] # 获取验证精度数组
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'r', label='Training acc') # 绘图
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy') # 曲线名字
plt.legend()
plt.savefig(r"{0}\accuracy_%s.png".format(save_path) % time, dpi=300) # 保存曲线为图像
plt.figure()
plt.plot(epochs, loss, 'r', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.savefig(r"{0}\loss_%s.png".format(save_path) % time, dpi=300)
Post-process the network predictions
"""
Author: W_maoxian
Begin Date: 20201009
End Date: 20201010
"""
def mask2gray(mask, input_type=None):
"""
:param mask: gt的shape是[h,w], pred的shape是[h,w,c]
:param input_type: pred需要多处理一步
:return: 采用最大最小值归一化返回灰度图,单通道
"""
if input_type is 'pred':
mask = np.argmax(mask, axis=-1) # 取指定axis通道上最大值得索引
mask = mask.astype(dtype=np.uint8) # 改变数据类型
rst = mask.copy() # 复制数据
cv.normalize(mask, rst, alpha=0, beta=255, norm_type=cv.NORM_MINMAX) # 归一化至0-255
return rst
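If you would rather get the predictions back in the original label colors instead of a normalized grayscale image, a small sketch like the one below works. mask2color is my own helper, not part of the original code; it reuses the classes OrderedDict defined in main:
def mask2color(pred, classes):
    """Map a [h, w, c] softmax prediction back to an RGB image using the class palette."""
    idx = np.argmax(pred, axis=-1)                      # [h, w] class indices
    color = np.zeros(idx.shape + (3,), dtype=np.uint8)  # empty RGB image
    for j, value in enumerate(classes.values()):
        color[idx == j] = value                         # paint every pixel of class j with its RGB value
    return color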
Now put it all together!
"""
Author: W_maoxian
Begin Date: 20201009
End Date: 20201010
"""
def main(save_model_path, model_name, model_struct_name, batch_size, img_size, epoch, test_model=True, single_result=False, fine_tune=False):
"""
:param save_model_path: 模型保存路径
:param model_name: 模型名字
:param model_struct_name: 模型结构图保存路径
:param batch_size: 批大小,同时训练batch_size张图像
:param test_model: 是否测试模型, False为训练模型
:param img_size: 图像尺寸
:param epoch: 整个数据集迭代次数
:param single_result: 是否只保存测试图像,False即保存原图和测试图像共有的对比图
:param fine_tune: 是否微调继续训练模型
:return:None
"""
classes = collections.OrderedDict(
[('background', [0, 0, 0]), ('obj1', [80, 160, 160]), ('obj2', [40, 80, 80]), ('obj3', [120, 240, 240]),
('obj4', [66, 0, 88])]) # 目标名字及对应标签像素值
    train_imgs, _ = load_image_from_directory(
        images_dir=r"*\0train\imgs", img_size=img_size,
        suffix='.png')
    train_labels, _ = load_image_from_directory(
        images_dir=r"*\0train\labels", img_size=img_size,
        isLabel=True, isOneHot=True, classes=classes, suffix='.png')
    valid_imgs, _ = load_image_from_directory(
        images_dir=r"*\1valid\imgs", img_size=img_size,
        suffix='.png')
    valid_labels, _ = load_image_from_directory(
        images_dir=r"*\1valid\labels", img_size=img_size,
        isLabel=True, isOneHot=True, classes=classes, suffix='.png')
    if test_model:
        print('---------------------------------loading model------------------------------')
        model = keras.models.load_model(filepath=r'{0}\{1}'.format(save_model_path, model_name))  # load the model
        print('---------------------------------model loaded------------------------------')
        test_images, test_images_path = load_image_from_directory(
            images_dir=r"*\2test\imgs",
            img_size=img_size,
            suffix='.tiff')
        test_labels, _ = load_image_from_directory(
            images_dir=r"*\labels",
            img_size=img_size,
            suffix='.png')
        print('---------------------------------predicting---------------------------')
        preds = model.predict(test_images, batch_size=batch_size)  # model prediction
        print('---------------------------------prediction done, saving images--------------------------------')
        for i in range(test_images.shape[0]):
            pred = preds[i]
            pred = mask2gray(pred, input_type='pred')
            if single_result:
                cv.imwrite(
                    r"{0}\{1}".format(save_model_path, test_images_path[i].split('\\')[-1]).split('.tiff')[0] + '.png',
                    cv.cvtColor(pred, cv.COLOR_GRAY2RGB))  # save the image
            else:
                origin_img = test_images[i]
                save_img = np.zeros([img_size[0], img_size[1] * 2, 3], dtype=np.uint8)  # empty array that holds prediction and original side by side
                save_img[:, 0:img_size[0], :] = cv.cvtColor(pred, cv.COLOR_GRAY2RGB)  # left half: prediction
                save_img[:, img_size[0] * 1:img_size[1] * 2, :] = cv.cvtColor(origin_img, cv.COLOR_BGR2RGB)  # right half: original image
                cv.imwrite(
                    r"{0}\{1}".format(save_model_path, test_images_path[i].split('\\')[-1]).split('.tiff')[0] + '.png',
                    save_img)  # save the image
            print('saved image %s' % i)
    else:
        if not os.path.exists(save_model_path):  # create the folder if it does not exist
            os.mkdir(save_model_path)
        if fine_tune:
            model = keras.models.load_model(filepath=r'{0}\{1}'.format(save_model_path, model_name))
        else:
            model = DenseNet(img_size=img_size, num_classes=len(classes))  # build the model
        model.summary()  # print the model parameter summary
        # keras.utils.plot_model(model, show_shapes=True, dpi=96, to_file=r'{0}\{1}'.format(save_model_path, model_struct_name))  # save the model structure plot; how to get plot_model working is left for you to look up
        model.compile(optimizer=keras.optimizers.Adam(), loss=keras.losses.categorical_crossentropy,
                      metrics=['accuracy'])  # compile the model
        history = model.fit(train_imgs, train_labels, batch_size=batch_size, epochs=epoch,
                            callbacks=[keras.callbacks.ModelCheckpoint(r'{0}\{1}'.format(save_model_path, model_name),
                                                                       save_best_only=True, monitor='loss'),
                                       keras.callbacks.EarlyStopping(monitor='val_loss', patience=6)],
                            validation_data=(valid_imgs, valid_labels),
                            steps_per_epoch=train_imgs.shape[0] // batch_size,
                            validation_batch_size=batch_size,
                            validation_freq=1)  # fit the data
        time = datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d-%H%M%S')
        draw_curve_picture(history=history, save_path=save_model_path, time=time)
A DenseNet variant with a U-shaped structure
"""
Author: W_maoxian
Begin Date: 20201009
End Date: 20201010
"""
# import the dependencies
from tensorflow import keras
from tensorflow.keras import layers
def encoder(x, unit_num, filter):
"""
编码器
:param x: 多维矩阵, [batch_size,w,h,filter]
:param filter: 卷积核数量
:param unit_num: 卷积操作循环次数
:return x: 返回值,多维矩阵, [batch_size,w,h,filter]
"""
concatlayer = [] # 创建空数组,用于保存卷积结果
for i in range(unit_num): # 卷积操作次数
x = layers.SeparableConv2D(filters=filter, kernel_size=3, strides=1, padding='same')(x) # 可分离卷积操作
x = layers.BatchNormalization()(x) # BN层
x = layers.Activation('relu')(x) # 激活层
if i > 0: # 从第二次卷积操作开始叠加前一次的卷积结果至下一次的输入
for layer in concatlayer:
x = layers.Concatenate()([x, layer])
concatlayer.append(x) # 保存上一次卷积结果
x = layers.SeparableConv2D(filters=filter, kernel_size=1, strides=1, padding='same')(x)
x = layers.MaxPooling2D(pool_size=2, strides=2, padding='same')(x) # 最大池化
return x
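A quick shape check I like to run on the encoder (not part of the original code) to confirm that each call halves the spatial size:
inp = keras.Input(shape=(512, 512, 3))
out = encoder(inp, unit_num=3, filter=32)
print(keras.Model(inp, out).output_shape)  # (None, 256, 256, 32)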
def decoder(x, concat_layer, unit_num, filter):
"""
解码器
:param x: 多维矩阵, [batch_size,w,h,filter]
:param filter: 卷积核数量
:param unit_num: 卷积操作循环次数
:param concat_layer: 待融合的特征层,来自编码器,多维矩阵, [batch_size,w,h,filter]
:return x: 返回值,多维矩阵, [batch_size,w,h,filter]
"""
x = layers.Conv2DTranspose(filters=filter, kernel_size=3, strides=2, padding='same')(x)
concatlayer = []
for i in range(unit_num):
x = layers.SeparableConv2D(filters=filter, kernel_size=3, strides=1, padding='same')(x)
x = layers.BatchNormalization()(x)
x = layers.Activation('relu')(x)
if i > 0:
for layer in concatlayer:
x = layers.Concatenate()([x, layer])
concatlayer.append(x)
x = layers.SeparableConv2D(filters=filter, kernel_size=1, strides=1, padding='same')(x)
    x = layers.Concatenate()([x, concat_layer])  # fuse with the encoder feature map
return x
def DenseNet(img_size, num_classes):
"""
构建网络
:param img_size: 输入图像尺寸
:param num_classes: 类别数
:return model:返回值,网络模型
"""
inputs = keras.Input(shape=img_size + (3,)) # 构建输入
block1 = encoder(inputs, 3, 32)
block2 = encoder(block1, 3, 64)
block3 = encoder(block2, 3, 128)
block4 = encoder(block3, 3, 256)
block5 = encoder(block4, 3, 512)
block6 = decoder(block5, block4, 3, 256)
block7 = decoder(block6, block3, 3, 128)
block8 = decoder(block7, block2, 3, 64)
block9 = decoder(block8, block1, 3, 32)
x = layers.Conv2DTranspose(filters=32, kernel_size=3, strides=2, padding='same')(block9)
x = layers.BatchNormalization()(x)
x = layers.Activation('relu')(x)
    outputs = layers.SeparableConv2D(num_classes, 3, activation='softmax', padding='same')(x)  # reduce the output channels to num_classes
    model = keras.Model(inputs, outputs)  # build the model
return model
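And a last sanity check on the assembled model (again just a sketch of mine, using the img_size and the five classes from main):
model = DenseNet(img_size=(512, 512), num_classes=5)
print(model.output_shape)  # (None, 512, 512, 5): one softmax channel per class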
"""
Author: W_maoxian
Begin Date: 20201009
End Date: 20201010
"""
if __name__ == '__main__':
main(save_model_path=r"*/save_models",
model_name='model.h5',
model_struct_name='model.png',
batch_size=4,
img_size=(512, 512),
epoch=100,
test_model=False,
single_result=False,
fine_tune=False)
Change the paths to the corresponding local folders on your own machine.
If you still can't get your model trained after all this, I honestly have nothing more to say.
Likes, likes, likes!!!
Dang!