UNet family networks: UNet++ (also covering Nestnet, U-Net, PSPNet, etc.)

I recently reproduced UNet++ and found that the project bundles quite a few networks, so I recommend it. GitHub link: https://github.com/MrGiovanni/UNetPlusPlus/tree/master/segmentation_models
There is another repository as well: https://github.com/ShawnBIT/UNet-family
which collects many networks from the U-Net family and is worth a look.
UNet++ is fairly distinctive and its idea is quite novel, so I reproduced it first; I have not tuned it to a good result yet (holiday is coming!).
The project explains how to train, but the data-loading part is not provided. Data loading in Keras is simple, so below I share the data loading, training, and prediction scripts; everything else can just be downloaded from the repo.
The code here is for binary segmentation; for multi-class segmentation the data loading needs some changes, and from what I can see the models do support multiple classes (see the sketch after the training script below).
Data loading: data.py, which is also taken from another GitHub repo. Training only uses its generator function, and the prediction script also uses one of its functions, as you will see later.
The data directory layout is reflected in the paths used in my data-loading code; it is simple, just make sure each image and its label share the same filename, and the label pixel values are 0 and 1.
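For reference, the paths in the scripts below correspond to a layout like this (road_test and the imgs/labels folder names are just my example):

road_test/
    train/
        imgs/    # training images, e.g. 0001.png
        labels/  # masks with the same filenames, pixel values 0 and 1
    val/
        imgs/
        labels/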

from __future__ import print_function
from keras.preprocessing.image import ImageDataGenerator
import numpy as np 
import os
import glob
import skimage.io as io
import skimage.transform as trans

def adjustData(img,mask,flag_multi_class,num_class):
    # normalize images to [0,1] and turn masks into the form the network expects
    if(flag_multi_class):
        img = img / 255
        # keep a single channel of the mask, then one-hot encode it
        mask = mask[:,:,:,0] if(len(mask.shape) == 4) else mask[:,:,0]
        new_mask = np.zeros(mask.shape + (num_class,))
        for i in range(num_class):
            new_mask[mask == i,i] = 1
        # note: this flattens the spatial dimensions to (batch, H*W, num_class);
        # for the models in this post you may need to drop this reshape so the
        # mask matches the (H, W, num_class) output
        new_mask = np.reshape(new_mask,(new_mask.shape[0],new_mask.shape[1]*new_mask.shape[2],new_mask.shape[3]))
        mask = new_mask
    elif(np.max(img) > 1):
        img = img / 255
        # mask = mask / 255  # uncomment if label values are 0/255 instead of 0/1
        mask[mask > 0.5] = 1
        mask[mask <= 0.5] = 0
    return (img,mask)

def trainGenerator(batch_size,train_path,image_folder,mask_folder,aug_dict,image_color_mode = "rgb",
                    mask_color_mode = "grayscale",image_save_prefix  = "image",mask_save_prefix  = "mask",
                    flag_multi_class = False,num_class = 2,save_to_dir = None,target_size = (512,512),seed = 1):
    '''
    can generate image and mask at the same time
    use the same seed for image_datagen and mask_datagen to ensure the transformation for image and mask is the same
    if you want to visualize the results of generator, set save_to_dir = "your path"
    '''
    image_datagen = ImageDataGenerator(**aug_dict)
    mask_datagen = ImageDataGenerator(**aug_dict)
    image_generator = image_datagen.flow_from_directory(
        train_path,
        classes = [image_folder],
        class_mode = None,
        color_mode = image_color_mode,
        target_size = target_size,
        batch_size = batch_size,
        save_to_dir = save_to_dir,
        save_prefix  = image_save_prefix,
        seed = seed)
    mask_generator = mask_datagen.flow_from_directory(
        train_path,
        classes = [mask_folder],
        class_mode = None,
        color_mode = mask_color_mode,
        target_size = target_size,
        batch_size = batch_size,
        save_to_dir = save_to_dir,
        save_prefix  = mask_save_prefix,
        seed = seed)
    train_generator = zip(image_generator, mask_generator)
    for (img,mask) in train_generator:
        img,mask = adjustData(img,mask,flag_multi_class,num_class)
        yield (img,mask)

def testGenerator(test_path,num_image = 30,target_size = (512,512),flag_multi_class = False,as_gray = False):
    # yields one preprocessed image per step; num_image is ignored, every file in test_path is used
    images = os.listdir(test_path)
    for name in images:
        img = io.imread(os.path.join(test_path,name),as_gray = as_gray)
        img = img / 255
        img = trans.resize(img,target_size)
        # grayscale images need an explicit channel dimension
        if img.ndim == 2:
            img = np.reshape(img,img.shape + (1,))
        # add the batch dimension expected by the model
        img = np.expand_dims(img, axis=0)
        yield img

def geneTrainNpy(image_path,mask_path,flag_multi_class = False,num_class = 2,image_prefix = "image",mask_prefix = "mask",image_as_gray = True,mask_as_gray = True):
    # alternative to the generator: load all image/mask pairs into memory as numpy arrays
    image_name_arr = glob.glob(os.path.join(image_path,"%s*.png"%image_prefix))
    image_arr = []
    mask_arr = []
    for index,item in enumerate(image_name_arr):
        img = io.imread(item,as_gray = image_as_gray)
        img = np.reshape(img,img.shape + (1,)) if image_as_gray else img
        mask = io.imread(item.replace(image_path,mask_path).replace(image_prefix,mask_prefix),as_gray = mask_as_gray)
        mask = np.reshape(mask,mask.shape + (1,)) if mask_as_gray else mask
        img,mask = adjustData(img,mask,flag_multi_class,num_class)
        image_arr.append(img)
        mask_arr.append(mask)
    image_arr = np.array(image_arr)
    mask_arr = np.array(mask_arr)
    return image_arr,mask_arr
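
Before training, it helps to pull one batch from the generator and check shapes and value ranges; a minimal sketch (using the road_test paths from the training script below):

# quick sanity check: fetch one batch from the generator and inspect it
gen = trainGenerator(2, 'D:/xxx/unet_plus/road_test/train/', 'imgs', 'labels', dict())
img, mask = next(gen)
print(img.shape, img.min(), img.max())   # expect (2, 512, 512, 3), values in [0, 1]
print(mask.shape, np.unique(mask))       # expect (2, 512, 512, 1), values 0 and 1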

Training script: train.py

from data import *
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau, TensorBoard
from segmentation_models import Unet, Nestnet, Xnet

# prepare model
# model = Xnet(backbone_name='resnet50', encoder_weights='imagenet', decoder_block_type='transpose') # build UNet++
# model = Unet(backbone_name='resnet50', encoder_weights='imagenet', decoder_block_type='transpose') # build U-Net
model = Nestnet(backbone_name='resnet50', encoder_weights='imagenet', decoder_block_type='transpose') # build DLA

model.summary()
# model.compile('Adam', 'binary_crossentropy', ['binary_accuracy'])
model.compile(optimizer=Adam(lr=1.0e-3), loss='binary_crossentropy', metrics=['accuracy'])

# train model
batch_size = 4
img_size = 512
epochs = 100
train_im_path,train_mask_path = 'D:/xxx/unet_plus/road_test/train/imgs/','D:/xxx/unet_plus/road_test/train/labels/'
val_im_path,val_mask_path = 'D:/xxx/unet_plus/road_test/val/imgs/','D:/xxx/unet_plus/road_test/val/labels/'
train_set = os.listdir(train_im_path)
val_set = os.listdir(val_im_path)
train_number = len(train_set)
val_number = len(val_set)

train_root = 'D:/xxx/unet_plus/road_test/train/'
val_root = 'D:/xxx/unet_plus/road_test/val/'
data_gen_args = dict(rotation_range=0.2,
                    width_shift_range=0.05,
                    height_shift_range=0.05,
                    shear_range=0.05,
                    zoom_range=0.05,
                    horizontal_flip=True,
                    fill_mode='nearest')
training_generator = trainGenerator(batch_size,train_root,'imgs','labels',data_gen_args,save_to_dir = None)
validation_generator = trainGenerator(batch_size,val_root,'imgs','labels',data_gen_args,save_to_dir = None)


model_path = 'D:/xxx/UNetPlusPlus-master/weights/'
model_name = 'road_{epoch:03d}.h5'
model_file = os.path.join(model_path, model_name)
# model_checkpoint = ModelCheckpoint(model_file, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
model_checkpoint = ModelCheckpoint(model_file, monitor='loss', verbose=1, save_best_only=False)
lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=np.sqrt(0.5625), cooldown=0, patience=5, min_lr=0.5e-6)
callbacks = [model_checkpoint, lr_reducer, TensorBoard(log_dir='./log')]

model.fit_generator(generator=training_generator,
                    validation_data=validation_generator,
                    steps_per_epoch=train_number//batch_size,
                    validation_steps=val_number//batch_size,                            
                    use_multiprocessing=False,
                    epochs=epochs,verbose=1,
                    callbacks=callbacks)
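
As noted earlier, the models appear to support multi-class segmentation. Here is a minimal, untested sketch of what would change, assuming this fork keeps the classes/activation arguments of segmentation_models; the mask reshape in adjustData that flattens the spatial dimensions would also likely need to be removed so the mask matches the (H, W, num_class) output.

# hypothetical multi-class setup (untested): num_class classes with a softmax output
num_class = 3
model = Xnet(backbone_name='resnet50', encoder_weights='imagenet',
             decoder_block_type='transpose', classes=num_class, activation='softmax')
model.compile(optimizer=Adam(lr=1.0e-3), loss='categorical_crossentropy', metrics=['accuracy'])
# the generator then has to one-hot encode the masks
training_generator = trainGenerator(batch_size, train_root, 'imgs', 'labels', data_gen_args,
                                    flag_multi_class=True, num_class=num_class)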

Prediction script: predict.py

import os
import cv2
from data import *
from keras.models import load_model

model = load_model('./weights/road_100.h5')
test_path = "D:/xxx/unet_plus/road_test/val/imgs/"
out = "D:/xxx/UNetPlusPlus-master/result/"
imgs = os.listdir(test_path)
count = len(imgs)
testGene = testGenerator(test_path)
results = model.predict_generator(testGene,count,verbose=1)
for i in range(count):
    out_path = os.path.join(out, imgs[i])
    im = results[i]
    # binarize the sigmoid output and save as a black/white mask
    im[im>0.5] = 255
    im[im<=0.5] = 0
    cv2.imwrite(out_path, im)
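
The saved masks come out at the generator's target_size of 512x512. If the original images have a different resolution, a small sketch (an assumption, not part of the original script) to resize each mask back before writing:

# optional: resize each predicted mask back to the source image resolution
for i in range(count):
    src = cv2.imread(os.path.join(test_path, imgs[i]))
    h, w = src.shape[:2]
    im = (results[i, :, :, 0] > 0.5).astype(np.uint8) * 255
    im = cv2.resize(im, (w, h), interpolation=cv2.INTER_NEAREST)
    cv2.imwrite(os.path.join(out, imgs[i]), im)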
