Using Dense blocks and Residual blocks in Keras

Notes from some recent experiments; they may be useful if you are modifying networks in Keras.
Data loading: data.py

from __future__ import print_function
from keras.preprocessing.image import ImageDataGenerator
import numpy as np 
import os
import cv2
import glob
import skimage.io as io
import skimage.transform as trans
from skimage import img_as_ubyte

Sky = [128,128,128]
Building = [128,0,0]
Pole = [192,192,128]
Road = [128,64,128]
Pavement = [60,40,222]
Tree = [128,128,0]
SignSymbol = [192,128,128]
Fence = [64,64,128]
Car = [64,0,128]
Pedestrian = [64,64,0]
Bicyclist = [0,128,192]
Unlabelled = [0,0,0]

COLOR_DICT = np.array([Sky, Building, Pole, Road, Pavement, Tree,
                       SignSymbol, Fence, Car, Pedestrian, Bicyclist, Unlabelled])


def adjustData(img,mask,flag_multi_class,num_class):
    if(flag_multi_class):
        img = img / 255
        mask = mask[:,:,:,0] if(len(mask.shape) == 4) else mask[:,:,0]
        new_mask = np.zeros(mask.shape + (num_class,))
        for i in range(num_class):
            #for one pixel in the image, find the class in mask and convert it into one-hot vector
            #index = np.where(mask == i)
            #index_mask = (index[0],index[1],index[2],np.zeros(len(index[0]),dtype = np.int64) + i) if (len(mask.shape) == 4) else (index[0],index[1],np.zeros(len(index[0]),dtype = np.int64) + i)
            #new_mask[index_mask] = 1
            new_mask[mask == i,i] = 1
        new_mask = np.reshape(new_mask,(new_mask.shape[0],new_mask.shape[1]*new_mask.shape[2],new_mask.shape[3]))  # flatten H and W; the else branch of the original ternary was unreachable inside this if-block
        mask = new_mask
    elif(np.max(img) > 1):
        img = img / 255.0
        mask = mask /255.0
        mask[mask > 0.5] = 1
        mask[mask <= 0.5] = 0
    return (img,mask)
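
# A quick sanity check of the binary branch (a hypothetical example, not part of
# the original script): uint8 inputs in [0, 255] come back normalised, and the
# mask is thresholded to hard 0/1 labels.
#   img  = np.full((1, 256, 256, 3), 200, dtype=np.uint8)
#   mask = np.full((1, 256, 256, 1), 180, dtype=np.uint8)
#   img, mask = adjustData(img, mask, flag_multi_class=False, num_class=2)
#   assert img.max() <= 1.0 and (mask == 1).all()   # 180/255 > 0.5 -> all ones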



def trainGenerator(batch_size,train_path,image_folder,mask_folder,aug_dict,image_color_mode = "rgb",
                    mask_color_mode = "grayscale",image_save_prefix  = "image",mask_save_prefix  = "mask",
                    flag_multi_class = False,num_class = 2,save_to_dir = None,target_size = (256,256),seed = 1):
    '''
    Generate image and mask batches simultaneously.
    Use the same seed for image_datagen and mask_datagen so that the image and
    its mask receive identical random transformations.
    To visualise the generator output, set save_to_dir to a path of your choice.
    '''
    image_datagen = ImageDataGenerator(**aug_dict)
    mask_datagen = ImageDataGenerator(**aug_dict)
    image_generator = image_datagen.flow_from_directory(
        train_path,
        classes = [image_folder],
        class_mode = None,
        color_mode = image_color_mode,
        target_size = target_size,
        batch_size = batch_size,
        save_to_dir = save_to_dir,
        save_prefix  = image_save_prefix,
        seed = seed)
    mask_generator = mask_datagen.flow_from_directory(
        train_path,
        classes = [mask_folder],
        class_mode = None,
        color_mode = mask_color_mode,
        target_size = target_size,
        batch_size = batch_size,
        save_to_dir = save_to_dir,
        save_prefix  = mask_save_prefix,
        seed = seed)
    train_generator = zip(image_generator, mask_generator)
    for (img,mask) in train_generator:
        img,mask = adjustData(img,mask,flag_multi_class,num_class)
        yield (img,mask)



def testGenerator(test_path,num_image = 30,target_size = (448,448),flag_multi_class = False,as_gray = True):
    # num_image, target_size and as_gray are unused by the active code path below
    imgs = os.listdir(test_path)
    for im in imgs:
        # img = io.imread(os.path.join(test_path,im),as_gray = as_gray)
        img = io.imread(os.path.join(test_path,im))
        img = img / 255.0
        # img = trans.resize(img,target_size)
        # img = np.reshape(img,img.shape+(1,)) if (not flag_multi_class) else img
        img = np.reshape(img,(1,)+img.shape)
        yield img
    # for i in range(num_image):
    #     img = io.imread(os.path.join(test_path,"%d.png"%i),as_gray = as_gray)
    #     img = img / 255
    #     img = trans.resize(img,target_size)
    #     img = np.reshape(img,img.shape+(1,)) if (not flag_multi_class) else img
    #     img = np.reshape(img,(1,)+img.shape)
    #     yield img


def geneTrainNpy(image_path,mask_path,flag_multi_class = False,num_class = 2,image_prefix = "image",mask_prefix = "mask",image_as_gray = True,mask_as_gray = True):
    image_name_arr = glob.glob(os.path.join(image_path,"%s*.png"%image_prefix))
    image_arr = []
    mask_arr = []
    for index,item in enumerate(image_name_arr):
        img = io.imread(item,as_gray = image_as_gray)
        img = np.reshape(img,img.shape + (1,)) if image_as_gray else img
        mask = io.imread(item.replace(image_path,mask_path).replace(image_prefix,mask_prefix),as_gray = mask_as_gray)
        mask = np.reshape(mask,mask.shape + (1,)) if mask_as_gray else mask
        img,mask = adjustData(img,mask,flag_multi_class,num_class)
        image_arr.append(img)
        mask_arr.append(mask)
    image_arr = np.array(image_arr)
    mask_arr = np.array(mask_arr)
    return image_arr,mask_arr


def labelVisualize(num_class,color_dict,img):
    img = img[:,:,0] if len(img.shape) == 3 else img
    img_out = np.zeros(img.shape + (3,))
    for i in range(num_class):
        img_out[img == i,:] = color_dict[i]
    return img_out / 255.0



def saveResult(save_path,npyfile,names,flag_multi_class = False,num_class = 2):
    # for i,item in enumerate(npyfile):
    #     img = labelVisualize(num_class,COLOR_DICT,item) if flag_multi_class else item[:,:,0]
    #     io.imsave(os.path.join(save_path,"%d_predict.png"%i),img)

    for i,item in enumerate(npyfile):
        if flag_multi_class:
            img = labelVisualize(num_class,COLOR_DICT,item)
        else:
            img = item[:,:,0]
            # img = cv2.resize(img, (500, 500), interpolation = cv2.INTER_NEAREST)
            # print(np.max(img), np.min(img))
            img[img>0.5] = 255
            img[img<=0.5] = 0
        io.imsave(os.path.join(save_path,"%s"%names[i]),img)
        # name = names[i].split('.')[0]
        # io.imsave(os.path.join(save_path,"%s.tif"%name),img_as_ubyte(img))

Accuracy evaluation: eval.py

# -*- coding: utf-8 -*-
import os
import cv2
import numpy as np
from sklearn.metrics import confusion_matrix

class IOUMetric:
    """
    Class to calculate mean-iou using fast_hist method
    """
    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.hist = np.zeros((num_classes, num_classes))
    def _fast_hist(self, label_pred, label_true):
        mask = (label_true >= 0) & (label_true < self.num_classes)        
        hist = np.bincount(
            self.num_classes * label_true[mask].astype(int) +
            label_pred[mask], minlength=self.num_classes ** 2).reshape(self.num_classes, self.num_classes)
        return hist
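        # How the bincount trick works (a toy illustration): with num_classes = 2,
        # label_true = [0, 0, 1, 1] and label_pred = [0, 1, 1, 1], the combined
        # indices 2*true + pred = [0, 1, 3, 3]; np.bincount yields [1, 1, 0, 2],
        # which reshapes to the confusion matrix [[1, 1], [0, 2]]
        # (rows = ground truth, columns = prediction).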

    def evaluate(self, predictions, gts):
        for lp, lt in zip(predictions, gts):
            assert len(lp.flatten()) == len(lt.flatten())
            self.hist += self._fast_hist(lp.flatten(), lt.flatten())    
        # miou
        iou = np.diag(self.hist) / (self.hist.sum(axis=1) + self.hist.sum(axis=0) - np.diag(self.hist))
        miou = np.nanmean(iou) 
        # mean acc
        acc = np.diag(self.hist).sum() / self.hist.sum()
        acc_cls = np.nanmean(np.diag(self.hist) / self.hist.sum(axis=1))
        freq = self.hist.sum(axis=1) / self.hist.sum()
        fwavacc = (freq[freq > 0] * iou[freq > 0]).sum()
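        # Summary of the quantities above (hist rows = ground truth, cols = prediction):
        #   iou[c]  = hist[c, c] / (row_sum[c] + col_sum[c] - hist[c, c])
        #   miou    = nan-mean of iou over classes
        #   acc     = trace(hist) / hist.sum()               (overall pixel accuracy)
        #   fwavacc = sum_c freq[c] * iou[c], with freq[c] = row_sum[c] / total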
        return acc, acc_cls, iou, miou, fwavacc


if __name__ == '__main__':
    label_path = 'D:/wcs/dense_seg/build/test/labels/'
    predict_path = 'D:/wcs/dense_seg/re/'
    pres = os.listdir(predict_path)
    labels = []
    predicts = []
    for im in pres:
        if im[-4:] == '.png':
            label_name = im.split('.')[0] + '.png'
            lab_path = os.path.join(label_path, label_name)
            pre_path = os.path.join(predict_path, im)
            label = cv2.imread(lab_path,0)
            pre = cv2.imread(pre_path,0)
            label[label>0] = 1
            pre[pre>0] = 1
            labels.append(label)
            predicts.append(pre)
    el = IOUMetric(2)
    acc, acc_cls, iou, miou, fwavacc = el.evaluate(predicts, labels)
    print('acc: ',acc)
    print('acc_cls: ',acc_cls)
    print('iou: ',iou)
    print('miou: ',miou)
    print('fwavacc: ',fwavacc)

    pres = os.listdir(predict_path)
    init = np.zeros((2,2))
    for im in pres:
        if im[-4:] != '.png':   # skip non-PNG entries, as in the loop above
            continue
        lb_path = os.path.join(label_path, im)
        pre_path = os.path.join(predict_path, im)
        lb = cv2.imread(lb_path,0)
        pre = cv2.imread(pre_path,0)
        lb[lb>0] = 1
        pre[pre>0] = 1
        lb = lb.flatten()
        pre = pre.flatten()
        confuse = confusion_matrix(lb, pre)
        init += confuse

    precision = init[1][1]/(init[0][1] + init[1][1]) 
    recall = init[1][1]/(init[1][0] + init[1][1])
    accuracy = (init[0][0] + init[1][1])/init.sum()
    f1_score = 2*precision*recall/(precision + recall)
    print('class_accuracy: ', precision)
    print('class_recall: ', recall)
    print('accuracy: ', accuracy)
    print('f1_score: ', f1_score)
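
As a sanity check on how these numbers relate (a hypothetical snippet, not part of eval.py): the reported f1_score is the harmonic mean of the precision (printed as class_accuracy) and recall. Using the DenseNet.py values listed further below:

precision, recall = 0.7147935949168034, 0.7003612432690246
print(2 * precision * recall / (precision + recall))   # ~0.70750, matching f1_score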

1. Training and testing code: DenseNet.py

import numpy as np
import keras
from keras.engine import Layer
import keras.backend as K
from keras.models import Model, save_model, load_model
from keras.layers import Input, Dense, Dropout, BatchNormalization, LeakyReLU, concatenate
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D
from keras.models import *
from keras.layers import *
from data import *
from keras.models import load_model
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, TensorBoard, LearningRateScheduler

def DenseLayer(x, nb_filter, bn_size=4, alpha=0.0, drop_rate=0.2):
    
    # Bottleneck layers
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=alpha)(x)
    x = Conv2D(bn_size*nb_filter, (1, 1), strides=(1,1), padding='same')(x)
    
    # Composite function
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=alpha)(x)
    x = Conv2D(nb_filter, (3, 3), strides=(1,1), padding='same')(x)
    
    if drop_rate: x = Dropout(drop_rate)(x)
    
    return x


def DenseBlock(x, nb_layers, growth_rate, drop_rate=0.2):
    
    for ii in range(nb_layers):
        conv = DenseLayer(x, nb_filter=growth_rate, drop_rate=drop_rate)
        x = concatenate([x, conv], axis=3)
        
    return x

def TransitionLayer(x, compression=0.5, alpha=0.0, is_max=1):
    
    nb_filter = int(x.shape.as_list()[-1]*compression)
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=alpha)(x)
    x = Conv2D(nb_filter, (1, 1), strides=(1,1), padding='same')(x)
    if is_max != 0: x = MaxPooling2D(pool_size=(2, 2), strides=2)(x)
    else: x = AveragePooling2D(pool_size=(2, 2), strides=2)(x)
    
    return x
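
# Channel bookkeeping (an informal note, assuming growth_rate = 12 and three
# 12-layer DenseBlocks as below): the stem outputs 24 channels, each DenseBlock
# adds 12 * 12 = 144, and each TransitionLayer halves the count:
#   24 -> 168 -> 84 -> 228 -> 114 -> 258 -> 129
# The 84 / 114 / 129 filter counts used by the residual blocks in the later
# DenseNet variants come from exactly this sequence.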

IMAGE_ORDERING = 'channels_last'
def dense_seg(n_classes=1, input_size = (256,256,1), pretrained_weights = None):
    growth_rate = 12
    drop_rate = 0.2
    inpt = Input(input_size)
    x = Conv2D(growth_rate*2, (3, 3), strides=1, padding='same')(inpt)
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DenseBlock(x, 12, growth_rate, drop_rate=0.2)
    x = TransitionLayer(x)
    x = DenseBlock(x, 12, growth_rate, drop_rate=0.2)
    x = TransitionLayer(x)
    x = DenseBlock(x, 12, growth_rate, drop_rate=0.2)
    x = TransitionLayer(x)

    x = (UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(x)
    x = (ZeroPadding2D( (1,1), data_format=IMAGE_ORDERING))(x)
    x = (Conv2D( 256, (3, 3), padding='valid', data_format=IMAGE_ORDERING))(x)
    x = (BatchNormalization())(x)
    x = Dropout(drop_rate)(x)

    x = (UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(x)
    x = (ZeroPadding2D( (1,1), data_format=IMAGE_ORDERING))(x)
    x = (Conv2D( 128, (3, 3), padding='valid', data_format=IMAGE_ORDERING))(x)
    x = (BatchNormalization())(x)
    x = Dropout(drop_rate)(x)

    x = (UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(x)
    x = (ZeroPadding2D( (1,1), data_format=IMAGE_ORDERING))(x)
    x = (Conv2D( 64, (3, 3), padding='valid', data_format=IMAGE_ORDERING))(x)
    x = (BatchNormalization())(x)
    x = Dropout(drop_rate)(x)

    x = Conv2D(n_classes , (3, 3) , padding='same', data_format=IMAGE_ORDERING )(x)
    x = Conv2D(1, 1, activation = 'sigmoid')(x)

    model = Model(inpt, x)

    model.compile(optimizer = Adam(lr = 5e-5), loss = 'binary_crossentropy', metrics = ['accuracy'])

    model.summary()

    if(pretrained_weights):
        model.load_weights(pretrained_weights)

    return model

def scheduler(epoch):
    # decay the learning rate by a factor of 0.95 after every epoch
    if epoch % 1 == 0 and epoch != 0:
        lr = K.get_value(model.optimizer.lr)
        K.set_value(model.optimizer.lr, lr * 0.95)
        print("lr changed to {}".format(lr * 0.95))
    return K.get_value(model.optimizer.lr)

if __name__ == '__main__':
    data_gen_args = dict(
                    # rotation_range=0.2,
                    # width_shift_range=0.05,
                    # height_shift_range=0.05,
                    # shear_range=0.05,
                    # zoom_range=0.1,
                    horizontal_flip=True,
                    vertical_flip=True,
                    fill_mode='nearest')
    data_gen_args = dict()   # overrides the flips above; this run used no augmentation

    train_Gene = trainGenerator(3,'./build/train','images','labels',data_gen_args,save_to_dir = None)


    model = dense_seg(input_size = (256,256,3), pretrained_weights = './logs/build_1.h5')

    reduce_lr = LearningRateScheduler(scheduler)
    model_checkpoint = ModelCheckpoint(os.path.join('./logs/', 'build_2.h5'), monitor='accuracy',verbose=1, save_best_only=True)   # the monitor key must match the logged metric name ('acc' in older Keras, 'accuracy' in tf.keras)
    tensorboard = TensorBoard(log_dir='./logs', histogram_freq=0,write_graph=True, write_images=False)
    model.fit_generator(train_Gene,
    # validation_data=val_Gene,
    # validation_steps=4,
    steps_per_epoch=1644,
    epochs=40,
    shuffle=True,
    callbacks=[reduce_lr, model_checkpoint, tensorboard])
    # callbacks=[model_checkpoint, tensorboard])


    # path = 'D:/wcs/dense_seg/build/test/images/'
    # save_path = 'D:/wcs/dense_seg/re/'
    # names = os.listdir(path)
    # model = load_model('./logs/build_3.h5')
    # testGene = testGenerator(path)
    # results = model.predict_generator(testGene,360,verbose=1)
    # saveResult(save_path,results,names)
	
acc:  0.9409253862169054
acc_cls:  0.8964666522092115
iou:  [0.93015432 0.72302761]
miou:  0.826590963614625
fwavacc:  0.8914602548522483
class_accuracy:  0.7147935949168034
class_recall:  0.7003612432690246
accuracy:  0.8504329528087138
f1_score:  0.7075038254590356

2. Training and testing code: DenseNet2.py

import numpy as np
import keras
from keras.engine import Layer
import keras.backend as K
from keras.models import Model, save_model, load_model
from keras.layers import Input, Dense, Dropout, BatchNormalization, LeakyReLU, concatenate
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D
from keras.models import *
from keras.layers import *
from data import *
from keras.models import load_model
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, TensorBoard, LearningRateScheduler

def DenseLayer(x, nb_filter, bn_size=4, alpha=0.0, drop_rate=0.2):
    
    # Bottleneck layers
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=alpha)(x)
    x = Conv2D(bn_size*nb_filter, (1, 1), strides=(1,1), padding='same')(x)
    
    # Composite function
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=alpha)(x)
    x = Conv2D(nb_filter, (3, 3), strides=(1,1), padding='same')(x)
    
    if drop_rate: x = Dropout(drop_rate)(x)
    
    return x


def DenseBlock(x, nb_layers, growth_rate, drop_rate=0.2):
    
    for ii in range(nb_layers):
        conv = DenseLayer(x, nb_filter=growth_rate, drop_rate=drop_rate)
        x = concatenate([x, conv], axis=3)
        
    return x

def TransitionLayer(x, compression=0.5, alpha=0.0, is_max=1):
    
    nb_filter = int(x.shape.as_list()[-1]*compression)
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=alpha)(x)
    x = Conv2D(nb_filter, (1, 1), strides=(1,1), padding='same')(x)
    if is_max != 0: x = MaxPooling2D(pool_size=(2, 2), strides=2)(x)
    else: x = AveragePooling2D(pool_size=(2, 2), strides=2)(x)
    
    return x

#=================================================== residual_block =================================================
def BatchActivate(x):
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return x
def convolution_block(x, filters, size, strides=(1,1), padding='same', activation=True):
    x = Conv2D(filters, size, strides=strides, padding=padding)(x)
    if activation == True:
        x = BatchActivate(x)
    return x
# residual_block
def residual_block(blockInput, num_filters=16, batch_activate = False):
    x = BatchActivate(blockInput)
    x = convolution_block(x, num_filters, (3,3) )
    x = convolution_block(x, num_filters, (3,3), activation=False)
    x = Add()([x, blockInput])
    if batch_activate:
        x = BatchActivate(x)
    return x
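# Note: Add() requires x and blockInput to have identical shapes, so num_filters
# must equal the channel count of the incoming tensor; that is why the calls in
# dense_seg below use num_filters=129 (the width after the third TransitionLayer).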
#=================================================== residual_block =================================================

IMAGE_ORDERING = 'channels_last'
def dense_seg(n_classes=1, input_size = (256,256,1), pretrained_weights = None):
    growth_rate = 12
    drop_rate = 0.2
    inpt = Input(input_size)
    x = Conv2D(growth_rate*2, (3, 3), strides=1, padding='same')(inpt)
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DenseBlock(x, 12, growth_rate, drop_rate=0.2)
    x = TransitionLayer(x)
    x = DenseBlock(x, 12, growth_rate, drop_rate=0.2)
    x = TransitionLayer(x)
    x = DenseBlock(x, 12, growth_rate, drop_rate=0.2)
    x = TransitionLayer(x)

    x = residual_block(x, num_filters=129)
    x = residual_block(x, num_filters=129, batch_activate = True)


    x = (UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(x)
    x = (ZeroPadding2D( (1,1), data_format=IMAGE_ORDERING))(x)
    x = (Conv2D( 256, (3, 3), padding='valid', data_format=IMAGE_ORDERING))(x)
    x = (BatchNormalization())(x)
    x = Dropout(drop_rate)(x)

    x = (UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(x)
    x = (ZeroPadding2D( (1,1), data_format=IMAGE_ORDERING))(x)
    x = (Conv2D( 128, (3, 3), padding='valid', data_format=IMAGE_ORDERING))(x)
    x = (BatchNormalization())(x)
    x = Dropout(drop_rate)(x)

    x = (UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(x)
    x = (ZeroPadding2D( (1,1), data_format=IMAGE_ORDERING))(x)
    x = (Conv2D( 64, (3, 3), padding='valid', data_format=IMAGE_ORDERING))(x)
    x = (BatchNormalization())(x)
    x = Dropout(drop_rate)(x)

    x = Conv2D(n_classes , (3, 3) , padding='same', data_format=IMAGE_ORDERING )(x)
    x = Conv2D(1, 1, activation = 'sigmoid')(x)

    model = Model(inpt, x)

    model.summary()
    
    model.compile(optimizer = Adam(lr = 1e-3), loss = 'binary_crossentropy', metrics = ['accuracy'])

    if(pretrained_weights):
        model.load_weights(pretrained_weights)

    return model

def scheduler(epoch):
    # decay the learning rate by a factor of 0.95 after every epoch
    if epoch % 1 == 0 and epoch != 0:
        lr = K.get_value(model.optimizer.lr)
        K.set_value(model.optimizer.lr, lr * 0.95)
        print("lr changed to {}".format(lr * 0.95))
    return K.get_value(model.optimizer.lr)

def scheduler2(epoch):
    # halve the learning rate every 50 epochs
    if epoch % 50 == 0 and epoch != 0:
        lr = K.get_value(model.optimizer.lr)
        K.set_value(model.optimizer.lr, lr * 0.5)
        print("lr changed to {}".format(lr * 0.5))
    return K.get_value(model.optimizer.lr)

if __name__ == '__main__':
    data_gen_args = dict(
                    # rotation_range=0.2,
                    # width_shift_range=0.05,
                    # height_shift_range=0.05,
                    # shear_range=0.05,
                    # zoom_range=0.1,
                    horizontal_flip=True,
                    vertical_flip=True,
                    fill_mode='nearest')
    data_gen_args = dict()   # overrides the flips above; this run used no augmentation

    train_Gene = trainGenerator(6,'./build/train','images','labels',data_gen_args,save_to_dir = None)


    model = dense_seg(input_size = (256,256,3), pretrained_weights = None)

    reduce_lr = LearningRateScheduler(scheduler2)
    model_checkpoint = ModelCheckpoint(os.path.join('./logs/', 'build_rs.h5'), monitor='acc',verbose=1, save_best_only=True)
    tensorboard = TensorBoard(log_dir='./logs', histogram_freq=0,write_graph=True, write_images=False)
    model.fit_generator(train_Gene,
    # validation_data=val_Gene,
    # validation_steps=4,
    steps_per_epoch=822,
    epochs=100,
    shuffle=True,
    callbacks=[reduce_lr, model_checkpoint, tensorboard])
    # callbacks=[model_checkpoint, tensorboard])


    # path = './build/test/images/'
    # save_path = './re/'
    # names = os.listdir(path)
    # model = load_model('./logs/build_rs.h5')
    # testGene = testGenerator(path)
    # results = model.predict_generator(testGene,360,verbose=1)
    # saveResult(save_path,results,names)
	
acc:  0.9393964979383681
acc_cls:  0.892688597872882
iou:  [0.9284631  0.71606213]
miou:  0.822262613439277
fwavacc:  0.8887837334801199
class_accuracy:  0.7258091400593357
class_recall:  0.7072045651261849
accuracy:  0.8603447808159722
f1_score:  0.7163860825655127

3. Training and testing code: DenseNet3.py

import numpy as np
import keras
from keras.engine import Layer
import keras.backend as K
from keras.models import Model, save_model, load_model
from keras.layers import Input, Dense, Dropout, BatchNormalization, LeakyReLU, concatenate
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D
from keras.models import *
from keras.layers import *
from data import *
from keras.models import load_model
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, TensorBoard, LearningRateScheduler

def DenseLayer(x, nb_filter, bn_size=4, alpha=0.0, drop_rate=0.2):
    
    # Bottleneck layers
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=alpha)(x)
    x = Conv2D(bn_size*nb_filter, (1, 1), strides=(1,1), padding='same')(x)
    
    # Composite function
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=alpha)(x)
    x = Conv2D(nb_filter, (3, 3), strides=(1,1), padding='same')(x)
    
    if drop_rate: x = Dropout(drop_rate)(x)
    
    return x


def DenseBlock(x, nb_layers, growth_rate, drop_rate=0.2):
    
    for ii in range(nb_layers):
        conv = DenseLayer(x, nb_filter=growth_rate, drop_rate=drop_rate)
        x = concatenate([x, conv], axis=3)
        
    return x

def TransitionLayer(x, compression=0.5, alpha=0.0, is_max=1):
    
    nb_filter = int(x.shape.as_list()[-1]*compression)
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=alpha)(x)
    x = Conv2D(nb_filter, (1, 1), strides=(1,1), padding='same')(x)
    if is_max != 0: x = MaxPooling2D(pool_size=(2, 2), strides=2)(x)
    else: x = AveragePooling2D(pool_size=(2, 2), strides=2)(x)
    
    return x

#=================================================== residual_block =================================================
def BatchActivate(x):
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return x
def convolution_block(x, filters, size, strides=(1,1), padding='same', activation=True):
    x = Conv2D(filters, size, strides=strides, padding=padding)(x)
    if activation == True:
        x = BatchActivate(x)
    return x
# residual_block
def residual_block(blockInput, num_filters=16, batch_activate = False):
    x = BatchActivate(blockInput)
    x = convolution_block(x, num_filters, (3,3) )
    x = convolution_block(x, num_filters, (3,3), activation=False)
    x = Add()([x, blockInput])
    if batch_activate:
        x = BatchActivate(x)
    return x
#=================================================== residual_block =================================================

IMAGE_ORDERING = 'channels_last'
def dense_seg(n_classes=1, input_size = (256,256,1), pretrained_weights = None):
    growth_rate = 12
    drop_rate = 0.2
    inpt = Input(input_size)
    x = Conv2D(growth_rate*2, (3, 3), strides=1, padding='same')(inpt)
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DenseBlock(x, 12, growth_rate, drop_rate=0.2)
    x = TransitionLayer(x)

    x = residual_block(x, num_filters=84)
    x = residual_block(x, num_filters=84, batch_activate = True)

    x = DenseBlock(x, 12, growth_rate, drop_rate=0.2)
    x = TransitionLayer(x)

    x = residual_block(x, num_filters=114)
    x = residual_block(x, num_filters=114, batch_activate = True)

    x = DenseBlock(x, 12, growth_rate, drop_rate=0.2)
    x = TransitionLayer(x)

    x = residual_block(x, num_filters=129)
    x = residual_block(x, num_filters=129, batch_activate = True)
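    # the residual pairs above use num_filters equal to the current channel width
    # (84, 114, 129 after the successive transitions) so the Add() shapes match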


    x = (UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(x)
    x = (ZeroPadding2D( (1,1), data_format=IMAGE_ORDERING))(x)
    x = (Conv2D( 256, (3, 3), padding='valid', data_format=IMAGE_ORDERING))(x)
    x = (BatchNormalization())(x)
    x = Dropout(drop_rate)(x)
    x = Activation('relu')(x)

    x = (UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(x)
    x = (ZeroPadding2D( (1,1), data_format=IMAGE_ORDERING))(x)
    x = (Conv2D( 128, (3, 3), padding='valid', data_format=IMAGE_ORDERING))(x)
    x = (BatchNormalization())(x)
    x = Dropout(drop_rate)(x)
    x = Activation('relu')(x)

    x = (UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(x)
    x = (ZeroPadding2D( (1,1), data_format=IMAGE_ORDERING))(x)
    x = (Conv2D( 64, (3, 3), padding='valid', data_format=IMAGE_ORDERING))(x)
    x = (BatchNormalization())(x)
    x = Dropout(drop_rate)(x)
    x = Activation('relu')(x)

    x = Conv2D(n_classes , (3, 3) , padding='same', data_format=IMAGE_ORDERING )(x)
    x = Conv2D(1, 1, activation = 'sigmoid')(x)

    model = Model(inpt, x)

    model.summary()
    
    model.compile(optimizer = Adam(lr = 1e-3), loss = 'binary_crossentropy', metrics = ['accuracy'])

    if(pretrained_weights):
        model.load_weights(pretrained_weights)

    return model

def scheduler(epoch):
    # decay the learning rate by a factor of 0.95 after every epoch
    if epoch % 1 == 0 and epoch != 0:
        lr = K.get_value(model.optimizer.lr)
        K.set_value(model.optimizer.lr, lr * 0.95)
        print("lr changed to {}".format(lr * 0.95))
    return K.get_value(model.optimizer.lr)

def scheduler2(epoch):
    # halve the learning rate every 50 epochs
    if epoch % 50 == 0 and epoch != 0:
        lr = K.get_value(model.optimizer.lr)
        K.set_value(model.optimizer.lr, lr * 0.5)
        print("lr changed to {}".format(lr * 0.5))
    return K.get_value(model.optimizer.lr)

if __name__ == '__main__':
    data_gen_args = dict(
                    # rotation_range=0.2,
                    # width_shift_range=0.05,
                    # height_shift_range=0.05,
                    # shear_range=0.05,
                    # zoom_range=0.1,
                    horizontal_flip=True,
                    vertical_flip=True,
                    fill_mode='nearest')
    data_gen_args = dict()   # overrides the flips above; this run used no augmentation

    train_Gene = trainGenerator(6,'./build/train','images','labels',data_gen_args,save_to_dir = None)
    val_Gene = trainGenerator(6,'./build/val','images','labels',data_gen_args,save_to_dir = None)


    model = dense_seg(input_size = (256,256,3), pretrained_weights = None)

    reduce_lr = LearningRateScheduler(scheduler2)
    model_checkpoint = ModelCheckpoint(os.path.join('./logs/', 'build_rs2.h5'), monitor='acc',verbose=1, save_best_only=True)
    tensorboard = TensorBoard(log_dir='./logs', histogram_freq=0,write_graph=True, write_images=False)
    model.fit_generator(train_Gene,
    validation_data=val_Gene,
    validation_steps=4,
    steps_per_epoch=822,
    epochs=100,
    shuffle=True,
    callbacks=[reduce_lr, model_checkpoint, tensorboard])
    # callbacks=[model_checkpoint, tensorboard])

    # path = './build/test/images/'
    # save_path = './re/'
    # names = os.listdir(path)
    # model = load_model('./logs/build_rs2.h5')
    # testGene = testGenerator(path)
    # results = model.predict_generator(testGene,360,verbose=1)
    # saveResult(save_path,results,names)
		
acc:  0.9390493604871962
acc_cls:  0.8824207140325924
iou:  [0.92846515 0.70824372]
miou:  0.8183544369524303
fwavacc:  0.8873248187124734
class_accuracy:  0.7283347863724868
class_recall:  0.6865863005739034
accuracy:  0.8562536445453013
f1_score:  0.7068446306556696

4. Training and testing code: DenseNet4.py

import numpy as np
import keras
from keras.engine import Layer
import keras.backend as K
from keras.models import Model, save_model, load_model
from keras.layers import Input, Dense, Dropout, BatchNormalization, LeakyReLU, concatenate
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D
from keras.models import *
from keras.layers import *
from data import *
from keras.models import load_model
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, TensorBoard, LearningRateScheduler
import pickle

def DenseLayer(x, nb_filter, bn_size=4, alpha=0.0, drop_rate=0.2):
    
    # Bottleneck layers
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=alpha)(x)
    x = Conv2D(bn_size*nb_filter, (1, 1), strides=(1,1), padding='same')(x)
    
    # Composite function
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=alpha)(x)
    x = Conv2D(nb_filter, (3, 3), strides=(1,1), padding='same')(x)
    
    if drop_rate: x = Dropout(drop_rate)(x)
    
    return x


def DenseBlock(x, nb_layers, growth_rate, drop_rate=0.2):
    
    for ii in range(nb_layers):
        conv = DenseLayer(x, nb_filter=growth_rate, drop_rate=drop_rate)
        x = concatenate([x, conv], axis=3)
        
    return x

def TransitionLayer(x, compression=0.5, alpha=0.0, is_max=1):
    
    nb_filter = int(x.shape.as_list()[-1]*compression)
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=alpha)(x)
    x = Conv2D(nb_filter, (1, 1), strides=(1,1), padding='same')(x)
    if is_max != 0: x = MaxPooling2D(pool_size=(2, 2), strides=2)(x)
    else: x = AveragePooling2D(pool_size=(2, 2), strides=2)(x)
    
    return x

#=================================================== residual_block =================================================
def BatchActivate(x):
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return x
def convolution_block(x, filters, size, strides=(1,1), padding='same', activation=True):
    x = Conv2D(filters, size, strides=strides, padding=padding)(x)
    if activation == True:
        x = BatchActivate(x)
    return x
# residual_block
def residual_block(blockInput, num_filters=16, batch_activate = False):
    x = BatchActivate(blockInput)
    x = convolution_block(x, num_filters, (3,3) )
    x = convolution_block(x, num_filters, (3,3), activation=False)
    x = Add()([x, blockInput])
    if batch_activate:
        x = BatchActivate(x)
    return x
#=================================================== residual_block =================================================

IMAGE_ORDERING = 'channels_last'
def dense_seg(n_classes=1, input_size = (256,256,1), pretrained_weights = None):
    growth_rate = 12
    drop_rate = 0.2
    inpt = Input(input_size)
    x = Conv2D(growth_rate*2, (3, 3), strides=1, padding='same')(inpt)
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DenseBlock(x, 12, growth_rate, drop_rate=0.2)
    x = TransitionLayer(x)

    x = residual_block(x, num_filters=84)
    x = residual_block(x, num_filters=84, batch_activate = True)

    x = DenseBlock(x, 12, growth_rate, drop_rate=0.2)
    x = TransitionLayer(x)

    x = residual_block(x, num_filters=114)
    x = residual_block(x, num_filters=114, batch_activate = True)

    x = DenseBlock(x, 12, growth_rate, drop_rate=0.2)
    x = TransitionLayer(x)

    x = residual_block(x, num_filters=129)
    x = residual_block(x, num_filters=129, batch_activate = True)

    x = Conv2DTranspose(256, (3, 3), strides=(2, 2), padding="same")(x)
    x = (BatchNormalization())(x)
    x = Dropout(drop_rate)(x)
    x = Activation('relu')(x)

    x = Conv2DTranspose(128, (3, 3), strides=(2, 2), padding="same")(x)
    x = (BatchNormalization())(x)
    x = Dropout(drop_rate)(x)
    x = Activation('relu')(x)

    x = Conv2DTranspose(64, (3, 3), strides=(2, 2), padding="same")(x)
    x = (BatchNormalization())(x)
    x = Dropout(drop_rate)(x)
    x = Activation('relu')(x)
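
    # decoder note: compared with DenseNet2/3, the UpSampling2D + ZeroPadding2D +
    # Conv2D stacks are replaced by strided Conv2DTranspose layers, which learn
    # the 2x upsampling instead of using fixed nearest-neighbour resizing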

    x = Conv2D(n_classes , (3, 3) , padding='same', data_format=IMAGE_ORDERING )(x)
    x = Conv2D(1, 1, activation = 'sigmoid')(x)

    model = Model(inpt, x)

    model.summary()
    
    model.compile(optimizer = Adam(lr = 1e-3), loss = 'binary_crossentropy', metrics = ['accuracy'])

    if(pretrained_weights):
        model.load_weights(pretrained_weights)

    return model

def scheduler(epoch):
    # decay the learning rate by a factor of 0.95 after every epoch
    if epoch % 1 == 0 and epoch != 0:
        lr = K.get_value(model.optimizer.lr)
        K.set_value(model.optimizer.lr, lr * 0.95)
        print("lr changed to {}".format(lr * 0.95))
    return K.get_value(model.optimizer.lr)

def scheduler2(epoch):
    # halve the learning rate every 50 epochs
    if epoch % 50 == 0 and epoch != 0:
        lr = K.get_value(model.optimizer.lr)
        K.set_value(model.optimizer.lr, lr * 0.5)
        print("lr changed to {}".format(lr * 0.5))
    return K.get_value(model.optimizer.lr)

if __name__ == '__main__':
    # batchsize = 3
    # train_path = './build/train'
    # val_path = './build/val'
    # train_num = len(os.listdir(train_path+'/images/'))
    # val_num = len(os.listdir(val_path+'/images/'))
    # data_gen_args = dict(
    #                 # rotation_range=0.2,
    #                 # width_shift_range=0.05,
    #                 # height_shift_range=0.05,
    #                 # shear_range=0.05,
    #                 # zoom_range=0.1,
    #                 horizontal_flip=True,
    #                 vertical_flip=True,
    #                 fill_mode='nearest')
    # data_gen_args = dict()

    # train_Gene = trainGenerator(batchsize,train_path,'images','labels',data_gen_args,save_to_dir = None)
    # val_Gene = trainGenerator(batchsize,val_path,'images','labels',data_gen_args,save_to_dir = None)


    # model = dense_seg(input_size = (256,256,3), pretrained_weights = None)

    # reduce_lr = LearningRateScheduler(scheduler)
    # model_checkpoint = ModelCheckpoint(os.path.join('./logs/', 'build_rs4.h5'), monitor='accuracy',verbose=1, save_best_only=True)
    # tensorboard = TensorBoard(log_dir='./logs', histogram_freq=0,write_graph=True, write_images=False)
    # history = model.fit_generator(train_Gene,
    # validation_data=val_Gene,
    # validation_steps=int(val_num/batchsize),
    # steps_per_epoch=int(train_num/batchsize),
    # epochs=100,
    # shuffle=True,
    # callbacks=[reduce_lr, model_checkpoint, tensorboard])
    # # callbacks=[model_checkpoint, tensorboard])
    # with open('log.txt','wb') as file_txt:
    #     pickle.dump(history.history,file_txt)


    path = './build/test/images/'
    save_path = './re/'
    names = os.listdir(path)
    model = load_model('./logs/build_rs4.h5')
    testGene = testGenerator(path)
    results = model.predict_generator(testGene,360,verbose=1)
    saveResult(save_path,results,names)
    
acc:  0.9403639899359809
acc_cls:  0.8869246829429505
iou:  [0.9298605  0.71518819]
miou:  0.8225243449167414
fwavacc:  0.8897568190548629
class_accuracy:  0.7288344948843302
class_recall:  0.6927176304620091
accuracy:  0.8573885764435327
f1_score:  0.7103172585700902

5. Training and testing code: DenseNet5.py

import numpy as np
import keras
from keras.engine import Layer
import keras.backend as K
from keras.models import Model, save_model, load_model
from keras.layers import Input, Dense, Dropout, BatchNormalization, LeakyReLU, concatenate
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D
from keras.models import *
from keras.layers import *
from data import *
from keras.models import load_model
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, TensorBoard, LearningRateScheduler
import pickle

def DenseLayer(x, nb_filter, bn_size=4, alpha=0.0, drop_rate=0.2):
    
    # Bottleneck layers
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=alpha)(x)
    x = Conv2D(bn_size*nb_filter, (1, 1), strides=(1,1), padding='same')(x)
    
    # Composite function
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=alpha)(x)
    x = Conv2D(nb_filter, (3, 3), strides=(1,1), padding='same')(x)
    
    if drop_rate: x = Dropout(drop_rate)(x)
    
    return x


def DenseBlock(x, nb_layers, growth_rate, drop_rate=0.2):
    
    for ii in range(nb_layers):
        conv = DenseLayer(x, nb_filter=growth_rate, drop_rate=drop_rate)
        x = concatenate([x, conv], axis=3)
        
    return x

def TransitionLayer(x, compression=0.5, alpha=0.0, is_max=1):
    
    nb_filter = int(x.shape.as_list()[-1]*compression)
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=alpha)(x)
    x = Conv2D(nb_filter, (1, 1), strides=(1,1), padding='same')(x)
    if is_max != 0: x = MaxPooling2D(pool_size=(2, 2), strides=2)(x)
    else: x = AveragePooling2D(pool_size=(2, 2), strides=2)(x)
    
    return x

#=================================================== residual_block =================================================
def BatchActivate(x):
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return x
def convolution_block(x, filters, size, strides=(1,1), padding='same', activation=True):
    x = Conv2D(filters, size, strides=strides, padding=padding)(x)
    if activation == True:
        x = BatchActivate(x)
    return x
# residual_block
def residual_block(blockInput, num_filters=16, batch_activate = False):
    x = BatchActivate(blockInput)
    x = convolution_block(x, num_filters, (3,3) )
    x = convolution_block(x, num_filters, (3,3), activation=False)
    x = Add()([x, blockInput])
    if batch_activate:
        x = BatchActivate(x)
    return x
#=================================================== residual_block =================================================

IMAGE_ORDERING = 'channels_last'
def dense_seg(n_classes=1, input_size = (256,256,1), pretrained_weights = None):
    growth_rate = 12
    drop_rate = 0.2
    inpt = Input(input_size)
    x = Conv2D(growth_rate*2, (3, 3), strides=1, padding='same')(inpt)
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=0.1)(x)

    x1_1 = DenseBlock(x, 12, growth_rate, drop_rate=drop_rate)
    x1_1 = TransitionLayer(x1_1)

    x1_2 = residual_block(x, num_filters=24)
    x1_2 = residual_block(x1_2, num_filters=24, batch_activate = True)
    x1_2 = MaxPooling2D(pool_size=(2, 2), strides=2)(x1_2)
    x1_2 = Conv2D(84, (1, 1), strides=(1,1), padding='same')(x1_2)
    
    x1_3 = convolution_block(x, 84, (3,3))
    x1_3 = (BatchNormalization())(x1_3)
    x1_3 = MaxPooling2D(pool_size=(2, 2), strides=2)(x1_3)

    x1 = concatenate([x1_1, x1_2, x1_3], axis=-1)
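    # fusion stage: each scale concatenates three parallel branches over the same
    # input: a DenseBlock+Transition path (x1_1), a residual path pooled and
    # projected by a 1x1 conv (x1_2), and a plain 3x3 conv path (x1_3)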

    x2_1 = DenseBlock(x1, 12, growth_rate, drop_rate=drop_rate)
    x2_1 = TransitionLayer(x2_1)

    x2_2 = residual_block(x1, num_filters=252)
    x2_2 = residual_block(x2_2, num_filters=252, batch_activate = True)
    x2_2 = MaxPooling2D(pool_size=(2, 2), strides=2)(x2_2)
    x2_2 = Conv2D(198, (1, 1), strides=(1,1), padding='same')(x2_2)

    x2_3 = convolution_block(x1, 198, (3,3))
    x2_3 = (BatchNormalization())(x2_3)
    x2_3 = MaxPooling2D(pool_size=(2, 2), strides=2)(x2_3)

    x2 = concatenate([x2_1, x2_2, x2_3], axis=-1)

    x3_1 = DenseBlock(x2, 12, growth_rate, drop_rate=drop_rate)
    x3_1 = TransitionLayer(x3_1)

    x3_2 = residual_block(x2, num_filters=594)
    x3_2 = residual_block(x3_2, num_filters=594, batch_activate = True)
    x3_2 = MaxPooling2D(pool_size=(2, 2), strides=2)(x3_2)
    x3_2 = Conv2D(369, (1, 1), strides=(1,1), padding='same')(x3_2)

    x3_3 = convolution_block(x2, 369, (3,3))
    x3_3 = (BatchNormalization())(x3_3)
    x3_3 = MaxPooling2D(pool_size=(2, 2), strides=2)(x3_3)

    x3 = concatenate([x3_1, x3_2, x3_3], axis=-1)

    x4 = MaxPooling2D(pool_size=(2, 2), strides=2)(x3)
    x4 = Conv2D(512, (1, 1), strides=(1,1), padding='same')(x4)
    x4 = MaxPooling2D(pool_size=(2, 2), strides=2)(x4)
    x4 = Conv2D(512, (1, 1), strides=(1,1), padding='same')(x4)

    x = Conv2DTranspose(256, (3, 3), strides=(2, 2), padding="same")(x4)
    x = (BatchNormalization())(x)
    x = Dropout(drop_rate)(x)
    x = Activation('relu')(x)

    x = Conv2DTranspose(128, (3, 3), strides=(2, 2), padding="same")(x)
    x = (BatchNormalization())(x)
    x = Dropout(drop_rate)(x)
    x = Activation('relu')(x)

    x = Conv2DTranspose(64, (3, 3), strides=(2, 2), padding="same")(x)
    x = (BatchNormalization())(x)
    x = Dropout(drop_rate)(x)
    x = Activation('relu')(x)

    x = Conv2DTranspose(32, (3, 3), strides=(2, 2), padding="same")(x)
    x = (BatchNormalization())(x)
    x = Dropout(drop_rate)(x)
    x = Activation('relu')(x)

    x = Conv2DTranspose(16, (3, 3), strides=(2, 2), padding="same")(x)
    x = (BatchNormalization())(x)
    x = Dropout(drop_rate)(x)
    x = Activation('relu')(x)

    x = Conv2D(n_classes , (3, 3) , padding='same', data_format=IMAGE_ORDERING )(x)
    x = Conv2D(1, 1, activation = 'sigmoid')(x)

    model = Model(inpt, x)

    model.summary()
    
    model.compile(optimizer = Adam(lr = 1e-3), loss = 'binary_crossentropy', metrics = ['accuracy'])

    if(pretrained_weights):
        model.load_weights(pretrained_weights)

    return model

def scheduler(epoch):
    # decay the learning rate by a factor of 0.95 after every epoch
    if epoch % 1 == 0 and epoch != 0:
        lr = K.get_value(model.optimizer.lr)
        K.set_value(model.optimizer.lr, lr * 0.95)
        print("lr changed to {}".format(lr * 0.95))
    return K.get_value(model.optimizer.lr)

def scheduler2(epoch):
    # halve the learning rate every 50 epochs
    if epoch % 50 == 0 and epoch != 0:
        lr = K.get_value(model.optimizer.lr)
        K.set_value(model.optimizer.lr, lr * 0.5)
        print("lr changed to {}".format(lr * 0.5))
    return K.get_value(model.optimizer.lr)

if __name__ == '__main__':
    batchsize = 4
    train_path = './build/train'
    val_path = './build/val'
    train_num = len(os.listdir(train_path+'/images/'))
    val_num = len(os.listdir(val_path+'/images/'))
    data_gen_args = dict(
                    # rotation_range=0.2,
                    # width_shift_range=0.05,
                    # height_shift_range=0.05,
                    # shear_range=0.05,
                    # zoom_range=0.1,
                    horizontal_flip=True,
                    vertical_flip=True,
                    fill_mode='nearest')
    # data_gen_args = dict()

    train_Gene = trainGenerator(batchsize,train_path,'images','labels',data_gen_args,save_to_dir = None)
    val_Gene = trainGenerator(batchsize,val_path,'images','labels',data_gen_args,save_to_dir = None)


    model = dense_seg(input_size = (256,256,3), pretrained_weights = None)

    reduce_lr = LearningRateScheduler(scheduler)
    model_checkpoint = ModelCheckpoint(os.path.join('./logs/', 'build_rs5.h5'), monitor='accuracy',verbose=1, save_best_only=True)
    tensorboard = TensorBoard(log_dir='./logs', histogram_freq=0,write_graph=True, write_images=False)
    history = model.fit_generator(train_Gene,
    validation_data=val_Gene,
    validation_steps=int(val_num/batchsize),
    steps_per_epoch=int(train_num/batchsize),
    epochs=100,
    shuffle=True,
    callbacks=[reduce_lr, model_checkpoint, tensorboard])
    # callbacks=[model_checkpoint, tensorboard])
    with open('log5.txt','wb') as file_txt:
        pickle.dump(history.history,file_txt)


    # path = './build/test/images/'
    # save_path = './re/'
    # names = os.listdir(path)
    # model = load_model('./logs/build_rs5.h5')
    # testGene = testGenerator(path)
    # results = model.predict_generator(testGene,360,verbose=1)
    # saveResult(save_path,results,names)
	
acc:  0.9294767167833117
acc_cls:  0.8555872543966359
iou:  [0.91820562 0.66146861]
miou:  0.7898371140804237
fwavacc:  0.8702436829466755
class_accuracy:  0.6986921915315005
class_recall:  0.6386589756330122
accuracy:  0.8301194862083152
f1_score:  0.667328148779939

6. Training and testing code: DenseNet6.py

import numpy as np
import keras
from keras.engine import Layer
import keras.backend as K
from keras.models import Model, save_model, load_model
from keras.layers import Input, Dense, Dropout, BatchNormalization, LeakyReLU, concatenate
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D
from keras.models import *
from keras.layers import *
from data import *
from keras.models import load_model
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, TensorBoard, LearningRateScheduler
import pickle

def DenseLayer(x, nb_filter, bn_size=4, alpha=0.0, drop_rate=0.2):
    
    # Bottleneck layers
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=alpha)(x)
    x = Conv2D(bn_size*nb_filter, (1, 1), strides=(1,1), padding='same')(x)
    
    # Composite function
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=alpha)(x)
    x = Conv2D(nb_filter, (3, 3), strides=(1,1), padding='same')(x)
    
    if drop_rate: x = Dropout(drop_rate)(x)
    
    return x


def DenseBlock(x, nb_layers, growth_rate, drop_rate=0.2):
    
    for ii in range(nb_layers):
        conv = DenseLayer(x, nb_filter=growth_rate, drop_rate=drop_rate)
        x = concatenate([x, conv], axis=3)
        
    return x

def TransitionLayer(x, compression=0.5, alpha=0.0, is_max=1):
    
    nb_filter = int(x.shape.as_list()[-1]*compression)
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=alpha)(x)
    x = Conv2D(nb_filter, (1, 1), strides=(1,1), padding='same')(x)
    if is_max != 0: x = MaxPooling2D(pool_size=(2, 2), strides=2)(x)
    else: x = AveragePooling2D(pool_size=(2, 2), strides=2)(x)
    
    return x

#=================================================== residual_block =================================================
def BatchActivate(x):
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return x
def convolution_block(x, filters, size, strides=(1,1), padding='same', activation=True):
    x = Conv2D(filters, size, strides=strides, padding=padding)(x)
    if activation == True:
        x = BatchActivate(x)
    return x
# residual_block
def residual_block(blockInput, num_filters=16, batch_activate = False):
    x = BatchActivate(blockInput)
    x = convolution_block(x, num_filters, (3,3) )
    x = convolution_block(x, num_filters, (3,3), activation=False)
    x = Add()([x, blockInput])
    if batch_activate:
        x = BatchActivate(x)
    return x
#=================================================== residual_block =================================================

IMAGE_ORDERING = 'channels_last'
def dense_seg(n_classes=1, input_size = (256,256,1), pretrained_weights = None):
    growth_rate = 12
    drop_rate = 0.5
    inpt = Input(input_size)
    x = Conv2D(growth_rate*2, (3, 3), strides=1, padding='same')(inpt)
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(256, (1, 1), strides=(1,1), padding='same')(x)
    # print('x0: ',x.shape)
    
    x1 = DenseBlock(x, 12, growth_rate, drop_rate=drop_rate)
    x1 = TransitionLayer(x1)
    x1 = Conv2D(128, (1, 1), strides=(1,1), padding='same')(x1)
    # print('x1: ',x1.shape)
    
    x2 = DenseBlock(x1, 12, growth_rate, drop_rate=drop_rate)
    x2 = TransitionLayer(x2)
    x2 = Conv2D(64, (1, 1), strides=(1,1), padding='same')(x2)
    # print('x2: ',x2.shape)
    
    x3 = DenseBlock(x2, 12, growth_rate, drop_rate=drop_rate)
    x3 = TransitionLayer(x3)
    x3 = Conv2D(32, (1, 1), strides=(1,1), padding='same')(x3)
    # print('x3: ',x3.shape)

    xu1 = (UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(x3)
    xu1 = (ZeroPadding2D( (1,1), data_format=IMAGE_ORDERING))(xu1)
    xu1 = (Conv2D( 256, (3, 3), padding='valid', data_format=IMAGE_ORDERING))(xu1)
    xu1 = (BatchNormalization())(xu1)
    xu1 = concatenate([xu1,x2], axis=-1)
    xu1 = Conv2D(256, (1, 1), strides=(1,1), padding='same')(xu1)
    xu1 = Dropout(drop_rate)(xu1)
    # print('xu1: ',xu1.shape)

    xu2 = (UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(xu1)
    xu2 = (ZeroPadding2D( (1,1), data_format=IMAGE_ORDERING))(xu2)
    xu2 = (Conv2D( 128, (3, 3), padding='valid', data_format=IMAGE_ORDERING))(xu2)
    xu2 = (BatchNormalization())(xu2)
    xu2 = concatenate([xu2,x1], axis=-1)
    xu2 = Conv2D(128, (1, 1), strides=(1,1), padding='same')(xu2)
    xu2 = Dropout(drop_rate)(xu2)
    # print('xu2: ',xu2.shape)

    xu3 = (UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(xu2)
    xu3 = (ZeroPadding2D( (1,1), data_format=IMAGE_ORDERING))(xu3)
    xu3 = (Conv2D( 64, (3, 3), padding='valid', data_format=IMAGE_ORDERING))(xu3)
    xu3 = (BatchNormalization())(xu3)
    xu3 = concatenate([xu3,x], axis=-1)
    xu3 = Conv2D(64, (1, 1), strides=(1,1), padding='same')(xu3)
    xu3 = Dropout(drop_rate)(xu3)
    # print('xu3: ',xu3.shape)
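    # U-Net-style skip connections: each upsampling stage above concatenates the
    # matching encoder feature map (x2, x1, then the stem output x) before a
    # 1x1 conv squeezes the channels back down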

    x = Conv2D(n_classes , (3, 3) , padding='same', data_format=IMAGE_ORDERING )(xu3)
    x = Conv2D(1, 1, activation = 'sigmoid')(x)

    model = Model(inpt, x)

    model.compile(optimizer = Adam(lr = 1e-3), loss = 'binary_crossentropy', metrics = ['accuracy'])
    # model.compile(optimizer = Adam(lr = 1e-3), loss = dice_coef_loss, metrics = ['accuracy'])

    model.summary()

    if(pretrained_weights):
        model.load_weights(pretrained_weights)

    return model

def scheduler(epoch):
    # decay the learning rate by a factor of 0.95 after every epoch
    if epoch % 1 == 0 and epoch != 0:
        lr = K.get_value(model.optimizer.lr)
        K.set_value(model.optimizer.lr, lr * 0.95)
        print("lr changed to {}".format(lr * 0.95))
    return K.get_value(model.optimizer.lr)

def scheduler2(epoch):
    # halve the learning rate every 50 epochs
    if epoch % 50 == 0 and epoch != 0:
        lr = K.get_value(model.optimizer.lr)
        K.set_value(model.optimizer.lr, lr * 0.5)
        print("lr changed to {}".format(lr * 0.5))
    return K.get_value(model.optimizer.lr)

if __name__ == '__main__':
    batchsize = 1
    train_path = './build/train'
    val_path = './build/val'
    train_num = len(os.listdir(train_path+'/images/'))
    val_num = len(os.listdir(val_path+'/images/'))
    data_gen_args = dict(
                    # rotation_range=0.2,
                    # width_shift_range=0.05,
                    # height_shift_range=0.05,
                    # shear_range=0.05,
                    # zoom_range=0.1,
                    horizontal_flip=True,
                    vertical_flip=True,
                    fill_mode='nearest')
    # data_gen_args = dict()

    train_Gene = trainGenerator(batchsize,train_path,'images','labels',data_gen_args,save_to_dir = None)
    val_Gene = trainGenerator(batchsize,val_path,'images','labels',data_gen_args,save_to_dir = None)


    model = dense_seg(input_size = (256,256,3), pretrained_weights = None)

    reduce_lr = LearningRateScheduler(scheduler)
    model_checkpoint = ModelCheckpoint(os.path.join('./logs/', 'build_rs6.h5'), monitor='accuracy',verbose=1, save_best_only=True)
    tensorboard = TensorBoard(log_dir='./logs', histogram_freq=0,write_graph=True, write_images=False)
    history = model.fit_generator(train_Gene,
    validation_data=val_Gene,
    validation_steps=int(val_num/batchsize),
    steps_per_epoch=int(train_num/batchsize),
    epochs=100,
    shuffle=True,
    callbacks=[reduce_lr, model_checkpoint, tensorboard])
    # callbacks=[model_checkpoint, tensorboard])
    with open('log6.txt','wb') as file_txt:
        pickle.dump(history.history,file_txt)


    # path = './build/test/images/'
    # save_path = './re/'
    # names = os.listdir(path)
    # model = load_model('./logs/build_rs6.h5')
    # testGene = testGenerator(path)
    # results = model.predict_generator(testGene,360,verbose=1)
    # saveResult(save_path,results,names)
	

Recall went up with this variant.

acc:  0.9166478051079644
acc_cls:  0.8929823318931407
iou:  [0.90079919 0.65714938]
miou:  0.7789742816432312
fwavacc:  0.8552821167439204
class_accuracy:  0.6636710526703985
class_recall:  0.7313568242734099
accuracy:  0.8405633087895342
f1_score:  0.6958719054511978

7. Training and testing code: DenseNet7.py

import tensorflow as tf
import numpy as np
import keras
from keras.engine import Layer
import keras.backend as K
from keras.models import Model, save_model, load_model
from keras.layers import Input, Dense, Dropout, BatchNormalization, LeakyReLU, concatenate
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D
from keras.models import *
from keras.layers import *
from data import *
from keras.models import load_model
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, TensorBoard, LearningRateScheduler

def DenseLayer(x, nb_filter, bn_size=4, alpha=0.0, drop_rate=0.2):
    
    # Bottleneck layers
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=alpha)(x)
    x = Conv2D(bn_size*nb_filter, (1, 1), strides=(1,1), padding='same')(x)
    
    # Composite function
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=alpha)(x)
    x = Conv2D(nb_filter, (3, 3), strides=(1,1), padding='same')(x)
    
    if drop_rate: x = Dropout(drop_rate)(x)
    
    return x


def DenseBlock(x, nb_layers, growth_rate, drop_rate=0.2):
    
    for ii in range(nb_layers):
        conv = DenseLayer(x, nb_filter=growth_rate, drop_rate=drop_rate)
        x = concatenate([x, conv], axis=3)
        
    return x

def TransitionLayer(x, compression=0.5, alpha=0.0, is_max=1):
    
    nb_filter = int(x.shape.as_list()[-1]*compression)
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=alpha)(x)
    x = Conv2D(nb_filter, (1, 1), strides=(1,1), padding='same')(x)
    if is_max != 0: x = MaxPooling2D(pool_size=(2, 2), strides=2)(x)
    else: x = AveragePooling2D(pool_size=(2, 2), strides=2)(x)
    
    return x

IMAGE_ORDERING = 'channels_last'
def dense_seg(n_classes=1, input_size = (256,256,1), pretrained_weights = None):
    growth_rate = 12
    drop_rate = 0.3
    inpt = Input(input_size)
    x = Conv2D(growth_rate*2, (3, 3), strides=1, padding='same')(inpt)
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DenseBlock(x, 12, growth_rate, drop_rate=drop_rate)
    x = TransitionLayer(x)
    x = DenseBlock(x, 12, growth_rate, drop_rate=drop_rate)
    x = TransitionLayer(x)
    x = DenseBlock(x, 12, growth_rate, drop_rate=drop_rate)
    x = TransitionLayer(x)

    x1 = Conv2D(512, (3,3), padding='same', data_format=IMAGE_ORDERING, dilation_rate=(2,2))(x)
    x2 = Conv2D(512, (3,3), padding='same', data_format=IMAGE_ORDERING, dilation_rate=(3,3))(x)
    x3 = Conv2D(512, (3,3), padding='same', data_format=IMAGE_ORDERING, dilation_rate=(4,4))(x)
    x = concatenate([x1,x2,x3], axis=-1)
    x = Conv2D(512, (1, 1), strides=(1,1), padding='same')(x)
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=0.1)(x)
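    # multi-scale context: three parallel 3x3 convs with dilation rates 2/3/4
    # cover different receptive fields over the same feature map, and a 1x1 conv
    # fuses their concatenation (an ASPP-like design)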

    
    x = (UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(x)
    x = (ZeroPadding2D( (1,1), data_format=IMAGE_ORDERING))(x)
    x = (Conv2D( 256, (3, 3), padding='valid', data_format=IMAGE_ORDERING))(x)
    x = (BatchNormalization())(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Dropout(drop_rate)(x)

    x = (UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(x)
    x = (ZeroPadding2D( (1,1), data_format=IMAGE_ORDERING))(x)
    x = (Conv2D( 128, (3, 3), padding='valid', data_format=IMAGE_ORDERING))(x)
    x = (BatchNormalization())(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Dropout(drop_rate)(x)

    x = (UpSampling2D( (2,2), data_format=IMAGE_ORDERING))(x)
    x = (ZeroPadding2D( (1,1), data_format=IMAGE_ORDERING))(x)
    x = (Conv2D( 64, (3, 3), padding='valid', data_format=IMAGE_ORDERING))(x)
    x = (BatchNormalization())(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Dropout(drop_rate)(x)

    x = Conv2D(n_classes , (3, 3) , padding='same', data_format=IMAGE_ORDERING )(x)
    x = Conv2D(1, 1, activation = 'sigmoid')(x)

    model = Model(inpt, x)

    model.compile(optimizer = Adam(lr = 1e-3), loss = 'binary_crossentropy', metrics = ['accuracy'])
    # model.compile(optimizer = Adam(lr = 1e-3), loss = dice_coef_loss, metrics = ['accuracy'])

    model.summary()

    if(pretrained_weights):
        model.load_weights(pretrained_weights)

    return model

def scheduler(epoch):
    # decay the learning rate by a factor of 0.95 after every epoch
    if epoch % 1 == 0 and epoch != 0:
        lr = K.get_value(model.optimizer.lr)
        K.set_value(model.optimizer.lr, lr * 0.95)
        print("lr changed to {}".format(lr * 0.95))
    return K.get_value(model.optimizer.lr)

def focal_loss(gamma=2., alpha=.25):
    # binary focal loss; requires `import tensorflow as tf` at the top of the file
    def focal_loss_fixed(y_true, y_pred):
        pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))   # p where y=1, else 1
        pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))  # p where y=0, else 0
        # note: consider clipping y_pred with K.epsilon() to avoid log(0)
        return -K.mean(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1)) - K.mean((1 - alpha) * K.pow(pt_0, gamma) * K.log(1. - pt_0))
    return focal_loss_fixed
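
# For reference, the closure returned by focal_loss can be passed straight to
# compile() in place of binary_crossentropy (hypothetical usage, not in the
# original script):
#   model.compile(optimizer = Adam(lr = 1e-3), loss = focal_loss(gamma=2., alpha=.25), metrics = ['accuracy'])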

def dice_coef(y_true, y_pred, smooth=1):
    # soft Dice coefficient, computed per sample and averaged over the batch;
    # `smooth` avoids division by zero on empty masks
    intersection = K.sum(y_true * y_pred, axis=[1,2,3])
    union = K.sum(y_true, axis=[1,2,3]) + K.sum(y_pred, axis=[1,2,3])
    return K.mean( (2. * intersection + smooth) / (union + smooth), axis=0)

def dice_coef_loss(y_true, y_pred):
    return 1 - dice_coef(y_true, y_pred, smooth=1)


if __name__ == '__main__':
    data_gen_args = dict(
                    # rotation_range=0.2,
                    # width_shift_range=0.05,
                    # height_shift_range=0.05,
                    # shear_range=0.05,
                    # zoom_range=0.1,
                    horizontal_flip=True,
                    vertical_flip=True,
                    fill_mode='nearest')
    # data_gen_args = dict()

    train_Gene = trainGenerator(3,'./build/train','images','labels',data_gen_args,save_to_dir = None)
    val_Gene = trainGenerator(3,'./build/val','images','labels',data_gen_args,save_to_dir = None)


    model = dense_seg(input_size = (256,256,3), pretrained_weights = None)

    reduce_lr = LearningRateScheduler(scheduler)
    model_checkpoint = ModelCheckpoint(os.path.join('./logs/', 'build_rs7.h5'), monitor='accuracy',verbose=1, save_best_only=True)
    tensorboard = TensorBoard(log_dir='./logs', histogram_freq=0,write_graph=True, write_images=False)
    model.fit_generator(train_Gene,
                        validation_data=val_Gene,
                        validation_steps=48,
                        steps_per_epoch=300,
                        epochs=100,
                        shuffle=True,  # note: shuffle has no effect for a plain generator input
                        callbacks=[reduce_lr, model_checkpoint, tensorboard])
    # callbacks=[model_checkpoint, tensorboard])


    # path = './build/test/images/'
    # save_path = './re/'
    # names = os.listdir(path)
    # # model = load_model('./logs/build_rs7.h5',custom_objects={'dice_coef_loss':dice_coef_loss})
    # model = load_model('./logs/build_rs7.h5')
    # testGene = testGenerator(path)
    # results = model.predict_generator(testGene,360,verbose=1)
    # saveResult(save_path,results,names)
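
The commented-out inference block above calls saveResult from data.py, which is not reproduced in this post. A minimal sketch of what it might look like for binary masks, assuming results has shape (N, H, W, 1) and names holds the original filenames (this is an assumption, not the author's exact helper):

import os
import skimage.io as io
from skimage import img_as_ubyte

def saveResult(save_path, results, names, threshold=0.5):
    # binarize each predicted probability map and save it under its original filename
    for img, name in zip(results, names):
        mask = img[:, :, 0] > threshold
        io.imsave(os.path.join(save_path, name), img_as_ubyte(mask))
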
Evaluation results:
acc:  0.9278492397732205
acc_cls:  0.8774149621308626
iou:  [0.91522934 0.67355827]
miou:  0.7943938026578184
fwavacc:  0.8700819236810907
class_accuracy:  0.6912663769660737
class_recall:  0.6827610913136113
accuracy:  0.8393043175374926
f1_score:  0.6869874101085913
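
These numbers come from a separate evaluation script that is not included in the post. For reference, acc, acc_cls, the per-class iou, miou and fwavacc can all be derived from a confusion matrix with the standard semantic-segmentation formulas (a sketch of the usual definitions, not the author's exact script):

import numpy as np

def segmentation_metrics(hist):
    # hist: (num_class, num_class) confusion matrix with hist[gt, pred] pixel counts
    acc = np.diag(hist).sum() / hist.sum()                    # overall pixel accuracy
    acc_cls = np.nanmean(np.diag(hist) / hist.sum(axis=1))    # mean per-class accuracy
    iou = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
    miou = np.nanmean(iou)                                    # mean IoU over classes
    freq = hist.sum(axis=1) / hist.sum()                      # ground-truth class frequencies
    fwavacc = (freq[freq > 0] * iou[freq > 0]).sum()          # frequency-weighted IoU
    return acc, acc_cls, iou, miou, fwavacc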

8. Training and testing code: DenseNet8.py

import numpy as np
import keras
from keras.engine import Layer
import keras.backend as K
from keras.models import Model, save_model, load_model
from keras.layers import Input, Dense, Dropout, BatchNormalization, LeakyReLU, concatenate
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D
from keras.models import *
from keras.layers import *
from data import *
from keras.models import load_model
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, TensorBoard, LearningRateScheduler
import pickle

def DenseLayer(x, nb_filter, bn_size=4, alpha=0.0, drop_rate=0.2):
    
    # Bottleneck layers
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=alpha)(x)
    x = Conv2D(bn_size*nb_filter, (1, 1), strides=(1,1), padding='same')(x)
    
    # Composite function
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=alpha)(x)
    x = Conv2D(nb_filter, (3, 3), strides=(1,1), padding='same')(x)
    
    if drop_rate: x = Dropout(drop_rate)(x)
    
    return x


def DenseBlock(x, nb_layers, growth_rate, drop_rate=0.2):
    
    for ii in range(nb_layers):
        conv = DenseLayer(x, nb_filter=growth_rate, drop_rate=drop_rate)
        x = concatenate([x, conv], axis=3)
        
    return x

def TransitionLayer(x, compression=0.5, alpha=0.0, is_max=1):
    
    nb_filter = int(x.shape.as_list()[-1]*compression)
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=alpha)(x)
    x = Conv2D(nb_filter, (1, 1), strides=(1,1), padding='same')(x)
    if is_max != 0: x = MaxPooling2D(pool_size=(2, 2), strides=2)(x)
    else: x = AveragePooling2D(pool_size=(2, 2), strides=2)(x)
    
    return x

#=================================================== residual_block =================================================
def BatchActivate(x):
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return x
def convolution_block(x, filters, size, strides=(1,1), padding='same', activation=True):
    x = Conv2D(filters, size, strides=strides, padding=padding)(x)
    if activation == True:
        x = BatchActivate(x)
    return x
# residual_block
def residual_block(blockInput, num_filters=16, batch_activate = False):
    x = BatchActivate(blockInput)
    x = convolution_block(x, num_filters, (3,3) )
    x = convolution_block(x, num_filters, (3,3), activation=False)
    x = Add()([x, blockInput])
    if batch_activate:
        x = BatchActivate(x)
    return x
#=================================================== residual_block =================================================
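
# residual_block and convolution_block are defined above but never called in
# dense_seg below. Add() requires the input and output channel counts to match,
# so a hypothetical way to splice one in (illustrative, not from the original)
# would first project the channels:
#   x = Conv2D(64, (1, 1), padding='same')(x)
#   x = residual_block(x, num_filters=64, batch_activate=True)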

IMAGE_ORDERING = 'channels_last'
def dense_seg(n_classes=1, input_size = (256,256,1), pretrained_weights = None):
    growth_rate = 12
    drop_rate = 0.2
    inpt = Input(input_size)
    x = Conv2D(growth_rate*2, (3, 3), strides=1, padding='same')(inpt)
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=0.1)(x)

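    # two parallel encoders: a DenseBlock path (x1_1, x2_1, x3_1) and a dilated-conv
    # path (x1_2, x2_2, x3_2); both downsample by 2 per stage, and the 1x1 convs
    # (84, 114, 129 filters) mirror the dense path's channel counts before the
    # two paths are fused at x3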
    x1_1 = DenseBlock(x, 12, growth_rate, drop_rate=drop_rate)
    x1_1 = TransitionLayer(x1_1)

    x1_2 = Conv2D(512, (3,3), padding='same', data_format=IMAGE_ORDERING, dilation_rate=(2,2))(x)
    x1_2 = BatchNormalization(axis=3)(x1_2)
    x1_2 = LeakyReLU(alpha=0.1)(x1_2)
    x1_2 = MaxPooling2D(pool_size=(2, 2), strides=2)(x1_2)
    x1_2 = Conv2D(84, (1, 1), strides=(1,1), padding='same')(x1_2)
    x1_2 = BatchNormalization(axis=3)(x1_2)
    x1_2 = LeakyReLU(alpha=0.1)(x1_2)
    
    x2_1 = DenseBlock(x1_1, 12, growth_rate, drop_rate=drop_rate)
    x2_1 = TransitionLayer(x2_1)

    x2_2 = Conv2D(256, (3,3), padding='same', data_format=IMAGE_ORDERING, dilation_rate=(2,2))(x1_2)
    x2_2 = BatchNormalization(axis=3)(x2_2)
    x2_2 = LeakyReLU(alpha=0.1)(x2_2)
    x2_2 = MaxPooling2D(pool_size=(2, 2), strides=2)(x2_2)
    x2_2 = Conv2D(114, (1, 1), strides=(1,1), padding='same')(x2_2)
    x2_2 = BatchNormalization(axis=3)(x2_2)
    x2_2 = LeakyReLU(alpha=0.1)(x2_2)

    x3_1 = DenseBlock(x2_1, 12, growth_rate, drop_rate=drop_rate)
    x3_1 = TransitionLayer(x3_1)

    x3_2 = Conv2D(128, (3,3), padding='same', data_format=IMAGE_ORDERING, dilation_rate=(2,2))(x2_2)
    x3_2 = BatchNormalization(axis=3)(x3_2)
    x3_2 = LeakyReLU(alpha=0.1)(x3_2)
    x3_2 = MaxPooling2D(pool_size=(2, 2), strides=2)(x3_2)
    x3_2 = Conv2D(129, (1, 1), strides=(1,1), padding='same')(x3_2)
    x3_2 = BatchNormalization(axis=3)(x3_2)
    x3_2 = LeakyReLU(alpha=0.1)(x3_2)

    x3 = concatenate([x3_1, x3_2], axis=-1)

    # decoder: three stages of learned upsampling with strided transposed convolutions
    x = Conv2DTranspose(256, (3, 3), strides=(2, 2), padding="same")(x3)
    x = BatchNormalization()(x)
    x = Dropout(drop_rate)(x)
    x = Activation('relu')(x)

    x = Conv2DTranspose(128, (3, 3), strides=(2, 2), padding="same")(x)
    x = BatchNormalization()(x)
    x = Dropout(drop_rate)(x)
    x = Activation('relu')(x)

    x = Conv2DTranspose(64, (3, 3), strides=(2, 2), padding="same")(x)
    x = BatchNormalization()(x)
    x = Dropout(drop_rate)(x)
    x = Activation('relu')(x)

    x = Conv2D(n_classes, (3, 3), padding='same', data_format=IMAGE_ORDERING)(x)
    x = Conv2D(1, (1, 1), activation = 'sigmoid')(x)

    model = Model(inpt, x)

    model.summary()
    
    model.compile(optimizer = Adam(lr = 1e-3), loss = 'binary_crossentropy', metrics = ['accuracy'])

    if(pretrained_weights):
        model.load_weights(pretrained_weights)

    return model

def scheduler(epoch):
    # decay the learning rate by 5% after every epoch; relies on the global `model`
    # created in __main__
    if epoch != 0:
        lr = K.get_value(model.optimizer.lr)
        K.set_value(model.optimizer.lr, lr * 0.95)
        print("lr changed to {}".format(lr * 0.95))
    return K.get_value(model.optimizer.lr)

def scheduler2(epoch):
    # halve the learning rate every 50 epochs (alternative schedule, unused here)
    if epoch % 50 == 0 and epoch != 0:
        lr = K.get_value(model.optimizer.lr)
        K.set_value(model.optimizer.lr, lr * 0.5)
        print("lr changed to {}".format(lr * 0.5))
    return K.get_value(model.optimizer.lr)

if __name__ == '__main__':
    batchsize = 6
    train_path = './build/train'
    val_path = './build/val'
    train_num = len(os.listdir(train_path+'/images/'))
    val_num = len(os.listdir(val_path+'/images/'))
    data_gen_args = dict(
                    # rotation_range=0.2,
                    # width_shift_range=0.05,
                    # height_shift_range=0.05,
                    # shear_range=0.05,
                    # zoom_range=0.1,
                    horizontal_flip=True,
                    vertical_flip=True,
                    fill_mode='nearest')
    # data_gen_args = dict()

    train_Gene = trainGenerator(batchsize,train_path,'images','labels',data_gen_args,save_to_dir = None)
    val_Gene = trainGenerator(batchsize,val_path,'images','labels',data_gen_args,save_to_dir = None)


    model = dense_seg(input_size = (256,256,3), pretrained_weights = None)

    reduce_lr = LearningRateScheduler(scheduler)
    model_checkpoint = ModelCheckpoint(os.path.join('./logs/', 'build_rs8.h5'), monitor='accuracy',verbose=1, save_best_only=True)
    tensorboard = TensorBoard(log_dir='./logs', histogram_freq=0,write_graph=True, write_images=False)
    history = model.fit_generator(train_Gene,
                                  validation_data=val_Gene,
                                  validation_steps=int(val_num/batchsize),
                                  steps_per_epoch=int(train_num/batchsize),
                                  epochs=100,
                                  shuffle=True,
                                  callbacks=[reduce_lr, model_checkpoint, tensorboard])
    # callbacks=[model_checkpoint, tensorboard])
    with open('log5.txt','wb') as file_txt:
        pickle.dump(history.history,file_txt)


    # path = './build/test/images/'
    # save_path = './re/'
    # names = os.listdir(path)
    # model = load_model('./logs/build_rs8.h5')
    # testGene = testGenerator(path)
    # results = model.predict_generator(testGene,360,verbose=1)
    # saveResult(save_path,results,names)

Evaluation results:
acc:  0.937930679321289
acc_cls:  0.9001486436661168
iou:  [0.92638038 0.71652779]
miou:  0.8214540879946295
fwavacc:  0.887177090545547
class_accuracy:  0.71579127662083
class_recall:  0.7213452663372042
accuracy:  0.8590701559315557
f1_score:  0.7185575394431779

9. Training and testing code: DenseNet9.py

import numpy as np
import keras
from keras.engine import Layer
import keras.backend as K
from keras.models import Model, save_model, load_model
from keras.layers import Input, Dense, Dropout, BatchNormalization, LeakyReLU, concatenate
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D
from keras.models import *
from keras.layers import *
from data import *
from keras.models import load_model
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, TensorBoard, LearningRateScheduler
import pickle

def DenseLayer(x, nb_filter, bn_size=4, alpha=0.0, drop_rate=0.2):
    
    # Bottleneck layers
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=alpha)(x)
    x = Conv2D(bn_size*nb_filter, (1, 1), strides=(1,1), padding='same')(x)
    
    # Composite function
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=alpha)(x)
    x = Conv2D(nb_filter, (3, 3), strides=(1,1), padding='same')(x)
    
    if drop_rate: x = Dropout(drop_rate)(x)
    
    return x


def DenseBlock(x, nb_layers, growth_rate, drop_rate=0.2):
    
    for ii in range(nb_layers):
        conv = DenseLayer(x, nb_filter=growth_rate, drop_rate=drop_rate)
        x = concatenate([x, conv], axis=3)
        
    return x

def TransitionLayer(x, compression=0.5, alpha=0.0, is_max=1):
    
    nb_filter = int(x.shape.as_list()[-1]*compression)
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=alpha)(x)
    x = Conv2D(nb_filter, (1, 1), strides=(1,1), padding='same')(x)
    if is_max != 0: x = MaxPooling2D(pool_size=(2, 2), strides=2)(x)
    else: x = AveragePooling2D(pool_size=(2, 2), strides=2)(x)
    
    return x

#=================================================== residual_block =================================================
def BatchActivate(x):
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return x
def convolution_block(x, filters, size, strides=(1,1), padding='same', activation=True):
    x = Conv2D(filters, size, strides=strides, padding=padding)(x)
    if activation == True:
        x = BatchActivate(x)
    return x
# residual_block
def residual_block(blockInput, num_filters=16, batch_activate = False):
    x = BatchActivate(blockInput)
    x = convolution_block(x, num_filters, (3,3) )
    x = convolution_block(x, num_filters, (3,3), activation=False)
    x = Add()([x, blockInput])
    if batch_activate:
        x = BatchActivate(x)
    return x
#=================================================== residual_block =================================================

IMAGE_ORDERING = 'channels_last'
def dense_seg(n_classes=1, input_size = (256,256,1), pretrained_weights = None):
    growth_rate = 12
    drop_rate = 0.2
    inpt = Input(input_size)
    x = Conv2D(growth_rate*2, (3, 3), strides=1, padding='same')(inpt)
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=0.1)(x)

    x1_1 = DenseBlock(x, 12, growth_rate, drop_rate=drop_rate)
    x1_1 = TransitionLayer(x1_1)

    x1_2 = Conv2D(512, (3,3), padding='same', data_format=IMAGE_ORDERING, dilation_rate=(2,2))(x)
    x1_2 = BatchNormalization(axis=3)(x1_2)
    x1_2 = LeakyReLU(alpha=0.1)(x1_2)
    x1_2 = MaxPooling2D(pool_size=(2, 2), strides=2)(x1_2)
    x1_2 = Conv2D(84, (1, 1), strides=(1,1), padding='same')(x1_2)
    x1_2 = BatchNormalization(axis=3)(x1_2)
    x1_2 = LeakyReLU(alpha=0.1)(x1_2)
    
    x2_1 = DenseBlock(x1_1, 12, growth_rate, drop_rate=drop_rate)
    x2_1 = TransitionLayer(x2_1)

    x2_2 = Conv2D(256, (3,3), padding='same', data_format=IMAGE_ORDERING, dilation_rate=(2,2))(x1_2)
    x2_2 = BatchNormalization(axis=3)(x2_2)
    x2_2 = LeakyReLU(alpha=0.1)(x2_2)
    x2_2 = MaxPooling2D(pool_size=(2, 2), strides=2)(x2_2)
    x2_2 = Conv2D(114, (1, 1), strides=(1,1), padding='same')(x2_2)
    x2_2 = BatchNormalization(axis=3)(x2_2)
    x2_2 = LeakyReLU(alpha=0.1)(x2_2)

    x3_1 = DenseBlock(x2_1, 12, growth_rate, drop_rate=drop_rate)
    x3_1 = TransitionLayer(x3_1)

    x3_2 = Conv2D(128, (3,3), padding='same', data_format=IMAGE_ORDERING, dilation_rate=(2,2))(x2_2)
    x3_2 = BatchNormalization(axis=3)(x3_2)
    x3_2 = LeakyReLU(alpha=0.1)(x3_2)
    x3_2 = MaxPooling2D(pool_size=(2, 2), strides=2)(x3_2)
    x3_2 = Conv2D(129, (1, 1), strides=(1,1), padding='same')(x3_2)
    x3_2 = BatchNormalization(axis=3)(x3_2)
    x3_2 = LeakyReLU(alpha=0.1)(x3_2)

    x3 = concatenate([x3_1, x3_2], axis=-1)

#======================================================================#
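    # decoder branch A: learned upsampling via strided Conv2DTranspose layers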
    x_a = Conv2DTranspose(256, (3, 3), strides=(2, 2), padding="same")(x3)
    x_a = BatchNormalization()(x_a)
    x_a = Dropout(drop_rate)(x_a)
    x_a = Activation('relu')(x_a)

    x_a = Conv2DTranspose(128, (3, 3), strides=(2, 2), padding="same")(x_a)
    x_a = BatchNormalization()(x_a)
    x_a = Dropout(drop_rate)(x_a)
    x_a = Activation('relu')(x_a)

    x_a = Conv2DTranspose(64, (3, 3), strides=(2, 2), padding="same")(x_a)
    x_a = BatchNormalization()(x_a)
    x_a = Dropout(drop_rate)(x_a)
    x_a = Activation('relu')(x_a)

#=======================================================================#
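    # decoder branch B: fixed (nearest-neighbor) UpSampling2D followed by 3x3 convs;
    # the two branches are concatenated below, before the prediction head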
    x_b = UpSampling2D((2,2), data_format=IMAGE_ORDERING)(x3)
    x_b = ZeroPadding2D((1,1), data_format=IMAGE_ORDERING)(x_b)
    x_b = Conv2D(256, (3, 3), padding='valid', data_format=IMAGE_ORDERING)(x_b)
    x_b = BatchNormalization()(x_b)
    x_b = Dropout(drop_rate)(x_b)

    x_b = UpSampling2D((2,2), data_format=IMAGE_ORDERING)(x_b)
    x_b = ZeroPadding2D((1,1), data_format=IMAGE_ORDERING)(x_b)
    x_b = Conv2D(128, (3, 3), padding='valid', data_format=IMAGE_ORDERING)(x_b)
    x_b = BatchNormalization()(x_b)
    x_b = Dropout(drop_rate)(x_b)

    x_b = UpSampling2D((2,2), data_format=IMAGE_ORDERING)(x_b)
    x_b = ZeroPadding2D((1,1), data_format=IMAGE_ORDERING)(x_b)
    x_b = Conv2D(64, (3, 3), padding='valid', data_format=IMAGE_ORDERING)(x_b)
    x_b = BatchNormalization()(x_b)
    x_b = Dropout(drop_rate)(x_b)

    x = concatenate([x_a, x_b], axis=-1)

    x = Conv2D(n_classes, (3, 3), padding='same', data_format=IMAGE_ORDERING)(x)
    x = Conv2D(1, (1, 1), activation = 'sigmoid')(x)

    model = Model(inpt, x)

    model.summary()
    
    model.compile(optimizer = Adam(lr = 1e-3), loss = 'binary_crossentropy', metrics = ['accuracy'])

    if(pretrained_weights):
        model.load_weights(pretrained_weights)

    return model

def scheduler(epoch):
    # decay the learning rate by 5% after every epoch; relies on the global `model`
    # created in __main__
    if epoch != 0:
        lr = K.get_value(model.optimizer.lr)
        K.set_value(model.optimizer.lr, lr * 0.95)
        print("lr changed to {}".format(lr * 0.95))
    return K.get_value(model.optimizer.lr)

def scheduler2(epoch):
    # halve the learning rate every 50 epochs (alternative schedule, unused here)
    if epoch % 50 == 0 and epoch != 0:
        lr = K.get_value(model.optimizer.lr)
        K.set_value(model.optimizer.lr, lr * 0.5)
        print("lr changed to {}".format(lr * 0.5))
    return K.get_value(model.optimizer.lr)

if __name__ == '__main__':
    batchsize = 2
    train_path = './build/train'
    val_path = './build/val'
    train_num = len(os.listdir(train_path+'/images/'))
    val_num = len(os.listdir(val_path+'/images/'))
    data_gen_args = dict(
                    # rotation_range=0.2,
                    # width_shift_range=0.05,
                    # height_shift_range=0.05,
                    # shear_range=0.05,
                    # zoom_range=0.1,
                    horizontal_flip=True,
                    vertical_flip=True,
                    fill_mode='nearest')
    # data_gen_args = dict()

    train_Gene = trainGenerator(batchsize,train_path,'images','labels',data_gen_args,save_to_dir = None)
    val_Gene = trainGenerator(batchsize,val_path,'images','labels',data_gen_args,save_to_dir = None)

    model = dense_seg(input_size = (256,256,3), pretrained_weights = None)

    reduce_lr = LearningRateScheduler(scheduler)
    model_checkpoint = ModelCheckpoint(os.path.join('./logs/', 'build_rs9.h5'), monitor='accuracy',verbose=1, save_best_only=True)
    tensorboard = TensorBoard(log_dir='./logs', histogram_freq=0,write_graph=True, write_images=False)
    history = model.fit_generator(train_Gene,
                                  validation_data=val_Gene,
                                  validation_steps=int(val_num/batchsize),
                                  steps_per_epoch=int(train_num/batchsize),
                                  epochs=100,
                                  shuffle=True,
                                  callbacks=[reduce_lr, model_checkpoint, tensorboard])
    # callbacks=[model_checkpoint, tensorboard])
    with open('log9.txt','wb') as file_txt:
        pickle.dump(history.history,file_txt)

    # path = './build/test/images/'
    # save_path = './re/'
    # names = os.listdir(path)
    # model = load_model('./logs/build_rs9.h5')
    # testGene = testGenerator(path)
    # results = model.predict_generator(testGene,360,verbose=1)
    # saveResult(save_path,results,names)

Evaluation results:
acc:  0.9380671183268229
acc_cls:  0.879003939340906
iou:  [0.92742855 0.70299867]
miou:  0.8152136113144722
fwavacc:  0.8855020229207873
class_accuracy:  0.7149182730751146
class_recall:  0.6721084856304297
accuracy:  0.8443759685390991
f1_score:  0.6928527295529191

10. Training and testing code: DenseNet10.py

import numpy as np
import keras
from keras.engine import Layer
import keras.backend as K
from keras.models import Model, save_model, load_model
from keras.layers import Input, Dense, Dropout, BatchNormalization, LeakyReLU, concatenate
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D
from keras.models import *
from keras.layers import *
from data import *
from keras.models import load_model
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, TensorBoard, LearningRateScheduler
import pickle

def DenseLayer(x, nb_filter, bn_size=4, alpha=0.0, drop_rate=0.2):
    
    # Bottleneck layers
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=alpha)(x)
    x = Conv2D(bn_size*nb_filter, (1, 1), strides=(1,1), padding='same')(x)
    
    # Composite function
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=alpha)(x)
    x = Conv2D(nb_filter, (3, 3), strides=(1,1), padding='same')(x)
    
    if drop_rate: x = Dropout(drop_rate)(x)
    
    return x


def DenseBlock(x, nb_layers, growth_rate, drop_rate=0.2):
    
    for ii in range(nb_layers):
        conv = DenseLayer(x, nb_filter=growth_rate, drop_rate=drop_rate)
        x = concatenate([x, conv], axis=3)
        
    return x

def TransitionLayer(x, compression=0.5, alpha=0.0, is_max=1):
    
    nb_filter = int(x.shape.as_list()[-1]*compression)
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=alpha)(x)
    x = Conv2D(nb_filter, (1, 1), strides=(1,1), padding='same')(x)
    if is_max != 0: x = MaxPooling2D(pool_size=(2, 2), strides=2)(x)
    else: x = AveragePooling2D(pool_size=(2, 2), strides=2)(x)
    
    return x

#=================================================== residual_block =================================================
def BatchActivate(x):
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return x
def convolution_block(x, filters, size, strides=(1,1), padding='same', activation=True):
    x = Conv2D(filters, size, strides=strides, padding=padding)(x)
    if activation == True:
        x = BatchActivate(x)
    return x
# residual_block
def residual_block(blockInput, num_filters=16, batch_activate = False):
    x = BatchActivate(blockInput)
    x = convolution_block(x, num_filters, (3,3) )
    x = convolution_block(x, num_filters, (3,3), activation=False)
    x = Add()([x, blockInput])
    if batch_activate:
        x = BatchActivate(x)
    return x
#=================================================== residual_block =================================================

IMAGE_ORDERING = 'channels_last'
def dense_seg(n_classes=1, input_size = (256,256,1), pretrained_weights = None):
    growth_rate = 12
    drop_rate = 0.5
    inpt = Input(input_size)
    x = Conv2D(growth_rate*2, (3, 3), strides=1, padding='same')(inpt)
    x = BatchNormalization(axis=3)(x)
    x = LeakyReLU(alpha=0.1)(x)

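    # three parallel encoders: a DenseBlock path (x*_1) and two dilated-conv paths
    # with dilation rates 2 (x*_2) and 3 (x*_3); all three downsample by 2 per
    # stage and are fused at x3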
    x1_1 = DenseBlock(x, 12, growth_rate, drop_rate=drop_rate)
    x1_1 = TransitionLayer(x1_1)

    x1_2 = Conv2D(512, (3,3), padding='same', data_format=IMAGE_ORDERING, dilation_rate=(2,2))(x)
    x1_2 = BatchNormalization(axis=3)(x1_2)
    x1_2 = LeakyReLU(alpha=0.1)(x1_2)
    x1_2 = MaxPooling2D(pool_size=(2, 2), strides=2)(x1_2)
    x1_2 = Conv2D(84, (1, 1), strides=(1,1), padding='same')(x1_2)
    x1_2 = BatchNormalization(axis=3)(x1_2)
    x1_2 = LeakyReLU(alpha=0.1)(x1_2)

    x1_3 = Conv2D(512, (3,3), padding='same', data_format=IMAGE_ORDERING, dilation_rate=(3,3))(x)
    x1_3 = BatchNormalization(axis=3)(x1_3)
    x1_3 = LeakyReLU(alpha=0.1)(x1_3)
    x1_3 = MaxPooling2D(pool_size=(2, 2), strides=2)(x1_3)
    x1_3 = Conv2D(84, (1, 1), strides=(1,1), padding='same')(x1_3)
    x1_3 = BatchNormalization(axis=3)(x1_3)
    x1_3 = LeakyReLU(alpha=0.1)(x1_3)
    
    x2_1 = DenseBlock(x1_1, 12, growth_rate, drop_rate=drop_rate)
    x2_1 = TransitionLayer(x2_1)

    x2_2 = Conv2D(256, (3,3), padding='same', data_format=IMAGE_ORDERING, dilation_rate=(2,2))(x1_2)
    x2_2 = BatchNormalization(axis=3)(x2_2)
    x2_2 = LeakyReLU(alpha=0.1)(x2_2)
    x2_2 = MaxPooling2D(pool_size=(2, 2), strides=2)(x2_2)
    x2_2 = Conv2D(114, (1, 1), strides=(1,1), padding='same')(x2_2)
    x2_2 = BatchNormalization(axis=3)(x2_2)
    x2_2 = LeakyReLU(alpha=0.1)(x2_2)

    x2_3 = Conv2D(256, (3,3), padding='same', data_format=IMAGE_ORDERING, dilation_rate=(3,3))(x1_3)
    x2_3 = BatchNormalization(axis=3)(x2_3)
    x2_3 = LeakyReLU(alpha=0.1)(x2_3)
    x2_3 = MaxPooling2D(pool_size=(2, 2), strides=2)(x2_3)
    x2_3 = Conv2D(114, (1, 1), strides=(1,1), padding='same')(x2_3)
    x2_3 = BatchNormalization(axis=3)(x2_3)
    x2_3 = LeakyReLU(alpha=0.1)(x2_3)

    x3_1 = DenseBlock(x2_1, 12, growth_rate, drop_rate=drop_rate)
    x3_1 = TransitionLayer(x3_1)

    x3_2 = Conv2D(128, (3,3), padding='same', data_format=IMAGE_ORDERING, dilation_rate=(2,2))(x2_2)
    x3_2 = BatchNormalization(axis=3)(x3_2)
    x3_2 = LeakyReLU(alpha=0.1)(x3_2)
    x3_2 = MaxPooling2D(pool_size=(2, 2), strides=2)(x3_2)
    x3_2 = Conv2D(129, (1, 1), strides=(1,1), padding='same')(x3_2)
    x3_2 = BatchNormalization(axis=3)(x3_2)
    x3_2 = LeakyReLU(alpha=0.1)(x3_2)

    x3_3 = Conv2D(128, (3,3), padding='same', data_format=IMAGE_ORDERING, dilation_rate=(3,3))(x2_3)
    x3_3 = BatchNormalization(axis=3)(x3_3)
    x3_3 = LeakyReLU(alpha=0.1)(x3_3)
    x3_3 = MaxPooling2D(pool_size=(2, 2), strides=2)(x3_3)
    x3_3 = Conv2D(129, (1, 1), strides=(1,1), padding='same')(x3_3)
    x3_3 = BatchNormalization(axis=3)(x3_3)
    x3_3 = LeakyReLU(alpha=0.1)(x3_3)

    x3 = concatenate([x3_1, x3_2, x3_3], axis=-1)

    x = Conv2DTranspose(256, (3, 3), strides=(2, 2), padding="same")(x3)
    x = BatchNormalization()(x)
    x = Dropout(drop_rate)(x)
    x = Activation('relu')(x)

    x = Conv2DTranspose(128, (3, 3), strides=(2, 2), padding="same")(x)
    x = BatchNormalization()(x)
    x = Dropout(drop_rate)(x)
    x = Activation('relu')(x)

    x = Conv2DTranspose(64, (3, 3), strides=(2, 2), padding="same")(x)
    x = BatchNormalization()(x)
    x = Dropout(drop_rate)(x)
    x = Activation('relu')(x)

    x = Conv2D(n_classes, (3, 3), padding='same', data_format=IMAGE_ORDERING)(x)
    x = Conv2D(1, (1, 1), activation = 'sigmoid')(x)

    model = Model(inpt, x)

    model.summary()
    
    model.compile(optimizer = Adam(lr = 1e-3), loss = 'binary_crossentropy', metrics = ['accuracy'])

    if(pretrained_weights):
        model.load_weights(pretrained_weights)

    return model

def scheduler(epoch):
    # decay the learning rate by 5% after every epoch; relies on the global `model`
    # created in __main__
    if epoch != 0:
        lr = K.get_value(model.optimizer.lr)
        K.set_value(model.optimizer.lr, lr * 0.95)
        print("lr changed to {}".format(lr * 0.95))
    return K.get_value(model.optimizer.lr)

def scheduler2(epoch):
    # halve the learning rate every 50 epochs (alternative schedule, unused here)
    if epoch % 50 == 0 and epoch != 0:
        lr = K.get_value(model.optimizer.lr)
        K.set_value(model.optimizer.lr, lr * 0.5)
        print("lr changed to {}".format(lr * 0.5))
    return K.get_value(model.optimizer.lr)

if __name__ == '__main__':
    batchsize = 4
    train_path = './build/train'
    val_path = './build/val'
    train_num = len(os.listdir(train_path+'/images/'))
    val_num = len(os.listdir(val_path+'/images/'))
    data_gen_args = dict(
                    # rotation_range=0.2,
                    # width_shift_range=0.05,
                    # height_shift_range=0.05,
                    # shear_range=0.05,
                    # zoom_range=0.1,
                    horizontal_flip=True,
                    vertical_flip=True,
                    fill_mode='nearest')
    # data_gen_args = dict()

    train_Gene = trainGenerator(batchsize,train_path,'images','labels',data_gen_args,save_to_dir = None)
    val_Gene = trainGenerator(batchsize,val_path,'images','labels',data_gen_args,save_to_dir = None)


    model = dense_seg(input_size = (256,256,3), pretrained_weights = None)

    reduce_lr = LearningRateScheduler(scheduler)
    model_checkpoint = ModelCheckpoint(os.path.join('./logs/', 'build_rs10.h5'), monitor='accuracy',verbose=1, save_best_only=True)
    tensorboard = TensorBoard(log_dir='./logs', histogram_freq=0,write_graph=True, write_images=False)
    history = model.fit_generator(train_Gene,
                                  validation_data=val_Gene,
                                  validation_steps=int(val_num/batchsize),
                                  steps_per_epoch=int(train_num/batchsize),
                                  epochs=100,
                                  shuffle=True,
                                  callbacks=[reduce_lr, model_checkpoint, tensorboard])
    # callbacks=[model_checkpoint, tensorboard])
    with open('log10.txt','wb') as file_txt:
        pickle.dump(history.history,file_txt)


    # path = './build/test/images/'
    # save_path = './re/'
    # names = os.listdir(path)
    # model = load_model('./logs/build_rs10_62.h5')
    # testGene = testGenerator(path)
    # results = model.predict_generator(testGene,360,verbose=1)
    # saveResult(save_path,results,names)

Evaluation results:
acc:  0.9184202829996745
acc_cls:  0.8946685491779769
iou:  [0.90287524 0.66238032]
miou:  0.7826277821720478
fwavacc:  0.8579475503821734
class_accuracy:  0.6671062660205629
class_recall:  0.7323654282192458
accuracy:  0.8421045939127604
f1_score:  0.6982142878527884
