First, the main script:
# -*- coding: utf-8 -*-
from model import *
from data import *  # import everything from these two files
from model import ParallelModelCheckpoint
from keras.utils import multi_gpu_model
import tensorflow as tf
#os.environ["CUDA_VISIBLE_DEVICES"] = "0"

gpu_nums = 2
pretrained_weights = 'unet_membrane.hdf5'

# Instantiate the template model on the CPU so its weights live in host memory
with tf.device('/cpu:0'):
    model = unet()
    if pretrained_weights:
        model.load_weights(pretrained_weights)

parallel_model = multi_gpu_model(model, gpus=gpu_nums)
parallel_model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])
#model.summary()
# Save through the template model, not the parallel wrapper (see step 4)
model_checkpoint = ParallelModelCheckpoint(model, 'unet_membrane.hdf5')

data_gen_args = dict(rotation_range=0.2,
                     width_shift_range=0.05,
                     height_shift_range=0.05,
                     shear_range=0.05,
                     zoom_range=0.05,
                     horizontal_flip=True,
                     fill_mode='nearest')  # dictionary of augmentation transforms
# A generator that yields augmented (image, mask) batches indefinitely, with batch_size=2
myGene = trainGenerator(2, 'data/membrane/train', 'image', 'label', data_gen_args, save_to_dir=None)
#model_checkpoint = ModelCheckpoint('unet_membrane.hdf5', monitor='loss', verbose=1, save_best_only=True)
parallel_model.fit_generator(myGene, steps_per_epoch=300, epochs=5, callbacks=[model_checkpoint])

testGene = testGenerator("data/membrane/test")
results = parallel_model.predict_generator(testGene, 30, verbose=1)  # returns a NumPy array of predictions
saveResult("data/membrane/test1", results)  # save the results
Step 1:
Import multi_gpu_model:
from keras.utils import multi_gpu_model
Step 2:
Instantiate the model under a CPU device scope. The official docs recommend writing it this way; some people skip it and things still run, but skipping it can have side effects. The official explanation:
Instantiate the base model (or "template" model). We recommend doing this with under a CPU device scope, so that the model's weights are hosted on CPU memory. Otherwise they may end up hosted on a GPU, which would complicate weight sharing.
with tf.device('/cpu:0'):
    model = unet()
Step 3:
Multi-GPU setup and compilation. multi_gpu_model replicates the template on each GPU and splits every incoming batch across the replicas, so with gpu_nums=2 and batch_size=2 each GPU processes one sample per step:
parallel_model = multi_gpu_model(model, gpus=gpu_nums)
parallel_model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])
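As a quick sanity check (an illustrative sketch, assuming the compiled parallel_model from above is in scope), the wrapper accepts the same input shape as the template model and returns the merged per-GPU predictions as one batch:

import numpy as np
# Dummy batch of two grayscale 256x256 images; each GPU receives one sample
dummy = np.zeros((2, 256, 256, 1), dtype=np.float32)
preds = parallel_model.predict(dummy)
print(preds.shape)   # (2, 256, 256, 1)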
Step 4:
Define a custom checkpoint class (I put it in model.py, shown below). The stock ModelCheckpoint callback can't be used in the multi-GPU case, because the weights have to be saved through the template model, i.e. the original single-GPU instance:

class ParallelModelCheckpoint(ModelCheckpoint):
    def __init__(self, model, filepath, monitor='loss', verbose=0,
                 save_best_only=True, save_weights_only=False,
                 mode='auto', period=1):
        self.single_model = model
        super(ParallelModelCheckpoint, self).__init__(filepath, monitor, verbose,
                                                      save_best_only, save_weights_only,
                                                      mode, period)

    def set_model(self, model):
        # Ignore the parallel model Keras passes in and always track the template
        super(ParallelModelCheckpoint, self).set_model(self.single_model)

After that, just follow the main script.
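Because the checkpoint tracks the template model, the saved file can later be loaded straight into a plain single-GPU model for inference; a minimal sketch, assuming the files from this post:

from model import unet
from data import testGenerator, saveResult

single_model = unet()
single_model.load_weights('unet_membrane.hdf5')  # file written by ParallelModelCheckpoint
testGene = testGenerator("data/membrane/test")
results = single_model.predict_generator(testGene, 30, verbose=1)
saveResult("data/membrane/test1", results)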
Here is model.py:
# -*- coding: utf-8 -*-
import numpy as np
import os
import skimage.io as io
import skimage.transform as trans
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
#from keras import backend as keras

class ParallelModelCheckpoint(ModelCheckpoint):
    def __init__(self, model, filepath, monitor='loss', verbose=0,
                 save_best_only=True, save_weights_only=False,
                 mode='auto', period=1):
        self.single_model = model
        super(ParallelModelCheckpoint, self).__init__(filepath, monitor, verbose,
                                                      save_best_only, save_weights_only,
                                                      mode, period)

    def set_model(self, model):
        # Always save through the template model, not the multi-GPU wrapper
        super(ParallelModelCheckpoint, self).set_model(self.single_model)

def unet(pretrained_weights=None, input_size=(256,256,1)):
    inputs1 = Input(input_size)
    conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs1)
    conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
    conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
    conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
    conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
    conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
    conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
    drop5 = Dropout(0.5)(conv5)
    # Upsample then convolve -- similar in effect to a transposed convolution (see the sketch after this file)
    up6 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2,2))(drop5))
    merge6 = concatenate([drop4, up6], axis=3)
    conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)
    conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)
    up7 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2,2))(conv6))
    merge7 = concatenate([conv3, up7], axis=3)
    conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
    conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
    up8 = Conv2D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2,2))(conv7))
    merge8 = concatenate([conv2, up8], axis=3)
    conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
    conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)
    up9 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2,2))(conv8))
    merge9 = concatenate([conv1, up9], axis=3)
    conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)
    conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
    conv9 = Conv2D(2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
    # The sigmoid here is not redundant: Keras's 'binary_crossentropy' expects
    # probabilities by default (from_logits=False), so the model must produce them itself
    conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)
    model = Model(inputs=inputs1, outputs=conv10)
    return model
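The up6 block above upsamples with UpSampling2D and then convolves, which plays the same role as a learned transposed convolution. A minimal sketch of the alternative using Conv2DTranspose (the decoder_block helper and its name are illustrative, not part of the original code):

from keras.layers import Conv2D, Conv2DTranspose, concatenate

def decoder_block(x, skip, filters):
    # Learned 2x upsampling in a single layer instead of UpSampling2D + Conv2D
    x = Conv2DTranspose(filters, 2, strides=2, padding='same',
                        kernel_initializer='he_normal')(x)
    x = concatenate([skip, x], axis=3)
    x = Conv2D(filters, 3, activation='relu', padding='same',
               kernel_initializer='he_normal')(x)
    x = Conv2D(filters, 3, activation='relu', padding='same',
               kernel_initializer='he_normal')(x)
    return x

With this helper, the up6/merge6/conv6 group would become decoder_block(drop5, drop4, 512), and likewise for the later decoder stages.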
data.py:
# -*- coding: utf-8 -*-
from __future__ import print_function
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
import os
import glob
import skimage.io as io
import skimage.transform as trans
Sky = [128,128,128]
Building = [128,0,0]
Pole = [192,192,128]
Road = [128,64,128]
Pavement = [60,40,222]
Tree = [128,128,0]
SignSymbol = [192,128,128]
Fence = [64,64,128]
Car = [64,0,128]
Pedestrian = [64,64,0]
Bicyclist = [0,128,192]
Unlabelled = [0,0,0]
COLOR_DICT = np.array([Sky, Building, Pole, Road, Pavement,
Tree, SignSymbol, Fence, Car, Pedestrian, Bicyclist, Unlabelled])
def adjustData(img, mask, flag_multi_class, num_class):
    if flag_multi_class:  # this post uses the binary case, so this branch is unused here
        img = img / 255
        # One-line if/else expression: the value for the True branch comes first
        mask = mask[:,:,:,0] if (len(mask.shape) == 4) else mask[:,:,0]
        # np.zeros takes a shape tuple; this appends a one-hot class dimension
        new_mask = np.zeros(mask.shape + (num_class,))
        for i in range(num_class):
            # For each pixel, find its class in the mask and convert it into a one-hot vector
            #index = np.where(mask == i)
            #index_mask = (index[0],index[1],index[2],np.zeros(len(index[0]),dtype = np.int64) + i) if (len(mask.shape) == 4) else (index[0],index[1],np.zeros(len(index[0]),dtype = np.int64) + i)
            #new_mask[index_mask] = 1
            new_mask[mask == i, i] = 1
        new_mask = np.reshape(new_mask,(new_mask.shape[0],new_mask.shape[1]*new_mask.shape[2],new_mask.shape[3])) if flag_multi_class else np.reshape(new_mask,(new_mask.shape[0]*new_mask.shape[1],new_mask.shape[2]))
        mask = new_mask
    elif np.max(img) > 1:
        img = img / 255
        mask = mask / 255
        mask[mask > 0.5] = 1
        mask[mask <= 0.5] = 0
    return (img, mask)
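# A quick illustration of the one-hot step above, with illustrative values:
# >>> mask = np.array([[0, 1], [2, 1]])               # 2x2 label map, 3 classes
# >>> new_mask = np.zeros(mask.shape + (3,))
# >>> for i in range(3): new_mask[mask == i, i] = 1
# >>> new_mask[0, 1]
# array([0., 1., 0.])                                 # pixel (0,1) is class 1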
def trainGenerator(batch_size, train_path, image_folder, mask_folder, aug_dict, image_color_mode="grayscale",
                   mask_color_mode="grayscale", image_save_prefix="image", mask_save_prefix="mask",
                   flag_multi_class=False, num_class=2, save_to_dir=None, target_size=(256,256), seed=1):
    '''
    can generate image and mask at the same time
    use the same seed for image_datagen and mask_datagen to ensure the transformation for image and mask is the same
    if you want to visualize the results of generator, set save_to_dir = "your path"
    '''
    image_datagen = ImageDataGenerator(**aug_dict)
    mask_datagen = ImageDataGenerator(**aug_dict)
    image_generator = image_datagen.flow_from_directory(  # https://blog.csdn.net/nima1994/article/details/80626239
        train_path,                      # root folder of the training data
        classes=[image_folder],          # class subfolder to draw (and augment) images from
        class_mode=None,                 # do not return labels
        color_mode=image_color_mode,     # grayscale, single-channel mode
        target_size=target_size,         # size the images are resized to
        batch_size=batch_size,           # number of images produced (transformed) per batch
        save_to_dir=save_to_dir,         # where to save the augmented images
        save_prefix=image_save_prefix,   # filename prefix, only used when save_to_dir is given
        seed=seed)
    mask_generator = mask_datagen.flow_from_directory(
        train_path,
        classes=[mask_folder],
        class_mode=None,
        color_mode=mask_color_mode,
        target_size=target_size,
        batch_size=batch_size,
        save_to_dir=save_to_dir,
        save_prefix=mask_save_prefix,
        seed=seed)
    train_generator = zip(image_generator, mask_generator)  # combine into a single generator
    for (img, mask) in train_generator:
        # With batch_size=2, img is a batch of two grayscale images, shape (2, 256, 256, 1)
        img, mask = adjustData(img, mask, flag_multi_class, num_class)  # shapes are unchanged
        yield (img, mask)  # yield one batch of images and masks at a time
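# Usage sketch (illustrative): to inspect the augmented pairs, pass an
# existing folder as save_to_dir and pull one batch:
# >>> myGene = trainGenerator(2, 'data/membrane/train', 'image', 'label',
# ...                         data_gen_args, save_to_dir='data/membrane/aug')
# >>> img, mask = next(myGene)
# >>> img.shape
# (2, 256, 256, 1)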
def testGenerator(test_path, num_image=30, target_size=(256,256), flag_multi_class=False, as_gray=True):
    for i in range(num_image):
        img = io.imread(os.path.join(test_path, "%d.png" % i), as_gray=as_gray)
        img = img / 255
        img = trans.resize(img, target_size)
        img = np.reshape(img, img.shape + (1,)) if (not flag_multi_class) else img
        # Prepend a batch dimension so the shape matches the (batch, 256, 256, 1) training input
        img = np.reshape(img, (1,) + img.shape)
        yield img
def geneTrainNpy(image_path, mask_path, flag_multi_class=False, num_class=2, image_prefix="image", mask_prefix="mask", image_as_gray=True, mask_as_gray=True):
    # glob works like a file search: it matches files under a path against a pattern
    # https://blog.csdn.net/u010472607/article/details/76857493/
    image_name_arr = glob.glob(os.path.join(image_path, "%s*.png" % image_prefix))
    image_arr = []
    mask_arr = []
    for index, item in enumerate(image_name_arr):  # enumerate yields (0, item0), (1, item1), (2, item2), ...
        img = io.imread(item, as_gray=image_as_gray)
        img = np.reshape(img, img.shape + (1,)) if image_as_gray else img
        # Look up the matching label image under mask_path with the mask prefix
        mask = io.imread(item.replace(image_path, mask_path).replace(image_prefix, mask_prefix), as_gray=mask_as_gray)
        mask = np.reshape(mask, mask.shape + (1,)) if mask_as_gray else mask
        img, mask = adjustData(img, mask, flag_multi_class, num_class)
        image_arr.append(img)
        mask_arr.append(mask)
    image_arr = np.array(image_arr)
    mask_arr = np.array(mask_arr)  # convert the lists to arrays
    # This function searches the image and label folders for matching images,
    # adds a channel dimension, and returns both as arrays
    return image_arr, mask_arr
def labelVisualize(num_class, color_dict, img):
    img = img[:,:,0] if len(img.shape) == 3 else img
    # Expand to RGB, since colors other than gray can only be shown in RGB space
    img_out = np.zeros(img.shape + (3,))
    for i in range(num_class):
        # Paint each class a different color: color_dict[i] is the color for class i,
        # and img_out[img == i, :] selects the pixels where img equals class i
        img_out[img == i, :] = color_dict[i]
    return img_out / 255
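# Example (illustrative): color a tiny 2-class label map with COLOR_DICT:
# >>> labelVisualize(2, COLOR_DICT, np.array([[0, 1], [1, 0]]))[0, 0] * 255
# array([128., 128., 128.])    # class 0 maps to Sky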
'''
def saveResult(save_path, npyfile, flag_multi_class=False, num_class=2):
    for i, item in enumerate(npyfile):
        img = labelVisualize(num_class, COLOR_DICT, item) if flag_multi_class else item[:,:,0]
        io.imsave(os.path.join(save_path, "%d_predict.png" % i), img)
'''
def saveResult(save_path, npyfile, flag_multi_class=False, num_class=2):
    for i, item in enumerate(npyfile):
        if flag_multi_class:
            # Multi-class results are rendered in color; binary results stay black and white
            img = labelVisualize(num_class, COLOR_DICT, item)
        else:
            img = item[:,:,0]
            print(np.max(img), np.min(img))
            img[img > 0.5] = 1
            img[img <= 0.5] = 0
            print(np.max(img), np.min(img))
        io.imsave(os.path.join(save_path, "%d_predict.png" % i), img)
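One optional tweak (an assumption on my part, not in the original code): skimage.io.imsave warns about lossy conversion when handed float images, so the prediction can be cast to uint8 before saving:

io.imsave(os.path.join(save_path, "%d_predict.png" % i),
          (img * 255).astype(np.uint8))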