U-Net on a small dataset (TF2, Keras, Python 3, PyCharm)

Beginner pitfall diary #006

I have finally moved into image segmentation.

Many thanks to the people who shared their work so generously.

The code comes in two parts.

The first part is the U-Net architecture itself:

from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Conv2D, Input, MaxPooling2D, Dropout, concatenate, UpSampling2D

def Unet(num_class, image_size):

    # Encoder (contracting path)
    inputs = Input(shape=[image_size, image_size, 1])
    conv1 = Conv2D(64, 3, activation='relu', padding='same')(inputs)
    conv1 = Conv2D(64, 3, activation='relu', padding='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(128, 3, activation='relu', padding='same')(pool1)
    conv2 = Conv2D(128, 3, activation='relu', padding='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(256, 3, activation='relu', padding='same')(pool2)
    conv3 = Conv2D(256, 3, activation='relu', padding='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Conv2D(512, 3, activation='relu', padding='same')(pool3)
    conv4 = Conv2D(512, 3, activation='relu', padding='same')(conv4)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

    # Bottleneck
    conv5 = Conv2D(1024, 3, activation='relu', padding='same')(pool4)
    conv5 = Conv2D(1024, 3, activation='relu', padding='same')(conv5)
    drop5 = Dropout(0.5)(conv5)

    # Decoder (expanding path) with skip connections to the encoder
    up6 = Conv2D(512, 2, activation='relu', padding='same')(UpSampling2D(size=(2, 2))(drop5))
    merge6 = concatenate([drop4, up6], axis=3)
    conv6 = Conv2D(512, 3, activation='relu', padding='same')(merge6)
    conv6 = Conv2D(512, 3, activation='relu', padding='same')(conv6)

    up7 = Conv2D(256, 2, activation='relu', padding='same')(UpSampling2D(size=(2, 2))(conv6))
    merge7 = concatenate([conv3, up7], axis=3)
    conv7 = Conv2D(256, 3, activation='relu', padding='same')(merge7)
    conv7 = Conv2D(256, 3, activation='relu', padding='same')(conv7)

    up8 = Conv2D(128, 2, activation='relu', padding='same')(UpSampling2D(size=(2, 2))(conv7))
    merge8 = concatenate([conv2, up8], axis=3)
    conv8 = Conv2D(128, 3, activation='relu', padding='same')(merge8)
    conv8 = Conv2D(128, 3, activation='relu', padding='same')(conv8)

    up9 = Conv2D(64, 2, activation='relu', padding='same')(UpSampling2D(size=(2, 2))(conv8))
    merge9 = concatenate([conv1, up9], axis=3)
    conv9 = Conv2D(64, 3, activation='relu', padding='same')(merge9)
    conv9 = Conv2D(64, 3, activation='relu', padding='same')(conv9)
    conv9 = Conv2D(2, 3, activation='relu', padding='same')(conv9)

    # 1x1 convolution down to the number of output classes (sigmoid for binary masks)
    conv10 = Conv2D(num_class, 1, activation='sigmoid')(conv9)
    model = Model(inputs=inputs, outputs=conv10)
    model.compile(optimizer=Adam(learning_rate=1e-4), loss='binary_crossentropy', metrics=['accuracy'])

    return model
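
If you want to sanity-check the network before training, a minimal sketch like the one below (my addition, not part of the original post) builds the model and prints its layer shapes. With num_class=1 and a sigmoid output, binary_crossentropy is the matching loss; for multi-class masks you would switch to a softmax output and categorical cross-entropy.

if __name__ == "__main__":
    # Build a single-class 256x256 U-Net and print the layer summary
    model = Unet(num_class=1, image_size=256)
    model.summary()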

The second part is the training and prediction script:

import os
import cv2
import numpy as np
from Unet import Unet
from tensorflow.keras.preprocessing.image import ImageDataGenerator

def DataGenerator(file_path, batch_size):

    # Augmentation settings (note: the second dict overrides the first,
    # so only horizontal flips are actually applied here)
    aug_dict = dict(rotation_range=0.2,
                    width_shift_range=0.05,
                    height_shift_range=0.05,
                    shear_range=0.05,
                    zoom_range=0.05,
                    horizontal_flip=True,
                    fill_mode='nearest')
    aug_dict = dict(horizontal_flip=True,
                    fill_mode='nearest')

    # Two identical generators with the same seed keep images and masks aligned
    image_datagen = ImageDataGenerator(**aug_dict)
    mask_datagen = ImageDataGenerator(**aug_dict)
    image_generator = image_datagen.flow_from_directory(
        file_path,
        classes=["images"],
        color_mode="grayscale",
        target_size=(256, 256),
        class_mode=None,
        batch_size=batch_size, seed=1)

    mask_generator = mask_datagen.flow_from_directory(
        file_path,
        classes=["labels"],
        color_mode="grayscale",
        target_size=(256, 256),
        class_mode=None,
        batch_size=batch_size, seed=1)

    train_generator = zip(image_generator, mask_generator)
    for (img, mask) in train_generator:
        # Scale images to [0, 1] and binarize the masks
        img = img / 255.
        mask = mask / 255.
        mask[mask > 0.5] = 1
        mask[mask <= 0.5] = 0
        yield (img, mask)

model = Unet(1, image_size=256)
trainset = DataGenerator("membrane/train", batch_size=2)
# Note: adjust epochs (and steps_per_epoch) to match your hardware.
# model.fit accepts Python generators in TF 2.1+ and replaces the deprecated fit_generator.
model.fit(trainset, steps_per_epoch=5000, epochs=1)
model.save_weights("model.h5")

testSet = DataGenerator("membrane/test", batch_size=1)
alpha = 0.3
model.load_weights("model.h5")
if not os.path.exists("./results"): os.mkdir("./results")

for idx, (img, mask) in enumerate(testSet):
    origin_img = img[0]
    # Predict and binarize the mask
    pred_mask = model.predict(img)[0]
    pred_mask[pred_mask > 0.5] = 1
    pred_mask[pred_mask <= 0.5] = 0
    # Blend a colored overlay (BGR (0, 0, 255) for cv2.imwrite) onto pixels
    # where the predicted mask is 0, and rescale everything back to [0, 255]
    img = cv2.cvtColor(img[0], cv2.COLOR_GRAY2RGB)
    H, W, C = img.shape
    for i in range(H):
        for j in range(W):
            if pred_mask[i][j][0] <= 0.5:
                img[i][j] = (1 - alpha) * img[i][j] * 255 + alpha * np.array([0, 0, 255])
            else:
                img[i][j] = img[i][j] * 255
    image_accuracy = np.mean(mask == pred_mask)
    image_path = "./results/pred_" + str(idx) + ".png"
    print("=> accuracy: %.4f, saving %s" % (image_accuracy, image_path))
    cv2.imwrite(image_path, img)
    cv2.imwrite("./results/origin_%d.png" % idx, origin_img * 255)
    if idx == 29: break

The dataset is called membrane.
It is small, only about 43 MB:
a training set of 90 images at 512×512 with 90 matching label masks,
and a test set of 30 images, also with 30 labels.
Easy to get, easy to get running, and well suited to beginners.
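
Based on the flow_from_directory calls above (classes=["images"] and classes=["labels"]), the images and masks are expected to live in images/ and labels/ subfolders under membrane/train and membrane/test. Below is a small optional check; it is my own sketch, not part of the original code.

import os

# Expected layout, inferred from the generator code above:
#   membrane/train/images/   membrane/train/labels/
#   membrane/test/images/    membrane/test/labels/
for split in ("train", "test"):
    for sub in ("images", "labels"):
        path = os.path.join("membrane", split, sub)
        print(path, "->", "OK" if os.path.isdir(path) else "MISSING")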

One thing worth pointing out:
training an image segmentation model takes a long time.
I used the CPU build of TensorFlow,
and with steps_per_epoch=5000 at batch_size=2 (10,000 augmented images per epoch),
a single epoch took 30 hours on one machine and 18 hours on the other!
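
If you are not sure whether TensorFlow actually sees a GPU, a quick check (assuming TF 2.1 or later) is:

import tensorflow as tf

# An empty list means TensorFlow is running CPU-only
print(tf.config.list_physical_devices('GPU'))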

My environment: Windows 10, PyCharm, TF2, Keras, Python 3.
Remember to also install cv2 (the opencv-python package) and scipy.

The code is simple
and good for quickly getting a feel for segmentation.
Thanks again to everyone who shared their work.
