Reproducing the ICCV 2017 paper AOD-Net in Keras

 

Disclaimer: I am an atheist, and a believer that "the neural-network black box is not really black."


My setup:

python 3.5(.1)

keras (any version)

tensorflow

NVIDIA GPU + CUDA 8.0 + cuDNN (training is quick even on CPU; if predict feels too slow, use a GPU)

opencv-python

numpy


Paper overview:

Li, Boyi, et al. "AOD-Net: All-in-One Dehazing Network." Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2017.

https://arxiv.org/abs/1707.06543

AOD-Net is the first end-to-end dehazing network: it does not rely on separately estimating any intermediate parameters, and the model is built directly on the atmospheric scattering model. Because it is so small, the network can be embedded seamlessly into any deeper network. In my view the paper made it into ICCV first of all because of this physical grounding; ICCV likes this kind of "atheism", and a sentence like "my network is constructed from a physical model" probably knocks out a good number of other dehazing papers. Second, it is the first truly end-to-end formulation, and the model is lightweight and easy to nest inside other models. If you already have a python + keras setup on your machine, dehazing an image or boosting its contrast with this is even more convenient than Meitu Xiuxiu, and the results speak for themselves (although my own reproduction is only so-so).

Model:

[Figure 1: AOD-Net network architecture]

Dehazing principle (the standard atmospheric scattering model):

[Figure 2: the atmospheric scattering model]
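
Written out explicitly (the same relations reappear as a comment inside the model code below; the K(x) expression is the paper's derivation):

I(x) = J(x)\,t(x) + A\,(1 - t(x))

J(x) = K(x)\,I(x) - K(x) + b, \qquad K(x) = \frac{\frac{1}{t(x)}\,(I(x) - A) + (A - b)}{I(x) - 1}

where I(x) is the observed hazy image, J(x) the clean scene radiance, A the global atmospheric light and t(x) the transmission map. AOD-Net folds t(x) and A into the single factor K(x), so the network only has to estimate K(x); b is a constant bias, set to 1 in the code below.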


 

Code:

数据集下载地址:https://sites.google.com/view/reside-dehaze-datasets/reside-standard?authuser=0

The site needs a fairly strong VPN to reach; the Tsinghua campus VPN cannot get through, so I used ExpressVPN.

Jupyter notebook:

# Imports (deduplicated), plus the missing l2 regularizer and the Multiply/Subtract
# layers that the model code below actually needs.
import os
import re
import glob
import math as m

import cv2
import numpy as np
import scipy
import imageio
import skimage
from skimage import transform as tf
from skimage.measure import compare_ssim, compare_psnr
from scipy import ndimage
from scipy.misc import imsave
from pathlib import Path

import matplotlib.pyplot as plt
import matplotlib.image as plt_img

import keras
from keras import backend as K
from keras.models import Model, Sequential, load_model
from keras.layers import (Input, Conv2D, MaxPooling2D, Conv2DTranspose,
                          Activation, Dropout, Flatten, Dense, Lambda,
                          concatenate, Multiply, Subtract)
from keras.regularizers import l2
from keras.activations import relu
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard
from keras_contrib.losses import DSSIMObjective

from sklearn.model_selection import train_test_split

img_width, img_height = 640, 480
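
Both the generator and the test loop below call a get_image helper that the post never shows. Here is a minimal sketch of what it presumably does (my reconstruction, not part of the original code): it reads a list of file paths into a float32 batch at the 480×640 network resolution. The display/save code further down divides by ~130 and casts to int, which suggests pixel values are kept in the raw 0-255 range rather than normalised, so this sketch does not rescale.

def get_image(paths):
    # Hypothetical helper: load a list of image paths into an (N, 480, 640, 3) float32 array.
    batch = []
    for p in paths:
        img = np.asarray(imageio.imread(p))
        # cv2.resize takes (width, height); pixel values stay in 0-255
        img = cv2.resize(img, (img_width, img_height))
        batch.append(img)
    return np.asarray(batch, dtype=np.float32)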
def train_generator(train_path, label_path, batch_size):
    """Yield (hazy, clear) batches forever; Keras generators must be infinite.

    Note: the clear-image paths are rebuilt from the hazy file names using the
    global train_label_dir, so the label_path argument is effectively unused.
    """
    L = len(train_path)

    while True:
        batch_start = 0
        batch_end = batch_size

        while batch_start < L:
            limit = min(batch_end, L)
            X = get_image(train_path[batch_start:limit])

            # Map each hazy file name to its clear counterpart, e.g.
            # ".../hazy/1400_1_0.2.png" -> train_label_dir + "/1400.png"
            Y = list(map(lambda x: re.sub(".*/", "", x), train_path[batch_start:limit]))  # strip directory
            Y = list(map(lambda x: re.sub("_.*.png", ".png", x), Y))                      # drop the haze parameters
            Y = list(map(lambda x: re.sub("[hazy]", "", x), Y))                           # drop stray h/a/z/y characters
            Y = list(map(lambda x: re.sub("^", train_label_dir + '/', x), Y))             # prepend the clear-image directory
            Y = get_image(Y)

            batch_start += batch_size
            batch_end += batch_size

            yield (X, Y)
def create_AODNet(height, width, channel=3, l2_regularization=0.0001):
    """Build the AOD-Net K-estimation module plus the clean-image generation layer."""
    l2_reg = l2_regularization
    inputs = Input(shape=(height, width, channel))

    conv1 = Conv2D(3, (1, 1), kernel_initializer='random_normal', activation='relu', padding="same",
                   kernel_regularizer=l2(l2_reg), name="conv1")(inputs)

    conv2 = Conv2D(3, (3, 3), kernel_initializer='random_normal', activation='relu', padding="same",
                   kernel_regularizer=l2(l2_reg), name="conv2")(conv1)

    concat1 = concatenate([conv1, conv2], axis=-1, name="concat1")

    conv3 = Conv2D(3, (5, 5), kernel_initializer='random_normal', activation='relu', padding="same",
                   kernel_regularizer=l2(l2_reg), name="conv3")(concat1)

    concat2 = concatenate([conv2, conv3], axis=-1, name="concat2")

    conv4 = Conv2D(3, (5, 5), kernel_initializer='random_normal', activation='relu', padding="same",
                   kernel_regularizer=l2(l2_reg), name="conv4")(concat2)

    concat3 = concatenate([conv1, conv2, conv3, conv4], axis=-1, name="concat3")

    K_x = Conv2D(3, (5, 5), kernel_initializer='random_normal', activation='relu', padding="same",
                 kernel_regularizer=l2(l2_reg), name="K_x")(concat3)

    """
    Explanation:
      I(x) = J(x)*t(x) + A*(1 - t(x))
      J(x) = K(x)*I(x) - K(x) + b
    where:
      J(x) is the scene radiance (i.e., the ideal "clean" image)
      I(x) is the observed hazy image
      A    denotes the global atmospheric light
      t(x) is the transmission matrix
    """

    # J(x) = K(x)*I(x) - K(x) + b, with the constant bias b = 1
    mul = Multiply(name="mul")([K_x, inputs])
    sub = Subtract(name="sub")([mul, K_x])
    add_b = Lambda(lambda x: 1 + x, name="add_b")(sub)
    output = Lambda(lambda x: relu(x), name="output")(add_b)

    model = Model(inputs=inputs, outputs=output)
    return model
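
With the layer widths above, the whole K-estimation module is tiny — on the order of two thousand trainable parameters by my count — which is what makes the "embed it anywhere" claim plausible. A quick check:

model = create_AODNet(img_height, img_width, 3)
model.summary()   # roughly 1.9k trainable parameters for the kernel sizes used here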

[Figure 3]

 

 

if __name__ == '__main__':

    train_data_dir = '../../jupyter/AOD_net/data/hazy'
    train_label_dir = '../../jupyter/AOD_net/data/clear'

    #train_data_dir = '../../TAMU/CSCE_633/Project/OTS/train'
    #train_label_dir = '../../TAMU/CSCE_633/Project/OTS/clear'

    batch_size = 1

    if K.image_data_format() == 'channels_first':
        input_shape = (3, img_height, img_width)
    else:
        input_shape = (img_height, img_width, 3)

    model_checkpoint = ModelCheckpoint('aod_net.h5',
                                       #monitor='val_loss',
                                       #save_best_only=True,
                                       )

    tensorboard_vis = TensorBoard(log_dir='./logs', histogram_freq=0,
                                  batch_size=batch_size, write_graph=True,
                                  write_grads=False, write_images=False,
                                  embeddings_freq=0, embeddings_layer_names=None,
                                  embeddings_metadata=None)

    print('Fitting model...')

    # Build and compile AOD-Net (the original snippet called get_unet() here,
    # a leftover from another project). Simple MSE loss as in the paper;
    # DSSIMObjective (imported above) is an alternative.
    model = create_AODNet(img_height, img_width, 3)
    model.compile(optimizer=Adam(lr=1e-4), loss='mean_squared_error')
    model.summary()

 

#train_path = glob.glob("AOD_net/data/hazy/*png")
train_path = glob.glob(train_data_dir + "/*.png")
label_path = glob.glob(train_label_dir + '/' + '*.png')
epochs = 1

# test_size=0.0 keeps every sample for training (no validation split);
# newer scikit-learn versions may reject 0.0, in which case use a small value such as 0.1.
train_X, validation_X = train_test_split(train_path, test_size=0.0,
                                         shuffle=True,
                                         )
steps_per_epoch = int(len(train_X) / batch_size)

train_set_size = len(train_path)
print("Train set size is:", train_set_size, ",", "Steps per epoch is:", steps_per_epoch, ",",
      "batch size is:", batch_size, ",", "total epochs is:", epochs)
#assert (train_set_size == (steps_per_epoch*batch_size))
#assert (train_set_size == (len(glob.glob(train_label_dir+'/'+'*.png'))))
#assert (train_set_size == (len(label_path)))

validation_steps = int(len(validation_X) / batch_size)
print("Validation steps:", validation_steps)

# Resume from previously saved weights; comment these two lines out when training from scratch.
model.load_weights('aod.h5')
model.save('aod_1_epoch_weights.h5')

model.fit_generator(generator=train_generator(train_X, label_path, batch_size),
                    #validation_data=train_generator(validation_X, label_path, batch_size),
                    steps_per_epoch=steps_per_epoch,
                    #validation_steps=validation_steps,
                    epochs=epochs,
                    verbose=1,
                    callbacks=[model_checkpoint, tensorboard_vis],
                    #use_multiprocessing=False,
                    #max_queue_size=False,
                    )
def step_decay(epoch):
    """Halve the learning rate every epochs_drop epochs (for use with LearningRateScheduler)."""
    initial_lrate = 0.0001
    drop = 0.5
    epochs_drop = 2
    lrate = initial_lrate * m.pow(drop, m.floor((1 + epoch) / epochs_drop))
    print("current lrate:", lrate)
    return lrate
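
step_decay is defined but never wired up in the post; if you want the schedule to actually apply, attach it through Keras' LearningRateScheduler callback, for example:

from keras.callbacks import LearningRateScheduler

lr_schedule = LearningRateScheduler(step_decay)
# then include it in the callbacks list passed to fit_generator:
# callbacks=[model_checkpoint, tensorboard_vis, lr_schedule]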
class LossHistory(keras.callbacks.Callback):
    """Optional callback: records per-batch loss/accuracy and plots them.

    Note: val_loss/val_acc are only available at epoch end (and only when a
    validation set is used), so the val_* lists will collect None here.
    """
    def on_train_begin(self, logs={}):
        self.losses = []
        self.acc = []
        self.val_losses = []
        self.val_acc = []

    def on_batch_end(self, batch, logs={}):
        self.losses.append(logs.get('loss'))
        self.acc.append(logs.get('acc'))
        self.val_losses.append(logs.get('val_loss'))
        self.val_acc.append(logs.get('val_acc'))
        plt.plot(self.acc, 'r--')
        plt.plot(self.losses, 'b')
        plt.plot(self.val_acc, 'g--')
        plt.plot(self.val_losses, 'y')
        #  plt.show()
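
Like step_decay, LossHistory is never instantiated in the post; to use it:

history = LossHistory()
# callbacks=[model_checkpoint, tensorboard_vis, history]
# after training, history.losses holds the per-batch training loss curve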
test_data_dir = glob.glob('../../jupyter/outdoor/hazy' + "/*.jpg")

print (test_data_dir)
model = load_model('aod_net.h5',
                  custom_objects={
                      #'sum2':sum2,
                      'relu':relu,
                      #'range_error':range_error,
                      #'heat_error':heat_error,
                  }
                  )
test_pred_dir = 'dehazed_clahe_wo_dcp'

def save_image(Y_predicted_array, filenames):
    # Save each prediction as '<base>_dehazed.png' inside test_pred_dir.
    if not os.path.exists(test_pred_dir):
        os.mkdir(test_pred_dir)
    for idx, file_name in enumerate(filenames):
        f = re.sub(r"\.jpg", "", file_name)   # drop the extension
        f = re.sub(".*/", "", f)              # drop the directory
        f = re.sub("[hazy]", "", f)           # drop stray h/a/z/y characters
        scipy.misc.toimage(Y_predicted_array[idx]).save(os.path.join(test_pred_dir, f + "_dehazed.png"))
def show_image(file_data):
    plt.figure(figsize=(20, 6))
    plt.imshow(file_data)
    plt.show()
batch_size = 1
assert batch_size == 1
steps = int(m.ceil(len(test_data_dir) / float(batch_size)))
print("Steps:", steps, "Length of test set:", len(test_data_dir))
assert (steps * batch_size >= len(test_data_dir))

for step_idx in range(0, steps):

    # Load one batch of hazy test images and run them through the network
    Y_array = get_image(test_data_dir[step_idx * batch_size:(step_idx + 1) * batch_size])
    Y_predicted_array = model.predict(Y_array, batch_size=batch_size, verbose=1)

    # Rescale for display (pixel values stay in the 0-255 range) and save the raw prediction
    YY = np.array(Y_predicted_array[0] / 130).astype("int")
    YY_SAVE = np.array(Y_predicted_array).astype("int")
    save_image(YY_SAVE, test_data_dir[step_idx * batch_size:(step_idx + 1) * batch_size])
    show_image(Y_array[0])
    show_image(YY)
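
compare_ssim and compare_psnr are imported at the top but never used in the post. A minimal sketch of how the saved outputs could be scored against ground-truth clear images; the clear directory path, its .jpg extension and the "prefix before the first underscore" naming are all assumptions on my part, and both images must have the same size:

gt_path = '../../jupyter/outdoor/clear'   # hypothetical ground-truth directory
for hazy_file in test_data_dir:
    # rebuild the same base name that save_image() used
    base = re.sub("[hazy]", "", re.sub(".*/", "", re.sub(r"\.jpg", "", hazy_file)))
    gt_name = re.sub("_.*$", "", base)    # assume the clear image shares the prefix before the first '_'
    gt = imageio.imread(os.path.join(gt_path, gt_name + ".jpg"))
    pred = imageio.imread(os.path.join(test_pred_dir, base + "_dehazed.png"))
    print(base,
          "PSNR:", compare_psnr(gt, pred, data_range=255),
          "SSIM:", compare_ssim(gt, pred, multichannel=True, data_range=255))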

Test results:

[Figures 4-9: dehazing results on the test images]

 
