Image super-resolution in Python: deep-learning-based image super-resolution (VDSR, SRCNN, FSRCNN)

# -*- coding: utf-8 -*-
"""
Spyder Editor

This is a temporary script file.
"""

import numpy as np
import tensorflow as tf
from keras import layers
from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
from keras.models import Model, load_model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
import pydot
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
import keras.backend as K

K.set_image_data_format('channels_last')

import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
import scipy.misc

K.set_learning_phase(1)

#import kt_utils
#import resnets_utils

from PIL import Image

# TensorFlow 1.x session setup: let the GPU allocate memory on demand
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)

# Define the VDSR convolutional structure
def conv_block(X, f=3, filters=64):
    """
    X: input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
    f: integer, convolution kernel size; VDSR uses 3x3 kernels with 64 filters
    filters: integer, number of kernels in each convolutional layer
    """
    # Number of kernels per layer
    F1 = filters
    # Keep the input so it can be added back to the processed output at the end (residual learning)
    X_shortcut = X
    # Stacked 3x3 conv + BN + ReLU layers
    for i in range(1, 21):
        X = Conv2D(filters=F1, kernel_size=(f, f), strides=(1, 1), padding='same', kernel_initializer='glorot_uniform')(X)
        X = BatchNormalization(axis=3, name='bn' + str(i))(X)
        X = Activation('relu')(X)
    # Image output layer: project back to the input channel count (3 for RGB)
    # so the residual addition below has matching shapes
    X = Conv2D(filters=3, kernel_size=(3, 3), strides=(1, 1), padding='same', kernel_initializer='glorot_uniform')(X)
    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)
    return X

#%%

def vdsr_model(input_shape=(289, 387, 3), output_shape=(289, 387, 3)):
    X_input = Input(input_shape)
    X = conv_block(X_input)
    # Build the model
    model = Model(inputs=X_input, outputs=X, name="VDSR")
    return model
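# Quick sanity check (a sketch added here, not part of the original script):
# building the model and printing its summary should show the 20 stacked
# Conv/BN/ReLU layers, the 3-filter output convolution, and the residual Add.
vdsr_model(input_shape=(289, 387, 3)).summary()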

# Load data
import os
import h5py

def read_picture(path, n_C):
    # Read the images under `path` and return them as an array of shape [m, n_H, n_W, n_C]
    # path: str, directory containing the images
    # n_C: int, number of channels; 1 for grayscale images, 3 for RGB images
    # returns: numpy array of shape (m, n_H, n_W, n_C)
    # Note: all images in the directory are assumed to have the same size,
    # otherwise the final reshape will fail.
    datas = []
    x_dirs = os.listdir(path)
    for x_file in x_dirs:
        fpath = os.path.join(path, x_file)
        if n_C == 1:
            _x = Image.open(fpath).convert("L")
            # plt.imshow(_x, "gray")  # show the image (only the last one would be displayed)
        elif n_C == 3:
            _x = Image.open(fpath)
            # plt.imshow(_x)  # show the image (only the last one would be displayed)
        else:
            raise ValueError("Error: n_C must be 1 (grayscale) or 3 (RGB)")
        n_W = _x.size[0]
        n_H = _x.size[1]
        # To up/downscale the images, uncomment the following block
        '''
        rat = 0.4  # scaling factor
        n_W = int(rat * n_W)
        n_H = int(rat * n_H)
        _x = _x.resize((n_W, n_H))  # assigning n_W, n_H directly resizes the image to any target size
        '''
        datas.append(np.array(_x))
        _x.close()
    datas = np.array(datas)
    m = datas.shape[0]
    datas = datas.reshape((m, n_H, n_W, n_C))
    # print(datas.shape)
    return datas

def saveInH5py(datas):
    # Note: this overwrites data.h5 on every call, which is why the script
    # below saves and reloads the x and y sets one after the other.
    f = h5py.File('data.h5', 'w')
    f.create_dataset('train_set', data=datas)
    f.close()

def load_dataset():
    train_dataset = h5py.File('data.h5', 'r')
    train_set_orig = np.array(train_dataset["train_set"][:])
    return train_set_orig

# Read the images to build the dataset
x_src = 'D:\\0_project\\mark_pic\\data\\new_pic\\small\\pic_re_shrink_g'
y_src = 'D:\\0_project\\mark_pic\\data\\new_pic\\small\\pic_re_shrink'

x_datas = read_picture(x_src, 3)
saveInH5py(x_datas)
train_set_x_orig = load_dataset()

y_datas = read_picture(y_src, 3)
saveInH5py(y_datas)
train_set_y_orig = load_dataset()

print(train_set_x_orig.shape)
print(train_set_y_orig.shape)
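# The arrays loaded above are uint8 values in [0, 255]. The original script
# trains on them directly; an optional preprocessing step (a sketch, not in
# the original) is to cast to float32 and normalize to [0, 1], which usually
# makes MSE training more stable:
# train_set_x_orig = train_set_x_orig.astype('float32') / 255.0
# train_set_y_orig = train_set_y_orig.astype('float32') / 255.0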

# Compile
model = vdsr_model(input_shape=(289, 387, 3), output_shape=(289, 387, 3))
model.compile(optimizer="sgd", loss="mean_squared_error", metrics=["accuracy"])
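# "accuracy" is not a meaningful metric for pixel-wise regression. If a quality
# measure is wanted during training, a PSNR metric written with the Keras
# backend is a common choice (a sketch, assuming 8-bit [0, 255] pixel values;
# use 1.0 instead of 255.0 if the data has been normalized):
def psnr_metric(y_true, y_pred):
    mse = K.mean(K.square(y_pred - y_true))
    return 10.0 * K.log((255.0 ** 2) / mse) / K.log(10.0)
# e.g. model.compile(optimizer="sgd", loss="mean_squared_error", metrics=[psnr_metric])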

# Train
X_train = train_set_x_orig
Y_train = train_set_y_orig
# model.fit returns a History object with per-epoch loss/metric values
history = model.fit(X_train, Y_train, epochs=20, validation_split=0.1, batch_size=1)
model.save('e:\\vdsr.h5')
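# A small addition (not in the original post): plot how the training and
# validation loss evolved, using the History object returned by model.fit above.
plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='val loss')
plt.xlabel('epoch')
plt.ylabel('MSE loss')
plt.legend()
plt.show()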

#%%

# Load the trained model and the images to run prediction on
model = load_model('e:\\vdsr.h5')
x_test_src = 'D:\\0_project\\mark_pic\\data\\new_pic\\small\\pic_test'
x_test = read_picture(x_test_src, 3)
saveInH5py(x_test)
test_set_x_orig = load_dataset()
pred = model.predict(test_set_x_orig)
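# Post-processing sketch (added, not in the original post): the network outputs
# float values, so clip back to the valid pixel range, cast to uint8, and save
# the first predicted image with PIL. The output path is just an example.
pred_img = np.clip(pred[0], 0, 255).astype('uint8')
Image.fromarray(pred_img).save('e:\\vdsr_pred_0.png')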
