Building a deep neural network (DNN) of arbitrary depth automatically with modularized TensorFlow 1.x

Table of contents

    • Preface
    • Some reference blogs
    • Implementation
    • Using the module
    • Notes

Preface

I previously wrote a post on automatically building DNNs in TensorFlow 2. Today I went back over the TensorFlow 1 code I had written earlier; the old code was not modular, so here I use classes to organize the previously scattered functions into a module. See: TensorFlow 2.X: automatically building a deep neural network (DNN) of arbitrary depth

Some reference blogs

1. Python: class basics
2. Classes and self in Python, explained
3. What does object mean in a Python class definition?
4. Defining and using a class in Python
5. The __init__() and __call__() methods in Python
6. Python basics: how __call__ is used

Implementation

Environment for running the following code: deploying and configuring a TensorFlow 1.x environment (personal notes)
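
Before running the code, it is worth confirming that the interpreter really is on TensorFlow 1.x. A minimal check (my own addition, not from the post linked above):

import tensorflow as tf
print(tf.__version__)  # should print a 1.x version, e.g. 1.15.0; the graph/session API used below requires it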

import numpy as np
import tensorflow as tf


class my_actFunc(object):
    """Wraps an activation function selected by name."""
    def __init__(self, actName='linear'):
        super(my_actFunc, self).__init__()
        self.actName = actName

    def __call__(self, x_input):
        act_name = str.lower(self.actName)
        if act_name == 'relu':
            out_x = tf.nn.relu(x_input)
        elif act_name == 'leaky_relu':
            out_x = tf.nn.leaky_relu(x_input)
        elif act_name == 'tanh':
            out_x = tf.tanh(x_input)
        elif act_name == 'srelu':
            out_x = tf.nn.relu(x_input)*tf.nn.relu(1-x_input)
        elif act_name == 's2relu':
            out_x = tf.nn.relu(x_input)*tf.nn.relu(1-x_input)*tf.sin(2*np.pi*x_input)
        elif act_name == 'elu':
            out_x = tf.nn.elu(x_input)
        elif act_name == 'sin':
            out_x = tf.sin(x_input)
        elif act_name == 'sigmoid':
            out_x = tf.nn.sigmoid(x_input)
        elif act_name == 'mish':
            # mish(x) = x * tanh(softplus(x))
            out_x = x_input*tf.tanh(tf.math.log(1+tf.exp(x_input)))
        else:
            # 'linear' (or any unrecognized name) is the identity
            out_x = x_input
        return out_x
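
# Note on the two less common names above (added comments, not in the original
# post): 'srelu' computes relu(x)*relu(1-x), a bump supported on [0, 1], and
# 's2relu' multiplies that bump by sin(2*pi*x); such compactly supported,
# oscillatory activations are typically paired with multi-scale model types
# like the ScaleDNN/FourierDNN named below.
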
class Dense_Net(object):
    """
    Args:
        indim: the dimension for input data
        outdim: the dimension for output
        hidden_units: the number of  units for hidden layer, a list or a tuple
        name2Model: the name of using DNN type, DNN , ScaleDNN or FourierDNN
        actName2in: the name of activation function for input layer
        actName: the name of activation function for hidden layer
        actName2out: the name of activation function for output layer
        scope2W: the namespace of weight
        scope2B: the namespace of bias
        repeat_high_freq: repeating the high-frequency component of scale-transformation factor or not
        if name2Model is not wavelet NN, actName2in is not same as actName; otherwise, actName2in is same as actName
    """
    def __init__(self, indim=1, outdim=1, hidden_units=None, name2Model='DNN', actName2in='tanh', actName='tanh',
                 actName2out='linear', scope2W='Weight', scope2B='Bias', repeat_high_freq=True, type2float='float32',
                 varcoe=0.5):
        super(Dense_Net, self).__init__()
        self.indim = indim
        self.outdim = outdim
        self.hidden_units = hidden_units
        self.name2Model = name2Model
        self.actName2in = actName2in
        self.actName = actName
        self.actName2out = actName2out
        self.actFunc_in = my_actFunc(actName=actName2in)
        self.actFunc = my_actFunc(actName=actName)
        self.actFunc_out = my_actFunc(actName=actName2out)
        self.repeat_high_freq = repeat_high_freq
        self.type2float = type2float
        self.Ws = []
        self.Bs = []
        if type2float == 'float32':
            self.float_type = tf.float32
        elif type2float == 'float64':
            self.float_type = tf.float64
        else:
            self.float_type = tf.float16

        with tf.variable_scope('WB_scope', reuse=tf.AUTO_REUSE):
            # input layer: Glorot-style normal init, stddev = sqrt(2/(fan_in + fan_out))
            stddev_WB = (2.0 / (indim + hidden_units[0])) ** 0.5
            Win = tf.get_variable(name=str(scope2W) + '_in', shape=(indim, hidden_units[0]),
                                  initializer=tf.random_normal_initializer(stddev=stddev_WB), trainable=True,
                                  dtype=self.float_type)
            Bin = tf.get_variable(name=str(scope2B) + '_in', shape=(hidden_units[0],),
                                  initializer=tf.random_normal_initializer(stddev=stddev_WB),
                                  dtype=self.float_type, trainable=True)
            self.Ws.append(Win)
            self.Bs.append(Bin)
            # hidden layers, with the same Glorot-style initialization
            for i_layer in range(len(hidden_units) - 1):
                stddev_WB = (2.0 / (hidden_units[i_layer] + hidden_units[i_layer + 1])) ** 0.5
                W = tf.get_variable(name=str(scope2W) + str(i_layer),
                                    shape=(hidden_units[i_layer], hidden_units[i_layer + 1]),
                                    initializer=tf.random_normal_initializer(stddev=stddev_WB), trainable=True,
                                    dtype=self.float_type)
                B = tf.get_variable(name=str(scope2B) + str(i_layer), shape=(hidden_units[i_layer + 1],),
                                    initializer=tf.random_normal_initializer(stddev=stddev_WB), trainable=True,
                                    dtype=self.float_type)
                self.Ws.append(W)
                self.Bs.append(B)

            # output layer: the weights and biases of the last layer, mapping the
            # final hidden state to the output dimension (note the varcoe exponent)
            stddev_WB = (2.0 / (hidden_units[-1] + outdim)) ** varcoe
            Wout = tf.get_variable(name=str(scope2W) + '_out', shape=(hidden_units[-1], outdim),
                                   initializer=tf.random_normal_initializer(stddev=stddev_WB), trainable=True,
                                   dtype=self.float_type)
            Bout = tf.get_variable(name=str(scope2B) + '_out', shape=(outdim,),
                                   initializer=tf.random_normal_initializer(stddev=stddev_WB), trainable=True,
                                   dtype=self.float_type)

            self.Ws.append(Wout)
            self.Bs.append(Bout)

    def get_regular_sum2WB(self, regular_model):
        """Return the L1 or L2 penalty summed over all weights and biases."""
        layers = len(self.hidden_units) + 1  # number of weight matrices: input + hidden + output
        if regular_model == 'L1':
            regular_w = 0
            regular_b = 0
            for i_layer in range(layers):
                regular_w = regular_w + tf.reduce_sum(tf.abs(self.Ws[i_layer]), keepdims=False)
                regular_b = regular_b + tf.reduce_sum(tf.abs(self.Bs[i_layer]), keepdims=False)
        elif regular_model == 'L2':
            regular_w = 0
            regular_b = 0
            for i_layer in range(layers):
                regular_w = regular_w + tf.reduce_sum(tf.square(self.Ws[i_layer]), keepdims=False)
                regular_b = regular_b + tf.reduce_sum(tf.square(self.Bs[i_layer]), keepdims=False)
        else:
            regular_w = tf.constant(0.0)
            regular_b = tf.constant(0.0)
        return regular_w + regular_b

    def __call__(self, inputs, scale=None):
        """
        Args
            inputs: the input point set [num, in-dim]
            scale: The scale-factor to transform the input-data
        return
            output: the output point set [num, out-dim]
        """
        # ------ transform the input data through the input layer ---------------
        H = tf.add(tf.matmul(inputs, self.Ws[0]), self.Bs[0])
        H = self.actFunc_in(H)

        # --- resnet: one-step skip connection between two consecutive layers
        # --- whenever they have the same number of neurons
        hidden_record = self.hidden_units[0]
        for i_layer in range(len(self.hidden_units) - 1):
            H_pre = H
            H = tf.add(tf.matmul(H, self.Ws[i_layer + 1]), self.Bs[i_layer + 1])
            H = self.actFunc(H)
            if self.hidden_units[i_layer + 1] == hidden_record:
                H = H + H_pre
            hidden_record = self.hidden_units[i_layer + 1]

        H = tf.add(tf.matmul(H, self.Ws[-1]), self.Bs[-1])
        out_result = self.actFunc_out(H)
        return out_result
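
Because my_actFunc defines __call__, an instance can be applied to a tensor like an ordinary function. A minimal standalone check (my own sketch, assuming the TF1 graph/session API; not part of the original post):

act = my_actFunc(actName='sigmoid')
x_demo = tf.constant([[-1.0], [0.0], [2.0]], dtype=tf.float32)
z = act(x_demo)  # builds the sigmoid op on the current graph
with tf.Session() as demo_sess:
    print(demo_sess.run(z))  # approximately [[0.269], [0.5], [0.881]]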

Using the module

if __name__ == "__main__":
    input_dim = 3
    out_dim = 1
    hidden_layer = (5, 10, 10, 15, 20)
    name2base_model = 'DNN'
    actFun = 'tanh'

    model = Dense_Net(indim=input_dim, outdim=out_dim, hidden_units=hidden_layer, name2Model=name2base_model,
                      actName=actFun)
    batch_size = 10
    x = np.random.rand(batch_size, input_dim)
    freq = [1, 2, 3, 4, 5, 6, 7, 8]  # scale factors; only consumed by the ScaleDNN/FourierDNN variants
    with tf.device('/gpu:0'):
        with tf.variable_scope('vscope', reuse=tf.AUTO_REUSE):
            X = tf.placeholder(tf.float32, name='XYit2train', shape=[None, input_dim])  # [N, D]
            Y = model(X, scale=freq)

    # with allow_soft_placement=True in ConfigProto, the GPU can be used safely
    config = tf.ConfigProto(allow_soft_placement=True)  # configure the session at creation time
    config.gpu_options.allow_growth = True  # request GPU memory on demand instead of grabbing it all up front
    config.allow_soft_placement = True  # if the requested device (e.g. the GPU) does not exist, fall back to one that does (e.g. the CPU)
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())  # initialize once, outside the loop, so the weights are not re-randomized every epoch
        for i_epoch in range(5):
            y = sess.run(Y, feed_dict={X: x})
            print('Y:', y)
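
The demo above only runs a forward pass, so get_regular_sum2WB is never exercised. A hypothetical sketch of how it would enter a training loss (Y_true, penalty2WB and the MSE objective are my own assumptions, not from the original post); these lines would slot in right after Y = model(X, scale=freq) inside the same graph scope:

    Y_true = tf.placeholder(tf.float32, shape=[None, out_dim], name='Y_true')  # assumed labels
    mse_loss = tf.reduce_mean(tf.square(Y - Y_true))
    penalty2WB = 0.0001  # assumed regularization weight
    loss = mse_loss + penalty2WB * model.get_regular_sum2WB(regular_model='L2')
    train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)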

Notes

This code is only meant as a reference template; my coding skills are limited, so feel free to adjust and modify it.
