0002-Keras custom layers + loading the custom layers after saving the model

Define the custom Position_Embedding layer

import keras
from keras import backend as K
import tensorflow as tf

class Position_Embedding(keras.layers.Layer):    
    def __init__(self, size=None, mode='sum', **kwargs):
        self.size = size  # dimension of the positional encoding (must be even)
        self.mode = mode  # 'sum' adds the encoding to the input, 'concat' appends it
        super(Position_Embedding, self).__init__(**kwargs)

    def call(self, x):
        # When summing, the encoding has to match the input's feature dimension.
        if (self.size is None) or (self.mode == 'sum'):
            self.size = int(x.shape[-1])
        # Frequencies 1 / 10000^(2j / size), one per pair of encoding dimensions.
        position_j = 1. / K.pow(10000., 2 * K.arange(self.size / 2, dtype='float32') / self.size)
        position_j = K.expand_dims(position_j, 0)

        # Positions 0, 1, 2, ... along the sequence axis.
        position_i = K.cumsum(K.ones_like(x[:, :, 0]), 1) - 1
        position_i = K.expand_dims(position_i, 2)
        # Outer product of positions and frequencies, then the cos/sin encoding.
        position_ij = K.dot(position_i, position_j)
        position_ij = K.concatenate([K.cos(position_ij), K.sin(position_ij)], 2)
        if self.mode == 'sum':
            return position_ij + x
        elif self.mode == 'concat':
            return K.concatenate([position_ij, x], 2)        
    def compute_output_shape(self, input_shape):
        if self.mode == 'sum':
            return input_shape
        elif self.mode == 'concat':
            return (input_shape[0], input_shape[1], input_shape[2] + self.size)

    def get_config(self):
        # Serialize the constructor arguments so load_model can rebuild the layer.
        config = {"size": self.size, "mode": self.mode}
        base_config = super(Position_Embedding, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

Define the multi-head Attention layer

class Attention(keras.layers.Layer):
    def __init__(self, nb_head, size_per_head, **kwargs):
        self.nb_head = nb_head                      # number of attention heads
        self.size_per_head = size_per_head          # dimension of each head
        self.output_dim = nb_head * size_per_head   # total projected dimension
        super(Attention, self).__init__(**kwargs)

    def build(self, input_shape):
        # Projection matrices for queries, keys and values; input_shape is a list
        # of shapes because the layer is called on [Q_seq, K_seq, V_seq].
        self.WQ = self.add_weight(name='WQ',
                                  shape=(int(input_shape[0][-1]), self.output_dim),
                                  initializer='glorot_uniform',
                                  trainable=True)
        self.WK = self.add_weight(name='WK',
                                  shape=(int(input_shape[1][-1]), self.output_dim),
                                  initializer='glorot_uniform',
                                  trainable=True)
        self.WV = self.add_weight(name='WV',
                                  shape=(int(input_shape[2][-1]), self.output_dim),
                                  initializer='glorot_uniform',
                                  trainable=True)
        super(Attention, self).build(input_shape)
  
    def Mask(self, inputs, seq_len, mode='mul'):
        # Masks positions beyond each sequence's true length.
        # seq_len: tensor of shape (batch, 1) holding the valid lengths, or None.
        if seq_len is None:
            return inputs
        else:
            mask = K.one_hot(seq_len[:, 0], K.shape(inputs)[1])
            mask = 1 - K.cumsum(mask, 1)   # 1 for valid positions, 0 afterwards
            for _ in range(len(inputs.shape) - 2):
                mask = K.expand_dims(mask, 2)
            if mode == 'mul':
                return inputs * mask                 # zero out padded positions
            if mode == 'add':
                return inputs - (1 - mask) * 1e12    # large negative bias before softmax
                
    def call(self, x):
        # x is either [Q_seq, K_seq, V_seq] or
        # [Q_seq, K_seq, V_seq, Q_len, V_len] when sequence lengths are supplied.
        if len(x) == 3:
            Q_seq, K_seq, V_seq = x
            Q_len, V_len = None, None
        elif len(x) == 5:
            Q_seq, K_seq, V_seq, Q_len, V_len = x
            
        # Linear projections, then split into (batch, nb_head, seq_len, size_per_head).
        Q_seq = K.dot(Q_seq, self.WQ)
        Q_seq = K.reshape(Q_seq, (-1, K.shape(Q_seq)[1], self.nb_head, self.size_per_head))
        Q_seq = K.permute_dimensions(Q_seq, (0, 2, 1, 3))

        K_seq = K.dot(K_seq, self.WK)
        K_seq = K.reshape(K_seq, (-1, K.shape(K_seq)[1], self.nb_head, self.size_per_head))
        K_seq = K.permute_dimensions(K_seq, (0, 2, 1, 3))

        V_seq = K.dot(V_seq, self.WV)
        V_seq = K.reshape(V_seq, (-1, K.shape(V_seq)[1], self.nb_head, self.size_per_head))
        V_seq = K.permute_dimensions(V_seq, (0, 2, 1, 3))
        
        # Scaled dot-product attention scores, shape (batch, nb_head, len_q, len_k).
        A = K.batch_dot(Q_seq, K_seq, axes=[3, 3]) / self.size_per_head ** 0.5
        A = K.permute_dimensions(A, (0, 3, 2, 1))
        # Mask out padded key positions before the softmax.
        A = self.Mask(A, V_len, 'add')
        A = K.permute_dimensions(A, (0, 3, 2, 1))
        A = K.softmax(A)

        # Weighted sum of the values, then merge the heads back together.
        O_seq = K.batch_dot(A, V_seq, axes=[3, 2])
        O_seq = K.permute_dimensions(O_seq, (0, 2, 1, 3))
        O_seq = K.reshape(O_seq, (-1, K.shape(O_seq)[1], self.output_dim))
        O_seq = self.Mask(O_seq, Q_len, 'mul')
        
        return O_seq
        
    def compute_output_shape(self, input_shape):
        return (input_shape[0][0], input_shape[0][1], self.output_dim)     
    
    def get_config(self):
        # Serialize the constructor arguments so load_model can rebuild the layer.
        config = {"nb_head": self.nb_head, "size_per_head": self.size_per_head}
        base_config = super(Attention, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
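
Before loading, a model that uses both layers has to be built, trained and saved. A minimal sketch of such a model (the layer sizes, pooling/classification head and file name below are illustrative, not from the original post):

from keras import layers, models

S_inputs = layers.Input(shape=(None,), dtype='int32')
embeddings = layers.Embedding(20000, 128)(S_inputs)
embeddings = Position_Embedding()(embeddings)                     # inject positional information
O_seq = Attention(8, 16)([embeddings, embeddings, embeddings])    # self-attention: Q = K = V
O_seq = layers.GlobalAveragePooling1D()(O_seq)
outputs = layers.Dense(1, activation='sigmoid')(O_seq)

model = models.Model(inputs=S_inputs, outputs=outputs)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# ... model.fit(...) ...
model.save("model.h5")    # hypothetical path; each layer's get_config() is stored in the file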

The following shows how to load the saved model together with its custom layers:

from keras import models

model = models.load_model("/data/user//model.h5",
                          custom_objects={'Position_Embedding': Position_Embedding,
                                          'Attention': Attention})
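
The keys in custom_objects must match the class names recorded in the saved file; load_model looks up each custom class there and re-creates the layers from the arguments returned by get_config, so the loaded model is immediately usable for prediction or further training.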
