def __init__(self,
filters,
kernel_size,
strides=1,
padding='valid',
data_format='channels_last',
dilation_rate=1,
groups=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
Parameters:
filters: the number of filters (i.e. the number of output channels).
kernel_size: the size of the convolution kernel.
strides: the stride of the convolution.
padding: the padding mode, one of 'valid', 'causal' or 'same' (a causal-padding sketch follows below).
1D convolution is commonly used for sequence models and natural language processing.
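The 'causal' padding mode from the parameter list is not used in the examples below, so here is a minimal sketch of it (the shapes are chosen only for illustration): causal padding pads on the left only, so the output at step t depends only on inputs up to step t, which is what autoregressive sequence models need.
import tensorflow as tf

x = tf.random.normal((1, 6, 1))  # (batch, steps, channels)
causal_conv = tf.keras.layers.Conv1D(filters=32, kernel_size=3, padding='causal', activation='relu')
y = causal_conv(x)
print('causal output shape', y.shape)  # (1, 6, 32): like 'same', the sequence length is preserved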
super(Conv1D, self).__init__(
rank=1,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
groups=groups,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
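As the call to the parent class shows, string arguments such as activation='relu' or kernel_initializer='glorot_uniform' are resolved into actual objects by the corresponding get() helpers. A minimal sketch of this lookup (the tensor values are illustrative only):
import tensorflow as tf

# 'relu' is looked up and becomes the relu activation function
act = tf.keras.activations.get('relu')
print(act(tf.constant([-1.0, 2.0])))  # values become [0., 2.]

# 'glorot_uniform' becomes a GlorotUniform initializer instance
init = tf.keras.initializers.get('glorot_uniform')
print(type(init).__name__)  # GlorotUniform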
The input layout differs from PyTorch: Keras defaults to channels_last, i.e. (batch, steps, channels), whereas PyTorch's Conv1d expects (batch, channels, steps); a sketch of the layout difference follows the 'valid' example below.
Padding mode: 'valid'
import tensorflow as tf

input = tf.random.normal((6, 1), mean=0, stddev=1)   # a length-6 sequence with 1 channel
input = tf.reshape(input, (1, 6, 1))                 # add the batch dimension: (batch, steps, channels)
print('input shape', input.shape)
model = tf.keras.layers.Conv1D(filters=32, kernel_size=3, strides=1, activation='relu', input_shape=input.shape[1:], padding='valid')
output = model(input)
print('output shape', output.shape)                  # (1, 4, 32): 'valid' shrinks 6 to 6 - 3 + 1 = 4
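To make the difference from PyTorch concrete, here is a minimal sketch (the shapes are illustrative): a channels-first tensor laid out the PyTorch way, (batch, channels, steps), can be permuted with tf.transpose before being fed to a channels_last Conv1D.
import tensorflow as tf

x_channels_first = tf.random.normal((1, 1, 6))               # (batch, channels, steps), PyTorch-style layout
x_channels_last = tf.transpose(x_channels_first, [0, 2, 1])  # -> (batch, steps, channels) for Keras
conv = tf.keras.layers.Conv1D(filters=32, kernel_size=3, padding='valid')
print(conv(x_channels_last).shape)  # (1, 4, 32)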
Padding mode: 'same'
import tensorflow as tf

input = tf.random.normal((6, 1), mean=0, stddev=1)   # a length-6 sequence with 1 channel
input = tf.reshape(input, (1, 6, 1))                 # add the batch dimension: (batch, steps, channels)
print('input shape', input.shape)
model = tf.keras.layers.Conv1D(filters=32, kernel_size=3, strides=1, activation='relu', input_shape=input.shape[1:], padding='same')
output = model(input)
print('output shape', output.shape)                  # (1, 6, 32): 'same' keeps the length at 6
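The output lengths of the two examples can be checked against the usual formulas: with 'valid' padding the length is floor((in - kernel_size) / strides) + 1, and with 'same' padding it is ceil(in / strides). A quick check in plain Python, using the shapes from the examples above:
import math

in_len, kernel_size, strides = 6, 3, 1
valid_len = (in_len - kernel_size) // strides + 1   # 4
same_len = math.ceil(in_len / strides)              # 6
print(valid_len, same_len)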
For image data the input format is (h, w, c). For example, an input of (28, 28, 1) is a 28x28 image with a single channel; the trailing 1 is the number of channels, not the number of images.
By analogy, Conv1D's input is (6, 1), where 6 is the length of the sequence and 1 is the number of channels (one feature per step).
import tensorflow as tf

input = tf.random.normal((28, 28, 1), mean=0, stddev=1)   # a 28x28 single-channel image
input = tf.reshape(input, (1, 28, 28, 1))                 # add the batch dimension: (batch, h, w, channels)
print('input shape', input.shape)
model = tf.keras.layers.Conv2D(filters=32, kernel_size=3, strides=1, activation='relu', input_shape=input.shape[1:], padding='same')
output = model(input)
print('output shape', output.shape)                       # (1, 28, 28, 32): 'same' keeps the spatial size
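The number of trainable parameters of such a layer can also be verified: a Conv2D layer has kernel_h * kernel_w * in_channels * filters weights plus filters biases, i.e. 3*3*1*32 + 32 = 320 for this configuration. A minimal sketch that builds a standalone layer with the same settings:
import tensorflow as tf

layer = tf.keras.layers.Conv2D(filters=32, kernel_size=3, strides=1, padding='same')
layer.build((None, 28, 28, 1))                  # build the kernel for 1 input channel
print('parameter count', layer.count_params())  # 3*3*1*32 + 32 = 320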
Dilation rate (dilation_rate): zeros are inserted between the elements of the original kernel, which enlarges the kernel's effective size (and therefore its receptive field).
>>> # With `dilation_rate` as 2.
>>> input_shape = (4, 28, 28, 3)
>>> x = tf.random.normal(input_shape)
>>> y = tf.keras.layers.Conv2D(
...     2, 3, activation='relu', dilation_rate=2, input_shape=input_shape[1:])(x)
>>> print(y.shape)
(4, 24, 24, 2)
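The shrink from 28 to 24 in the doctest above follows from the effective kernel size, kernel_size + (kernel_size - 1) * (dilation_rate - 1): a 3x3 kernel with dilation_rate=2 covers 5 positions, and a 'valid' convolution then gives 28 - 5 + 1 = 24. A quick check in plain Python:
kernel_size, dilation_rate, in_size = 3, 2, 28
effective = kernel_size + (kernel_size - 1) * (dilation_rate - 1)  # 5
out_size = in_size - effective + 1                                 # 24
print(effective, out_size)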
Conv3D performs spatial convolution over volumes (3D data).
>>> # The inputs are 28x28x28 volumes with a single channel, and the
>>> # batch size is 4
>>> input_shape = (4, 28, 28, 28, 1)
>>> x = tf.random.normal(input_shape)
>>> y = tf.keras.layers.Conv3D(
...     2, 3, activation='relu', input_shape=input_shape[1:])(x)
>>> print(y.shape)
(4, 26, 26, 26, 2)
import tensorflow as tf

input = tf.random.normal((28, 28, 28, 1), mean=0, stddev=1)   # a 28x28x28 single-channel volume
input = tf.reshape(input, (1, 28, 28, 28, 1))                 # add the batch dimension: (batch, d, h, w, channels)
print('input shape', input.shape)
model = tf.keras.layers.Conv3D(filters=32, kernel_size=3, strides=1, activation='relu', input_shape=input.shape[1:], padding='same')
output = model(input)
print('output shape', output.shape)                           # (1, 28, 28, 28, 32): 'same' keeps the volume size
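For comparison with the doctest above, the same configuration with padding='valid' shrinks each spatial dimension by kernel_size - 1 (28 -> 26), while 'same' keeps it at 28. A minimal sketch reusing the shapes from this example:
import tensorflow as tf

x = tf.random.normal((1, 28, 28, 28, 1))
valid_conv = tf.keras.layers.Conv3D(filters=32, kernel_size=3, strides=1, padding='valid')
print(valid_conv(x).shape)  # (1, 26, 26, 26, 32)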