tf.nn.conv2d(input, filter, strides, padding, use_cudnn_on_gpu, data_format, name)
#input: the input images, a 4-D Tensor with shape [batch, in_height, in_width, in_channels], i.e. [number of images in one training batch, image height, image width, number of image channels (e.g. 1 for grayscale, 3 for RGB)]; the dtype must be float32 or float64 (the shape is a list of length 4)
#For example, with an input of shape [batch, 32, 32, channels], index 1 along the batch dimension selects the second image (0 is the first), each image being 32x32
#filter: the filter (the convolution weights), a Tensor with shape [filter_height, filter_width, in_channels, out_channels], i.e. [kernel height, kernel width, number of input channels, number of kernels]; its dtype must match input. Note that the third dimension, in_channels, must equal the fourth dimension of input
#strides: the convolution strides, a 1-D vector of length 4
#padding: a string, either "SAME" or "VALID"; this determines the convolution mode (SAME pads with zeros, VALID does not pad)
#use_cudnn_on_gpu: bool, whether to use cuDNN acceleration; defaults to True
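To make these shape relationships concrete, here is a minimal sketch (runnable under TF 2.x eager execution, unlike the tf.compat.v1 session style used in the example below); the batch size, image size and channel counts are arbitrary illustrative values:
import tensorflow as tf
#a batch of 10 RGB images of size 32x32: [batch, in_height, in_width, in_channels]
x = tf.random.normal([10, 32, 32, 3])
#16 kernels of size 5x5; the third dimension (3) must equal the input's channel count
w = tf.random.normal([5, 5, 3, 16])
y_same = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="SAME")
y_valid = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="VALID")
print(y_same.shape)   #(10, 32, 32, 16): SAME keeps the spatial size at stride 1
print(y_valid.shape)  #(10, 28, 28, 16): VALID shrinks each side by filter_size - 1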
import tensorflow as tf
import tensorflow.compat.v1 as tf1
import numpy as np
tf1.disable_eager_execution()
#Disable eager execution so the TF1-style graph/session API below can be used
M = np.array([[[2],[1],[2],[-1]],[[0],[-1],[3],[0]],[[2],[1],[-1],[4]],[[-2],[0],[-3],[4]]],dtype= "float32").reshape(1,4,4,1)
#Define the input matrix (this serves as the input data), shape (1, 4, 4, 1)
filter_weight = tf1.get_variable("weights",[2,2,1,1],initializer= tf.constant_initializer(1))
#Define the 2x2 convolution kernel: 1 input channel, 1 output channel (all weights initialized to 1)
biases = tf1.get_variable("biase", [1], initializer = tf.constant_initializer(1))
#Create the filter's bias term; [1] is the depth (the number of output channels)
x = tf1.placeholder('float32',[1,None, None,1])
#tf1.placeholder(dtype, data shape (as a list), name (optional))
conv = tf.nn.conv2d(x, filter_weight, strides=[1,1,1,1], padding="SAME")
#Define the convolution: x is the input data (matrix) and filter_weight is the filter
#strides is officially defined as a 1-D tensor with four elements; the first and last must be 1, and the middle two are the strides along the height and width directions (see the stride/padding sketch after this example's output)
add_bias = tf.nn.bias_add(conv, biases)
#Add the bias term to every value in conv (the convolution result)
init_op = tf1.global_variables_initializer()
#Initialize all variables
with tf1.Session() as sess:
    init_op.run()
    M_conv = sess.run(add_bias, feed_dict={x: M})
    print("M after convolution:\n", M_conv)
Output:
......
M after convolution:
[[[[ 3.]
[ 6.]
[ 5.]
[ 0.]]
[[ 3.]
[ 3.]
[ 7.]
[ 5.]]
[[ 2.]
[-2.]
[ 5.]
[ 9.]]
[[-1.]
[-2.]
[ 2.]
[ 5.]]]]
Process finished with exit code 0
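As noted in the strides comment above, changing the middle two stride values or the padding mode changes the output size. A minimal sketch reusing the same 4x4 input and all-ones 2x2 kernel (TF 2.x eager execution, bias omitted):
import tensorflow as tf
import numpy as np
M = np.array([[2,1,2,-1],[0,-1,3,0],[2,1,-1,4],[-2,0,-3,4]], dtype="float32").reshape(1,4,4,1)
w = tf.ones([2, 2, 1, 1])  #the same all-ones 2x2 kernel, 1 input channel, 1 output channel
print(tf.nn.conv2d(M, w, strides=[1,1,1,1], padding="SAME").shape)   #(1, 4, 4, 1): output stays 4x4, as in the run above
print(tf.nn.conv2d(M, w, strides=[1,1,1,1], padding="VALID").shape)  #(1, 3, 3, 1): no zero padding, so the output shrinks to 3x3
print(tf.nn.conv2d(M, w, strides=[1,2,2,1], padding="SAME").shape)   #(1, 2, 2, 1): stride 2 in both spatial directions halves each side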
Note: the filter's depth (in_channels) must equal the depth (number of channels) of the input image.
Pooling is similar to convolution: a pooling function also needs a pooling-window size, a padding mode (whether to pad with zeros), and the stride by which the window moves after each pooling step.
Commonly used pooling functions:
nn.pool(input, window_shape, pooling_type, padding, dilation_rate, strides, name, data_format)
nn.avg_pool(value, ksize, strides, padding, data_format, name)
#takes the average over each window
nn.max_pool(value, ksize, strides, padding, data_format, name)
#takes the maximum over each window
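nn.pool is the generic form: the pooling type is passed as a string, and window_shape/strides cover only the spatial dimensions. A minimal sketch under TF 2.x eager execution (the input values are random; only the shapes matter here):
import tensorflow as tf
x = tf.random.normal([1, 4, 4, 1])  #[batch, height, width, channels]
#2x2 max pooling with stride 2; window_shape and strides list only the spatial dimensions
p_max = tf.nn.pool(x, window_shape=[2, 2], pooling_type="MAX", strides=[2, 2], padding="SAME")
#the same window, but averaging instead of taking the maximum
p_avg = tf.nn.pool(x, window_shape=[2, 2], pooling_type="AVG", strides=[2, 2], padding="SAME")
print(p_max.shape, p_avg.shape)  #(1, 2, 2, 1) (1, 2, 2, 1)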
Pooling example:
import tensorflow as tf
import tensorflow.compat.v1 as tf1
import numpy as np
tf1.disable_eager_execution()
M = np.array([[[-2],[2],[0],[3]],[[1],[2],[-1],[2]],[[0],[-1],[1],[0]]],dtype="float32").reshape(1,3,4,1)
filter_weight = tf1.get_variable("weight",[2,2,1,1],initializer=tf.constant_initializer([[2,0],[-1,1]]))
biases = tf1.get_variable("biases",[1],dtype="float32",initializer= tf1.constant_initializer(1))
x = tf1.placeholder ('float32',[1,None ,None ,1])
conv = tf.nn.conv2d(x, filter_weight, strides= [1,1,1,1], padding = "SAME")
add_bias = tf.nn.bias_add (conv, biases )
pool = tf.nn.max_pool(add_bias,ksize= [1,2,2,1],strides= [1,2,2,1],padding = "SAME")
with tf1.Session() as sess:
    tf1.global_variables_initializer().run()
    M_conv = sess.run(add_bias, feed_dict={x: M})
    M_pool = sess.run(pool, feed_dict={x: M})
    print("after convolution:\n", M_conv)
    print("after max pooling:\n", M_pool)
Output:
.....
after convolution:
[[[[-2.]
[ 2.]
[ 4.]
[ 5.]]
[[ 2.]
[ 7.]
[-2.]
[ 5.]]
[[ 1.]
[-1.]
[ 3.]
[ 1.]]]]
after max pooling:
[[[[7.]
[5.]]
[[1.]
[3.]]]]
Process finished with exit code 0
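For comparison, swapping tf.nn.max_pool for tf.nn.avg_pool averages each window instead of taking its maximum. A minimal sketch that applies both to the convolution result printed above (TF 2.x eager execution; the input values are copied from that output):
import tensorflow as tf
import numpy as np
#the convolution-plus-bias result printed above, shape [1, 3, 4, 1]
conv_out = np.array([[-2,2,4,5],[2,7,-2,5],[1,-1,3,1]], dtype="float32").reshape(1,3,4,1)
max_pooled = tf.nn.max_pool(conv_out, ksize=[1,2,2,1], strides=[1,2,2,1], padding="SAME")
avg_pooled = tf.nn.avg_pool(conv_out, ksize=[1,2,2,1], strides=[1,2,2,1], padding="SAME")
print("max:\n", max_pooled.numpy().reshape(2, 2))  #[[7. 5.] [1. 3.]], matching the result above
print("avg:\n", avg_pooled.numpy().reshape(2, 2))  #each window's mean instead of its maximum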