备注:
版本要求:HALCON 21.05 或更高版本
Python下的VGG网络模型源码:
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import (
    Activation,
    BatchNormalization,
    Conv2D,
    Dense,
    Dropout,
    Flatten,
    MaxPool2D,
)
class VGG16(Model):
    """VGG16 network: 13 convolutional layers followed by 3 dense layers.

    Every convolution is 3x3 with 'same' padding and is followed by batch
    normalization and a ReLU activation (conv -> BN -> ReLU).  The five
    convolutional stages (64, 128, 256, 512, 512 filters) each end with
    2x2 max pooling and Dropout(0.2).  The classifier head is
    Flatten -> Dense(512, relu) -> Dense(512, relu) -> Dense(10, softmax),
    i.e. a 10-class probability output.
    """

    def __init__(self):
        super(VGG16, self).__init__()
        # Stage 1: 2 x Conv(64)
        self.c1 = Conv2D(filters=64, kernel_size=(3, 3), padding='same')
        self.b1 = BatchNormalization()
        self.a1 = Activation('relu')
        self.c2 = Conv2D(filters=64, kernel_size=(3, 3), padding='same')
        self.b2 = BatchNormalization()
        self.a2 = Activation('relu')
        self.p1 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
        self.d1 = Dropout(0.2)
        # Stage 2: 2 x Conv(128)
        self.c3 = Conv2D(filters=128, kernel_size=(3, 3), padding='same')
        self.b3 = BatchNormalization()
        self.a3 = Activation('relu')
        self.c4 = Conv2D(filters=128, kernel_size=(3, 3), padding='same')
        self.b4 = BatchNormalization()
        self.a4 = Activation('relu')
        self.p2 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
        self.d2 = Dropout(0.2)
        # Stage 3: 3 x Conv(256)
        self.c5 = Conv2D(filters=256, kernel_size=(3, 3), padding='same')
        self.b5 = BatchNormalization()
        self.a5 = Activation('relu')
        self.c6 = Conv2D(filters=256, kernel_size=(3, 3), padding='same')
        self.b6 = BatchNormalization()
        self.a6 = Activation('relu')
        self.c7 = Conv2D(filters=256, kernel_size=(3, 3), padding='same')
        self.b7 = BatchNormalization()
        self.a7 = Activation('relu')
        self.p3 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
        self.d3 = Dropout(0.2)
        # Stage 4: 3 x Conv(512)
        self.c8 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
        self.b8 = BatchNormalization()
        self.a8 = Activation('relu')
        self.c9 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
        self.b9 = BatchNormalization()
        self.a9 = Activation('relu')
        self.c10 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
        self.b10 = BatchNormalization()
        self.a10 = Activation('relu')
        self.p4 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
        self.d4 = Dropout(0.2)
        # Stage 5: 3 x Conv(512)
        self.c11 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
        self.b11 = BatchNormalization()
        self.a11 = Activation('relu')
        self.c12 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
        self.b12 = BatchNormalization()
        self.a12 = Activation('relu')
        self.c13 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
        self.b13 = BatchNormalization()
        self.a13 = Activation('relu')
        self.p5 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
        self.d5 = Dropout(0.2)
        # Classifier head
        self.flatten = Flatten()
        self.f1 = Dense(512, activation='relu')
        self.d6 = Dropout(0.2)
        self.f2 = Dense(512, activation='relu')
        self.d7 = Dropout(0.2)
        self.f3 = Dense(10, activation='softmax')

    def call(self, x):
        """Forward pass; returns class probabilities of shape (batch, 10)."""
        # Stage 1
        x = self.c1(x)
        x = self.b1(x)
        x = self.a1(x)
        x = self.c2(x)
        x = self.b2(x)
        x = self.a2(x)
        x = self.p1(x)
        x = self.d1(x)
        # Stage 2
        x = self.c3(x)
        x = self.b3(x)
        x = self.a3(x)
        x = self.c4(x)
        x = self.b4(x)
        x = self.a4(x)
        x = self.p2(x)
        x = self.d2(x)
        # Stage 3
        x = self.c5(x)
        x = self.b5(x)
        x = self.a5(x)
        x = self.c6(x)
        x = self.b6(x)
        x = self.a6(x)
        x = self.c7(x)
        x = self.b7(x)
        x = self.a7(x)
        x = self.p3(x)
        x = self.d3(x)
        # Stage 4
        x = self.c8(x)
        x = self.b8(x)
        x = self.a8(x)
        x = self.c9(x)
        x = self.b9(x)
        x = self.a9(x)
        x = self.c10(x)
        x = self.b10(x)
        x = self.a10(x)
        x = self.p4(x)
        x = self.d4(x)
        # Stage 5
        x = self.c11(x)
        x = self.b11(x)
        x = self.a11(x)
        x = self.c12(x)
        x = self.b12(x)
        x = self.a12(x)
        x = self.c13(x)
        x = self.b13(x)
        x = self.a13(x)
        x = self.p5(x)
        x = self.d5(x)
        # Classifier head
        x = self.flatten(x)
        x = self.f1(x)
        x = self.d6(x)
        x = self.f2(x)
        x = self.d7(x)
        y = self.f3(x)
        return y
# Instantiate the network (module-level side effect of this script snippet).
model = VGG16()
Halcon实现VGG16 网络模型:
* Basic HDevelop setup: disable GUI updates and close any open window.
dev_update_off ()
dev_close_window ()
* Network input size (width x height x channels).
InputWidth := 128
InputHeight := 128
InputDepth := 3
* Input layer
create_dl_layer_input ('input', [InputWidth,InputHeight,InputDepth], [], [], DLLayerInput)
* Stage 1: Conv(64) -> BN -> ReLU, Conv(64) -> BN -> ReLU, MaxPool, Dropout.
* The convolutions use activation 'none' so that, as in the Python
* reference model above (conv -> BN -> ReLU), the activation is applied
* exactly once, by the batch-normalization layer, instead of both before
* and after batch normalization.
* Convolution layer 1
create_dl_layer_convolution (DLLayerInput, 'convolution1', [3,3], 1, 1, 64, 1, 'half_kernel_size', 'none', \
['weight_filler','weight_filler_variance_norm','weight_filler_const_val','bias_term'],\
['xavier','norm_in',0.0,'true'], DLLayerConvolution1)
* Batch-normalization layer 1 (ReLU with upper_bound 6.0, i.e. ReLU6)
create_dl_layer_batch_normalization (DLLayerConvolution1, 'batchnorm1', 'auto', 0.0001, 'relu', \
['weight_filler','weight_filler_variance_norm','weight_filler_const_val','bias_filler','bias_filler_variance_norm','bias_filler_const_val','bias_term','upper_bound'], \
['xavier','norm_in',1.0,'const','norm_in',0.0,'true',6.0], DLLayerBatchNorm1)
* Convolution layer 2
create_dl_layer_convolution (DLLayerBatchNorm1, 'convolution2', [3,3], 1, 1, 64, 1, 'half_kernel_size', 'none', \
['weight_filler','weight_filler_variance_norm','weight_filler_const_val','bias_term'], \
['xavier','norm_in',0.0,'true'], DLLayerConvolution2)
* Batch-normalization layer 2
create_dl_layer_batch_normalization (DLLayerConvolution2, 'batchnorm2', 'auto', 0.0001, 'relu', \
['weight_filler','weight_filler_variance_norm','weight_filler_const_val','bias_filler','bias_filler_variance_norm','bias_filler_const_val','bias_term','upper_bound'], \
['xavier','norm_in',1.0,'const','norm_in',0.0,'true',6.0],DLLayerBatchNorm2)
* Pooling layer 2: 'maximum' instead of the original 'average' -- the
* Python reference model uses MaxPool2D in every stage.
create_dl_layer_pooling (DLLayerBatchNorm2, 'pooling2', [2,2], 2, 'implicit', 'maximum', [], [], DLLayerPooling2)
* Dropout layer 2
create_dl_layer_dropout (DLLayerPooling2, 'dropout2', 0.2, [], [], DLLayerDropOut2)
* Stage 2: Conv(128) -> BN -> ReLU, Conv(128) -> BN -> ReLU, MaxPool, Dropout.
* Convolution activation is 'none' so ReLU is applied only once, after
* batch normalization, matching the Python reference (conv -> BN -> ReLU).
* Convolution layer 3
create_dl_layer_convolution (DLLayerDropOut2, 'convolution3', [3,3], 1, 1, 128, 1, 'half_kernel_size', 'none', \
['weight_filler','weight_filler_variance_norm','weight_filler_const_val','bias_term'], \
['xavier','norm_in',0.0,'true'], DLLayerConvolution3)
* Batch-normalization layer 3
create_dl_layer_batch_normalization (DLLayerConvolution3, 'batchnorm3', 'auto', 0.0001, 'relu', \
['weight_filler','weight_filler_variance_norm','weight_filler_const_val','bias_filler', 'bias_filler_variance_norm','bias_filler_const_val','bias_term','upper_bound'], \
['xavier','norm_in',1.0,'const','norm_in',0.0,'true',6.0], DLLayerBatchNorm3)
* Convolution layer 4
create_dl_layer_convolution (DLLayerBatchNorm3, 'convolution4',[3,3], 1, 1, 128, 1, 'half_kernel_size', 'none', \
['weight_filler','weight_filler_variance_norm','weight_filler_const_val','bias_term'], \
['xavier','norm_in',0.0,'true'], DLLayerConvolution4)
* Batch-normalization layer 4
create_dl_layer_batch_normalization (DLLayerConvolution4, 'batchnorm4', 'auto', 0.0001, 'relu', \
['weight_filler','weight_filler_variance_norm','weight_filler_const_val','bias_filler', 'bias_filler_variance_norm','bias_filler_const_val','bias_term','upper_bound'], \
['xavier','norm_in',1.0,'const','norm_in',0.0,'true',6.0],DLLayerBatchNorm4)
* Pooling layer 4 (2x2 max pooling)
create_dl_layer_pooling (DLLayerBatchNorm4, 'pooling4', [2,2], 2, 'implicit', 'maximum', [], [], DLLayerPooling4)
* Dropout layer 4
create_dl_layer_dropout (DLLayerPooling4, 'dropout4', 0.2, [], [], DLLayerDropOut4)
* Stage 3: 3 x (Conv(256) -> BN -> ReLU), MaxPool, Dropout.
* Convolution activation is 'none' so ReLU is applied only once, after
* batch normalization, matching the Python reference (conv -> BN -> ReLU).
* Convolution layer 5: bias_term is 'true' like every other convolution
* layer in this script (the original 'false' was inconsistent).
create_dl_layer_convolution (DLLayerDropOut4, 'convolution5', [3,3], 1, 1, 256, 1, 'half_kernel_size', 'none', \
['weight_filler','weight_filler_variance_norm','weight_filler_const_val','bias_term'], \
['xavier','norm_in',0.0,'true'], DLLayerConvolution5)
* Batch-normalization layer 5
create_dl_layer_batch_normalization (DLLayerConvolution5, 'batchnorm5', 'auto', 0.0001, 'relu',\
['weight_filler','weight_filler_variance_norm','weight_filler_const_val','bias_filler', 'bias_filler_variance_norm','bias_filler_const_val','bias_term','upper_bound'], \
['xavier','norm_in',1.0,'const','norm_in',0.0,'true',6.0],DLLayerBatchNorm5)
* Convolution layer 6
create_dl_layer_convolution (DLLayerBatchNorm5, 'convolution6', [3,3], 1, 1, 256, 1, 'half_kernel_size', 'none', \
['weight_filler','weight_filler_variance_norm','weight_filler_const_val','bias_term'], \
['xavier','norm_in',0.0,'true'], DLLayerConvolution6)
* Batch-normalization layer 6
create_dl_layer_batch_normalization (DLLayerConvolution6, 'batchnorm6', 'auto', 0.0001, 'relu', \
['weight_filler','weight_filler_variance_norm','weight_filler_const_val','bias_filler', 'bias_filler_variance_norm','bias_filler_const_val','bias_term','upper_bound'], \
['xavier','norm_in',1.0,'const','norm_in',0.0,'true',6.0],DLLayerBatchNorm6)
* Convolution layer 7
create_dl_layer_convolution (DLLayerBatchNorm6, 'convolution7', [3,3], 1, 1, 256, 1, 'half_kernel_size', 'none', \
['weight_filler','weight_filler_variance_norm','weight_filler_const_val','bias_term'], \
['xavier','norm_in',0.0,'true'],DLLayerConvolution7)
* Batch-normalization layer 7
create_dl_layer_batch_normalization (DLLayerConvolution7, 'batchnorm7', 'auto', 0.0001, 'relu', \
['weight_filler','weight_filler_variance_norm','weight_filler_const_val','bias_filler', 'bias_filler_variance_norm','bias_filler_const_val','bias_term','upper_bound'], \
['xavier','norm_in',1.0,'const','norm_in',0.0,'true',6.0],DLLayerBatchNorm7)
* Pooling layer 7 (2x2 max pooling)
create_dl_layer_pooling (DLLayerBatchNorm7, 'pooling7', [2,2], 2, 'implicit', 'maximum', [], [], DLLayerPooling7)
* Dropout layer 7
create_dl_layer_dropout (DLLayerPooling7, 'dropout7', 0.2, [], [], DLLayerDropOut7)
* Stage 4: 3 x (Conv(512) -> BN -> ReLU), MaxPool, Dropout.
* Convolution activation is 'none' so ReLU is applied only once, after
* batch normalization, matching the Python reference (conv -> BN -> ReLU).
* Convolution layer 8
create_dl_layer_convolution (DLLayerDropOut7, 'convolution8', [3,3], 1, 1, 512, 1, 'half_kernel_size', 'none', \
['weight_filler','weight_filler_variance_norm','weight_filler_const_val','bias_term'], \
['xavier','norm_in',0.0,'true'],DLLayerConvolution8)
* Batch-normalization layer 8
create_dl_layer_batch_normalization (DLLayerConvolution8, 'batchnorm8', 'auto', 0.0001, 'relu', \
['weight_filler','weight_filler_variance_norm','weight_filler_const_val','bias_filler', 'bias_filler_variance_norm','bias_filler_const_val','bias_term','upper_bound'], \
['xavier','norm_in',1.0,'const','norm_in',0.0,'true',6.0],DLLayerBatchNorm8)
* Convolution layer 9
create_dl_layer_convolution (DLLayerBatchNorm8, 'convolution9', [3,3], 1, 1, 512, 1, 'half_kernel_size', 'none',\
['weight_filler','weight_filler_variance_norm','weight_filler_const_val','bias_term'], \
['xavier','norm_in',0.0,'true'],DLLayerConvolution9)
* Batch-normalization layer 9
create_dl_layer_batch_normalization (DLLayerConvolution9, 'batchnorm9', 'auto', 0.0001, 'relu', \
['weight_filler','weight_filler_variance_norm','weight_filler_const_val','bias_filler', 'bias_filler_variance_norm','bias_filler_const_val','bias_term','upper_bound'], \
['xavier','norm_in',1.0,'const','norm_in',0.0,'true',6.0],DLLayerBatchNorm9)
* Convolution layer 10
create_dl_layer_convolution (DLLayerBatchNorm9, 'convolution10', [3,3], 1, 1, 512, 1, 'half_kernel_size', 'none', \
['weight_filler','weight_filler_variance_norm','weight_filler_const_val','bias_term'], \
['xavier','norm_in',0.0,'true'], DLLayerConvolution10)
* Batch-normalization layer 10
create_dl_layer_batch_normalization (DLLayerConvolution10, 'batchnorm10', 'auto', 0.0001, 'relu', \
['weight_filler','weight_filler_variance_norm','weight_filler_const_val','bias_filler', 'bias_filler_variance_norm','bias_filler_const_val','bias_term','upper_bound'], \
['xavier','norm_in',1.0,'const','norm_in',0.0,'true',6.0],DLLayerBatchNorm10)
* Pooling layer 10 (2x2 max pooling)
create_dl_layer_pooling (DLLayerBatchNorm10, 'pooling10', [2,2], 2, 'implicit', 'maximum', [], [], DLLayerPooling10)
* Dropout layer 10
create_dl_layer_dropout (DLLayerPooling10, 'dropout10', 0.2, [], [], DLLayerDropOut10)
* Stage 5: 3 x (Conv(512) -> BN -> ReLU), MaxPool, Dropout.
* Convolution layer 11: activation 'none' to match convolution layers 12
* and 13 below and the Python reference order (conv -> BN -> ReLU); the
* original 'relu' here applied the activation twice.
create_dl_layer_convolution (DLLayerDropOut10, 'convolution11', [3,3], 1, 1, 512, 1, 'half_kernel_size', 'none', \
['weight_filler','weight_filler_variance_norm','weight_filler_const_val','bias_term'], \
['xavier','norm_in',0.0,'true'],DLLayerConvolution11)
* Batch-normalization layer 11
create_dl_layer_batch_normalization (DLLayerConvolution11, 'batchnorm11', 'auto', 0.0001, 'relu', \
['weight_filler','weight_filler_variance_norm','weight_filler_const_val','bias_filler', 'bias_filler_variance_norm','bias_filler_const_val','bias_term','upper_bound'], \
['xavier','norm_in',1.0,'const','norm_in',0.0,'true',6.0],DLLayerBatchNorm11)
* Convolution layer 12
create_dl_layer_convolution (DLLayerBatchNorm11, 'convolution12', [3,3], 1, 1, 512, 1, 'half_kernel_size', 'none', \
['weight_filler','weight_filler_variance_norm','weight_filler_const_val','bias_term'], \
['xavier','norm_in',0.0,'true'],DLLayerConvolution12)
* Batch-normalization layer 12
create_dl_layer_batch_normalization (DLLayerConvolution12, 'batchnorm12', 'auto', 0.0001, 'relu', \
['weight_filler','weight_filler_variance_norm','weight_filler_const_val','bias_filler', 'bias_filler_variance_norm','bias_filler_const_val','bias_term','upper_bound'], \
['xavier','norm_in',1.0,'const','norm_in',0.0,'true',6.0],DLLayerBatchNorm12)
* Convolution layer 13
create_dl_layer_convolution (DLLayerBatchNorm12, 'convolution13', [3,3], 1, 1, 512, 1, 'half_kernel_size', 'none', \
['weight_filler','weight_filler_variance_norm','weight_filler_const_val','bias_term'], \
['xavier','norm_in',0.0,'true'],DLLayerConvolution13)
* Batch-normalization layer 13
create_dl_layer_batch_normalization (DLLayerConvolution13, 'batchnorm13', 'auto', 0.0001, 'relu',\
['weight_filler','weight_filler_variance_norm','weight_filler_const_val','bias_filler', 'bias_filler_variance_norm','bias_filler_const_val','bias_term','upper_bound'], \
['xavier','norm_in',1.0,'const','norm_in',0.0,'true',6.0],DLLayerBatchNorm13)
* Pooling layer 13 (2x2 max pooling)
create_dl_layer_pooling (DLLayerBatchNorm13, 'pooling13', [2,2], 2, 'implicit', 'maximum', [], [], DLLayerPooling13)
* Dropout layer 13
create_dl_layer_dropout (DLLayerPooling13, 'dropout13', 0.2, [], [], DLLayerDropOut13)
* Classifier head.  No explicit Flatten layer is created here (as the
* original author's '*Flatten()' note indicates, the dense layer consumes
* the pooled feature map directly -- confirm against the HALCON docs).
* Dense layer 14 (512 units) + ReLU, matching Dense(512, activation='relu')
* in the Python reference; the original script omitted this activation.
create_dl_layer_dense (DLLayerDropOut13, 'dense14', 512, \
['weight_filler','weight_filler_variance_norm','weight_filler_const_val','bias_filler', 'bias_filler_variance_norm','bias_filler_const_val','bias_term'], \
['xavier','norm_in',1.0,'const','norm_in',0.0,'true'], DLLayerDense14)
create_dl_layer_activation (DLLayerDense14, 'relu14', 'relu', [], [], DLLayerRelu14)
create_dl_layer_dropout (DLLayerRelu14, 'dropout14', 0.2, [], [], DLLayerDropOut14)
* Dense layer 15 (512 units) + ReLU
create_dl_layer_dense (DLLayerDropOut14, 'dense15', 512,\
['weight_filler','weight_filler_variance_norm','weight_filler_const_val','bias_filler', 'bias_filler_variance_norm','bias_filler_const_val','bias_term'], \
['xavier','norm_in',1.0,'const','norm_in',0.0,'true'], DLLayerDense15)
create_dl_layer_activation (DLLayerDense15, 'relu15', 'relu', [], [], DLLayerRelu15)
create_dl_layer_dropout (DLLayerRelu15, 'dropout15', 0.2, [], [], DLLayerDropOut15)
* Output dense layer 16.
* NOTE(review): 3 output classes here vs. Dense(10) in the Python
* reference -- presumably adapted to a 3-class task; confirm the intended
* class count before training.
create_dl_layer_dense (DLLayerDropOut15, 'dense16', 3, \
['weight_filler','weight_filler_variance_norm','weight_filler_const_val','bias_filler', 'bias_filler_variance_norm','bias_filler_const_val','bias_term'], \
['xavier','norm_in',1.0,'const','norm_in',0.0,'true'], DLLayerDense16)
* Add layers necessary to enable training.
create_dl_layer_softmax (DLLayerDense16, 'softmax', [], [], DLLayerSoftMax)
* Target (class id) and per-sample weight inputs consumed by the loss.
create_dl_layer_input ('image_label_id', [1,1,1], [], [], DLLayerTarget)
create_dl_layer_input ('weights', [1,1,1], [], [], DLLayerWeights)
create_dl_layer_loss_cross_entropy (DLLayerSoftMax, DLLayerTarget, DLLayerWeights, 'loss_cross_entropy', 1, [], [], DLLayerLossCrossEntropy)
* Build the model from the layer graph and configure it for classification.
create_dl_model (DLLayerLossCrossEntropy, DLModelHandle)
set_dl_model_param (DLModelHandle, 'type', 'classification')
set_dl_model_param (DLModelHandle, 'image_width', InputWidth)
set_dl_model_param (DLModelHandle, 'image_height', InputHeight)
get_dl_model_param (DLModelHandle, 'summary', NetworkSummary)
*'optimize_for_inference'
* NOTE(review): enabling 'optimize_for_inference' strips training-only
* parts of the graph, so the model written below may not be trainable
* afterwards -- confirm this is intended.
set_dl_model_param (DLModelHandle, 'optimize_for_inference', 'true')
stop ()
* Read-only queries demonstrating layer-parameter access.
get_dl_model_layer_param (DLModelHandle, 'convolution2', 'activation_mode', ActivationModeConvolution2)
get_dl_layer_param (DLLayerConvolution2, 'input_layer', InputLayerConvolution2)
*
* set_dl_model_layer_param (DLModelHandle, 'convolution1', 'name', 'convolution1_with_relu')
* set_dl_model_layer_param (DLModelHandle, 'convolution2', 'name', 'convolution2_with_relu')
stop ()
* Attach creator meta data and write the model to disk.
create_dict (MetaData)
set_dict_tuple (MetaData, 'model_creator', 'Deep-Bool')
set_dict_tuple (MetaData, 'model_info', 'Deep-Bool:QQ480824932')
set_dl_model_param (DLModelHandle, 'meta_data', MetaData)
stop ()
write_dl_model (DLModelHandle, 'VGG16.hdl')