###############################################################################
# HandDetection_416x416 — deploy definition for a compact ResNet-style
# detector backbone with a 1-conv regression head.
#
# Input:  1 x 3 x 416 x 416 ("data")
# Spatial schedule: 416 -> conv1(/2) 208 -> res1a(/2) 104 -> res3a(/2) 52
#                   -> res4a(/2) 26 -> res5a(/2) 13   => 13x13 output grid.
# Each residual stage is preceded by cheap 1x1 "bottleneck" reductions
# (gaussian-initialized, with explicit lr_mult/decay_mult params); the main
# 1x1/3x3 convs use msra init, no bias, and BatchNorm+Scale.
###############################################################################
name: "HandDetection_416x416"
input: "data"
input_shape { dim: 1 dim: 3 dim: 416 dim: 416 }

# ---------------------------------------------------------------- stem -----
layer {
  name: "conv1" type: "Convolution"
  bottom: "data" top: "conv1"
  convolution_param {
    num_output: 32 kernel_size: 7 pad: 3 stride: 2
    weight_filler { type: "msra" } bias_term: false
  }
}
# NOTE(review): the original file carried commented-out bn_conv1/scale_conv1
# layers here (BatchNorm maf 0.9 + Scale with bias), i.e. the stem conv is
# deliberately left un-normalized. A commented-out pool1 (3x3/2 MAX) was also
# present; downsampling is instead done by the stride-2 convs of stage res1a.
layer { name: "conv1_relu" type: "ReLU" bottom: "conv1" top: "conv1" }

# ----------------------------------------- stage res1a (32 ch, stride 2) ---
layer {
  name: "res1a_branch1_1x1" type: "Convolution"
  bottom: "conv1" top: "res1a_branch1_1x1"
  param { lr_mult: 1.0 decay_mult: 1.0 }
  convolution_param {
    num_output: 16 bias_term: false
    pad: 0 kernel_size: 1 stride: 1
    weight_filler { type: "gaussian" std: 0.01 }
  }
}
layer {
  name: "res1a_branch1" type: "Convolution"
  bottom: "res1a_branch1_1x1" top: "res1a_branch1"
  convolution_param {
    num_output: 32 kernel_size: 1 pad: 0 stride: 2
    weight_filler { type: "msra" } bias_term: false
  }
}
layer { name: "bn1a_branch1" type: "BatchNorm" bottom: "res1a_branch1" top: "res1a_branch1" batch_norm_param { moving_average_fraction: 0.9 } }
layer { name: "scale1a_branch1" type: "Scale" bottom: "res1a_branch1" top: "res1a_branch1" scale_param { bias_term: true } }
layer {
  name: "res1a_branch2_1x1" type: "Convolution"
  bottom: "conv1" top: "res1a_branch2_1x1"
  param { lr_mult: 1.0 decay_mult: 1.0 }
  convolution_param {
    num_output: 16 bias_term: false
    pad: 0 kernel_size: 1 stride: 1
    weight_filler { type: "gaussian" std: 0.01 }
  }
}
layer {
  name: "res1a_branch2a" type: "Convolution"
  bottom: "res1a_branch2_1x1" top: "res1a_branch2a"
  convolution_param {
    num_output: 32 kernel_size: 3 pad: 1 stride: 2
    weight_filler { type: "msra" } bias_term: false
  }
}
layer { name: "bn1a_branch2a" type: "BatchNorm" bottom: "res1a_branch2a" top: "res1a_branch2a" batch_norm_param { moving_average_fraction: 0.9 } }
layer { name: "scale1a_branch2a" type: "Scale" bottom: "res1a_branch2a" top: "res1a_branch2a" scale_param { bias_term: true } }
layer { name: "res1a_branch2a_relu" type: "ReLU" bottom: "res1a_branch2a" top: "res1a_branch2a" }
layer {
  name: "res1a_branch2b" type: "Convolution"
  bottom: "res1a_branch2a" top: "res1a_branch2b"
  convolution_param {
    num_output: 32 kernel_size: 3 pad: 1 stride: 1
    weight_filler { type: "msra" } bias_term: false
  }
}
layer { name: "bn1a_branch2b" type: "BatchNorm" bottom: "res1a_branch2b" top: "res1a_branch2b" batch_norm_param { moving_average_fraction: 0.9 } }
layer { name: "scale1a_branch2b" type: "Scale" bottom: "res1a_branch2b" top: "res1a_branch2b" scale_param { bias_term: true } }
layer { name: "res1a" type: "Eltwise" bottom: "res1a_branch1" bottom: "res1a_branch2b" top: "res1a" eltwise_param { operation: SUM } }
layer { name: "res1a_relu" type: "ReLU" bottom: "res1a" top: "res1a" }

# ----------------------------------------- stage res2a (32 ch, stride 1) ---
# Projection shortcut is kept even though stride is 1 (branch1 is a 1x1 conv).
layer {
  name: "res2a_branch1_1x1" type: "Convolution"
  bottom: "res1a" top: "res2a_branch1_1x1"
  param { lr_mult: 1.0 decay_mult: 1.0 }
  convolution_param {
    num_output: 16 bias_term: false
    pad: 0 kernel_size: 1 stride: 1
    weight_filler { type: "gaussian" std: 0.01 }
  }
}
layer {
  name: "res2a_branch1" type: "Convolution"
  bottom: "res2a_branch1_1x1" top: "res2a_branch1"
  convolution_param {
    num_output: 32 kernel_size: 1 pad: 0 stride: 1
    weight_filler { type: "msra" } bias_term: false
  }
}
layer { name: "bn2a_branch1" type: "BatchNorm" bottom: "res2a_branch1" top: "res2a_branch1" batch_norm_param { moving_average_fraction: 0.9 } }
layer { name: "scale2a_branch1" type: "Scale" bottom: "res2a_branch1" top: "res2a_branch1" scale_param { bias_term: true } }
layer {
  name: "res2a_branch2_1x1" type: "Convolution"
  bottom: "res1a" top: "res2a_branch2_1x1"
  param { lr_mult: 1.0 decay_mult: 1.0 }
  convolution_param {
    num_output: 16 bias_term: false
    pad: 0 kernel_size: 1 stride: 1
    weight_filler { type: "gaussian" std: 0.01 }
  }
}
layer {
  name: "res2a_branch2a" type: "Convolution"
  bottom: "res2a_branch2_1x1" top: "res2a_branch2a"
  convolution_param {
    num_output: 32 kernel_size: 3 pad: 1 stride: 1
    weight_filler { type: "msra" } bias_term: false
  }
}
layer { name: "bn2a_branch2a" type: "BatchNorm" bottom: "res2a_branch2a" top: "res2a_branch2a" batch_norm_param { moving_average_fraction: 0.9 } }
layer { name: "scale2a_branch2a" type: "Scale" bottom: "res2a_branch2a" top: "res2a_branch2a" scale_param { bias_term: true } }
layer { name: "res2a_branch2a_relu" type: "ReLU" bottom: "res2a_branch2a" top: "res2a_branch2a" }
layer {
  name: "res2a_branch2b" type: "Convolution"
  bottom: "res2a_branch2a" top: "res2a_branch2b"
  convolution_param {
    num_output: 32 kernel_size: 3 pad: 1 stride: 1
    weight_filler { type: "msra" } bias_term: false
  }
}
layer { name: "bn2a_branch2b" type: "BatchNorm" bottom: "res2a_branch2b" top: "res2a_branch2b" batch_norm_param { moving_average_fraction: 0.9 } }
layer { name: "scale2a_branch2b" type: "Scale" bottom: "res2a_branch2b" top: "res2a_branch2b" scale_param { bias_term: true } }
layer { name: "res2a" type: "Eltwise" bottom: "res2a_branch1" bottom: "res2a_branch2b" top: "res2a" eltwise_param { operation: SUM } }
layer { name: "res2a_relu" type: "ReLU" bottom: "res2a" top: "res2a" }

# ------------------------------------ stage res2b (32 ch, identity skip) ---
layer {
  name: "res2b_branch2a_1x1" type: "Convolution"
  bottom: "res2a" top: "res2b_branch2a_1x1"
  param { lr_mult: 1.0 decay_mult: 1.0 }
  convolution_param {
    num_output: 16 bias_term: false
    pad: 0 kernel_size: 1 stride: 1
    weight_filler { type: "gaussian" std: 0.01 }
  }
}
layer {
  name: "res2b_branch2a" type: "Convolution"
  bottom: "res2b_branch2a_1x1" top: "res2b_branch2a"
  convolution_param {
    num_output: 32 kernel_size: 3 pad: 1 stride: 1
    weight_filler { type: "msra" } bias_term: false
  }
}
layer { name: "bn2b_branch2a" type: "BatchNorm" bottom: "res2b_branch2a" top: "res2b_branch2a" batch_norm_param { moving_average_fraction: 0.9 } }
layer { name: "scale2b_branch2a" type: "Scale" bottom: "res2b_branch2a" top: "res2b_branch2a" scale_param { bias_term: true } }
layer { name: "res2b_branch2a_relu" type: "ReLU" bottom: "res2b_branch2a" top: "res2b_branch2a" }
layer {
  name: "res2b_branch2b" type: "Convolution"
  bottom: "res2b_branch2a" top: "res2b_branch2b"
  convolution_param {
    num_output: 32 kernel_size: 3 pad: 1 stride: 1
    weight_filler { type: "msra" } bias_term: false
  }
}
layer { name: "bn2b_branch2b" type: "BatchNorm" bottom: "res2b_branch2b" top: "res2b_branch2b" batch_norm_param { moving_average_fraction: 0.9 } }
layer { name: "scale2b_branch2b" type: "Scale" bottom: "res2b_branch2b" top: "res2b_branch2b" scale_param { bias_term: true } }
layer { name: "res2b" type: "Eltwise" bottom: "res2a" bottom: "res2b_branch2b" top: "res2b" eltwise_param { operation: SUM } }
layer { name: "res2b_relu" type: "ReLU" bottom: "res2b" top: "res2b" }

# ----------------------------------------- stage res3a (64 ch, stride 2) ---
layer {
  name: "res3a_branch1_1x1" type: "Convolution"
  bottom: "res2b" top: "res3a_branch1_1x1"
  param { lr_mult: 1.0 decay_mult: 1.0 }
  convolution_param {
    num_output: 32 bias_term: false
    pad: 0 kernel_size: 1 stride: 1
    weight_filler { type: "gaussian" std: 0.01 }
  }
}
layer {
  name: "res3a_branch1" type: "Convolution"
  bottom: "res3a_branch1_1x1" top: "res3a_branch1"
  convolution_param {
    num_output: 64 kernel_size: 1 pad: 0 stride: 2
    weight_filler { type: "msra" } bias_term: false
  }
}
layer { name: "bn3a_branch1" type: "BatchNorm" bottom: "res3a_branch1" top: "res3a_branch1" batch_norm_param { moving_average_fraction: 0.9 } }
layer { name: "scale3a_branch1" type: "Scale" bottom: "res3a_branch1" top: "res3a_branch1" scale_param { bias_term: true } }
layer {
  name: "res3a_branch2a_1x1" type: "Convolution"
  bottom: "res2b" top: "res3a_branch2a_1x1"
  param { lr_mult: 1.0 decay_mult: 1.0 }
  convolution_param {
    num_output: 32 bias_term: false
    pad: 0 kernel_size: 1 stride: 1
    weight_filler { type: "gaussian" std: 0.01 }
  }
}
layer {
  name: "res3a_branch2a" type: "Convolution"
  bottom: "res3a_branch2a_1x1" top: "res3a_branch2a"
  convolution_param {
    num_output: 64 kernel_size: 3 pad: 1 stride: 2
    weight_filler { type: "msra" } bias_term: false
  }
}
layer { name: "bn3a_branch2a" type: "BatchNorm" bottom: "res3a_branch2a" top: "res3a_branch2a" batch_norm_param { moving_average_fraction: 0.9 } }
layer { name: "scale3a_branch2a" type: "Scale" bottom: "res3a_branch2a" top: "res3a_branch2a" scale_param { bias_term: true } }
layer { name: "res3a_branch2a_relu" type: "ReLU" bottom: "res3a_branch2a" top: "res3a_branch2a" }
layer {
  name: "res3a_branch2b" type: "Convolution"
  bottom: "res3a_branch2a" top: "res3a_branch2b"
  convolution_param {
    num_output: 64 kernel_size: 3 pad: 1 stride: 1
    weight_filler { type: "msra" } bias_term: false
  }
}
layer { name: "bn3a_branch2b" type: "BatchNorm" bottom: "res3a_branch2b" top: "res3a_branch2b" batch_norm_param { moving_average_fraction: 0.9 } }
layer { name: "scale3a_branch2b" type: "Scale" bottom: "res3a_branch2b" top: "res3a_branch2b" scale_param { bias_term: true } }
layer { name: "res3a" type: "Eltwise" bottom: "res3a_branch1" bottom: "res3a_branch2b" top: "res3a" eltwise_param { operation: SUM } }
layer { name: "res3a_relu" type: "ReLU" bottom: "res3a" top: "res3a" }

# ------------------------------------ stage res3b (64 ch, identity skip) ---
layer {
  name: "res3b_branch2a_1x1" type: "Convolution"
  bottom: "res3a" top: "res3b_branch2a_1x1"
  param { lr_mult: 1.0 decay_mult: 1.0 }
  convolution_param {
    num_output: 32 bias_term: false
    pad: 0 kernel_size: 1 stride: 1
    weight_filler { type: "gaussian" std: 0.01 }
  }
}
layer {
  name: "res3b_branch2a" type: "Convolution"
  bottom: "res3b_branch2a_1x1" top: "res3b_branch2a"
  convolution_param {
    num_output: 64 kernel_size: 3 pad: 1 stride: 1
    weight_filler { type: "msra" } bias_term: false
  }
}
layer { name: "bn3b_branch2a" type: "BatchNorm" bottom: "res3b_branch2a" top: "res3b_branch2a" batch_norm_param { moving_average_fraction: 0.9 } }
layer { name: "scale3b_branch2a" type: "Scale" bottom: "res3b_branch2a" top: "res3b_branch2a" scale_param { bias_term: true } }
layer { name: "res3b_branch2a_relu" type: "ReLU" bottom: "res3b_branch2a" top: "res3b_branch2a" }
layer {
  name: "res3b_branch2b" type: "Convolution"
  bottom: "res3b_branch2a" top: "res3b_branch2b"
  convolution_param {
    num_output: 64 kernel_size: 3 pad: 1 stride: 1
    weight_filler { type: "msra" } bias_term: false
  }
}
layer { name: "bn3b_branch2b" type: "BatchNorm" bottom: "res3b_branch2b" top: "res3b_branch2b" batch_norm_param { moving_average_fraction: 0.9 } }
layer { name: "scale3b_branch2b" type: "Scale" bottom: "res3b_branch2b" top: "res3b_branch2b" scale_param { bias_term: true } }
layer { name: "res3b" type: "Eltwise" bottom: "res3a" bottom: "res3b_branch2b" top: "res3b" eltwise_param { operation: SUM } }
layer { name: "res3b_relu" type: "ReLU" bottom: "res3b" top: "res3b" }

# ---------------------------------------- stage res4a (128 ch, stride 2) ---
layer {
  name: "res4a_branch1_1x1" type: "Convolution"
  bottom: "res3b" top: "res4a_branch1_1x1"
  param { lr_mult: 1.0 decay_mult: 1.0 }
  convolution_param {
    num_output: 64 bias_term: false
    pad: 0 kernel_size: 1 stride: 1
    weight_filler { type: "gaussian" std: 0.01 }
  }
}
layer {
  name: "res4a_branch1" type: "Convolution"
  bottom: "res4a_branch1_1x1" top: "res4a_branch1"
  convolution_param {
    num_output: 128 kernel_size: 1 pad: 0 stride: 2
    weight_filler { type: "msra" } bias_term: false
  }
}
layer { name: "bn4a_branch1" type: "BatchNorm" bottom: "res4a_branch1" top: "res4a_branch1" batch_norm_param { moving_average_fraction: 0.9 } }
layer { name: "scale4a_branch1" type: "Scale" bottom: "res4a_branch1" top: "res4a_branch1" scale_param { bias_term: true } }
layer {
  name: "res4a_branch2a_1x1" type: "Convolution"
  bottom: "res3b" top: "res4a_branch2a_1x1"
  param { lr_mult: 1.0 decay_mult: 1.0 }
  convolution_param {
    num_output: 64 bias_term: false
    pad: 0 kernel_size: 1 stride: 1
    weight_filler { type: "gaussian" std: 0.01 }
  }
}
layer {
  name: "res4a_branch2a" type: "Convolution"
  bottom: "res4a_branch2a_1x1" top: "res4a_branch2a"
  convolution_param {
    num_output: 128 kernel_size: 3 pad: 1 stride: 2
    weight_filler { type: "msra" } bias_term: false
  }
}
layer { name: "bn4a_branch2a" type: "BatchNorm" bottom: "res4a_branch2a" top: "res4a_branch2a" batch_norm_param { moving_average_fraction: 0.9 } }
layer { name: "scale4a_branch2a" type: "Scale" bottom: "res4a_branch2a" top: "res4a_branch2a" scale_param { bias_term: true } }
layer { name: "res4a_branch2a_relu" type: "ReLU" bottom: "res4a_branch2a" top: "res4a_branch2a" }
layer {
  name: "res4a_branch2b" type: "Convolution"
  bottom: "res4a_branch2a" top: "res4a_branch2b"
  convolution_param {
    num_output: 128 kernel_size: 3 pad: 1 stride: 1
    weight_filler { type: "msra" } bias_term: false
  }
}
layer { name: "bn4a_branch2b" type: "BatchNorm" bottom: "res4a_branch2b" top: "res4a_branch2b" batch_norm_param { moving_average_fraction: 0.9 } }
layer { name: "scale4a_branch2b" type: "Scale" bottom: "res4a_branch2b" top: "res4a_branch2b" scale_param { bias_term: true } }
layer { name: "res4a" type: "Eltwise" bottom: "res4a_branch1" bottom: "res4a_branch2b" top: "res4a" eltwise_param { operation: SUM } }
layer { name: "res4a_relu" type: "ReLU" bottom: "res4a" top: "res4a" }

# ----------------------------------- stage res4b (128 ch, identity skip) ---
layer {
  name: "res4b_branch2a_1x1" type: "Convolution"
  bottom: "res4a" top: "res4b_branch2a_1x1"
  param { lr_mult: 1.0 decay_mult: 1.0 }
  convolution_param {
    num_output: 64 bias_term: false
    pad: 0 kernel_size: 1 stride: 1
    weight_filler { type: "gaussian" std: 0.01 }
  }
}
layer {
  name: "res4b_branch2a" type: "Convolution"
  bottom: "res4b_branch2a_1x1" top: "res4b_branch2a"
  convolution_param {
    num_output: 128 kernel_size: 3 pad: 1 stride: 1
    weight_filler { type: "msra" } bias_term: false
  }
}
layer { name: "bn4b_branch2a" type: "BatchNorm" bottom: "res4b_branch2a" top: "res4b_branch2a" batch_norm_param { moving_average_fraction: 0.9 } }
layer { name: "scale4b_branch2a" type: "Scale" bottom: "res4b_branch2a" top: "res4b_branch2a" scale_param { bias_term: true } }
layer { name: "res4b_branch2a_relu" type: "ReLU" bottom: "res4b_branch2a" top: "res4b_branch2a" }
layer {
  name: "res4b_branch2b" type: "Convolution"
  bottom: "res4b_branch2a" top: "res4b_branch2b"
  convolution_param {
    num_output: 128 kernel_size: 3 pad: 1 stride: 1
    weight_filler { type: "msra" } bias_term: false
  }
}
layer { name: "bn4b_branch2b" type: "BatchNorm" bottom: "res4b_branch2b" top: "res4b_branch2b" batch_norm_param { moving_average_fraction: 0.9 } }
layer { name: "scale4b_branch2b" type: "Scale" bottom: "res4b_branch2b" top: "res4b_branch2b" scale_param { bias_term: true } }
layer { name: "res4b" type: "Eltwise" bottom: "res4a" bottom: "res4b_branch2b" top: "res4b" eltwise_param { operation: SUM } }
layer { name: "res4b_relu" type: "ReLU" bottom: "res4b" top: "res4b" }

# ---------------------------------------- stage res5a (256 ch, stride 2) ---
layer {
  name: "res5a_branch1_1x1" type: "Convolution"
  bottom: "res4b" top: "res5a_branch1_1x1"
  param { lr_mult: 1.0 decay_mult: 1.0 }
  convolution_param {
    num_output: 128 bias_term: false
    pad: 0 kernel_size: 1 stride: 1
    weight_filler { type: "gaussian" std: 0.01 }
  }
}
layer {
  name: "res5a_branch1" type: "Convolution"
  bottom: "res5a_branch1_1x1" top: "res5a_branch1"
  convolution_param {
    num_output: 256 kernel_size: 1 pad: 0 stride: 2
    weight_filler { type: "msra" } bias_term: false
  }
}
layer { name: "bn5a_branch1" type: "BatchNorm" bottom: "res5a_branch1" top: "res5a_branch1" batch_norm_param { moving_average_fraction: 0.9 } }
layer { name: "scale5a_branch1" type: "Scale" bottom: "res5a_branch1" top: "res5a_branch1" scale_param { bias_term: true } }
layer {
  name: "res5a_branch2a_1x1" type: "Convolution"
  bottom: "res4b" top: "res5a_branch2a_1x1"
  param { lr_mult: 1.0 decay_mult: 1.0 }
  convolution_param {
    num_output: 128 bias_term: false
    pad: 0 kernel_size: 1 stride: 1
    weight_filler { type: "gaussian" std: 0.01 }
  }
}
layer {
  name: "res5a_branch2a" type: "Convolution"
  bottom: "res5a_branch2a_1x1" top: "res5a_branch2a"
  convolution_param {
    num_output: 256 kernel_size: 3 pad: 1 stride: 2
    weight_filler { type: "msra" } bias_term: false
  }
}
layer { name: "bn5a_branch2a" type: "BatchNorm" bottom: "res5a_branch2a" top: "res5a_branch2a" batch_norm_param { moving_average_fraction: 0.9 } }
layer { name: "scale5a_branch2a" type: "Scale" bottom: "res5a_branch2a" top: "res5a_branch2a" scale_param { bias_term: true } }
layer { name: "res5a_branch2a_relu" type: "ReLU" bottom: "res5a_branch2a" top: "res5a_branch2a" }
layer {
  name: "res5a_branch2b" type: "Convolution"
  bottom: "res5a_branch2a" top: "res5a_branch2b"
  convolution_param {
    num_output: 256 kernel_size: 3 pad: 1 stride: 1
    weight_filler { type: "msra" } bias_term: false
  }
}
layer { name: "bn5a_branch2b" type: "BatchNorm" bottom: "res5a_branch2b" top: "res5a_branch2b" batch_norm_param { moving_average_fraction: 0.9 } }
layer { name: "scale5a_branch2b" type: "Scale" bottom: "res5a_branch2b" top: "res5a_branch2b" scale_param { bias_term: true } }
layer { name: "res5a" type: "Eltwise" bottom: "res5a_branch1" bottom: "res5a_branch2b" top: "res5a" eltwise_param { operation: SUM } }
layer { name: "res5a_relu" type: "ReLU" bottom: "res5a" top: "res5a" }

# ----------------------------------- stage res5b (256 ch, identity skip) ---
layer {
  name: "res5b_branch2a_1x1" type: "Convolution"
  bottom: "res5a" top: "res5b_branch2a_1x1"
  param { lr_mult: 1.0 decay_mult: 1.0 }
  convolution_param {
    num_output: 128 bias_term: false
    pad: 0 kernel_size: 1 stride: 1
    weight_filler { type: "gaussian" std: 0.01 }
  }
}
layer {
  name: "res5b_branch2a" type: "Convolution"
  bottom: "res5b_branch2a_1x1" top: "res5b_branch2a"
  convolution_param {
    num_output: 256 kernel_size: 3 pad: 1 stride: 1
    weight_filler { type: "msra" } bias_term: false
  }
}
layer { name: "bn5b_branch2a" type: "BatchNorm" bottom: "res5b_branch2a" top: "res5b_branch2a" batch_norm_param { moving_average_fraction: 0.9 } }
layer { name: "scale5b_branch2a" type: "Scale" bottom: "res5b_branch2a" top: "res5b_branch2a" scale_param { bias_term: true } }
layer { name: "res5b_branch2a_relu" type: "ReLU" bottom: "res5b_branch2a" top: "res5b_branch2a" }
layer {
  name: "res5b_branch2b" type: "Convolution"
  bottom: "res5b_branch2a" top: "res5b_branch2b"
  convolution_param {
    num_output: 256 kernel_size: 3 pad: 1 stride: 1
    weight_filler { type: "msra" } bias_term: false
  }
}
layer { name: "bn5b_branch2b" type: "BatchNorm" bottom: "res5b_branch2b" top: "res5b_branch2b" batch_norm_param { moving_average_fraction: 0.9 } }
layer { name: "scale5b_branch2b" type: "Scale" bottom: "res5b_branch2b" top: "res5b_branch2b" scale_param { bias_term: true } }
layer { name: "res5b" type: "Eltwise" bottom: "res5a" bottom: "res5b_branch2b" top: "res5b" eltwise_param { operation: SUM } }
layer { name: "res5b_relu" type: "ReLU" bottom: "res5b" top: "res5b" }

# ------------------------------- extra detection-head convs (13x13 grid) ---
# NOTE(review): unlike the backbone BNs, bn19/bn20/bn21 set no
# moving_average_fraction (Caffe default 0.999, vs 0.9 elsewhere) and no
# negative-slope-free ReLU — head ReLUs are leaky (0.1). Presumably copied
# from a YOLO-style head; confirm the 0.999/0.9 mismatch is intentional
# before retraining.
layer {
  name: "conv19" type: "Convolution"
  bottom: "res5b" top: "conv19"
  param { lr_mult: 1 decay_mult: 1 }
  convolution_param {
    num_output: 256 pad: 1 kernel_size: 3 stride: 1
    bias_term: false weight_filler { type: "xavier" }
  }
}
layer { name: "bn19" type: "BatchNorm" bottom: "conv19" top: "conv19" }
layer { name: "scale_conv19" type: "Scale" bottom: "conv19" top: "conv19" scale_param { bias_term: true } }
layer { name: "relu19" type: "ReLU" bottom: "conv19" top: "conv19" relu_param { negative_slope: 0.1 } }
layer {
  name: "conv20" type: "Convolution"
  bottom: "conv19" top: "conv20"
  param { lr_mult: 1 decay_mult: 1 }
  convolution_param {
    num_output: 384 pad: 1 kernel_size: 3 stride: 1
    bias_term: false weight_filler { type: "xavier" }
  }
}
layer { name: "bn20" type: "BatchNorm" bottom: "conv20" top: "conv20" }
layer { name: "scale_conv20" type: "Scale" bottom: "conv20" top: "conv20" scale_param { bias_term: true } }
layer { name: "relu20" type: "ReLU" bottom: "conv20" top: "conv20" relu_param { negative_slope: 0.1 } }
layer {
  name: "conv21" type: "Convolution"
  bottom: "conv20" top: "conv21"
  param { lr_mult: 1 decay_mult: 1 }
  convolution_param {
    num_output: 256 pad: 1 kernel_size: 3 stride: 1
    bias_term: false weight_filler { type: "xavier" }
  }
}
layer { name: "bn21" type: "BatchNorm" bottom: "conv21" top: "conv21" }
layer { name: "scale_conv21" type: "Scale" bottom: "conv21" top: "conv21" scale_param { bias_term: true } }
layer { name: "relu21" type: "ReLU" bottom: "conv21" top: "conv21" relu_param { negative_slope: 0.1 } }

###############################################################################
# Regression head: 1x1 conv, 85 output maps on the 13x13 grid.
# NOTE(review): layer name "conv_reg12" vs top "conv_reg" mismatch is harmless
# to Caffe but confirm downstream tools look up the blob ("conv_reg"), not the
# layer name. bias_term was deliberately left at its default (true) — the
# commented-out "bias_term: false" from the original is preserved below, and
# the second param block (lr_mult: 2, decay_mult: 0) is the bias's multiplier.
###############################################################################
layer {
  name: "conv_reg12" type: "Convolution"
  bottom: "conv21" top: "conv_reg"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 85 kernel_size: 1 stride: 1
    #bias_term: false
    weight_filler { type: "xavier" }
  }
}