layer { name: "input" type: "Input" top: "data" input_param { shape { dim: 1 dim: 1 dim: 32 dim: 512 } } } layer { name: "power-data" type: "Power" bottom: "data" top: "0" power_param { scale: 0.0039215686274 shift: -0.67 } } layer { name: "208_Conv" type: "Convolution" bottom: "0" top: "208" convolution_param { num_output: 32 bias_term: false group: 1 pad_h: 1 pad_w: 1 kernel_h: 3 kernel_w: 3 stride_h: 2 stride_w: 2 dilation: 1 } } layer { name: "209_BatchNormalization_bn" type: "BatchNorm" bottom: "208" top: "209" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } } layer { name: "209_BatchNormalization" type: "Scale" bottom: "209" top: "209" scale_param { bias_term: true } } layer { name: "210_Relu" type: "ReLU" bottom: "209" top: "210" } layer { name: "211_Conv" type: "Convolution" bottom: "210" top: "211" convolution_param { num_output: 32 bias_term: false group: 32 pad_h: 1 pad_w: 1 kernel_h: 3 kernel_w: 3 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "212_Conv" type: "Convolution" bottom: "211" top: "212" convolution_param { num_output: 32 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "213_BatchNormalization_bn" type: "BatchNorm" bottom: "212" top: "213" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } } layer { name: "213_BatchNormalization" type: "Scale" bottom: "213" top: "213" scale_param { bias_term: true } } layer { name: "214_Relu" type: "ReLU" bottom: "213" top: "214" } layer { name: "215_Conv" type: "Convolution" bottom: "214" top: "215" convolution_param { num_output: 96 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "216_BatchNormalization_bn" type: "BatchNorm" bottom: "215" top: "216" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } } layer { name: "216_BatchNormalization" type: "Scale" bottom: "216" top: "216" scale_param { bias_term: true } } layer { name: "217_Relu" type: "ReLU" bottom: "216" top: "217" } layer { name: "218_Conv" type: "Convolution" bottom: "217" top: "218" convolution_param { num_output: 96 bias_term: false group: 96 pad_h: 1 pad_w: 1 kernel_h: 3 kernel_w: 3 stride_h: 2 stride_w: 2 dilation: 1 } } layer { name: "219_Conv" type: "Convolution" bottom: "218" top: "219" convolution_param { num_output: 24 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "220_BatchNormalization_bn" type: "BatchNorm" bottom: "219" top: "220" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } } layer { name: "220_BatchNormalization" type: "Scale" bottom: "220" top: "220" scale_param { bias_term: true } } layer { name: "221_Conv" type: "Convolution" bottom: "220" top: "221" convolution_param { num_output: 72 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "222_BatchNormalization_bn" type: "BatchNorm" bottom: "221" top: "222" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } } layer { name: "222_BatchNormalization" type: "Scale" bottom: "222" top: "222" scale_param { bias_term: true } } layer { name: "223_Relu" type: "ReLU" bottom: "222" top: "223" } layer { name: "224_Conv" type: "Convolution" bottom: "223" top: "224" convolution_param { num_output: 72 bias_term: false group: 72 pad_h: 1 pad_w: 1 kernel_h: 3 kernel_w: 3 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "225_Conv" type: 
"Convolution" bottom: "224" top: "225" convolution_param { num_output: 24 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "226_BatchNormalization_bn" type: "BatchNorm" bottom: "225" top: "226" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } } layer { name: "226_BatchNormalization" type: "Scale" bottom: "226" top: "226" scale_param { bias_term: true } } layer { name: "227_Add" type: "Eltwise" bottom: "226" bottom: "220" top: "227" eltwise_param { operation: SUM } } layer { name: "228_Conv" type: "Convolution" bottom: "227" top: "228" convolution_param { num_output: 72 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "229_BatchNormalization_bn" type: "BatchNorm" bottom: "228" top: "229" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } } layer { name: "229_BatchNormalization" type: "Scale" bottom: "229" top: "229" scale_param { bias_term: true } } layer { name: "230_Relu" type: "ReLU" bottom: "229" top: "230" } layer { name: "231_Conv" type: "Convolution" bottom: "230" top: "231" convolution_param { num_output: 72 bias_term: false group: 72 pad_h: 1 pad_w: 1 kernel_h: 3 kernel_w: 3 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "232_Conv" type: "Convolution" bottom: "231" top: "232" convolution_param { num_output: 24 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "233_BatchNormalization_bn" type: "BatchNorm" bottom: "232" top: "233" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } } layer { name: "233_BatchNormalization" type: "Scale" bottom: "233" top: "233" scale_param { bias_term: true } } layer { name: "234_Add" type: "Eltwise" bottom: "233" bottom: "227" top: "234" eltwise_param { operation: SUM } } layer { name: "235_Conv" type: "Convolution" bottom: "234" top: "235" convolution_param { num_output: 72 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "236_BatchNormalization_bn" type: "BatchNorm" bottom: "235" top: "236" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } } layer { name: "236_BatchNormalization" type: "Scale" bottom: "236" top: "236" scale_param { bias_term: true } } layer { name: "237_Relu" type: "ReLU" bottom: "236" top: "237" } layer { name: "238_Conv" type: "Convolution" bottom: "237" top: "238" convolution_param { num_output: 72 bias_term: false group: 72 pad_h: 2 pad_w: 2 kernel_h: 5 kernel_w: 5 stride_h: 2 stride_w: 2 dilation: 1 } } layer { name: "239_Conv" type: "Convolution" bottom: "238" top: "239" convolution_param { num_output: 32 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "240_BatchNormalization_bn" type: "BatchNorm" bottom: "239" top: "240" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } } layer { name: "240_BatchNormalization" type: "Scale" bottom: "240" top: "240" scale_param { bias_term: true } } layer { name: "241_Conv" type: "Convolution" bottom: "240" top: "241" convolution_param { num_output: 96 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "242_BatchNormalization_bn" type: "BatchNorm" bottom: "241" top: "242" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } } layer { name: 
"242_BatchNormalization" type: "Scale" bottom: "242" top: "242" scale_param { bias_term: true } } layer { name: "243_Relu" type: "ReLU" bottom: "242" top: "243" } layer { name: "244_Conv" type: "Convolution" bottom: "243" top: "244" convolution_param { num_output: 96 bias_term: false group: 96 pad_h: 2 pad_w: 2 kernel_h: 5 kernel_w: 5 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "245_Conv" type: "Convolution" bottom: "244" top: "245" convolution_param { num_output: 32 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "246_BatchNormalization_bn" type: "BatchNorm" bottom: "245" top: "246" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } } layer { name: "246_BatchNormalization" type: "Scale" bottom: "246" top: "246" scale_param { bias_term: true } } layer { name: "247_Add" type: "Eltwise" bottom: "246" bottom: "240" top: "247" eltwise_param { operation: SUM } } layer { name: "248_Conv" type: "Convolution" bottom: "247" top: "248" convolution_param { num_output: 96 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "249_BatchNormalization_bn" type: "BatchNorm" bottom: "248" top: "249" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } } layer { name: "249_BatchNormalization" type: "Scale" bottom: "249" top: "249" scale_param { bias_term: true } } layer { name: "250_Relu" type: "ReLU" bottom: "249" top: "250" } layer { name: "251_Conv" type: "Convolution" bottom: "250" top: "251" convolution_param { num_output: 96 bias_term: false group: 96 pad_h: 2 pad_w: 2 kernel_h: 5 kernel_w: 5 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "252_Conv" type: "Convolution" bottom: "251" top: "252" convolution_param { num_output: 32 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "253_BatchNormalization_bn" type: "BatchNorm" bottom: "252" top: "253" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } } layer { name: "253_BatchNormalization" type: "Scale" bottom: "253" top: "253" scale_param { bias_term: true } } layer { name: "254_Add" type: "Eltwise" bottom: "253" bottom: "247" top: "254" eltwise_param { operation: SUM } } layer { name: "255_Conv" type: "Convolution" bottom: "254" top: "255" convolution_param { num_output: 192 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "256_BatchNormalization_bn" type: "BatchNorm" bottom: "255" top: "256" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } } layer { name: "256_BatchNormalization" type: "Scale" bottom: "256" top: "256" scale_param { bias_term: true } } layer { name: "257_Relu" type: "ReLU" bottom: "256" top: "257" } layer { name: "258_Conv" type: "Convolution" bottom: "257" top: "258" convolution_param { num_output: 192 bias_term: false group: 192 pad_h: 2 pad_w: 2 kernel_h: 5 kernel_w: 5 stride_h: 2 stride_w: 1 dilation: 1 } } layer { name: "259_Conv" type: "Convolution" bottom: "258" top: "259" convolution_param { num_output: 48 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "260_BatchNormalization_bn" type: "BatchNorm" bottom: "259" top: "260" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } } layer { name: "260_BatchNormalization" type: "Scale" bottom: "260" top: "260" scale_param { bias_term: 
true } } layer { name: "261_Conv" type: "Convolution" bottom: "260" top: "261" convolution_param { num_output: 288 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "262_BatchNormalization_bn" type: "BatchNorm" bottom: "261" top: "262" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } } layer { name: "262_BatchNormalization" type: "Scale" bottom: "262" top: "262" scale_param { bias_term: true } } layer { name: "263_Relu" type: "ReLU" bottom: "262" top: "263" } layer { name: "264_Conv" type: "Convolution" bottom: "263" top: "264" convolution_param { num_output: 288 bias_term: false group: 288 pad_h: 2 pad_w: 2 kernel_h: 5 kernel_w: 5 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "265_Conv" type: "Convolution" bottom: "264" top: "265" convolution_param { num_output: 48 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "266_BatchNormalization_bn" type: "BatchNorm" bottom: "265" top: "266" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } } layer { name: "266_BatchNormalization" type: "Scale" bottom: "266" top: "266" scale_param { bias_term: true } } layer { name: "267_Add" type: "Eltwise" bottom: "266" bottom: "260" top: "267" eltwise_param { operation: SUM } } layer { name: "268_Conv" type: "Convolution" bottom: "267" top: "268" convolution_param { num_output: 288 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "269_BatchNormalization_bn" type: "BatchNorm" bottom: "268" top: "269" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } } layer { name: "269_BatchNormalization" type: "Scale" bottom: "269" top: "269" scale_param { bias_term: true } } layer { name: "270_Relu" type: "ReLU" bottom: "269" top: "270" } layer { name: "271_Conv" type: "Convolution" bottom: "270" top: "271" convolution_param { num_output: 288 bias_term: false group: 288 pad_h: 2 pad_w: 2 kernel_h: 5 kernel_w: 5 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "272_Conv" type: "Convolution" bottom: "271" top: "272" convolution_param { num_output: 48 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "273_BatchNormalization_bn" type: "BatchNorm" bottom: "272" top: "273" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } } layer { name: "273_BatchNormalization" type: "Scale" bottom: "273" top: "273" scale_param { bias_term: true } } layer { name: "274_Add" type: "Eltwise" bottom: "273" bottom: "267" top: "274" eltwise_param { operation: SUM } } layer { name: "275_Conv" type: "Convolution" bottom: "274" top: "275" convolution_param { num_output: 288 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "276_BatchNormalization_bn" type: "BatchNorm" bottom: "275" top: "276" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } } layer { name: "276_BatchNormalization" type: "Scale" bottom: "276" top: "276" scale_param { bias_term: true } } layer { name: "277_Relu" type: "ReLU" bottom: "276" top: "277" } layer { name: "278_Conv" type: "Convolution" bottom: "277" top: "278" convolution_param { num_output: 288 bias_term: false group: 288 pad_h: 1 pad_w: 1 kernel_h: 3 kernel_w: 3 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "279_Conv" type: "Convolution" bottom: "278" top: "279" 
layer { name: "279_Conv" type: "Convolution" bottom: "278" top: "279" convolution_param { num_output: 96 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } }
layer { name: "280_BatchNormalization_bn" type: "BatchNorm" bottom: "279" top: "280" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } }
layer { name: "280_BatchNormalization" type: "Scale" bottom: "280" top: "280" scale_param { bias_term: true } }

# Residual block at 96 channels: expand to 576, 3x3 depthwise.
layer { name: "281_Conv" type: "Convolution" bottom: "280" top: "281" convolution_param { num_output: 576 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } }
layer { name: "282_BatchNormalization_bn" type: "BatchNorm" bottom: "281" top: "282" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } }
layer { name: "282_BatchNormalization" type: "Scale" bottom: "282" top: "282" scale_param { bias_term: true } }
layer { name: "283_Relu" type: "ReLU" bottom: "282" top: "283" }
layer { name: "284_Conv" type: "Convolution" bottom: "283" top: "284" convolution_param { num_output: 576 bias_term: false group: 576 pad_h: 1 pad_w: 1 kernel_h: 3 kernel_w: 3 stride_h: 1 stride_w: 1 dilation: 1 } }
layer { name: "285_Conv" type: "Convolution" bottom: "284" top: "285" convolution_param { num_output: 96 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } }
layer { name: "286_BatchNormalization_bn" type: "BatchNorm" bottom: "285" top: "286" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } }
layer { name: "286_BatchNormalization" type: "Scale" bottom: "286" top: "286" scale_param { bias_term: true } }
layer { name: "287_Add" type: "Eltwise" bottom: "286" bottom: "280" top: "287" eltwise_param { operation: SUM } }

# Height collapse: expand to 384, 1x5 depthwise with stride_h 2 (2x64 -> 1x64),
# project to 128.
layer { name: "288_Conv" type: "Convolution" bottom: "287" top: "288" convolution_param { num_output: 384 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } }
layer { name: "289_BatchNormalization_bn" type: "BatchNorm" bottom: "288" top: "289" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } }
layer { name: "289_BatchNormalization" type: "Scale" bottom: "289" top: "289" scale_param { bias_term: true } }
layer { name: "290_Relu" type: "ReLU" bottom: "289" top: "290" }
layer { name: "291_Conv" type: "Convolution" bottom: "290" top: "291" convolution_param { num_output: 384 bias_term: false group: 384 pad_h: 0 pad_w: 2 kernel_h: 1 kernel_w: 5 stride_h: 2 stride_w: 1 dilation: 1 } }
layer { name: "292_Conv" type: "Convolution" bottom: "291" top: "292" convolution_param { num_output: 128 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } }
layer { name: "293_BatchNormalization_bn" type: "BatchNorm" bottom: "292" top: "293" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } }
layer { name: "293_BatchNormalization" type: "Scale" bottom: "293" top: "293" scale_param { bias_term: true } }

# Residual block at 128 channels: expand to 512, 1x5 depthwise.
layer { name: "294_Conv" type: "Convolution" bottom: "293" top: "294" convolution_param { num_output: 512 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } }
layer { name: "295_BatchNormalization_bn" type: "BatchNorm" bottom: "294" top: "295" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } }
layer { name: "295_BatchNormalization" type: "Scale" bottom: "295" top: "295" scale_param { bias_term: true } }
layer { name: "296_Relu" type: "ReLU" bottom: "295" top: "296" }
layer { name: "297_Conv" type: "Convolution" bottom: "296" top: "297" convolution_param { num_output: 512 bias_term: false group: 512 pad_h: 0 pad_w: 2 kernel_h: 1 kernel_w: 5 stride_h: 1 stride_w: 1 dilation: 1 } }
layer { name: "298_Conv" type: "Convolution" bottom: "297" top: "298" convolution_param { num_output: 128 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } }
layer { name: "299_BatchNormalization_bn" type: "BatchNorm" bottom: "298" top: "299" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } }
layer { name: "299_BatchNormalization" type: "Scale" bottom: "299" top: "299" scale_param { bias_term: true } }
layer { name: "300_Add" type: "Eltwise" bottom: "299" bottom: "293" top: "300" eltwise_param { operation: SUM } }

# Head: expand to 384, 1x3 depthwise, project to 152, reduce to 96 with ReLU,
# then a 1x1 classifier over 7358 classes.
layer { name: "301_Conv" type: "Convolution" bottom: "300" top: "301" convolution_param { num_output: 384 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } }
layer { name: "302_BatchNormalization_bn" type: "BatchNorm" bottom: "301" top: "302" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } }
layer { name: "302_BatchNormalization" type: "Scale" bottom: "302" top: "302" scale_param { bias_term: true } }
layer { name: "303_Relu" type: "ReLU" bottom: "302" top: "303" }
layer { name: "304_Conv" type: "Convolution" bottom: "303" top: "304" convolution_param { num_output: 384 bias_term: false group: 384 pad_h: 0 pad_w: 1 kernel_h: 1 kernel_w: 3 stride_h: 1 stride_w: 1 dilation: 1 } }
layer { name: "305_Conv" type: "Convolution" bottom: "304" top: "305" convolution_param { num_output: 152 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } }
layer { name: "306_BatchNormalization_bn" type: "BatchNorm" bottom: "305" top: "306" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } }
layer { name: "306_BatchNormalization" type: "Scale" bottom: "306" top: "306" scale_param { bias_term: true } }
layer { name: "307_Conv" type: "Convolution" bottom: "306" top: "307" convolution_param { num_output: 96 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } }
layer { name: "308_BatchNormalization_bn" type: "BatchNorm" bottom: "307" top: "308" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } }
layer { name: "308_BatchNormalization" type: "Scale" bottom: "308" top: "308" scale_param { bias_term: true } }
layer { name: "309_Relu" type: "ReLU" bottom: "308" top: "309" }
layer { name: "310_Conv" type: "Convolution" bottom: "309" top: "310" convolution_param { num_output: 7358 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } }
layer { name: "311_BatchNormalization_bn" type: "BatchNorm" bottom: "310" top: "311" batch_norm_param { use_global_stats: true eps: 9.99999974738e-06 } }
layer { name: "311_BatchNormalization" type: "Scale" bottom: "311" top: "311" scale_param { bias_term: true } }

# Greedy per-column decoding: argmax over the channel (class) axis.
layer { name: "ArgMax" type: "ArgMax" bottom: "311" top: "ArgMax" argmax_param { axis: 1 } }