layer { name: "input" type: "Input" top: "data" input_param { shape { dim: 1 dim: 1 dim: 32 dim: 512 } } } layer { name: "power-data" type: "Power" bottom: "data" top: "0" power_param { scale: 0.0039215686274 shift: -0.67 } } layer { name: "182_Conv" type: "Convolution" bottom: "0" top: "182" convolution_param { num_output: 32 bias_term: false group: 1 pad_h: 1 pad_w: 1 kernel_h: 3 kernel_w: 3 stride_h: 2 stride_w: 2 dilation: 1 } } layer { name: "183_BatchNormalization_bn" type: "BatchNorm" bottom: "182" top: "183" batch_norm_param { use_global_stats: true eps: 9.999999747378752e-06 } } layer { name: "183_BatchNormalization" type: "Scale" bottom: "183" top: "183" scale_param { bias_term: true } } layer { name: "184_Relu" type: "ReLU" bottom: "183" top: "184" } layer { name: "185_Conv" type: "Convolution" bottom: "184" top: "185" convolution_param { num_output: 32 bias_term: false group: 32 pad_h: 1 pad_w: 1 kernel_h: 3 kernel_w: 3 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "186_Conv" type: "Convolution" bottom: "185" top: "186" convolution_param { num_output: 32 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "187_BatchNormalization_bn" type: "BatchNorm" bottom: "186" top: "187" batch_norm_param { use_global_stats: true eps: 9.999999747378752e-06 } } layer { name: "187_BatchNormalization" type: "Scale" bottom: "187" top: "187" scale_param { bias_term: true } } layer { name: "188_Relu" type: "ReLU" bottom: "187" top: "188" } layer { name: "189_Conv" type: "Convolution" bottom: "188" top: "189" convolution_param { num_output: 96 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "190_BatchNormalization_bn" type: "BatchNorm" bottom: "189" top: "190" batch_norm_param { use_global_stats: true eps: 9.999999747378752e-06 } } layer { name: "190_BatchNormalization" type: "Scale" bottom: "190" top: "190" scale_param { bias_term: true } } layer { name: "191_Relu" type: "ReLU" bottom: "190" top: "191" } layer { name: "192_Conv" type: "Convolution" bottom: "191" top: "192" convolution_param { num_output: 96 bias_term: false group: 96 pad_h: 1 pad_w: 1 kernel_h: 3 kernel_w: 3 stride_h: 2 stride_w: 2 dilation: 1 } } layer { name: "193_Conv" type: "Convolution" bottom: "192" top: "193" convolution_param { num_output: 24 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "194_BatchNormalization_bn" type: "BatchNorm" bottom: "193" top: "194" batch_norm_param { use_global_stats: true eps: 9.999999747378752e-06 } } layer { name: "194_BatchNormalization" type: "Scale" bottom: "194" top: "194" scale_param { bias_term: true } } layer { name: "195_Conv" type: "Convolution" bottom: "194" top: "195" convolution_param { num_output: 72 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "196_BatchNormalization_bn" type: "BatchNorm" bottom: "195" top: "196" batch_norm_param { use_global_stats: true eps: 9.999999747378752e-06 } } layer { name: "196_BatchNormalization" type: "Scale" bottom: "196" top: "196" scale_param { bias_term: true } } layer { name: "197_Relu" type: "ReLU" bottom: "196" top: "197" } layer { name: "198_Conv" type: "Convolution" bottom: "197" top: "198" convolution_param { num_output: 72 bias_term: false group: 72 pad_h: 1 pad_w: 1 kernel_h: 3 kernel_w: 3 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "199_Conv" type: "Convolution" bottom: "198" top: "199" convolution_param { num_output: 24 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "200_BatchNormalization_bn" type: "BatchNorm" bottom: "199" top: "200" batch_norm_param { use_global_stats: true eps: 9.999999747378752e-06 } } layer { name: "200_BatchNormalization" type: "Scale" bottom: "200" top: "200" scale_param { bias_term: true } } layer { name: "201_Add" type: "Eltwise" bottom: "200" bottom: "194" top: "201" eltwise_param { operation: SUM } } layer { name: "202_Conv" type: "Convolution" bottom: "201" top: "202" convolution_param { num_output: 72 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "203_BatchNormalization_bn" type: "BatchNorm" bottom: "202" top: "203" batch_norm_param { use_global_stats: true eps: 9.999999747378752e-06 } } layer { name: "203_BatchNormalization" type: "Scale" bottom: "203" top: "203" scale_param { bias_term: true } } layer { name: "204_Relu" type: "ReLU" bottom: "203" top: "204" } layer { name: "205_Conv" type: "Convolution" bottom: "204" top: "205" convolution_param { num_output: 72 bias_term: false group: 72 pad_h: 1 pad_w: 1 kernel_h: 3 kernel_w: 3 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "206_Conv" type: "Convolution" bottom: "205" top: "206" convolution_param { num_output: 24 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "207_BatchNormalization_bn" type: "BatchNorm" bottom: "206" top: "207" batch_norm_param { use_global_stats: true eps: 9.999999747378752e-06 } } layer { name: "207_BatchNormalization" type: "Scale" bottom: "207" top: "207" scale_param { bias_term: true } } layer { name: "208_Add" type: "Eltwise" bottom: "207" bottom: "201" top: "208" eltwise_param { operation: SUM } } layer { name: "209_Conv" type: "Convolution" bottom: "208" top: "209" convolution_param { num_output: 72 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "210_BatchNormalization_bn" type: "BatchNorm" bottom: "209" top: "210" batch_norm_param { use_global_stats: true eps: 9.999999747378752e-06 } } layer { name: "210_BatchNormalization" type: "Scale" bottom: "210" top: "210" scale_param { bias_term: true } } layer { name: "211_Relu" type: "ReLU" bottom: "210" top: "211" } layer { name: "212_Conv" type: "Convolution" bottom: "211" top: "212" convolution_param { num_output: 72 bias_term: false group: 72 pad_h: 2 pad_w: 2 kernel_h: 5 kernel_w: 5 stride_h: 2 stride_w: 2 dilation: 1 } } layer { name: "213_Conv" type: "Convolution" bottom: "212" top: "213" convolution_param { num_output: 32 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "214_BatchNormalization_bn" type: "BatchNorm" bottom: "213" top: "214" batch_norm_param { use_global_stats: true eps: 9.999999747378752e-06 } } layer { name: "214_BatchNormalization" type: "Scale" bottom: "214" top: "214" scale_param { bias_term: true } } layer { name: "215_Conv" type: "Convolution" bottom: "214" top: "215" convolution_param { num_output: 96 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "216_BatchNormalization_bn" type: "BatchNorm" bottom: "215" top: "216" batch_norm_param { use_global_stats: true eps: 9.999999747378752e-06 } } layer { name: "216_BatchNormalization" type: "Scale" bottom: "216" top: "216" scale_param { bias_term: true } } layer { name: "217_Relu" type: "ReLU" bottom: "216" top: "217" } layer { name: "218_Conv" type: "Convolution" bottom: "217" top: "218" convolution_param { num_output: 96 bias_term: false group: 96 pad_h: 2 pad_w: 2 kernel_h: 5 kernel_w: 5 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "219_Conv" type: "Convolution" bottom: "218" top: "219" convolution_param { num_output: 32 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "220_BatchNormalization_bn" type: "BatchNorm" bottom: "219" top: "220" batch_norm_param { use_global_stats: true eps: 9.999999747378752e-06 } } layer { name: "220_BatchNormalization" type: "Scale" bottom: "220" top: "220" scale_param { bias_term: true } } layer { name: "221_Add" type: "Eltwise" bottom: "220" bottom: "214" top: "221" eltwise_param { operation: SUM } } layer { name: "222_Conv" type: "Convolution" bottom: "221" top: "222" convolution_param { num_output: 96 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "223_BatchNormalization_bn" type: "BatchNorm" bottom: "222" top: "223" batch_norm_param { use_global_stats: true eps: 9.999999747378752e-06 } } layer { name: "223_BatchNormalization" type: "Scale" bottom: "223" top: "223" scale_param { bias_term: true } } layer { name: "224_Relu" type: "ReLU" bottom: "223" top: "224" } layer { name: "225_Conv" type: "Convolution" bottom: "224" top: "225" convolution_param { num_output: 96 bias_term: false group: 96 pad_h: 2 pad_w: 2 kernel_h: 5 kernel_w: 5 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "226_Conv" type: "Convolution" bottom: "225" top: "226" convolution_param { num_output: 32 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "227_BatchNormalization_bn" type: "BatchNorm" bottom: "226" top: "227" batch_norm_param { use_global_stats: true eps: 9.999999747378752e-06 } } layer { name: "227_BatchNormalization" type: "Scale" bottom: "227" top: "227" scale_param { bias_term: true } } layer { name: "228_Add" type: "Eltwise" bottom: "227" bottom: "221" top: "228" eltwise_param { operation: SUM } } layer { name: "229_Conv" type: "Convolution" bottom: "228" top: "229" convolution_param { num_output: 192 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "230_BatchNormalization_bn" type: "BatchNorm" bottom: "229" top: "230" batch_norm_param { use_global_stats: true eps: 9.999999747378752e-06 } } layer { name: "230_BatchNormalization" type: "Scale" bottom: "230" top: "230" scale_param { bias_term: true } } layer { name: "231_Relu" type: "ReLU" bottom: "230" top: "231" } layer { name: "232_Conv" type: "Convolution" bottom: "231" top: "232" convolution_param { num_output: 192 bias_term: false group: 192 pad_h: 2 pad_w: 2 kernel_h: 5 kernel_w: 5 stride_h: 2 stride_w: 1 dilation: 1 } } layer { name: "233_Conv" type: "Convolution" bottom: "232" top: "233" convolution_param { num_output: 48 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "234_BatchNormalization_bn" type: "BatchNorm" bottom: "233" top: "234" batch_norm_param { use_global_stats: true eps: 9.999999747378752e-06 } } layer { name: "234_BatchNormalization" type: "Scale" bottom: "234" top: "234" scale_param { bias_term: true } } layer { name: "235_Conv" type: "Convolution" bottom: "234" top: "235" convolution_param { num_output: 288 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "236_BatchNormalization_bn" type: "BatchNorm" bottom: "235" top: "236" batch_norm_param { use_global_stats: true eps: 9.999999747378752e-06 } } layer { name: "236_BatchNormalization" type: "Scale" bottom: "236" top: "236" scale_param { bias_term: true } } layer { name: "237_Relu" type: "ReLU" bottom: "236" top: "237" } layer { name: "238_Conv" type: "Convolution" bottom: "237" top: "238" convolution_param { num_output: 288 bias_term: false group: 288 pad_h: 2 pad_w: 2 kernel_h: 5 kernel_w: 5 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "239_Conv" type: "Convolution" bottom: "238" top: "239" convolution_param { num_output: 48 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "240_BatchNormalization_bn" type: "BatchNorm" bottom: "239" top: "240" batch_norm_param { use_global_stats: true eps: 9.999999747378752e-06 } } layer { name: "240_BatchNormalization" type: "Scale" bottom: "240" top: "240" scale_param { bias_term: true } } layer { name: "241_Add" type: "Eltwise" bottom: "240" bottom: "234" top: "241" eltwise_param { operation: SUM } } layer { name: "242_Conv" type: "Convolution" bottom: "241" top: "242" convolution_param { num_output: 288 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "243_BatchNormalization_bn" type: "BatchNorm" bottom: "242" top: "243" batch_norm_param { use_global_stats: true eps: 9.999999747378752e-06 } } layer { name: "243_BatchNormalization" type: "Scale" bottom: "243" top: "243" scale_param { bias_term: true } } layer { name: "244_Relu" type: "ReLU" bottom: "243" top: "244" } layer { name: "245_Conv" type: "Convolution" bottom: "244" top: "245" convolution_param { num_output: 288 bias_term: false group: 288 pad_h: 2 pad_w: 2 kernel_h: 5 kernel_w: 5 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "246_Conv" type: "Convolution" bottom: "245" top: "246" convolution_param { num_output: 48 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "247_BatchNormalization_bn" type: "BatchNorm" bottom: "246" top: "247" batch_norm_param { use_global_stats: true eps: 9.999999747378752e-06 } } layer { name: "247_BatchNormalization" type: "Scale" bottom: "247" top: "247" scale_param { bias_term: true } } layer { name: "248_Add" type: "Eltwise" bottom: "247" bottom: "241" top: "248" eltwise_param { operation: SUM } } layer { name: "249_Conv" type: "Convolution" bottom: "248" top: "249" convolution_param { num_output: 288 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "250_BatchNormalization_bn" type: "BatchNorm" bottom: "249" top: "250" batch_norm_param { use_global_stats: true eps: 9.999999747378752e-06 } } layer { name: "250_BatchNormalization" type: "Scale" bottom: "250" top: "250" scale_param { bias_term: true } } layer { name: "251_Relu" type: "ReLU" bottom: "250" top: "251" } layer { name: "252_Conv" type: "Convolution" bottom: "251" top: "252" convolution_param { num_output: 288 bias_term: false group: 288 pad_h: 1 pad_w: 1 kernel_h: 3 kernel_w: 3 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "253_Conv" type: "Convolution" bottom: "252" top: "253" convolution_param { num_output: 96 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "254_BatchNormalization_bn" type: "BatchNorm" bottom: "253" top: "254" batch_norm_param { use_global_stats: true eps: 9.999999747378752e-06 } } layer { name: "254_BatchNormalization" type: "Scale" bottom: "254" top: "254" scale_param { bias_term: true } } layer { name: "255_Conv" type: "Convolution" bottom: "254" top: "255" convolution_param { num_output: 576 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "256_BatchNormalization_bn" type: "BatchNorm" bottom: "255" top: "256" batch_norm_param { use_global_stats: true eps: 9.999999747378752e-06 } } layer { name: "256_BatchNormalization" type: "Scale" bottom: "256" top: "256" scale_param { bias_term: true } } layer { name: "257_Relu" type: "ReLU" bottom: "256" top: "257" } layer { name: "258_Conv" type: "Convolution" bottom: "257" top: "258" convolution_param { num_output: 576 bias_term: false group: 576 pad_h: 1 pad_w: 1 kernel_h: 3 kernel_w: 3 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "259_Conv" type: "Convolution" bottom: "258" top: "259" convolution_param { num_output: 96 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "260_BatchNormalization_bn" type: "BatchNorm" bottom: "259" top: "260" batch_norm_param { use_global_stats: true eps: 9.999999747378752e-06 } } layer { name: "260_BatchNormalization" type: "Scale" bottom: "260" top: "260" scale_param { bias_term: true } } layer { name: "261_Add" type: "Eltwise" bottom: "260" bottom: "254" top: "261" eltwise_param { operation: SUM } } layer { name: "262_Conv" type: "Convolution" bottom: "261" top: "262" convolution_param { num_output: 384 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "263_BatchNormalization_bn" type: "BatchNorm" bottom: "262" top: "263" batch_norm_param { use_global_stats: true eps: 9.999999747378752e-06 } } layer { name: "263_BatchNormalization" type: "Scale" bottom: "263" top: "263" scale_param { bias_term: true } } layer { name: "264_Relu" type: "ReLU" bottom: "263" top: "264" } layer { name: "265_Conv" type: "Convolution" bottom: "264" top: "265" convolution_param { num_output: 384 bias_term: false group: 384 pad_h: 0 pad_w: 2 kernel_h: 2 kernel_w: 5 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "266_Conv" type: "Convolution" bottom: "265" top: "266" convolution_param { num_output: 152 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "267_BatchNormalization_bn" type: "BatchNorm" bottom: "266" top: "267" batch_norm_param { use_global_stats: true eps: 9.999999747378752e-06 } } layer { name: "267_BatchNormalization" type: "Scale" bottom: "267" top: "267" scale_param { bias_term: true } } layer { name: "268_Conv" type: "Convolution" bottom: "267" top: "268" convolution_param { num_output: 96 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "269_BatchNormalization_bn" type: "BatchNorm" bottom: "268" top: "269" batch_norm_param { use_global_stats: true eps: 9.999999747378752e-06 } } layer { name: "269_BatchNormalization" type: "Scale" bottom: "269" top: "269" scale_param { bias_term: true } } layer { name: "270_Relu" type: "ReLU" bottom: "269" top: "270" } layer { name: "271_Conv" type: "Convolution" bottom: "270" top: "271" convolution_param { num_output: 7358 bias_term: false group: 1 pad_h: 0 pad_w: 0 kernel_h: 1 kernel_w: 1 stride_h: 1 stride_w: 1 dilation: 1 } } layer { name: "272_BatchNormalization_bn" type: "BatchNorm" bottom: "271" top: "272" batch_norm_param { use_global_stats: true eps: 9.999999747378752e-06 } } layer { name: "272_BatchNormalization" type: "Scale" bottom: "272" top: "272" scale_param { bias_term: true } } layer { name:"ArgMax" type: "ArgMax" bottom: "272" top: "ArgMax" argmax_param { axis: 1 } }