name: "PR_Lightweight11LayerNet"

# Input: N=1, C=1, H=32, W=512 (single-channel 32x512 image).
input: 'data'
input_shape { dim: 1 dim: 1 dim: 32 dim: 512 }

#----------------------------- Conv1 -----------------------------
# Stem: 7x7 convolution, stride 2, 1 -> 32 channels; halves H and W.
layer {
  name: "conv1_1_1"
  type: "Convolution"
  bottom: "data"
  top: "conv1_1"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 32
    kernel_size: 7
    pad: 3
    stride: 2
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" }
  }
}
layer { name: "relu1" type: "ReLU" bottom: "conv1_1" top: "conv1_1" }
# current shape: 16*256

#----------------------------- ResBlock_1 --------------------------
# Main branch of a bottleneck block:
#   1x1 (32) -> 3x3 depthwise (group == channels) -> 1x1 expand (64).
# 1*1 reduce
layer {
  name: "ResBlock_1_1"
  type: "Convolution"
  bottom: "conv1_1"
  top: "ResBlock_1_1"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 32
    kernel_size: 1
    pad: 0
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" }
  }
}
layer { name: "relu2" type: "ReLU" bottom: "ResBlock_1_1" top: "ResBlock_1_1" }
# 3*3 depthwise (group: 32 == num_output: 32)
layer {
  name: "ResBlock_1_2"
  type: "Convolution"
  bottom: "ResBlock_1_1"
  top: "ResBlock_1_2"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 32
    kernel_size: 3
    pad: 1
    group: 32
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" }
  }
}
layer { name: "relu3" type: "ReLU" bottom: "ResBlock_1_2" top: "ResBlock_1_2" }
# 1*1 expand: 32 -> 64 channels
layer {
  name: "ResBlock_1_3"
  type: "Convolution"
  bottom: "ResBlock_1_2"
  top: "ResBlock_1_3"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 64
    kernel_size: 1
    pad: 0
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" }
  }
}
layer { name: "relu4" type: "ReLU" bottom: "ResBlock_1_3" top: "ResBlock_1_3" }
# 1*1 shortcut projection follows, to match the changed channel count.
# 1*1 shortcut projection: conv1_1 (32 ch) -> 64 ch so the Eltwise SUM
# channel counts match the main branch (ResBlock_1_3).
layer {
  name: "ResBlock_1_4"
  type: "Convolution"
  bottom: "conv1_1"
  top: "ResBlock_1_4"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 64
    kernel_size: 1
    pad: 0
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" }
  }
}
# NOTE(review): a ReLU on the shortcut branch before the SUM is unusual
# for residual nets — confirm this is intentional.
layer { name: "relu5" type: "ReLU" bottom: "ResBlock_1_4" top: "ResBlock_1_4" }
# SUM: residual join of projected shortcut and main branch.
layer {
  name: "Block1"
  type: "Eltwise"
  bottom: "ResBlock_1_4"
  bottom: "ResBlock_1_3"
  top: "Block1"
  eltwise_param { operation: SUM }
}

#----------------------------- Pooling1 -------------------------------
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "Block1"
  top: "pool1"
  pooling_param { pool: MAX kernel_size: 2 stride: 2 }
}
# current shape: 8*128

#----------------------------- ResBlock_3 --------------------------
# Main branch: 1x1 (64) -> 3x3 depthwise -> 1x1 expand (128).
# 1*1 reduce
layer {
  name: "ResBlock_3_1"
  type: "Convolution"
  bottom: "pool1"
  top: "ResBlock_3_1"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 64
    kernel_size: 1
    pad: 0
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" }
  }
}
layer { name: "relu8" type: "ReLU" bottom: "ResBlock_3_1" top: "ResBlock_3_1" }
# 3*3 depthwise (group: 64 == num_output: 64)
layer {
  name: "ResBlock_3_2"
  type: "Convolution"
  bottom: "ResBlock_3_1"
  top: "ResBlock_3_2"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 64
    kernel_size: 3
    pad: 1
    group: 64
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" }
  }
}
layer { name: "relu9" type: "ReLU" bottom: "ResBlock_3_2" top: "ResBlock_3_2" }
# 1*1 expand: 64 -> 128 channels
layer {
  name: "ResBlock_3_3"
  type: "Convolution"
  bottom: "ResBlock_3_2"
  top: "ResBlock_3_3"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 128
    kernel_size: 1
    pad: 0
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" }
  }
}
layer { name: "relu10" type: "ReLU" bottom: "ResBlock_3_3" top: "ResBlock_3_3" }
# 1*1 shortcut projection follows, to match the changed channel count.
# 1*1 shortcut projection: pool1 (64 ch) -> 128 ch so the Eltwise SUM
# channel counts match the main branch (ResBlock_3_3).
layer {
  name: "ResBlock_3_4"
  type: "Convolution"
  bottom: "pool1"
  top: "ResBlock_3_4"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 128
    kernel_size: 1
    pad: 0
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" }
  }
}
# FIX: this layer was named "relu5", duplicating the name of the ReLU
# after ResBlock_1_4 — Caffe layer names must be unique within a net.
# Renamed to the unused "relu6"; the bottom/top blobs are unchanged.
layer { name: "relu6" type: "ReLU" bottom: "ResBlock_3_4" top: "ResBlock_3_4" }
# SUM: residual join of projected shortcut and main branch.
layer {
  name: "Block3"
  type: "Eltwise"
  bottom: "ResBlock_3_4"
  bottom: "ResBlock_3_3"
  top: "Block3"
  eltwise_param { operation: SUM }
}

#----------------------------- Pooling2 -------------------------------
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "Block3"
  top: "pool2"
  pooling_param { pool: MAX kernel_size: 2 stride: 2 }
}
# current shape: 4*64

#----------------------------- ResBlock_4 --------------------------
# Main branch: 1x1 (64) -> 3x3 depthwise -> 1x1 expand (128).
# 1*1 reduce
layer {
  name: "ResBlock_4_1"
  type: "Convolution"
  bottom: "pool2"
  top: "ResBlock_4_1"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 64
    kernel_size: 1
    pad: 0
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" }
  }
}
layer { name: "relu11" type: "ReLU" bottom: "ResBlock_4_1" top: "ResBlock_4_1" }
# 3*3 depthwise (group: 64 == num_output: 64)
layer {
  name: "ResBlock_4_2"
  type: "Convolution"
  bottom: "ResBlock_4_1"
  top: "ResBlock_4_2"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 64
    kernel_size: 3
    pad: 1
    group: 64
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" }
  }
}
layer { name: "relu12" type: "ReLU" bottom: "ResBlock_4_2" top: "ResBlock_4_2" }
# 1*1 expand: 64 -> 128 channels
layer {
  name: "ResBlock_4_3"
  type: "Convolution"
  bottom: "ResBlock_4_2"
  top: "ResBlock_4_3"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 128
    kernel_size: 1
    pad: 0
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" }
  }
}
layer { name: "relu13" type: "ReLU" bottom: "ResBlock_4_3" top: "ResBlock_4_3" }
# 1*1 shortcut projection follows, to match the changed channel count.
# 1*1 shortcut projection: pool2 (128 ch) -> 128 ch to match the main
# branch (ResBlock_4_3) before the Eltwise SUM.
layer {
  name: "ResBlock_4_4"
  type: "Convolution"
  bottom: "pool2"
  top: "ResBlock_4_4"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 128
    kernel_size: 1
    pad: 0
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" }
  }
}
layer { name: "relu14" type: "ReLU" bottom: "ResBlock_4_4" top: "ResBlock_4_4" }
# SUM: residual join of projected shortcut and main branch.
layer {
  name: "Block4"
  type: "Eltwise"
  bottom: "ResBlock_4_4"
  bottom: "ResBlock_4_3"
  top: "Block4"
  eltwise_param { operation: SUM }
}

#----------------------------- ResBlock_5 --------------------------
# Main branch: 1x1 (64) -> 3x3 depthwise -> 1x1 expand (128).
# The shortcut here is the identity (Block4), so no projection conv.
# 1*1 reduce
layer {
  name: "ResBlock_5_1"
  type: "Convolution"
  bottom: "Block4"
  top: "ResBlock_5_1"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 64
    kernel_size: 1
    pad: 0
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" }
  }
}
layer { name: "relu15" type: "ReLU" bottom: "ResBlock_5_1" top: "ResBlock_5_1" }
# 3*3 depthwise (group: 64 == num_output: 64)
layer {
  name: "ResBlock_5_2"
  type: "Convolution"
  bottom: "ResBlock_5_1"
  top: "ResBlock_5_2"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 64
    kernel_size: 3
    pad: 1
    group: 64
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" }
  }
}
layer { name: "relu16" type: "ReLU" bottom: "ResBlock_5_2" top: "ResBlock_5_2" }
# 1*1 expand: 64 -> 128 channels
layer {
  name: "ResBlock_5_3"
  type: "Convolution"
  bottom: "ResBlock_5_2"
  top: "ResBlock_5_3"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 128
    kernel_size: 1
    pad: 0
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" }
  }
}
layer { name: "relu17" type: "ReLU" bottom: "ResBlock_5_3" top: "ResBlock_5_3" }
# SUM: identity shortcut (Block4) + main branch.
layer {
  name: "Block5"
  type: "Eltwise"
  bottom: "Block4"
  bottom: "ResBlock_5_3"
  top: "Block5"
  eltwise_param { operation: SUM }
}

#----------------------------- ResBlock_9 --------------------------
# 1*1 expand: 128 -> 256 channels
layer {
  name: "ResBlock_9_1"
  type: "Convolution"
  bottom: "Block5"
  top: "ResBlock_9_1"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 256
    kernel_size: 1
    pad: 0
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" }
  }
}
layer { name: "relu27" type: "ReLU" bottom: "ResBlock_9_1" top: "ResBlock_9_1" }
# 4*4 depthwise with width-only padding (pad_w: 2, pad_h defaults to 0):
# with a 4-high input this reduces height to 1 while keeping the width
# dimension — presumably collapsing the feature map into a 1-D sequence.
# TODO(review): confirm the intended output width (W + 2*2 - 4 + 1).
layer {
  name: "ResBlock_9_2"
  type: "Convolution"
  bottom: "ResBlock_9_1"
  top: "ResBlock_9_2"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 256
    kernel_size: 4
    pad_w: 2
    group: 256
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" }
  }
}
layer { name: "relu28" type: "ReLU" bottom: "ResBlock_9_2" top: "ResBlock_9_2" }
# 1*1 (also feeds the Block_End shortcut below)
layer {
  name: "ResBlock_added1"
  type: "Convolution"
  bottom: "ResBlock_9_2"
  top: "ResBlock_added1"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 256
    kernel_size: 1
    pad: 0
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" }
  }
}
layer { name: "relu29" type: "ReLU" bottom: "ResBlock_added1" top: "ResBlock_added1" }
# 1*1 expand: 256 -> 512 channels
layer {
  name: "ResBlock_9_3"
  type: "Convolution"
  bottom: "ResBlock_added1"
  top: "ResBlock_9_3"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 512
    kernel_size: 1
    pad: 0
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" }
  }
}
# FIX: this layer was also named "relu29" (same name as the ReLU after
# ResBlock_added1) — Caffe layer names must be unique within a net.
# Renamed to "relu29_3"; the bottom/top blobs are unchanged.
layer { name: "relu29_3" type: "ReLU" bottom: "ResBlock_9_3" top: "ResBlock_9_3" }

#----------------------------- ResBlock_10 --------------------------
# 1*1 reduce: 512 -> 256 channels
layer {
  name: "ResBlock_10_1"
  type: "Convolution"
  bottom: "ResBlock_9_3"
  top: "ResBlock_10_1"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 256
    kernel_size: 1
    pad: 0
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" }
  }
}
layer { name: "relu30" type: "ReLU" bottom: "ResBlock_10_1" top: "ResBlock_10_1" }
# SUM: ResBlock_10_1 + shortcut from ResBlock_added1 (both 256 ch).
layer {
  name: "Block_End"
  type: "Eltwise"
  bottom: "ResBlock_10_1"
  bottom: "ResBlock_added1"
  top: "Block_End"
  eltwise_param { operation: SUM }
}
# 1*1 classifier head: 256 -> 7402 output channels.
layer {
  name: "conv_end_yao"
  type: "Convolution"
  bottom: "Block_End"
  top: "conv_end_yao"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 7402
    kernel_size: 1
    pad: 0
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" }
  }
}
#------------------------------------------------------------------------
# Optional post-processing (disabled): permute to W-major order and
# reshape to a flat sequence ("cnn_end"). Kept for reference.
#
#layer {
#  name: "transpose"
#  type: "Permute"
#  bottom: "conv_end_yao"
#  top: "transpose"
#  permute_param{ order: 3 order: 2 order: 0 order:1}
#}
#
#layer {
#  name: "reshape"
#  type: "Reshape"
#  bottom: "transpose"
#  top: "cnn_end"
#  reshape_param {
#    shape { dim: -1 }
#    axis: 1
#    num_axes: 2
#  }
#}