name: "Gender Net"
# Deploy (inference) definition of a compact residual CNN that maps a
# 1x3x112x112 input image to 2 output logits ("fc3_8W_PosAug").
#
# Repeating pattern per stage:
#   Convolution -> BatchNorm (use_global_stats: true) -> Scale -> ReLU
# with 1x1 stride-2 projection shortcuts ("*_p" branches) merged into the
# main path by elementwise SUM ("res_*" / "resf_*" layers).
#
# NOTE(review): every BatchNorm layer carries `include { phase: TEST }`, so
# this prototxt is only valid for TEST-phase use. If it were reused as a
# TRAIN net, the BatchNorm layers would be dropped while the Scale layers
# remained, feeding Scale with unnormalized activations — confirm intent.
#
# Fix applied: "BatchNorm1" was the only BatchNorm layer missing the three
# `param { lr_mult: 0 }` entries (they freeze the mean / variance /
# moving-average-factor blobs during fine-tuning); added for consistency
# with every other BatchNorm layer in this file. This has no effect on
# inference output.

layer {
  name: "data" type: "Input" top: "data"
  input_param { shape { dim: 1 dim: 3 dim: 112 dim: 112 } }
}

# ---- Stem: 112x112 -> 55x55, 16 channels ----
layer {
  name: "conv_pre" type: "Convolution" bottom: "data" top: "conv_pre"
  param { lr_mult: 1 } param { lr_mult: 2 }
  convolution_param { num_output: 16 pad: 0 kernel_size: 3 stride: 2
    weight_filler { type: "xavier" } bias_filler { type: "constant" } }
}
layer {
  name: "BatchNorm1" type: "BatchNorm" bottom: "conv_pre" top: "BatchNorm1"
  param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 }
  batch_norm_param { use_global_stats: true } include { phase: TEST }
}
layer { name: "Scale1" type: "Scale" bottom: "BatchNorm1" top: "Scale1" scale_param { bias_term: true } }
layer { name: "relu_pre" type: "ReLU" bottom: "Scale1" top: "relu_pre" }

# ---- Residual stage 1 (downsample x2): main path relu_pre -> conv1_1 -> conv1_2 ----
layer {
  name: "conv1_1" type: "Convolution" bottom: "relu_pre" top: "conv1_1"
  param { lr_mult: 1 } param { lr_mult: 2 }
  convolution_param { num_output: 32 pad: 1 kernel_size: 3 stride: 2
    weight_filler { type: "xavier" } bias_filler { type: "constant" } }
}
layer {
  name: "BatchNorm2" type: "BatchNorm" bottom: "conv1_1" top: "BatchNorm2"
  param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 }
  batch_norm_param { use_global_stats: true } include { phase: TEST }
}
layer { name: "Scale2" type: "Scale" bottom: "BatchNorm2" top: "Scale2" scale_param { bias_term: true } }
layer { name: "relu1_1" type: "ReLU" bottom: "Scale2" top: "relu1_1" }
layer {
  name: "conv1_2" type: "Convolution" bottom: "relu1_1" top: "conv1_2"
  param { lr_mult: 1 } param { lr_mult: 2 }
  convolution_param { num_output: 16 pad: 1 kernel_size: 3 stride: 1
    weight_filler { type: "xavier" } bias_filler { type: "constant" } }
}
layer {
  name: "BatchNorm3" type: "BatchNorm" bottom: "conv1_2" top: "BatchNorm3"
  param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 }
  batch_norm_param { use_global_stats: true } include { phase: TEST }
}
layer { name: "Scale3" type: "Scale" bottom: "BatchNorm3" top: "Scale3" scale_param { bias_term: true } }
layer { name: "relu1_2" type: "ReLU" bottom: "Scale3" top: "relu1_2" }
# Projection shortcut: 1x1 stride-2 conv from relu_pre to match the main path.
layer {
  name: "conv1_p" type: "Convolution" bottom: "relu_pre" top: "conv1_p"
  param { lr_mult: 1 } param { lr_mult: 2 }
  convolution_param { num_output: 16 pad: 0 kernel_size: 1 stride: 2
    weight_filler { type: "xavier" } bias_filler { type: "constant" } }
}
layer {
  name: "BatchNorm1_p" type: "BatchNorm" bottom: "conv1_p" top: "BatchNorm1_p"
  param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 }
  batch_norm_param { use_global_stats: true } include { phase: TEST }
}
layer { name: "Scale1_p" type: "Scale" bottom: "BatchNorm1_p" top: "Scale1_p" scale_param { bias_term: true } }
layer { name: "relu1_p" type: "ReLU" bottom: "Scale1_p" top: "relu1_p" }
layer {
  name: "res_1" type: "Eltwise" bottom: "relu1_2" bottom: "relu1_p" top: "res_1"
  eltwise_param { operation: SUM coeff: 1 coeff: 1 }
}

# ---- Identity refinement block on res_1 (convf_1/convf_2, stride 1) ----
layer {
  name: "convf_1" type: "Convolution" bottom: "res_1" top: "convf_1"
  param { lr_mult: 1 } param { lr_mult: 2 }
  convolution_param { num_output: 32 pad: 1 kernel_size: 3 stride: 1
    weight_filler { type: "xavier" } bias_filler { type: "constant" } }
}
layer {
  name: "BatchNormf2" type: "BatchNorm" bottom: "convf_1" top: "BatchNormf2"
  param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 }
  batch_norm_param { use_global_stats: true } include { phase: TEST }
}
layer { name: "Scalef2" type: "Scale" bottom: "BatchNormf2" top: "Scalef2" scale_param { bias_term: true } }
layer { name: "reluf_1" type: "ReLU" bottom: "Scalef2" top: "reluf_1" }
layer {
  name: "convf_2" type: "Convolution" bottom: "reluf_1" top: "convf_2"
  param { lr_mult: 1 } param { lr_mult: 2 }
  convolution_param { num_output: 16 pad: 1 kernel_size: 3 stride: 1
    weight_filler { type: "xavier" } bias_filler { type: "constant" } }
}
layer {
  name: "BatchNormf3" type: "BatchNorm" bottom: "convf_2" top: "BatchNormf3"
  param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 }
  batch_norm_param { use_global_stats: true } include { phase: TEST }
}
layer { name: "Scalef3" type: "Scale" bottom: "BatchNormf3" top: "Scalef3" scale_param { bias_term: true } }
layer { name: "reluf_2" type: "ReLU" bottom: "Scalef3" top: "reluf_2" }
layer {
  name: "resf_1" type: "Eltwise" bottom: "reluf_2" bottom: "res_1" top: "resf_1"
  eltwise_param { operation: SUM coeff: 1 coeff: 1 }
}

# ---- Residual stage 2 (downsample x2): 48 -> 24 channels ----
layer {
  name: "conv2_1" type: "Convolution" bottom: "resf_1" top: "conv2_1"
  param { lr_mult: 1 } param { lr_mult: 2 }
  convolution_param { num_output: 48 pad: 1 kernel_size: 3 stride: 1
    weight_filler { type: "xavier" } bias_filler { type: "constant" } }
}
layer {
  name: "BatchNorm4" type: "BatchNorm" bottom: "conv2_1" top: "BatchNorm4"
  param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 }
  batch_norm_param { use_global_stats: true } include { phase: TEST }
}
layer { name: "Scale4" type: "Scale" bottom: "BatchNorm4" top: "Scale4" scale_param { bias_term: true } }
layer { name: "relu2_1" type: "ReLU" bottom: "Scale4" top: "relu2_1" }
layer {
  name: "conv2_2" type: "Convolution" bottom: "relu2_1" top: "conv2_2"
  param { lr_mult: 1 } param { lr_mult: 2 }
  convolution_param { num_output: 24 pad: 1 kernel_size: 3 stride: 2
    weight_filler { type: "xavier" } bias_filler { type: "constant" } }
}
layer {
  name: "BatchNorm5" type: "BatchNorm" bottom: "conv2_2" top: "BatchNorm5"
  param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 }
  batch_norm_param { use_global_stats: true } include { phase: TEST }
}
layer { name: "Scale5" type: "Scale" bottom: "BatchNorm5" top: "Scale5" scale_param { bias_term: true } }
layer { name: "relu2_2" type: "ReLU" bottom: "Scale5" top: "relu2_2" }
layer {
  name: "conv2_p" type: "Convolution" bottom: "resf_1" top: "conv2_p"
  param { lr_mult: 1 } param { lr_mult: 2 }
  convolution_param { num_output: 24 pad: 0 kernel_size: 1 stride: 2
    weight_filler { type: "xavier" } bias_filler { type: "constant" } }
}
layer {
  name: "BatchNorm2_p" type: "BatchNorm" bottom: "conv2_p" top: "BatchNorm2_p"
  param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 }
  batch_norm_param { use_global_stats: true } include { phase: TEST }
}
layer { name: "Scale2_p" type: "Scale" bottom: "BatchNorm2_p" top: "Scale2_p" scale_param { bias_term: true } }
layer { name: "relu2_p" type: "ReLU" bottom: "Scale2_p" top: "relu2_p" }
layer {
  name: "res_2" type: "Eltwise" bottom: "relu2_p" bottom: "relu2_2" top: "res_2"
  eltwise_param { operation: SUM coeff: 1 coeff: 1 }
}

# ---- Identity refinement block on res_2 (convf_3/convf_4, stride 1) ----
layer {
  name: "convf_3" type: "Convolution" bottom: "res_2" top: "convf_3"
  param { lr_mult: 1 } param { lr_mult: 2 }
  convolution_param { num_output: 48 pad: 1 kernel_size: 3 stride: 1
    weight_filler { type: "xavier" } bias_filler { type: "constant" } }
}
layer {
  name: "BatchNormf4" type: "BatchNorm" bottom: "convf_3" top: "BatchNormf4"
  param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 }
  batch_norm_param { use_global_stats: true } include { phase: TEST }
}
layer { name: "Scalef4" type: "Scale" bottom: "BatchNormf4" top: "Scalef4" scale_param { bias_term: true } }
layer { name: "reluf_3" type: "ReLU" bottom: "Scalef4" top: "reluf_3" }
layer {
  name: "convf_4" type: "Convolution" bottom: "reluf_3" top: "convf_4"
  param { lr_mult: 1 } param { lr_mult: 2 }
  convolution_param { num_output: 24 pad: 1 kernel_size: 3 stride: 1
    weight_filler { type: "xavier" } bias_filler { type: "constant" } }
}
layer {
  name: "BatchNormf5" type: "BatchNorm" bottom: "convf_4" top: "BatchNormf5"
  param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 }
  batch_norm_param { use_global_stats: true } include { phase: TEST }
}
layer { name: "Scalef5" type: "Scale" bottom: "BatchNormf5" top: "Scalef5" scale_param { bias_term: true } }
layer { name: "reluf_4" type: "ReLU" bottom: "Scalef5" top: "reluf_4" }
layer {
  name: "resf_2" type: "Eltwise" bottom: "reluf_4" bottom: "res_2" top: "resf_2"
  eltwise_param { operation: SUM coeff: 1 coeff: 1 }
}

# ---- Residual stage 3 (downsample x2): 64 -> 40 channels ----
layer {
  name: "conv3_1" type: "Convolution" bottom: "resf_2" top: "conv3_1"
  param { lr_mult: 1 } param { lr_mult: 2 }
  convolution_param { num_output: 64 pad: 1 kernel_size: 3 stride: 2
    weight_filler { type: "xavier" } bias_filler { type: "constant" } }
}
layer {
  name: "BatchNorm6" type: "BatchNorm" bottom: "conv3_1" top: "BatchNorm6"
  param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 }
  batch_norm_param { use_global_stats: true } include { phase: TEST }
}
layer { name: "Scale6" type: "Scale" bottom: "BatchNorm6" top: "Scale6" scale_param { bias_term: true } }
layer { name: "relu3_1" type: "ReLU" bottom: "Scale6" top: "relu3_1" }
layer {
  name: "conv3_2" type: "Convolution" bottom: "relu3_1" top: "conv3_2"
  param { lr_mult: 1 } param { lr_mult: 2 }
  convolution_param { num_output: 40 pad: 1 kernel_size: 3 stride: 1
    weight_filler { type: "xavier" } bias_filler { type: "constant" } }
}
layer {
  name: "BatchNorm7" type: "BatchNorm" bottom: "conv3_2" top: "BatchNorm7"
  param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 }
  batch_norm_param { use_global_stats: true } include { phase: TEST }
}
layer { name: "Scale7" type: "Scale" bottom: "BatchNorm7" top: "Scale7" scale_param { bias_term: true } }
layer { name: "relu3_2" type: "ReLU" bottom: "Scale7" top: "relu3_2" }
layer {
  name: "conv3_p" type: "Convolution" bottom: "resf_2" top: "conv3_p"
  param { lr_mult: 1 } param { lr_mult: 2 }
  convolution_param { num_output: 40 pad: 0 kernel_size: 1 stride: 2
    weight_filler { type: "xavier" } bias_filler { type: "constant" } }
}
layer {
  name: "BatchNorm3_p" type: "BatchNorm" bottom: "conv3_p" top: "BatchNorm3_p"
  param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 }
  batch_norm_param { use_global_stats: true } include { phase: TEST }
}
layer { name: "Scale3_p" type: "Scale" bottom: "BatchNorm3_p" top: "Scale3_p" scale_param { bias_term: true } }
layer { name: "relu3_p" type: "ReLU" bottom: "Scale3_p" top: "relu3_p" }
layer {
  name: "res_3" type: "Eltwise" bottom: "relu3_p" bottom: "relu3_2" top: "res_3"
  eltwise_param { operation: SUM coeff: 1 coeff: 1 }
}

# ---- Residual stage 4 (downsample x2): 80 -> 64 channels ----
layer {
  name: "conv4_1" type: "Convolution" bottom: "res_3" top: "conv4_1"
  param { lr_mult: 1 } param { lr_mult: 2 }
  convolution_param { num_output: 80 pad: 1 kernel_size: 3 stride: 2
    weight_filler { type: "xavier" } bias_filler { type: "constant" } }
}
layer {
  name: "BatchNorm8" type: "BatchNorm" bottom: "conv4_1" top: "BatchNorm8"
  param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 }
  batch_norm_param { use_global_stats: true } include { phase: TEST }
}
layer { name: "Scale8" type: "Scale" bottom: "BatchNorm8" top: "Scale8" scale_param { bias_term: true } }
layer { name: "relu4_1" type: "ReLU" bottom: "Scale8" top: "relu4_1" }
layer {
  name: "conv4_2" type: "Convolution" bottom: "relu4_1" top: "conv4_2"
  param { lr_mult: 1 } param { lr_mult: 2 }
  convolution_param { num_output: 64 pad: 1 kernel_size: 3 stride: 1
    weight_filler { type: "xavier" } bias_filler { type: "constant" } }
}
layer {
  name: "BatchNorm9" type: "BatchNorm" bottom: "conv4_2" top: "BatchNorm9"
  param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 }
  batch_norm_param { use_global_stats: true } include { phase: TEST }
}
layer { name: "Scale9" type: "Scale" bottom: "BatchNorm9" top: "Scale9" scale_param { bias_term: true } }
layer { name: "relu4_2" type: "ReLU" bottom: "Scale9" top: "relu4_2" }
layer {
  name: "conv4_p" type: "Convolution" bottom: "res_3" top: "conv4_p"
  param { lr_mult: 1 } param { lr_mult: 2 }
  convolution_param { num_output: 64 pad: 0 kernel_size: 1 stride: 2
    weight_filler { type: "xavier" } bias_filler { type: "constant" } }
}
layer {
  name: "BatchNorm4_p" type: "BatchNorm" bottom: "conv4_p" top: "BatchNorm4_p"
  param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 }
  batch_norm_param { use_global_stats: true } include { phase: TEST }
}
layer { name: "Scale4_p" type: "Scale" bottom: "BatchNorm4_p" top: "Scale4_p" scale_param { bias_term: true } }
layer { name: "relu4_p" type: "ReLU" bottom: "Scale4_p" top: "relu4_p" }
layer {
  name: "res_4" type: "Eltwise" bottom: "relu4_p" bottom: "relu4_2" top: "res_4"
  eltwise_param { operation: SUM coeff: 1 coeff: 1 }
}

# ---- Classifier head: two 64-d FC layers (in-place BN/Scale/ReLU) + 2-way output ----
layer {
  name: "ip1" type: "InnerProduct" bottom: "res_4" top: "ip1"
  param { lr_mult: 1 } param { lr_mult: 2 }
  inner_product_param { num_output: 64
    weight_filler { type: "xavier" } bias_filler { type: "constant" } }
}
layer {
  name: "BatchNorm10" type: "BatchNorm" bottom: "ip1" top: "ip1"
  param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 }
  batch_norm_param { use_global_stats: true } include { phase: TEST }
}
layer { name: "Scale10" type: "Scale" bottom: "ip1" top: "ip1" scale_param { bias_term: true } }
layer { name: "relu_ip1" type: "ReLU" bottom: "ip1" top: "ip1" }
layer {
  name: "ip2" type: "InnerProduct" bottom: "ip1" top: "ip2"
  param { lr_mult: 1 } param { lr_mult: 2 }
  inner_product_param { num_output: 64
    weight_filler { type: "xavier" } bias_filler { type: "constant" } }
}
layer {
  name: "BatchNorm11" type: "BatchNorm" bottom: "ip2" top: "ip2"
  param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 }
  batch_norm_param { use_global_stats: true } include { phase: TEST }
}
layer { name: "Scale11" type: "Scale" bottom: "ip2" top: "ip2" scale_param { bias_term: true } }
layer { name: "relu_ip2" type: "ReLU" bottom: "ip2" top: "ip2" }
# Final 2-class output (logits); the only layer with explicit weight decay.
layer {
  name: "fc3_8W_PosAug" type: "InnerProduct" bottom: "ip2" top: "fc3_8W_PosAug"
  param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 1 }
  inner_product_param { num_output: 2 }
}