layer { name: "data" type: "Input" top: "data" input_param { shape { dim: 1 dim: 3 dim: 120 dim: 120 } } }
layer { bottom: "data" top: "conv1" name: "conv1" type: "Convolution" convolution_param { num_output: 64 kernel_size: 7 pad: 3 stride: 2 weight_filler { type: "msra" } bias_term: false } }
layer { bottom: "conv1" top: "conv1" name: "bn_conv1" type: "BatchNorm" batch_norm_param { use_global_stats: false } include { phase: TRAIN } }
layer { bottom: "conv1" top: "conv1" name: "bn_conv1" type: "BatchNorm" batch_norm_param { use_global_stats: true } include { phase: TEST } }
layer { bottom: "conv1" top: "conv1" name: "scale_conv1" type: "Scale" scale_param { bias_term: true } }
layer { bottom: "conv1" top: "conv1" name: "conv1_relu" type: "ReLU" }
layer { bottom: "conv1" top: "pool1" name: "pool1" type: "Pooling" pooling_param { kernel_size: 3 stride: 2 pool: MAX } }
layer { bottom: "pool1" top: "res2a_branch1" name: "res2a_branch1" type: "Convolution" convolution_param { num_output: 64 kernel_size: 1 pad: 0 stride: 1 weight_filler { type: "msra" } bias_term: false } }
layer { bottom: "res2a_branch1" top: "res2a_branch1" name: "bn2a_branch1" type: "BatchNorm" batch_norm_param { use_global_stats: false } include { phase: TRAIN } }
layer { bottom: "res2a_branch1" top: "res2a_branch1" name: "bn2a_branch1" type: "BatchNorm" batch_norm_param { use_global_stats: true } include { phase: TEST } }
layer { bottom: "res2a_branch1" top: "res2a_branch1" name: "scale2a_branch1" type: "Scale" scale_param { bias_term: true } }
layer { bottom: "pool1" top: "res2a_branch2a" name: "res2a_branch2a" type: "Convolution" convolution_param { num_output: 64 kernel_size: 3 pad: 1 stride: 1 weight_filler { type: "msra" } bias_term: false } }
layer { bottom: "res2a_branch2a" top: "res2a_branch2a" name: "bn2a_branch2a" type: "BatchNorm" batch_norm_param { use_global_stats: false } include { phase: TRAIN } }
layer { bottom: "res2a_branch2a" top: "res2a_branch2a" name: "bn2a_branch2a" type: "BatchNorm" batch_norm_param { use_global_stats: true } include { phase: TEST } }
layer { bottom: "res2a_branch2a" top: "res2a_branch2a" name: "scale2a_branch2a" type: "Scale" scale_param { bias_term: true } }
layer { bottom: "res2a_branch2a" top: "res2a_branch2a" name: "res2a_branch2a_relu" type: "ReLU" }
layer { bottom: "res2a_branch2a" top: "res2a_branch2b" name: "res2a_branch2b" type: "Convolution" convolution_param { num_output: 64 kernel_size: 3 pad: 1 stride: 1 weight_filler { type: "msra" } bias_term: false } }
layer { bottom: "res2a_branch2b" top: "res2a_branch2b" name: "bn2a_branch2b" type: "BatchNorm" batch_norm_param { use_global_stats: false } include { phase: TRAIN } }
layer { bottom: "res2a_branch2b" top: "res2a_branch2b" name: "bn2a_branch2b" type: "BatchNorm" batch_norm_param { use_global_stats: true } include { phase: TEST } }
layer { bottom: "res2a_branch2b" top: "res2a_branch2b" name: "scale2a_branch2b" type: "Scale" scale_param { bias_term: true } }
layer { bottom: "res2a_branch1" bottom: "res2a_branch2b" top: "res2a" name: "res2a" type: "Eltwise" eltwise_param { operation: SUM } }
layer { bottom: "res2a" top: "res2a" name: "res2a_relu" type: "ReLU" }
layer { bottom: "res2a" top: "res2b_branch2a" name: "res2b_branch2a" type: "Convolution" convolution_param { num_output: 64 kernel_size: 3 pad: 1 stride: 1 weight_filler { type: "msra" } bias_term: false } }
layer { bottom: "res2b_branch2a" top: "res2b_branch2a" name: "bn2b_branch2a" type: "BatchNorm" batch_norm_param { use_global_stats: false } include { phase: TRAIN } }
layer { bottom: "res2b_branch2a" top: "res2b_branch2a" name: "bn2b_branch2a" type: "BatchNorm" batch_norm_param { use_global_stats: true } include { phase: TEST } }
layer { bottom: "res2b_branch2a" top: "res2b_branch2a" name: "scale2b_branch2a" type: "Scale" scale_param { bias_term: true } }
layer { bottom: "res2b_branch2a" top: "res2b_branch2a" name: "res2b_branch2a_relu" type: "ReLU" }
layer { bottom: "res2b_branch2a" top: "res2b_branch2b" name: "res2b_branch2b" type: "Convolution" convolution_param { num_output: 64 kernel_size: 3 pad: 1 stride: 1 weight_filler { type: "msra" } bias_term: false } }
layer { bottom: "res2b_branch2b" top: "res2b_branch2b" name: "bn2b_branch2b" type: "BatchNorm" batch_norm_param { use_global_stats: false } include { phase: TRAIN } }
layer { bottom: "res2b_branch2b" top: "res2b_branch2b" name: "bn2b_branch2b" type: "BatchNorm" batch_norm_param { use_global_stats: true } include { phase: TEST } }
layer { bottom: "res2b_branch2b" top: "res2b_branch2b" name: "scale2b_branch2b" type: "Scale" scale_param { bias_term: true } }
layer { bottom: "res2a" bottom: "res2b_branch2b" top: "res2b" name: "res2b" type: "Eltwise" eltwise_param { operation: SUM } }
layer { bottom: "res2b" top: "res2b" name: "res2b_relu" type: "ReLU" }
layer { bottom: "res2b" top: "res3a_branch1" name: "res3a_branch1" type: "Convolution" convolution_param { num_output: 128 kernel_size: 1 pad: 0 stride: 2 weight_filler { type: "msra" } bias_term: false } }
layer { bottom: "res3a_branch1" top: "res3a_branch1" name: "bn3a_branch1" type: "BatchNorm" batch_norm_param { use_global_stats: false } include { phase: TRAIN } }
layer { bottom: "res3a_branch1" top: "res3a_branch1" name: "bn3a_branch1" type: "BatchNorm" batch_norm_param { use_global_stats: true } include { phase: TEST } }
layer { bottom: "res3a_branch1" top: "res3a_branch1" name: "scale3a_branch1" type: "Scale" scale_param { bias_term: true } }
layer { bottom: "res2b" top: "res3a_branch2a" name: "res3a_branch2a" type: "Convolution" convolution_param { num_output: 128 kernel_size: 3 pad: 1 stride: 2 weight_filler { type: "msra" } bias_term: false } }
layer { bottom: "res3a_branch2a" top: "res3a_branch2a" name: "bn3a_branch2a" type: "BatchNorm" batch_norm_param { use_global_stats: false } include { phase: TRAIN } }
layer { bottom: "res3a_branch2a" top: "res3a_branch2a" name: "bn3a_branch2a" type: "BatchNorm" batch_norm_param { use_global_stats: true } include { phase: TEST } }
layer { bottom: "res3a_branch2a" top: "res3a_branch2a" name: "scale3a_branch2a" type: "Scale" scale_param { bias_term: true } }
layer { bottom: "res3a_branch2a" top: "res3a_branch2a" name: "res3a_branch2a_relu" type: "ReLU" }
layer { bottom: "res3a_branch2a" top: "res3a_branch2b" name: "res3a_branch2b" type: "Convolution" convolution_param { num_output: 128 kernel_size: 3 pad: 1 stride: 1 weight_filler { type: "msra" } bias_term: false } }
layer { bottom: "res3a_branch2b" top: "res3a_branch2b" name: "bn3a_branch2b" type: "BatchNorm" batch_norm_param { use_global_stats: false } include { phase: TRAIN } }
layer { bottom: "res3a_branch2b" top: "res3a_branch2b" name: "bn3a_branch2b" type: "BatchNorm" batch_norm_param { use_global_stats: true } include { phase: TEST } }
layer { bottom: "res3a_branch2b" top: "res3a_branch2b" name: "scale3a_branch2b" type: "Scale" scale_param { bias_term: true } }
layer { bottom: "res3a_branch1" bottom: "res3a_branch2b" top: "res3a" name: "res3a" type: "Eltwise" eltwise_param { operation: SUM } }
layer { bottom: "res3a" top: "res3a" name: "res3a_relu" type: "ReLU" }
layer { bottom: "res3a" top: "res3b_branch2a" name: "res3b_branch2a" type: "Convolution" convolution_param { num_output: 128 kernel_size: 3 pad: 1 stride: 1 weight_filler { type: "msra" } bias_term: false } }
layer { bottom: "res3b_branch2a" top: "res3b_branch2a" name: "bn3b_branch2a" type: "BatchNorm" batch_norm_param { use_global_stats: false } include { phase: TRAIN } }
layer { bottom: "res3b_branch2a" top: "res3b_branch2a" name: "bn3b_branch2a" type: "BatchNorm" batch_norm_param { use_global_stats: true } include { phase: TEST } }
layer { bottom: "res3b_branch2a" top: "res3b_branch2a" name: "scale3b_branch2a" type: "Scale" scale_param { bias_term: true } }
layer { bottom: "res3b_branch2a" top: "res3b_branch2a" name: "res3b_branch2a_relu" type: "ReLU" }
layer { bottom: "res3b_branch2a" top: "res3b_branch2b" name: "res3b_branch2b" type: "Convolution" convolution_param { num_output: 128 kernel_size: 3 pad: 1 stride: 1 weight_filler { type: "msra" } bias_term: false } }
layer { bottom: "res3b_branch2b" top: "res3b_branch2b" name: "bn3b_branch2b" type: "BatchNorm" batch_norm_param { use_global_stats: false } include { phase: TRAIN } }
layer { bottom: "res3b_branch2b" top: "res3b_branch2b" name: "bn3b_branch2b" type: "BatchNorm" batch_norm_param { use_global_stats: true } include { phase: TEST } }
layer { bottom: "res3b_branch2b" top: "res3b_branch2b" name: "scale3b_branch2b" type: "Scale" scale_param { bias_term: true } }
layer { bottom: "res3a" bottom: "res3b_branch2b" top: "res3b" name: "res3b" type: "Eltwise" eltwise_param { operation: SUM } }
layer { bottom: "res3b" top: "res3b" name: "res3b_relu" type: "ReLU" }
layer { bottom: "res3b" top: "res4a_branch1" name: "res4a_branch1" type: "Convolution" convolution_param { num_output: 256 kernel_size: 1 pad: 0 stride: 2 weight_filler { type: "msra" } bias_term: false } }
layer { bottom: "res4a_branch1" top: "res4a_branch1" name: "bn4a_branch1" type: "BatchNorm" batch_norm_param { use_global_stats: false } include { phase: TRAIN } }
layer { bottom: "res4a_branch1" top: "res4a_branch1" name: "bn4a_branch1" type: "BatchNorm" batch_norm_param { use_global_stats: true } include { phase: TEST } }
layer { bottom: "res4a_branch1" top: "res4a_branch1" name: "scale4a_branch1" type: "Scale" scale_param { bias_term: true } }
layer { bottom: "res3b" top: "res4a_branch2a" name: "res4a_branch2a" type: "Convolution" convolution_param { num_output: 256 kernel_size: 3 pad: 1 stride: 2 weight_filler { type: "msra" } bias_term: false } }
layer { bottom: "res4a_branch2a" top: "res4a_branch2a" name: "bn4a_branch2a" type: "BatchNorm" batch_norm_param { use_global_stats: false } include { phase: TRAIN } }
layer { bottom: "res4a_branch2a" top: "res4a_branch2a" name: "bn4a_branch2a" type: "BatchNorm" batch_norm_param { use_global_stats: true } include { phase: TEST } }
layer { bottom: "res4a_branch2a" top: "res4a_branch2a" name: "scale4a_branch2a" type: "Scale" scale_param { bias_term: true } }
layer { bottom: "res4a_branch2a" top: "res4a_branch2a" name: "res4a_branch2a_relu" type: "ReLU" }
layer { bottom: "res4a_branch2a" top: "res4a_branch2b" name: "res4a_branch2b" type: "Convolution" convolution_param { num_output: 256 kernel_size: 3 pad: 1 stride: 1 weight_filler { type: "msra" } bias_term: false } }
layer { bottom: "res4a_branch2b" top: "res4a_branch2b" name: "bn4a_branch2b" type: "BatchNorm" batch_norm_param { use_global_stats: false } include { phase: TRAIN } }
layer { bottom: "res4a_branch2b" top: "res4a_branch2b" name: "bn4a_branch2b" type: "BatchNorm" batch_norm_param { use_global_stats: true } include { phase: TEST } }
layer { bottom: "res4a_branch2b" top: "res4a_branch2b" name: "scale4a_branch2b" type: "Scale" scale_param { bias_term: true } }
layer { bottom: "res4a_branch1" bottom: "res4a_branch2b" top: "res4a" name: "res4a" type: "Eltwise" eltwise_param { operation: SUM } }
layer { bottom: "res4a" top: "res4a" name: "res4a_relu" type: "ReLU" }
layer { bottom: "res4a" top: "res4b_branch2a" name: "res4b_branch2a" type: "Convolution" convolution_param { num_output: 256 kernel_size: 3 pad: 1 stride: 1 weight_filler { type: "msra" } bias_term: false } }
layer { bottom: "res4b_branch2a" top: "res4b_branch2a" name: "bn4b_branch2a" type: "BatchNorm" batch_norm_param { use_global_stats: false } include { phase: TRAIN } }
layer { bottom: "res4b_branch2a" top: "res4b_branch2a" name: "bn4b_branch2a" type: "BatchNorm" batch_norm_param { use_global_stats: true } include { phase: TEST } }
layer { bottom: "res4b_branch2a" top: "res4b_branch2a" name: "scale4b_branch2a" type: "Scale" scale_param { bias_term: true } }
layer { bottom: "res4b_branch2a" top: "res4b_branch2a" name: "res4b_branch2a_relu" type: "ReLU" }
layer { bottom: "res4b_branch2a" top: "res4b_branch2b" name: "res4b_branch2b" type: "Convolution" convolution_param { num_output: 256 kernel_size: 3 pad: 1 stride: 1 weight_filler { type: "msra" } bias_term: false } }
layer { bottom: "res4b_branch2b" top: "res4b_branch2b" name: "bn4b_branch2b" type: "BatchNorm" batch_norm_param { use_global_stats: false } include { phase: TRAIN } }
layer { bottom: "res4b_branch2b" top: "res4b_branch2b" name: "bn4b_branch2b" type: "BatchNorm" batch_norm_param { use_global_stats: true } include { phase: TEST } }
layer { bottom: "res4b_branch2b" top: "res4b_branch2b" name: "scale4b_branch2b" type: "Scale" scale_param { bias_term: true } }
layer { bottom: "res4a" bottom: "res4b_branch2b" top: "res4b" name: "res4b" type: "Eltwise" eltwise_param { operation: SUM } }
layer { bottom: "res4b" top: "res4b" name: "res4b_relu" type: "ReLU" }
layer { bottom: "res4b" top: "res5a_branch1" name: "res5a_branch1" type: "Convolution" convolution_param { num_output: 512 kernel_size: 1 pad: 0 stride: 2 weight_filler { type: "msra" } bias_term: false } }
layer { bottom: "res5a_branch1" top: "res5a_branch1" name: "bn5a_branch1" type: "BatchNorm" batch_norm_param { use_global_stats: false } include { phase: TRAIN } }
layer { bottom: "res5a_branch1" top: "res5a_branch1" name: "bn5a_branch1" type: "BatchNorm" batch_norm_param { use_global_stats: true } include { phase: TEST } }
layer { bottom: "res5a_branch1" top: "res5a_branch1" name: "scale5a_branch1" type: "Scale" scale_param { bias_term: true } }
layer { bottom: "res4b" top: "res5a_branch2a" name: "res5a_branch2a" type: "Convolution" convolution_param { num_output: 512 kernel_size: 3 pad: 1 stride: 2 weight_filler { type: "msra" } bias_term: false } }
layer { bottom: "res5a_branch2a" top: "res5a_branch2a" name: "bn5a_branch2a" type: "BatchNorm" batch_norm_param { use_global_stats: false } include { phase: TRAIN } }
layer { bottom: "res5a_branch2a" top: "res5a_branch2a" name: "bn5a_branch2a" type: "BatchNorm" batch_norm_param { use_global_stats: true } include { phase: TEST } }
layer { bottom: "res5a_branch2a" top: "res5a_branch2a" name: "scale5a_branch2a" type: "Scale" scale_param { bias_term: true } }
layer { bottom: "res5a_branch2a" top: "res5a_branch2a" name: "res5a_branch2a_relu" type: "ReLU" }
layer { bottom: "res5a_branch2a" top: "res5a_branch2b" name: "res5a_branch2b" type: "Convolution" convolution_param { num_output: 512 kernel_size: 3 pad: 1 stride: 1 weight_filler { type: "msra" } bias_term: false } }
layer { bottom: "res5a_branch2b" top: "res5a_branch2b" name: "bn5a_branch2b" type: "BatchNorm" batch_norm_param { use_global_stats: false } include { phase: TRAIN } }
layer { bottom: "res5a_branch2b" top: "res5a_branch2b" name: "bn5a_branch2b" type: "BatchNorm" batch_norm_param { use_global_stats: true } include { phase: TEST } }
layer { bottom: "res5a_branch2b" top: "res5a_branch2b" name: "scale5a_branch2b" type: "Scale" scale_param { bias_term: true } }
layer { bottom: "res5a_branch1" bottom: "res5a_branch2b" top: "res5a" name: "res5a" type: "Eltwise" eltwise_param { operation: SUM } }
layer { bottom: "res5a" top: "res5a" name: "res5a_relu" type: "ReLU" }
layer { bottom: "res5a" top: "res5b_branch2a" name: "res5b_branch2a" type: "Convolution" convolution_param { num_output: 512 kernel_size: 3 pad: 1 stride: 1 weight_filler { type: "msra" } bias_term: false } }
layer { bottom: "res5b_branch2a" top: "res5b_branch2a" name: "bn5b_branch2a" type: "BatchNorm" batch_norm_param { use_global_stats: false } include { phase: TRAIN } }
layer { bottom: "res5b_branch2a" top: "res5b_branch2a" name: "bn5b_branch2a" type: "BatchNorm" batch_norm_param { use_global_stats: true } include { phase: TEST } }
layer { bottom: "res5b_branch2a" top: "res5b_branch2a" name: "scale5b_branch2a" type: "Scale" scale_param { bias_term: true } }
layer { bottom: "res5b_branch2a" top: "res5b_branch2a" name: "res5b_branch2a_relu" type: "ReLU" }
layer { bottom: "res5b_branch2a" top: "res5b_branch2b" name: "res5b_branch2b" type: "Convolution" convolution_param { num_output: 512 kernel_size: 3 pad: 1 stride: 1 weight_filler { type: "msra" } bias_term: false } }
layer { bottom: "res5b_branch2b" top: "res5b_branch2b" name: "bn5b_branch2b" type: "BatchNorm" batch_norm_param { use_global_stats: false } include { phase: TRAIN } }
layer { bottom: "res5b_branch2b" top: "res5b_branch2b" name: "bn5b_branch2b" type: "BatchNorm" batch_norm_param { use_global_stats: true } include { phase: TEST } }
layer { bottom: "res5b_branch2b" top: "res5b_branch2b" name: "scale5b_branch2b" type: "Scale" scale_param { bias_term: true } }
layer { bottom: "res5a" bottom: "res5b_branch2b" top: "res5b" name: "res5b" type: "Eltwise" eltwise_param { operation: SUM } }
layer { bottom: "res5b" top: "res5b" name: "res5b_relu" type: "ReLU" }
layer { bottom: "res5b" top: "pool5" name: "pool5" type: "Pooling" pooling_param { kernel_size: 4 stride: 1 pool: AVE } }
################# gender #################
layer { name: "InnerProduct2" type: "InnerProduct" bottom: "res4b" top: "InnerProduct2" inner_product_param { num_output: 2 weight_filler { type: "xavier" } bias_filler { type: "constant" value: 0 } } }
layer { name: "BatchNorm7" type: "BatchNorm" bottom: "InnerProduct2" top: "InnerProduct2" batch_norm_param { use_global_stats: false } include { phase: TRAIN } }
layer { name: "BatchNorm7" type: "BatchNorm" bottom: "InnerProduct2" top: "InnerProduct2" batch_norm_param { use_global_stats: true } include { phase: TEST } }
layer { name: "Scale7" type: "Scale" bottom: "InnerProduct2" top: "InnerProduct2" scale_param { bias_term: true } }
layer { name: "ReLU7" type: "ReLU" bottom: "InnerProduct2" top: "InnerProduct2" }
############## age ####################
layer { name: "InnerProduct1" type: "InnerProduct" bottom: "pool5" top: "InnerProduct1" inner_product_param { num_output: 512 weight_filler { type: "xavier" } bias_filler { type: "constant" value: 0 } } }
layer { name: "BatchNorm85" type: "BatchNorm" bottom: "InnerProduct1" top: "InnerProduct1" batch_norm_param { use_global_stats: false } include { phase: TRAIN } }
layer { name: "BatchNorm85" type: "BatchNorm" bottom: "InnerProduct1" top: "InnerProduct1" batch_norm_param { use_global_stats: true } include { phase: TEST } }
layer { name: "Scale85" type: "Scale" bottom: "InnerProduct1" top: "InnerProduct1" scale_param { bias_term: true } }
layer { name: "ReLU80" type: "ReLU" bottom: "InnerProduct1" top: "InnerProduct1" }
layer { bottom: "InnerProduct1" top: "age" name: "age" type: "InnerProduct" param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 1 } inner_product_param { num_output: 95 weight_filler { type: "xavier" } bias_filler { type: "constant" value: 0 } } }
############### expression #################
layer { name: "InnerProduct3" type: "InnerProduct" bottom: "pool5" top: "InnerProduct3" inner_product_param { num_output: 7 weight_filler { type: "xavier" } bias_filler { type: "constant" value: 0 } } }
layer { name: "BatchNorm3" type: "BatchNorm" bottom: "InnerProduct3" top: "InnerProduct3" batch_norm_param { use_global_stats: false } include { phase: TRAIN } }
layer { name: "BatchNorm3" type: "BatchNorm" bottom: "InnerProduct3" top: "InnerProduct3" batch_norm_param { use_global_stats: true } include { phase: TEST } }
layer { name: "Scale3" type: "Scale" bottom: "InnerProduct3" top: "InnerProduct3" scale_param { bias_term: true } }
layer { name: "ReLU3" type: "ReLU" bottom: "InnerProduct3" top: "InnerProduct3" }
############### glasses #################
layer { name: "InnerProduct_glasses" type: "InnerProduct" bottom: "pool5" top: "InnerProduct_glasses" inner_product_param { num_output: 3 weight_filler { type: "xavier" } bias_filler { type: "constant" value: 0 } } }
layer { name: "BatchNorm_glasses" type: "BatchNorm" bottom: "InnerProduct_glasses" top: "InnerProduct_glasses" batch_norm_param { use_global_stats: false } include { phase: TRAIN } }
layer { name: "BatchNorm_glasses" type: "BatchNorm" bottom: "InnerProduct_glasses" top: "InnerProduct_glasses" batch_norm_param { use_global_stats: true } include { phase: TEST } }
layer { name: "Scale_glasses" type: "Scale" bottom: "InnerProduct_glasses" top: "InnerProduct_glasses" scale_param { bias_term: true } }
layer { name: "ReLU_glasses" type: "ReLU" bottom: "InnerProduct_glasses" top: "InnerProduct_glasses" }
############### beard #################
layer { name: "InnerProduct_beard" type: "InnerProduct" bottom: "pool5" top: "InnerProduct_beard" inner_product_param { num_output: 2 weight_filler { type: "xavier" } bias_filler { type: "constant" value: 0 } } }
layer { name: "BatchNorm_beard" type: "BatchNorm" bottom: "InnerProduct_beard" top: "InnerProduct_beard" batch_norm_param { use_global_stats: false } include { phase: TRAIN } }
layer { name: "BatchNorm_beard" type: "BatchNorm" bottom: "InnerProduct_beard" top: "InnerProduct_beard" batch_norm_param { use_global_stats: true } include { phase: TEST } }
layer { name: "Scale_beard" type: "Scale" bottom: "InnerProduct_beard" top: "InnerProduct_beard" scale_param { bias_term: true } }
layer { name: "ReLU_beard" type: "ReLU" bottom: "InnerProduct_beard" top: "InnerProduct_beard" }