name:"HatNet" layer { name: "data" type: "Input" top: "data" input_param { shape {dim: 1 dim: 3 dim: 120 dim: 120} } transform_param { mean_value: 104 mean_value: 117 mean_value: 123 } } layer { name: "conv_pre" type: "Convolution" bottom: "data" top: "conv_pre" param { lr_mult: 1 } param { lr_mult: 2 } convolution_param { num_output: 8 pad: 0 kernel_size: 5 stride: 2 weight_filler { type: "xavier" std: 0.07142857142857142 } bias_filler { type: "constant" } } } layer { name: "BatchNorm1" type: "BatchNorm" bottom: "conv_pre" top: "BatchNorm1" param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 } } layer { name: "Scale1" type: "Scale" bottom: "BatchNorm1" top: "Scale1" scale_param { bias_term: true } } layer { name: "relu_pre" type: "ReLU" bottom: "Scale1" top: "relu_pre" } layer { name: "pool_pre" type: "Pooling" bottom: "relu_pre" top: "pool_pre" pooling_param { pool: MAX kernel_size: 2 stride: 2 } } layer { name: "conv1_1" type: "Convolution" bottom: "pool_pre" top: "conv1_1" param { lr_mult: 1 } param { lr_mult: 2 } convolution_param { num_output: 16 pad: 0 kernel_size: 3 stride: 1 weight_filler { type: "xavier" std: 0.11785113019775792 } bias_filler { type: "constant" } } } layer { name: "BatchNorm2" type: "BatchNorm" bottom: "conv1_1" top: "BatchNorm2" param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 } } layer { name: "Scale2" type: "Scale" bottom: "BatchNorm2" top: "Scale2" scale_param { bias_term: true } } layer { name: "relu1_1" type: "ReLU" bottom: "Scale2" top: "relu1_1" } layer { name: "conv1_2" type: "Convolution" bottom: "relu1_1" top: "conv1_2" param { lr_mult: 1 } param { lr_mult: 2 } convolution_param { num_output: 16 pad: 0 kernel_size: 3 stride: 1 weight_filler { type: "xavier" std: 0.11785113019775792 } bias_filler { type: "constant" } } } layer { name: "BatchNorm3" type: "BatchNorm" bottom: "conv1_2" top: "BatchNorm3" param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 } } layer { name: "Scale3" type: "Scale" bottom: "BatchNorm3" top: "Scale3" scale_param { bias_term: true } } layer { name: "relu1_2" type: "ReLU" bottom: "Scale3" top: "relu1_2" } layer { name: "pool1" type: "Pooling" bottom: "relu1_2" top: "pool1" pooling_param { pool: MAX kernel_size: 2 stride: 2 } } layer { name: "conv2_1" type: "Convolution" bottom: "pool1" top: "conv2_1" param { lr_mult: 1 } param { lr_mult: 2 } convolution_param { num_output: 24 pad: 0 kernel_size: 3 stride: 1 weight_filler { type: "xavier" std: 0.09622504486493763 } bias_filler { type: "constant" } } } layer { name: "BatchNorm4" type: "BatchNorm" bottom: "conv2_1" top: "BatchNorm4" param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 } } layer { name: "Scale4" type: "Scale" bottom: "BatchNorm4" top: "Scale4" scale_param { bias_term: true } } layer { name: "relu2_1" type: "ReLU" bottom: "Scale4" top: "relu2_1" } layer { name: "conv2_2" type: "Convolution" bottom: "relu2_1" top: "conv2_2" param { lr_mult: 1 } param { lr_mult: 2 } convolution_param { num_output: 24 pad: 0 kernel_size: 3 stride: 1 weight_filler { type: "xavier" std: 0.09622504486493763 } bias_filler { type: "constant" } } } layer { name: "BatchNorm5" type: "BatchNorm" bottom: "conv2_2" top: "BatchNorm5" param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 } } layer { name: "Scale5" type: "Scale" bottom: "BatchNorm5" top: "Scale5" scale_param { bias_term: true } } layer { name: "relu2_2" type: "ReLU" bottom: "Scale5" top: "relu2_2" } layer { name: "pool2" type: "Pooling" bottom: "relu2_2" top: "pool2" pooling_param { pool: 
MAX kernel_size: 2 stride: 2 } } layer { name: "conv3_1" type: "Convolution" bottom: "pool2" top: "conv3_1" param { lr_mult: 1 } param { lr_mult: 2 } convolution_param { num_output: 40 pad: 1 kernel_size: 3 stride: 1 weight_filler { type: "xavier" std: 0.08333333333333333 } bias_filler { type: "constant" } } } layer { name: "BatchNorm6" type: "BatchNorm" bottom: "conv3_1" top: "BatchNorm6" param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 } } layer { name: "Scale6" type: "Scale" bottom: "BatchNorm6" top: "Scale6" scale_param { bias_term: true } } layer { name: "relu3_1" type: "ReLU" bottom: "Scale6" top: "relu3_1" } layer { name: "conv3_2" type: "Convolution" bottom: "relu3_1" top: "conv3_2" param { lr_mult: 1 } param { lr_mult: 2 } convolution_param { num_output: 80 pad: 1 kernel_size: 3 stride: 1 weight_filler { type: "xavier" std: 0.08333333333333333 } bias_filler { type: "constant" } } } layer { name: "BatchNorm7" type: "BatchNorm" bottom: "conv3_2" top: "BatchNorm7" param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 } } layer { name: "Scale7" type: "Scale" bottom: "BatchNorm7" top: "Scale7" scale_param { bias_term: true } } layer { name: "relu3_2" type: "ReLU" bottom: "Scale7" top: "relu3_2" } layer { name: "ip1" type: "InnerProduct" bottom: "relu3_2" top: "ip1" param { lr_mult: 1 } param { lr_mult:2 } inner_product_param { num_output: 128 weight_filler { type: "xavier" std: 0.001 } bias_filler { type: "constant" } } } layer { name: "relu_ip1" type: "ReLU" bottom: "ip1" top: "ip1" } layer { name: "ip2" type: "InnerProduct" bottom: "ip1" top: "ip2" param { lr_mult: 1 decay_mult: 10 } param { lr_mult: 2 decay_mult: 10 } inner_product_param { num_output: 128 weight_filler { type: "xavier" std: 0.001 } bias_filler { type: "constant" } } } layer { name: "relu_ip2" type: "ReLU" bottom: "ip2" top: "ip2" } layer { name: "ip3_gender" type: "InnerProduct" bottom: "ip2" top: "ip3_gender" param { lr_mult: 1 decay_mult: 10 } param { lr_mult: 2 decay_mult: 10 } inner_product_param { num_output: 2 weight_filler { type: "xavier" std: 0.001 } bias_filler { type: "constant" } } }
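# Usage sketch (commented out so this file remains a valid deploy prototxt): a minimal
# pycaffe inference example for this network. The file names 'hatnet_deploy.prototxt',
# 'hatnet.caffemodel' and 'face.jpg' are placeholders, not files provided here; the
# preprocessing mirrors the input shape and mean values declared in the data layer above.
#
#   import numpy as np
#   import caffe
#
#   net = caffe.Net('hatnet_deploy.prototxt', 'hatnet.caffemodel', caffe.TEST)
#
#   # Match the data layer: 1x3x120x120 BGR input with per-channel mean (104, 117, 123).
#   transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
#   transformer.set_transpose('data', (2, 0, 1))       # HxWxC -> CxHxW
#   transformer.set_raw_scale('data', 255)             # [0, 1] -> [0, 255]
#   transformer.set_channel_swap('data', (2, 1, 0))    # RGB -> BGR
#   transformer.set_mean('data', np.array([104.0, 117.0, 123.0]))
#
#   image = caffe.io.load_image('face.jpg')            # float RGB in [0, 1]
#   net.blobs['data'].data[...] = transformer.preprocess('data', image)
#   scores = net.forward()['ip3_gender'][0]            # raw 2-way scores; no Softmax layer in this file
#   print('predicted gender class:', scores.argmax())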