# Mnet_vA367_extract_hair
#
# U-Net-style encoder/decoder (Caffe prototxt). Input "data" is 1 x 6 x 512 x 512;
# final top "output" is a single-channel 512 x 512 map (presumably a hair mask /
# matte given the net name — TODO confirm against the consumer).
#
# Topology notes (consistent across all four encoder stages):
#   * Each stage runs two 3x3 conv+BN+Scale+ReLU pairs. The FIRST conv's ReLU
#     feeds the 2x2 max-pool to the next stage; the SECOND (half-width) conv's
#     ReLU is kept as the skip connection for the matching decoder concat.
#   * Decoder stages: 3x3 conv+BN+ReLU, then a 5x5 conv (no BN), then a 2x2
#     stride-2 Deconvolution, concatenated with the encoder skip on axis 1.
#   * All BatchNorm layers use use_global_stats: true (inference mode).

name: "Mnet_vA367_extract_hair"

input: "data"
input_dim: 1
input_dim: 6
input_dim: 512
input_dim: 512

# ---------------- Encoder stage 1 (512x512) ----------------

layer {
  name: "conv1"  type: "Convolution"
  bottom: "data"  top: "conv_blob1"
  convolution_param {
    num_output: 16  bias_term: false
    pad: 1  kernel_size: 3  group: 1  stride: 1
    weight_filler { type: "xavier" }
    dilation: 1
  }
}
layer {
  name: "batch_norm1"  type: "BatchNorm"
  bottom: "conv_blob1"  top: "batch_norm_blob1"
  batch_norm_param { use_global_stats: true  eps: 9.999999747378752e-06 }
}
layer {
  name: "bn_scale1"  type: "Scale"
  bottom: "batch_norm_blob1"  top: "batch_norm_blob1"
  scale_param { bias_term: true }
}
layer {
  name: "relu1"  type: "ReLU"
  bottom: "batch_norm_blob1"  top: "relu_blob1"
}
layer {
  name: "conv2"  type: "Convolution"
  bottom: "relu_blob1"  top: "conv_blob2"
  convolution_param {
    num_output: 8  bias_term: false
    pad: 1  kernel_size: 3  group: 1  stride: 1
    weight_filler { type: "xavier" }
    dilation: 1
  }
}
layer {
  name: "batch_norm2"  type: "BatchNorm"
  bottom: "conv_blob2"  top: "batch_norm_blob2"
  batch_norm_param { use_global_stats: true  eps: 9.999999747378752e-06 }
}
layer {
  name: "bn_scale2"  type: "Scale"
  bottom: "batch_norm_blob2"  top: "batch_norm_blob2"
  scale_param { bias_term: true }
}
# relu_blob2 is the stage-1 skip, consumed by cat5 in the decoder.
layer {
  name: "relu2"  type: "ReLU"
  bottom: "batch_norm_blob2"  top: "relu_blob2"
}
# Note: pools relu_blob1 (first conv branch), not relu_blob2 — intentional,
# the same pattern repeats at every stage.
layer {
  name: "max_pool1"  type: "Pooling"
  bottom: "relu_blob1"  top: "max_pool_blob1"
  pooling_param { pool: MAX  kernel_size: 2  stride: 2 }
}

# ---------------- Encoder stage 2 (256x256) ----------------

layer {
  name: "conv3"  type: "Convolution"
  bottom: "max_pool_blob1"  top: "conv_blob3"
  convolution_param {
    num_output: 32  bias_term: false
    pad: 1  kernel_size: 3  group: 1  stride: 1
    weight_filler { type: "xavier" }
    dilation: 1
  }
}
layer {
  name: "batch_norm3"  type: "BatchNorm"
  bottom: "conv_blob3"  top: "batch_norm_blob3"
  batch_norm_param { use_global_stats: true  eps: 9.999999747378752e-06 }
}
layer {
  name: "bn_scale3"  type: "Scale"
  bottom: "batch_norm_blob3"  top: "batch_norm_blob3"
  scale_param { bias_term: true }
}
layer {
  name: "relu3"  type: "ReLU"
  bottom: "batch_norm_blob3"  top: "relu_blob3"
}
layer {
  name: "conv4"  type: "Convolution"
  bottom: "relu_blob3"  top: "conv_blob4"
  convolution_param {
    num_output: 16  bias_term: false
    pad: 1  kernel_size: 3  group: 1  stride: 1
    weight_filler { type: "xavier" }
    dilation: 1
  }
}
layer {
  name: "batch_norm4"  type: "BatchNorm"
  bottom: "conv_blob4"  top: "batch_norm_blob4"
  batch_norm_param { use_global_stats: true  eps: 9.999999747378752e-06 }
}
layer {
  name: "bn_scale4"  type: "Scale"
  bottom: "batch_norm_blob4"  top: "batch_norm_blob4"
  scale_param { bias_term: true }
}
# relu_blob4 is the stage-2 skip, consumed by cat4.
layer {
  name: "relu4"  type: "ReLU"
  bottom: "batch_norm_blob4"  top: "relu_blob4"
}
layer {
  name: "max_pool2"  type: "Pooling"
  bottom: "relu_blob3"  top: "max_pool_blob2"
  pooling_param { pool: MAX  kernel_size: 2  stride: 2 }
}

# ---------------- Encoder stage 3 (128x128) ----------------

layer {
  name: "conv5"  type: "Convolution"
  bottom: "max_pool_blob2"  top: "conv_blob5"
  convolution_param {
    num_output: 64  bias_term: false
    pad: 1  kernel_size: 3  group: 1  stride: 1
    weight_filler { type: "xavier" }
    dilation: 1
  }
}
layer {
  name: "batch_norm5"  type: "BatchNorm"
  bottom: "conv_blob5"  top: "batch_norm_blob5"
  batch_norm_param { use_global_stats: true  eps: 9.999999747378752e-06 }
}
layer {
  name: "bn_scale5"  type: "Scale"
  bottom: "batch_norm_blob5"  top: "batch_norm_blob5"
  scale_param { bias_term: true }
}
layer {
  name: "relu5"  type: "ReLU"
  bottom: "batch_norm_blob5"  top: "relu_blob5"
}
layer {
  name: "conv6"  type: "Convolution"
  bottom: "relu_blob5"  top: "conv_blob6"
  convolution_param {
    num_output: 32  bias_term: false
    pad: 1  kernel_size: 3  group: 1  stride: 1
    weight_filler { type: "xavier" }
    dilation: 1
  }
}
layer {
  name: "batch_norm6"  type: "BatchNorm"
  bottom: "conv_blob6"  top: "batch_norm_blob6"
  batch_norm_param { use_global_stats: true  eps: 9.999999747378752e-06 }
}
layer {
  name: "bn_scale6"  type: "Scale"
  bottom: "batch_norm_blob6"  top: "batch_norm_blob6"
  scale_param { bias_term: true }
}
# relu_blob6 is the stage-3 skip, consumed by cat3.
layer {
  name: "relu6"  type: "ReLU"
  bottom: "batch_norm_blob6"  top: "relu_blob6"
}
layer {
  name: "max_pool3"  type: "Pooling"
  bottom: "relu_blob5"  top: "max_pool_blob3"
  pooling_param { pool: MAX  kernel_size: 2  stride: 2 }
}

# ---------------- Encoder stage 4 (64x64) ----------------

layer {
  name: "conv7"  type: "Convolution"
  bottom: "max_pool_blob3"  top: "conv_blob7"
  convolution_param {
    num_output: 128  bias_term: false
    pad: 1  kernel_size: 3  group: 1  stride: 1
    weight_filler { type: "xavier" }
    dilation: 1
  }
}
layer {
  name: "batch_norm7"  type: "BatchNorm"
  bottom: "conv_blob7"  top: "batch_norm_blob7"
  batch_norm_param { use_global_stats: true  eps: 9.999999747378752e-06 }
}
layer {
  name: "bn_scale7"  type: "Scale"
  bottom: "batch_norm_blob7"  top: "batch_norm_blob7"
  scale_param { bias_term: true }
}
layer {
  name: "relu7"  type: "ReLU"
  bottom: "batch_norm_blob7"  top: "relu_blob7"
}
layer {
  name: "conv8"  type: "Convolution"
  bottom: "relu_blob7"  top: "conv_blob8"
  convolution_param {
    num_output: 64  bias_term: false
    pad: 1  kernel_size: 3  group: 1  stride: 1
    weight_filler { type: "xavier" }
    dilation: 1
  }
}
layer {
  name: "batch_norm8"  type: "BatchNorm"
  bottom: "conv_blob8"  top: "batch_norm_blob8"
  batch_norm_param { use_global_stats: true  eps: 9.999999747378752e-06 }
}
layer {
  name: "bn_scale8"  type: "Scale"
  bottom: "batch_norm_blob8"  top: "batch_norm_blob8"
  scale_param { bias_term: true }
}
# relu_blob8 is the stage-4 skip, consumed by cat2.
layer {
  name: "relu8"  type: "ReLU"
  bottom: "batch_norm_blob8"  top: "relu_blob8"
}
layer {
  name: "max_pool4"  type: "Pooling"
  bottom: "relu_blob7"  top: "max_pool_blob4"
  pooling_param { pool: MAX  kernel_size: 2  stride: 2 }
}

# ---------------- Bottleneck (32x32) ----------------

layer {
  name: "conv9"  type: "Convolution"
  bottom: "max_pool_blob4"  top: "conv_blob9"
  convolution_param {
    num_output: 64  bias_term: false
    pad: 1  kernel_size: 3  group: 1  stride: 1
    weight_filler { type: "xavier" }
    dilation: 1
  }
}
layer {
  name: "batch_norm9"  type: "BatchNorm"
  bottom: "conv_blob9"  top: "batch_norm_blob9"
  batch_norm_param { use_global_stats: true  eps: 9.999999747378752e-06 }
}
layer {
  name: "bn_scale9"  type: "Scale"
  bottom: "batch_norm_blob9"  top: "batch_norm_blob9"
  scale_param { bias_term: true }
}
layer {
  name: "relu9"  type: "ReLU"
  bottom: "batch_norm_blob9"  top: "relu_blob9"
}
# Residual-style concat of the bottleneck output (64 ch) with its own input
# (128 ch) -> 192 channels into conv10.
layer {
  name: "cat1"  type: "Concat"
  bottom: "relu_blob9"  bottom: "max_pool_blob4"
  top: "cat_blob1"
  concat_param { axis: 1 }
}

# ---------------- Decoder stage 1 (32 -> 64) ----------------

layer {
  name: "conv10"  type: "Convolution"
  bottom: "cat_blob1"  top: "conv_blob10"
  convolution_param {
    num_output: 128  bias_term: false
    pad: 1  kernel_size: 3  group: 1  stride: 1
    weight_filler { type: "xavier" }
    dilation: 1
  }
}
layer {
  name: "batch_norm10"  type: "BatchNorm"
  bottom: "conv_blob10"  top: "batch_norm_blob10"
  batch_norm_param { use_global_stats: true  eps: 9.999999747378752e-06 }
}
layer {
  name: "bn_scale10"  type: "Scale"
  bottom: "batch_norm_blob10"  top: "batch_norm_blob10"
  scale_param { bias_term: true }
}
layer {
  name: "relu10"  type: "ReLU"
  bottom: "batch_norm_blob10"  top: "relu_blob10"
}
# 5x5 conv with no BatchNorm/ReLU before upsampling — pattern repeats in every
# decoder stage (conv11/13/15/17).
layer {
  name: "conv11"  type: "Convolution"
  bottom: "relu_blob10"  top: "conv_blob11"
  convolution_param {
    num_output: 128  bias_term: false
    pad: 2  kernel_size: 5  group: 1  stride: 1
    weight_filler { type: "xavier" }
    dilation: 1
  }
}
layer {
  name: "conv_transpose1"  type: "Deconvolution"
  bottom: "conv_blob11"  top: "conv_transpose_blob1"
  convolution_param {
    num_output: 128
    pad: 0  kernel_size: 2  stride: 2
    weight_filler { type: "xavier" }
    dilation: 1
    bias_filler { type: "constant"  value: 0 }
  }
}
layer {
  name: "cat2"  type: "Concat"
  bottom: "relu_blob8"  bottom: "conv_transpose_blob1"
  top: "cat_blob2"
  concat_param { axis: 1 }
}

# ---------------- Decoder stage 2 (64 -> 128) ----------------

layer {
  name: "conv12"  type: "Convolution"
  bottom: "cat_blob2"  top: "conv_blob12"
  convolution_param {
    num_output: 64  bias_term: false
    pad: 1  kernel_size: 3  group: 1  stride: 1
    weight_filler { type: "xavier" }
    dilation: 1
  }
}
layer {
  name: "batch_norm11"  type: "BatchNorm"
  bottom: "conv_blob12"  top: "batch_norm_blob11"
  batch_norm_param { use_global_stats: true  eps: 9.999999747378752e-06 }
}
layer {
  name: "bn_scale11"  type: "Scale"
  bottom: "batch_norm_blob11"  top: "batch_norm_blob11"
  scale_param { bias_term: true }
}
layer {
  name: "relu11"  type: "ReLU"
  bottom: "batch_norm_blob11"  top: "relu_blob11"
}
layer {
  name: "conv13"  type: "Convolution"
  bottom: "relu_blob11"  top: "conv_blob13"
  convolution_param {
    num_output: 64  bias_term: false
    pad: 2  kernel_size: 5  group: 1  stride: 1
    weight_filler { type: "xavier" }
    dilation: 1
  }
}
layer {
  name: "conv_transpose2"  type: "Deconvolution"
  bottom: "conv_blob13"  top: "conv_transpose_blob2"
  convolution_param {
    num_output: 64
    pad: 0  kernel_size: 2  stride: 2
    weight_filler { type: "xavier" }
    dilation: 1
    bias_filler { type: "constant"  value: 0 }
  }
}
layer {
  name: "cat3"  type: "Concat"
  bottom: "relu_blob6"  bottom: "conv_transpose_blob2"
  top: "cat_blob3"
  concat_param { axis: 1 }
}

# ---------------- Decoder stage 3 (128 -> 256) ----------------

layer {
  name: "conv14"  type: "Convolution"
  bottom: "cat_blob3"  top: "conv_blob14"
  convolution_param {
    num_output: 32  bias_term: false
    pad: 1  kernel_size: 3  group: 1  stride: 1
    weight_filler { type: "xavier" }
    dilation: 1
  }
}
layer {
  name: "batch_norm12"  type: "BatchNorm"
  bottom: "conv_blob14"  top: "batch_norm_blob12"
  batch_norm_param { use_global_stats: true  eps: 9.999999747378752e-06 }
}
layer {
  name: "bn_scale12"  type: "Scale"
  bottom: "batch_norm_blob12"  top: "batch_norm_blob12"
  scale_param { bias_term: true }
}
layer {
  name: "relu12"  type: "ReLU"
  bottom: "batch_norm_blob12"  top: "relu_blob12"
}
layer {
  name: "conv15"  type: "Convolution"
  bottom: "relu_blob12"  top: "conv_blob15"
  convolution_param {
    num_output: 32  bias_term: false
    pad: 2  kernel_size: 5  group: 1  stride: 1
    weight_filler { type: "xavier" }
    dilation: 1
  }
}
layer {
  name: "conv_transpose3"  type: "Deconvolution"
  bottom: "conv_blob15"  top: "conv_transpose_blob3"
  convolution_param {
    num_output: 32
    pad: 0  kernel_size: 2  stride: 2
    weight_filler { type: "xavier" }
    dilation: 1
    bias_filler { type: "constant"  value: 0 }
  }
}
layer {
  name: "cat4"  type: "Concat"
  bottom: "relu_blob4"  bottom: "conv_transpose_blob3"
  top: "cat_blob4"
  concat_param { axis: 1 }
}

# ---------------- Decoder stage 4 (256 -> 512) ----------------

layer {
  name: "conv16"  type: "Convolution"
  bottom: "cat_blob4"  top: "conv_blob16"
  convolution_param {
    num_output: 16  bias_term: false
    pad: 1  kernel_size: 3  group: 1  stride: 1
    weight_filler { type: "xavier" }
    dilation: 1
  }
}
layer {
  name: "batch_norm13"  type: "BatchNorm"
  bottom: "conv_blob16"  top: "batch_norm_blob13"
  batch_norm_param { use_global_stats: true  eps: 9.999999747378752e-06 }
}
layer {
  name: "bn_scale13"  type: "Scale"
  bottom: "batch_norm_blob13"  top: "batch_norm_blob13"
  scale_param { bias_term: true }
}
layer {
  name: "relu13"  type: "ReLU"
  bottom: "batch_norm_blob13"  top: "relu_blob13"
}
layer {
  name: "conv17"  type: "Convolution"
  bottom: "relu_blob13"  top: "conv_blob17"
  convolution_param {
    num_output: 16  bias_term: false
    pad: 2  kernel_size: 5  group: 1  stride: 1
    weight_filler { type: "xavier" }
    dilation: 1
  }
}
layer {
  name: "conv_transpose4"  type: "Deconvolution"
  bottom: "conv_blob17"  top: "conv_transpose_blob4"
  convolution_param {
    num_output: 16
    pad: 0  kernel_size: 2  stride: 2
    weight_filler { type: "xavier" }
    dilation: 1
    bias_filler { type: "constant"  value: 0 }
  }
}
layer {
  name: "cat5"  type: "Concat"
  bottom: "relu_blob2"  bottom: "conv_transpose_blob4"
  top: "cat_blob5"
  concat_param { axis: 1 }
}

# ---------------- Output head (512x512, 1 channel) ----------------
# No activation layer follows; the raw conv output is the network's "output"
# blob — presumably the consumer applies sigmoid/thresholding. TODO confirm.
layer {
  name: "conv18"  type: "Convolution"
  bottom: "cat_blob5"  top: "output"
  convolution_param {
    num_output: 1  bias_term: false
    pad: 2  kernel_size: 5  group: 1  stride: 1
    weight_filler { type: "xavier" }
    dilation: 1
  }
}