diff --git a/contrib/facemaskdetection/README.md b/contrib/facemaskdetection/README.md
new file mode 100755
index 0000000000000000000000000000000000000000..4dddd4a0b881562fef94bdcef435135110c8276a
--- /dev/null
+++ b/contrib/facemaskdetection/README.md
@@ -0,0 +1,42 @@
+## Running the SDK Image Detection Sample
+
+### Introduction
+This demo runs the image detection sample on an input image and writes the detection results to result.jpg.
+
+### Preparation
+Get the sample here: https://gitee.com/zhangdwe/mindxsdk-referenceapps/tree/master/tutorials/ImageDetectionSample/python
+Move the sample directory python from the mxsdkreferenceapps/tutorials/ImageDetectionSample folder to the ${SDK installation path}/mxVision/samples/mxVision/python/ path.
+
+You can use the mv command to do this.
+Then change into the moved project directory.
+
+### Model Conversion
+Refer to the "Running the Image Detection Sample (C++)" chapter to convert the model.
+Enter the model folder of the project and run the model conversion script model_conversion.sh:
+```
+bash model_conversion.sh
+```
+
+### Pipeline Preparation
+In the labelPath of the mxpi_modelinfer0 plugin in main.py, replace ${SDK installation path} with your own SDK installation path.
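+
+For reference, the inference plugin in this sample's own pipeline file (main.pipeline) is mxpi_tensorinfer0, configured as below; the modelPath assumes the converted model is placed under the models directory:
+
+```
+"mxpi_tensorinfer0": {
+    "props": {
+        "dataSource": "appsrc0",
+        "modelPath": "models/face_mask_detection.om"
+    },
+    "factory": "mxpi_tensorinfer",
+    "next": "mxpi_dataserialize0"
+}
+```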
+
+### Configuring Environment Variables
+Replace ${SDK installation path} with your own SDK installation path, and replace ${MX_SDK_HOME} with the corresponding path:
+
+```
+export MX_SDK_HOME=${SDK installation path}/mxVision
+
+export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64
+```
+
+### Running
+Prepare an image to be detected, place it in the project directory, and name it test.jpg.
+Run from the command line:
+
+```
+python3.7 main.py
+```
+
+### Checking the Result
+The result image shows the detected bounding boxes; the top-left corner of each box displays the inference result and the corresponding confidence.
\ No newline at end of file
diff --git a/contrib/facemaskdetection/anchor_decode.py b/contrib/facemaskdetection/anchor_decode.py
new file mode 100755
index 0000000000000000000000000000000000000000..07a8818c3b4fde1ee6043727eb7b2499ae56298a
--- /dev/null
+++ b/contrib/facemaskdetection/anchor_decode.py
@@ -0,0 +1,27 @@
+# -*- coding:utf-8 -*-
+import numpy as np
+
+def decode_bbox(anchors, raw_outputs, variances=(0.1, 0.1, 0.2, 0.2)):
+    '''
+    Decode the actual bboxes from the anchors and the raw regression outputs.
+    The anchor value order is: [xmin, ymin, xmax, ymax]
+    :param anchors: numpy array with shape [batch, num_anchors, 4]
+    :param raw_outputs: numpy array with the same shape as anchors
+    :param variances: sequence of float, default=(0.1, 0.1, 0.2, 0.2)
+    :return: numpy array of decoded bboxes, same shape as anchors
+    '''
+    anchor_centers_x = (anchors[:, :, 0:1] + anchors[:, :, 2:3]) / 2
+    anchor_centers_y = (anchors[:, :, 1:2] + anchors[:, :, 3:]) / 2
+    anchors_w = anchors[:, :, 2:3] - anchors[:, :, 0:1]
+    anchors_h = anchors[:, :, 3:] - anchors[:, :, 1:2]
+    raw_outputs_rescale = raw_outputs * np.array(variances)
+    predict_center_x = raw_outputs_rescale[:, :, 0:1] * anchors_w + anchor_centers_x
+    predict_center_y = raw_outputs_rescale[:, :, 1:2] * anchors_h + anchor_centers_y
+    predict_w = np.exp(raw_outputs_rescale[:, :, 2:3]) * anchors_w
+    predict_h = np.exp(raw_outputs_rescale[:, :, 3:]) * anchors_h
+    predict_xmin = predict_center_x - predict_w / 2
+    predict_ymin = predict_center_y - predict_h / 2
+    predict_xmax = predict_center_x + predict_w / 2
+    predict_ymax = predict_center_y + predict_h / 2
+    predict_bbox = np.concatenate([predict_xmin, predict_ymin, predict_xmax, predict_ymax], axis=-1)
+    return predict_bbox
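+
+
+# Minimal sanity check (illustrative; not part of the inference pipeline):
+# with all-zero regression outputs the decoded box must equal the anchor itself,
+# since the center offsets are zero and exp(0) = 1 leaves width/height unchanged.
+if __name__ == '__main__':
+    anchor = np.array([[[0.2, 0.2, 0.4, 0.6]]])  # [batch, num_anchors, 4]
+    raw_outputs = np.zeros_like(anchor)          # zero offsets
+    decoded = decode_bbox(anchor, raw_outputs)
+    assert np.allclose(decoded, anchor)
+    print(decoded)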
diff --git a/contrib/facemaskdetection/anchor_generator.py b/contrib/facemaskdetection/anchor_generator.py
new file mode 100755
index 0000000000000000000000000000000000000000..ebca4d174ef998f2e119d214c84f0be608022206
--- /dev/null
+++ b/contrib/facemaskdetection/anchor_generator.py
@@ -0,0 +1,48 @@
+# -*- encoding=utf-8 -*-
+import numpy as np
+
+def generate_anchors(feature_map_sizes, anchor_sizes, anchor_ratios, offset=0.5):
+    '''
+    generate anchors.
+    :param feature_map_sizes: list of list, for example: [[40,40], [20,20]]
+    :param anchor_sizes: list of list, for example: [[0.05, 0.075], [0.1, 0.15]]
+    :param anchor_ratios: list of list, for example: [[1, 0.5], [1, 0.5]]
+    :param offset: default to 0.5
+    :return:
+    '''
+    anchor_bboxes = []
+    for idx, feature_size in enumerate(feature_map_sizes):
+        cx = (np.linspace(0, feature_size[0] - 1, feature_size[0]) + 0.5) / feature_size[0]
+        cy = (np.linspace(0, feature_size[1] - 1, feature_size[1]) + 0.5) / feature_size[1]
+        cx_grid, cy_grid = np.meshgrid(cx, cy)
+        cx_grid_expend = np.expand_dims(cx_grid, axis=-1)
+        cy_grid_expend = np.expand_dims(cy_grid, axis=-1)
+        center = np.concatenate((cx_grid_expend, cy_grid_expend), axis=-1)
+
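+        # Each extra aspect ratio is paired only with the first anchor size, so
+        # e.g. 2 sizes and 3 ratios yield 2 + 3 - 1 = 4 anchors per location.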
+        num_anchors = len(anchor_sizes[idx]) + len(anchor_ratios[idx]) - 1
+        center_tiled = np.tile(center, (1, 1, 2 * num_anchors))
+        anchor_width_heights = []
+
+        # different scales with the first aspect ratio
+        for scale in anchor_sizes[idx]:
+            ratio = anchor_ratios[idx][0]  # select the first ratio
+            width = scale * np.sqrt(ratio)
+            height = scale / np.sqrt(ratio)
+            anchor_width_heights.extend([-width / 2.0, -height / 2.0, width / 2.0, height / 2.0])
+
+        # the first scale, with different aspect ratios (except the first one)
+        for ratio in anchor_ratios[idx][1:]:
+            s1 = anchor_sizes[idx][0]  # select the first scale
+            width = s1 * np.sqrt(ratio)
+            height = s1 / np.sqrt(ratio)
+            anchor_width_heights.extend([-width / 2.0, -height / 2.0, width / 2.0, height / 2.0])
+
+        bbox_coords = center_tiled + np.array(anchor_width_heights)
+        bbox_coords_reshape = bbox_coords.reshape((-1, 4))
+        anchor_bboxes.append(bbox_coords_reshape)
+    anchor_bboxes = np.concatenate(anchor_bboxes, axis=0)
+    return anchor_bboxes
+
+
+if __name__ == '__main__':
+    # example configuration, matching the settings used in image.py
+    feature_map_sizes = [[33, 33], [17, 17], [9, 9], [5, 5], [3, 3]]
+    anchor_sizes = [[0.04, 0.056], [0.08, 0.11], [0.16, 0.22], [0.32, 0.45], [0.64, 0.72]]
+    anchor_ratios = [[1, 0.62, 0.42]] * 5
+    anchors = generate_anchors(feature_map_sizes, anchor_sizes, anchor_ratios)
+    print(anchors.shape)  # (5972, 4)
diff --git a/contrib/facemaskdetection/image.py b/contrib/facemaskdetection/image.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1026360cb26b3a6704ddd9ad8603427021ca159
--- /dev/null
+++ b/contrib/facemaskdetection/image.py
@@ -0,0 +1,205 @@
+#!/usr/bin/env python
+# coding=utf-8
+
+"""
+Copyright(C) Huawei Technologies Co.,Ltd. 2012-2021 All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import cv2
+import numpy as np
+import MxpiDataType_pb2 as MxpiDataType
+from StreamManagerApi import StreamManagerApi, StringVector, InProtobufVector, MxProtobufIn
+from anchor_generator import generate_anchors
+from anchor_decode import decode_bbox
+from nms import single_class_non_max_suppression
+
+
+def inference(image,
+              conf_thresh=0.5,
+              iou_thresh=0.4,
+              target_shape=(260, 260),
+              draw_result=True,
+              show_result=True
+              ):
+    '''
+    Main function of detection inference
+    :param image: 3D numpy array of image
+    :param conf_thresh: the min threshold of classification probability.
+    :param iou_thresh: the IOU threshold of NMS
+    :param target_shape: the model input size.
+    :param draw_result: whether to draw bounding boxes on the image.
+    :param show_result: whether to write the result image (my_result.jpg) to disk.
+    :return: list of [class_id, conf, xmin, ymin, xmax, ymax] per detection
+    '''
+    image = np.copy(image)
+    output_info = []
+    height, width, _ = image.shape
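+    # NOTE: ids / ids2 are the raw model outputs (bbox regression and class
+    # scores) produced at module level in the __main__ block below; anchors_exp
+    # and id2class are likewise module-level globals.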
+    y_bboxes_output = ids
+    y_cls_output = ids2
+
+    # remove the batch dimension, since batch is always 1 for inference.
+    y_bboxes = decode_bbox(anchors_exp, y_bboxes_output)[0]
+    y_cls = y_cls_output[0]
+    # To speed up, do single-class NMS instead of multi-class NMS.
+    bbox_max_scores = np.max(y_cls, axis=1)
+    bbox_max_score_classes = np.argmax(y_cls, axis=1)
+    # keep_idxs are the bounding boxes that survive NMS.
+    keep_idxs = single_class_non_max_suppression(y_bboxes,
+                                                 bbox_max_scores,
+                                                 conf_thresh=conf_thresh,
+                                                 iou_thresh=iou_thresh,
+                                                 )
+
+    for idx in keep_idxs:
+        conf = float(bbox_max_scores[idx])
+        class_id = bbox_max_score_classes[idx]
+        bbox = y_bboxes[idx]
+        # clip the coordinates so they do not exceed the image boundary.
+        xmin = max(0, int(bbox[0] * width))
+        ymin = max(0, int(bbox[1] * height))
+        xmax = min(int(bbox[2] * width), width)
+        ymax = min(int(bbox[3] * height), height)
+
+        if draw_result:
+            color = (0, 255, 0) if class_id == 0 else (0, 0, 255)
+            cv2.rectangle(image, (xmin, ymin), (xmax, ymax), color, 2)
+            cv2.putText(image, "%s: %.2f" % (id2class[class_id], conf), (xmin + 2, ymin - 2),
+                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, color)
+        output_info.append([class_id, conf, xmin, ymin, xmax, ymax])
+
+    if show_result:
+        cv2.imwrite("./my_result.jpg", image)
+    return output_info
+
+
+
+
+
+if __name__ == '__main__':
+    streamManagerApi = StreamManagerApi()
+    # init stream manager
+    ret = streamManagerApi.InitManager()
+    if ret != 0:
+        print("Failed to init Stream manager, ret=%s" % str(ret))
+        exit()
+
+    # create streams by pipeline config file
+    pipeline_path = b"main.pipeline"
+    tensor_key = b'appsrc0'
+    ret = streamManagerApi.CreateMultipleStreamsFromFile(pipeline_path)
+    if ret != 0:
+        print("Failed to create Stream, ret=%s" % str(ret))
+        exit()
+
+    # Construct the input of the stream
+    img_path = "./image/test.jpg"
+    streamName = b'detection'
+    inPluginId = 0
+    # image preprocessing: BGR -> RGB, resize to the model input size
+    image = cv2.imread(img_path)
+    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+    height, width, _ = image.shape
+    image_resized = cv2.resize(image, (260, 260))
+    image_np = image_resized / 255.0  # normalize to [0, 1]
+    image_exp = np.expand_dims(image_np, axis=0).astype(np.float32)
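+    # image_exp is NHWC float32 with shape (1, 260, 260, 3); this must match
+    # the input shape the .om model was converted with.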
+
+    # feed the preprocessed image into the pipeline as a tensor package
+    protobuf_vec = InProtobufVector()
+    mxpi_tensor_package_list = MxpiDataType.MxpiTensorPackageList()
+    tensor_package_vec = mxpi_tensor_package_list.tensorPackageVec.add()
+
+    # add feature data
+    tensorVec = tensor_package_vec.tensorVec.add()
+    tensorVec.memType = 1
+    tensorVec.deviceId = 0
+
+    # the number of bytes of feature data
+    tensorVec.tensorDataSize = int(image_exp.nbytes)
+    tensorVec.tensorDataType = 0  # float32
+
+    for i in image_exp.shape:
+        tensorVec.tensorShape.append(i)
+
+    tensorVec.dataStr = image_exp.tobytes()
+    protobuf = MxProtobufIn()
+    protobuf.key = tensor_key
+    protobuf.type = b'MxTools.MxpiTensorPackageList'
+    protobuf.protobuf = mxpi_tensor_package_list.SerializeToString()
+    protobuf_vec.push_back(protobuf)
+
+    # Send data to the stream based on streamName.
+    unique_id = streamManagerApi.SendProtobuf(
+        streamName, inPluginId, protobuf_vec)
+    if unique_id < 0:
+        print("Failed to send data to stream.")
+        exit()
+
+    key_vec = StringVector()
+    key_vec.push_back(b'mxpi_tensorinfer0')
+
+    # get the inference result
+    infer_result = streamManagerApi.GetProtobuf(
+        streamName, inPluginId, key_vec)
+    if infer_result.size() == 0:
+        print("infer_result is null")
+        exit()
+    if infer_result[0].errorCode != 0:
+        print("GetProtobuf error. errorCode=%d" % (
+            infer_result[0].errorCode))
+        exit()
+    tensorList = MxpiDataType.MxpiTensorPackageList()
+    tensorList.ParseFromString(infer_result[0].messageBuf)
+
+    # unpack the two output tensors: bbox regression and class scores
+    # (np.frombuffer arrays do not own their data, so reshape instead of resize)
+    ids = np.frombuffer(tensorList.tensorPackageVec[0].tensorVec[0].dataStr, dtype=np.float32)
+    shape = tensorList.tensorPackageVec[0].tensorVec[0].tensorShape
+    ids = ids.reshape(tuple(shape))
+
+    ids2 = np.frombuffer(tensorList.tensorPackageVec[0].tensorVec[1].dataStr, dtype=np.float32)
+    shape2 = tensorList.tensorPackageVec[0].tensorVec[1].tensorShape
+    ids2 = ids2.reshape(tuple(shape2))
+
+
+
+    feature_map_sizes = [[33, 33], [17, 17], [9, 9], [5, 5], [3, 3]]
+    anchor_sizes = [[0.04, 0.056], [0.08, 0.11], [0.16, 0.22], [0.32, 0.45], [0.64, 0.72]]
+    anchor_ratios = [[1, 0.62, 0.42]] * 5
+
+    # generate anchors
+    anchors = generate_anchors(feature_map_sizes, anchor_sizes, anchor_ratios)
+
+    # for inference, the batch size is 1 and the model output shape is [1, N, 4],
+    # so expand the anchors to [1, anchor_num, 4] as well
+    anchors_exp = np.expand_dims(anchors, axis=0)
+
+    id2class = {0: 'Mask', 1: 'NoMask'}
+
+    img = cv2.imread(img_path)
+    inference(img, show_result=True, target_shape=(260, 260))
+
+    # destroy streams
+    streamManagerApi.DestroyAllStreams()
diff --git a/contrib/facemaskdetection/image/test.jpg b/contrib/facemaskdetection/image/test.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..0226b711895c4079c14b258f31260e95b81e515d
Binary files /dev/null and b/contrib/facemaskdetection/image/test.jpg differ
diff --git a/contrib/facemaskdetection/main.pipeline b/contrib/facemaskdetection/main.pipeline
new file mode 100644
index 0000000000000000000000000000000000000000..270c31e038a40d9511e735d00422d82b2e69eb2d
--- /dev/null
+++ b/contrib/facemaskdetection/main.pipeline
@@ -0,0 +1,36 @@
+{
+    "detection": {
+        "stream_config": {
+            "deviceId": "0"
+        },
+        "appsrc0": {
+            "props": {
+                "blocksize": "40960000"
+            },
+            "factory": "appsrc",
+            "next": "mxpi_tensorinfer0"
+        },
+        "mxpi_tensorinfer0": {
+            "props": {
+                "dataSource": "appsrc0",
+                "modelPath": "models/face_mask_detection.om"
+            },
+            "factory": "mxpi_tensorinfer",
+            "next": "mxpi_dataserialize0"
+        },
+        "mxpi_dataserialize0": {
+            "props": {
+                "outputDataKeys": "mxpi_tensorinfer0"
+            },
+            "factory": "mxpi_dataserialize",
+            "next": "appsink0"
+        },
+        "appsink0": {
+            "props": {
+                "blocksize": "409600000"
+            },
+            "factory": "appsink"
+        }
+    }
+}
diff --git a/contrib/facemaskdetection/models/face_mask_detection.om b/contrib/facemaskdetection/models/face_mask_detection.om
new file mode 100644
index 0000000000000000000000000000000000000000..0f0d5f05adea6b01d6eb2dcb3355f258d0cd4aea
Binary files /dev/null and b/contrib/facemaskdetection/models/face_mask_detection.om differ
diff --git a/contrib/facemaskdetection/models/face_mask_detection.pb b/contrib/facemaskdetection/models/face_mask_detection.pb
new file mode 100755
index 0000000000000000000000000000000000000000..67c2be1a834a04fbf21fd077bfe4722ec1250386
Binary files /dev/null and b/contrib/facemaskdetection/models/face_mask_detection.pb differ
diff --git a/contrib/facemaskdetection/models/mask.names b/contrib/facemaskdetection/models/mask.names
new file mode 100644
index 0000000000000000000000000000000000000000..c6b8f912bc4aa8c7ccf39d44977eace50a47c31c
--- /dev/null
+++ b/contrib/facemaskdetection/models/mask.names
@@ -0,0 +1,2 @@
+Mask
+NoMask
diff --git a/contrib/facemaskdetection/models/model_conversion.sh b/contrib/facemaskdetection/models/model_conversion.sh
new file mode 100755
index 0000000000000000000000000000000000000000..96da2cd8777a321e66eab8c7fb2e8a82f5d76150
--- /dev/null
+++ b/contrib/facemaskdetection/models/model_conversion.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+# Copyright(C) Huawei Technologies Co.,Ltd. 2012-2021 All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# 该脚本用来将pb模型文件转换成.om模型文件
+# This is used to convert pb model file to .om model file.
+
+
+# 设置环境变量(请确认install_path路径是否正确)
+# Set environment PATH (Please confirm that the install_path is correct).
+
+export install_path=/usr/local/Ascend/ascend-toolkit/latest
+export PATH=/usr/local/python3.7.5/bin:${install_path}/atc/ccec_compiler/bin:${install_path}/atc/bin:$PATH
+export PYTHONPATH=${install_path}/atc/python/site-packages:${install_path}/atc/python/site-packages/auto_tune.egg/auto_tune:${install_path}/atc/python/site-packages/schedule_search.egg
+export LD_LIBRARY_PATH=${install_path}/atc/lib64:$LD_LIBRARY_PATH
+export ASCEND_OPP_PATH=${install_path}/opp
+
+
+# 执行,转换face mask detection模型
+# Execute, convert the face mask detection model.
+# 注意:输入节点名data_1与输入尺寸1,260,260,3需与实际的pb模型一致,如有不同请自行调整;
+# 如输出节点顺序与后处理不匹配(第一个输出为bbox回归,第二个为类别得分),请通过--out_nodes指定。
+# Note: the input node name (data_1) and input shape (1,260,260,3) are assumed to
+# match this sample's pb model; adjust them if your model differs. If the output
+# order does not match the post-processing (bbox regression first, class scores
+# second), specify it explicitly with --out_nodes.
+
+atc --model=./face_mask_detection.pb --framework=3 --output=./face_mask_detection --soc_version=Ascend310 --input_shape="data_1:1,260,260,3"
diff --git a/contrib/facemaskdetection/nms.py b/contrib/facemaskdetection/nms.py
new file mode 100755
index 0000000000000000000000000000000000000000..c98c7c63a49379e26549530e68b0c31813fb6998
--- /dev/null
+++ b/contrib/facemaskdetection/nms.py
@@ -0,0 +1,59 @@
+# -*- coding:utf-8 -*-
+import numpy as np
+
+def single_class_non_max_suppression(bboxes, confidences, conf_thresh=0.2, iou_thresh=0.5, keep_top_k=-1):
+    '''
+    do nms on a single class.
+    Hint: for the specific class, given the bboxes and their confidences,
+    1) sort the bboxes by confidence from high to low; call this the working set
+    2) select the bbox with the highest confidence, remove it from the set, and compute its IOU with the remaining bboxes
+    3) remove from the set every bbox whose IOU is higher than iou_thresh
+    4) repeat steps 2 and 3 until the set is empty.
+    :param bboxes: numpy array of 2D, [num_bboxes, 4]
+    :param confidences: numpy array of 1D. [num_bboxes]
+    :param conf_thresh: confidence threshold; boxes below it are discarded up front
+    :param iou_thresh: IOU threshold for suppression
+    :param keep_top_k: keep at most this many boxes (-1 means keep all)
+    :return: indices (into the original bboxes) of the kept boxes
+    '''
+    if len(bboxes) == 0:
+        return []
+
+    conf_keep_idx = np.where(confidences > conf_thresh)[0]
+
+    bboxes = bboxes[conf_keep_idx]
+    confidences = confidences[conf_keep_idx]
+
+    pick = []
+    xmin = bboxes[:, 0]
+    ymin = bboxes[:, 1]
+    xmax = bboxes[:, 2]
+    ymax = bboxes[:, 3]
+
+    area = (xmax - xmin + 1e-3) * (ymax - ymin + 1e-3)
+    idxs = np.argsort(confidences)
+
+    while len(idxs) > 0:
+        last = len(idxs) - 1
+        i = idxs[last]
+        pick.append(i)
+
+        # keep top k
+        if keep_top_k != -1:
+            if len(pick) >= keep_top_k:
+                break
+
+        overlap_xmin = np.maximum(xmin[i], xmin[idxs[:last]])
+        overlap_ymin = np.maximum(ymin[i], ymin[idxs[:last]])
+        overlap_xmax = np.minimum(xmax[i], xmax[idxs[:last]])
+        overlap_ymax = np.minimum(ymax[i], ymax[idxs[:last]])
+        overlap_w = np.maximum(0, overlap_xmax - overlap_xmin)
+        overlap_h = np.maximum(0, overlap_ymax - overlap_ymin)
+        overlap_area = overlap_w * overlap_h
+        overlap_ratio = overlap_area / (area[idxs[:last]] + area[i] - overlap_area)
+
+        need_to_be_deleted_idx = np.concatenate(([last], np.where(overlap_ratio > iou_thresh)[0]))
+        idxs = np.delete(idxs, need_to_be_deleted_idx)
+
+    # if the number of final bboxes is less than keep_top_k, we need to pad it.
+    # TODO
+    return conf_keep_idx[pick]
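+
+
+# Minimal usage sketch (illustrative; not part of the inference pipeline):
+# box 1 overlaps box 0 with IOU > 0.5 and has lower confidence, so it is
+# suppressed; box 2 is disjoint and survives.
+if __name__ == '__main__':
+    boxes = np.array([[0.0, 0.0, 1.0, 1.0],
+                      [0.1, 0.1, 1.0, 1.0],
+                      [2.0, 2.0, 3.0, 3.0]])
+    scores = np.array([0.9, 0.8, 0.7])
+    keep = single_class_non_max_suppression(boxes, scores, conf_thresh=0.2, iou_thresh=0.5)
+    print(keep)  # expected: [0 2]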
diff --git a/contrib/facemaskdetection/test_image.py b/contrib/facemaskdetection/test_image.py
new file mode 100644
index 0000000000000000000000000000000000000000..5993243c0defa5ea71dbf743a9e35d2d4caf0af2
--- /dev/null
+++ b/contrib/facemaskdetection/test_image.py
@@ -0,0 +1,218 @@
+#!/usr/bin/env python
+# coding=utf-8
+
+"""
+Copyright(C) Huawei Technologies Co.,Ltd. 2012-2021 All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import os
+import time
+import cv2
+import numpy as np
+import MxpiDataType_pb2 as MxpiDataType
+from StreamManagerApi import StreamManagerApi, StringVector, InProtobufVector, MxProtobufIn
+from anchor_generator import generate_anchors
+from anchor_decode import decode_bbox
+from nms import single_class_non_max_suppression
+
+
+def inference(image,
+              conf_thresh=0.5,
+              iou_thresh=0.4,
+              target_shape=(260, 260),
+              draw_result=True,
+              show_result=True
+              ):
+    '''
+    Main function of detection inference
+    :param image: 3D numpy array of image
+    :param conf_thresh: the min threshold of classification probability.
+    :param iou_thresh: the IOU threshold of NMS
+    :param target_shape: the model input size.
+    :param draw_result: unused here; kept for interface parity with image.py
+    :param show_result: unused here; kept for interface parity with image.py
+    :return: list of [class_id, conf, xmin, ymin, xmax, ymax] per detection
+    '''
+    image = np.copy(image)
+    output_info = []
+    height, width, _ = image.shape
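+    # NOTE: ids / ids2 are the raw model outputs (bbox regression and class
+    # scores) produced per image in the __main__ loop below; anchors_exp is
+    # likewise a module-level global.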
+    y_bboxes_output = ids
+    y_cls_output = ids2
+
+    # remove the batch dimension, since batch is always 1 for inference.
+    y_bboxes = decode_bbox(anchors_exp, y_bboxes_output)[0]
+    y_cls = y_cls_output[0]
+    # To speed up, do single-class NMS instead of multi-class NMS.
+    bbox_max_scores = np.max(y_cls, axis=1)
+    bbox_max_score_classes = np.argmax(y_cls, axis=1)
+    # keep_idxs are the bounding boxes that survive NMS.
+    keep_idxs = single_class_non_max_suppression(y_bboxes,
+                                                 bbox_max_scores,
+                                                 conf_thresh=conf_thresh,
+                                                 iou_thresh=iou_thresh,
+                                                 )
+
+    for idx in keep_idxs:
+        conf = float(bbox_max_scores[idx])
+        class_id = bbox_max_score_classes[idx]
+        bbox = y_bboxes[idx]
+        # clip the coordinates so they do not exceed the image boundary.
+        xmin = max(0, int(bbox[0] * width))
+        ymin = max(0, int(bbox[1] * height))
+        xmax = min(int(bbox[2] * width), width)
+        ymax = min(int(bbox[3] * height), height)
+
+        output_info.append([class_id, conf, xmin, ymin, xmax, ymax])
+    return output_info
+
+
+
+
+
+if __name__ == '__main__':
+    streamManagerApi = StreamManagerApi()
+    # init stream manager
+    ret = streamManagerApi.InitManager()
+    if ret != 0:
+        print("Failed to init Stream manager, ret=%s" % str(ret))
+        exit()
+
+    # create streams by pipeline config file
+    pipeline_path = b"main.pipeline"
+    tensor_key = b'appsrc0'
+    ret = streamManagerApi.CreateMultipleStreamsFromFile(pipeline_path)
+    if ret != 0:
+        print("Failed to create Stream, ret=%s" % str(ret))
+        exit()
+
+    # Construct the input of the stream: iterate over all test images
+    PATH = "./testimages/FaceMaskDataset/test/"
+    infer_time = 0
+    for item in os.listdir(PATH):
+        start_stamp = time.time()
+        img_path = os.path.join(PATH, item)
+        img_name = item.split(".")[0]
+        img_txt = "./testimages/FaceMaskDataset/result_txt/" + img_name + ".txt"
+        if os.path.exists(img_txt):
+            os.remove(img_txt)
+        if not os.path.exists(img_path):
+            print("The test image does not exist.")
+
+        streamName = b'detection'
+        inPluginId = 0
+
+        image = cv2.imread(img_path)
+        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+        read_frame_stamp = time.time()
+        height, width, _ = image.shape
+        image_resized = cv2.resize(image, (260, 260))
+        image_np = image_resized / 255.0  # normalize to [0, 1]
+        image_exp = np.expand_dims(image_np, axis=0).astype(np.float32)
+
+        protobuf_vec = InProtobufVector()
+        mxpi_tensor_package_list = MxpiDataType.MxpiTensorPackageList()
+        tensor_package_vec = mxpi_tensor_package_list.tensorPackageVec.add()
+
+        # add feature data
+        tensorVec = tensor_package_vec.tensorVec.add()
+        tensorVec.memType = 1
+        tensorVec.deviceId = 0
+
+        # the number of bytes of feature data
+        tensorVec.tensorDataSize = int(image_exp.nbytes)
+        tensorVec.tensorDataType = 0  # float32
+
+        for i in image_exp.shape:
+            tensorVec.tensorShape.append(i)
+
+        tensorVec.dataStr = image_exp.tobytes()
+        protobuf = MxProtobufIn()
+        protobuf.key = tensor_key
+        protobuf.type = b'MxTools.MxpiTensorPackageList'
+        protobuf.protobuf = mxpi_tensor_package_list.SerializeToString()
+        protobuf_vec.push_back(protobuf)
+
+
+        # Send data to the stream based on streamName.
+        unique_id = streamManagerApi.SendProtobuf(
+            streamName, inPluginId, protobuf_vec)
+        if unique_id < 0:
+            print("Failed to send data to stream.")
+            exit()
+
+        key_vec = StringVector()
+        key_vec.push_back(b'mxpi_tensorinfer0')
+        # get the inference result
+        infer_result = streamManagerApi.GetProtobuf(
+            streamName, inPluginId, key_vec)
+        if infer_result.size() == 0:
+            print("infer_result is null")
+            exit()
+        if infer_result[0].errorCode != 0:
+            print("GetProtobuf error. errorCode=%d" % (
+                infer_result[0].errorCode))
+            exit()
+        tensorList = MxpiDataType.MxpiTensorPackageList()
+        tensorList.ParseFromString(infer_result[0].messageBuf)
+
+        # unpack the two output tensors: bbox regression and class scores
+        # (np.frombuffer arrays do not own their data, so reshape instead of resize)
+        ids = np.frombuffer(tensorList.tensorPackageVec[0].tensorVec[0].dataStr, dtype=np.float32)
+        shape = tensorList.tensorPackageVec[0].tensorVec[0].tensorShape
+        ids = ids.reshape(tuple(shape))
+
+        ids2 = np.frombuffer(tensorList.tensorPackageVec[0].tensorVec[1].dataStr, dtype=np.float32)
+        shape2 = tensorList.tensorPackageVec[0].tensorVec[1].tensorShape
+        ids2 = ids2.reshape(tuple(shape2))
+
+        feature_map_sizes = [[33, 33], [17, 17], [9, 9], [5, 5], [3, 3]]
+        anchor_sizes = [[0.04, 0.056], [0.08, 0.11], [0.16, 0.22], [0.32, 0.45], [0.64, 0.72]]
+        anchor_ratios = [[1, 0.62, 0.42]] * 5
+
+        # generate anchors
+        anchors = generate_anchors(feature_map_sizes, anchor_sizes, anchor_ratios)
+
+        # for inference, the batch size is 1 and the model output shape is [1, N, 4],
+        # so expand the anchors to [1, anchor_num, 4] as well
+        anchors_exp = np.expand_dims(anchors, axis=0)
+
+        id2class = {0: 'face_mask', 1: 'face'}
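+        # these class names are written into the per-image result txt files and
+        # should match the ground-truth label names extracted by xmltotxt.py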
+
+
+
+        img = cv2.imread(img_path)
+        output_info = inference(img, show_result=False, target_shape=(260, 260))
+        inference_stamp = time.time()
+        infer_time += (inference_stamp - read_frame_stamp)
+        print("cumulative infer time: %f" % infer_time)
+        # create the result file even if nothing is detected,
+        # then append one line per detection
+        with open(img_txt, "a+") as f:
+            for info in output_info:
+                result = '{} {} {} {} {} {}'.format(id2class[info[0]], info[1], info[2], info[3], info[4], info[5])
+                f.write(result)
+                f.write('\n')
+        print(
+            "read_frame:%f, infer time:%f"
+            % (
+                read_frame_stamp - start_stamp,
+                inference_stamp - read_frame_stamp,
+            )
+        )
+    # destroy streams
+    streamManagerApi.DestroyAllStreams()
diff --git a/contrib/facemaskdetection/xmltotxt.py b/contrib/facemaskdetection/xmltotxt.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d387f8ad149721482742491e6bb5058fd4d77aa
--- /dev/null
+++ b/contrib/facemaskdetection/xmltotxt.py
@@ -0,0 +1,43 @@
+import os
+import glob
+import xml.etree.ElementTree as ET
+
+def xml_to_txt(indir, outdir):
+
+    os.chdir(indir)
+    annotations = glob.glob('*.xml')
+
+    for i, file in enumerate(annotations):
+
+        file_save = file.split('.')[0] + '.txt'
+        file_txt = os.path.join(outdir, file_save)
+
+        # actual parsing
+        tree = ET.parse(file)
+        root = tree.getroot()
+
+        # one line per object: "name xmin ymin xmax ymax"
+        with open(file_txt, 'w') as f_w:
+            for obj in root.iter('object'):
+                name = obj.find('name').text
+                xmlbox = obj.find('bndbox')
+                xn = xmlbox.find('xmin').text
+                xx = xmlbox.find('xmax').text
+                yn = xmlbox.find('ymin').text
+                yx = xmlbox.find('ymax').text
+                label = '{} {} {} {} {}'.format(name, xn, yn, xx, yx)
+                f_w.write(label)
+                f_w.write('\n')
+
+
+indir = '/home/zhongzhi2/facemask_project/newenv_mask/testimages/FaceMaskDataset/label'  # xml directory
+outdir = "/home/zhongzhi2/facemask_project/newenv_mask/testimages/FaceMaskDataset/ground_truth"  # txt directory
+
+xml_to_txt(indir, outdir)