From 8acd99d1c04da0990cf271a806bfed0d87a35541 Mon Sep 17 00:00:00 2001
From: weiwei
Date: Thu, 16 Mar 2023 19:45:51 +0800
Subject: [PATCH] Add a simulated driver service for HDI V2.0:
 1. move the existing HDI V1.0 simulated driver service under example/drivers
 into the nnrt/v1_0 subdirectory;
 2. add a new HDI V2.0 simulated driver service under example/drivers in the
 nnrt/v2_0 subdirectory.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: weiwei
---
 example/drivers/nnrt/{ => v1_0}/BUILD.gn      |   0
 .../nnrt/{ => v1_0}/hdi_cpu_service/BUILD.gn  |   8 +-
 .../include/nnrt_device_service.h             |   0
 .../hdi_cpu_service/include/node_functions.h  |   0
 .../hdi_cpu_service/include/node_registry.h   |   0
 .../include/prepared_model_service.h          |   0
 .../include/shared_buffer_parser.h            |   0
 .../hdi_cpu_service/include/validation.h      |   0
 .../src/nnrt_device_driver.cpp                |   0
 .../src/nnrt_device_service.cpp               |   0
 .../hdi_cpu_service/src/node_functions.cpp    |   0
 .../hdi_cpu_service/src/node_registry.cpp     |   0
 .../src/prepared_model_service.cpp            |   0
 .../src/shared_buffer_parser.cpp              |   0
 .../hdi_cpu_service/src/validation.cpp        |   0
 example/drivers/nnrt/v2_0/BUILD.gn            |  24 +
 .../nnrt/v2_0/hdi_cpu_service/BUILD.gn        |  90 +++
 .../include/nnrt_device_service.h             |  88 +++
 .../hdi_cpu_service/include/node_functions.h  |  71 +++
 .../hdi_cpu_service/include/node_registry.h   |  57 ++
 .../include/prepared_model_service.h          |  80 +++
 .../include/shared_buffer_parser.h            |  49 ++
 .../v2_0/hdi_cpu_service/include/validation.h |  33 ++
 .../src/nnrt_device_driver.cpp                | 115 ++++
 .../src/nnrt_device_service.cpp               | 529 ++++++++++++++++++
 .../hdi_cpu_service/src/node_functions.cpp    | 373 ++++++++++++
 .../hdi_cpu_service/src/node_registry.cpp     |  60 ++
 .../src/prepared_model_service.cpp            | 461 +++++++++++++++
 .../src/shared_buffer_parser.cpp              | 104 ++++
 .../v2_0/hdi_cpu_service/src/validation.cpp   |  72 +++
 30 files changed, 2210 insertions(+), 4 deletions(-)
 rename example/drivers/nnrt/{ => v1_0}/BUILD.gn (100%)
 rename example/drivers/nnrt/{ => v1_0}/hdi_cpu_service/BUILD.gn (88%)
 rename example/drivers/nnrt/{ => v1_0}/hdi_cpu_service/include/nnrt_device_service.h (100%)
 rename example/drivers/nnrt/{ => v1_0}/hdi_cpu_service/include/node_functions.h (100%)
 rename example/drivers/nnrt/{ => v1_0}/hdi_cpu_service/include/node_registry.h (100%)
 rename example/drivers/nnrt/{ => v1_0}/hdi_cpu_service/include/prepared_model_service.h (100%)
 rename example/drivers/nnrt/{ => v1_0}/hdi_cpu_service/include/shared_buffer_parser.h (100%)
 rename example/drivers/nnrt/{ => v1_0}/hdi_cpu_service/include/validation.h (100%)
 rename example/drivers/nnrt/{ => v1_0}/hdi_cpu_service/src/nnrt_device_driver.cpp (100%)
 rename example/drivers/nnrt/{ => v1_0}/hdi_cpu_service/src/nnrt_device_service.cpp (100%)
 rename example/drivers/nnrt/{ => v1_0}/hdi_cpu_service/src/node_functions.cpp (100%)
 rename example/drivers/nnrt/{ => v1_0}/hdi_cpu_service/src/node_registry.cpp (100%)
 rename example/drivers/nnrt/{ => v1_0}/hdi_cpu_service/src/prepared_model_service.cpp (100%)
 rename example/drivers/nnrt/{ => v1_0}/hdi_cpu_service/src/shared_buffer_parser.cpp (100%)
 rename
example/drivers/nnrt/{ => v1_0}/hdi_cpu_service/src/validation.cpp (100%) create mode 100644 example/drivers/nnrt/v2_0/BUILD.gn create mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/BUILD.gn create mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/include/nnrt_device_service.h create mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/include/node_functions.h create mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/include/node_registry.h create mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/include/prepared_model_service.h create mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/include/shared_buffer_parser.h create mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/include/validation.h create mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/src/nnrt_device_driver.cpp create mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/src/nnrt_device_service.cpp create mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/src/node_functions.cpp create mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/src/node_registry.cpp create mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/src/prepared_model_service.cpp create mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/src/shared_buffer_parser.cpp create mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/src/validation.cpp diff --git a/example/drivers/nnrt/BUILD.gn b/example/drivers/nnrt/v1_0/BUILD.gn similarity index 100% rename from example/drivers/nnrt/BUILD.gn rename to example/drivers/nnrt/v1_0/BUILD.gn diff --git a/example/drivers/nnrt/hdi_cpu_service/BUILD.gn b/example/drivers/nnrt/v1_0/hdi_cpu_service/BUILD.gn similarity index 88% rename from example/drivers/nnrt/hdi_cpu_service/BUILD.gn rename to example/drivers/nnrt/v1_0/hdi_cpu_service/BUILD.gn index e2f8ab4..6fe5b86 100644 --- a/example/drivers/nnrt/hdi_cpu_service/BUILD.gn +++ b/example/drivers/nnrt/v1_0/hdi_cpu_service/BUILD.gn @@ -15,7 +15,7 @@ import("//build/ohos.gni") import("//drivers/hdf_core/adapter/uhdf2/uhdf.gni") ohos_prebuilt_shared_library("mindspore_demo") { - source = "//drivers/peripheral/nnrt/mindspore/mindspore/libmindspore-lite.huawei.so" + source = "//drivers/peripheral/nnrt/v1_0/mindspore/mindspore/libmindspore-lite.huawei.so" install_images = [chipset_base_dir] subsystem_name = "hdf" @@ -24,8 +24,8 @@ ohos_prebuilt_shared_library("mindspore_demo") { ohos_shared_library("libnnrt_device_service_1.0") { include_dirs = [ - "//drivers/peripheral/nnrt/hdi_cpu_service/include", - "//drivers/peripheral/nnrt/mindspore", + "//drivers/peripheral/nnrt/v1_0/hdi_cpu_service/include", + "//drivers/peripheral/nnrt/v1_0/mindspore", "//third_party/flatbuffers/include", "//commonlibrary/c_utils/base/include" ] @@ -62,7 +62,7 @@ ohos_shared_library("libnnrt_driver") { "src/nnrt_device_driver.cpp" ] deps = [ - "//drivers/peripheral/nnrt/hdi_cpu_service:libnnrt_device_service_1.0", + "//drivers/peripheral/nnrt/v1_0/hdi_cpu_service:libnnrt_device_service_1.0", "//drivers/interface/nnrt/v1_0:libnnrt_stub_1.0" ] diff --git a/example/drivers/nnrt/hdi_cpu_service/include/nnrt_device_service.h b/example/drivers/nnrt/v1_0/hdi_cpu_service/include/nnrt_device_service.h similarity index 100% rename from example/drivers/nnrt/hdi_cpu_service/include/nnrt_device_service.h rename to example/drivers/nnrt/v1_0/hdi_cpu_service/include/nnrt_device_service.h diff --git a/example/drivers/nnrt/hdi_cpu_service/include/node_functions.h b/example/drivers/nnrt/v1_0/hdi_cpu_service/include/node_functions.h similarity index 100% rename from 
example/drivers/nnrt/hdi_cpu_service/include/node_functions.h rename to example/drivers/nnrt/v1_0/hdi_cpu_service/include/node_functions.h diff --git a/example/drivers/nnrt/hdi_cpu_service/include/node_registry.h b/example/drivers/nnrt/v1_0/hdi_cpu_service/include/node_registry.h similarity index 100% rename from example/drivers/nnrt/hdi_cpu_service/include/node_registry.h rename to example/drivers/nnrt/v1_0/hdi_cpu_service/include/node_registry.h diff --git a/example/drivers/nnrt/hdi_cpu_service/include/prepared_model_service.h b/example/drivers/nnrt/v1_0/hdi_cpu_service/include/prepared_model_service.h similarity index 100% rename from example/drivers/nnrt/hdi_cpu_service/include/prepared_model_service.h rename to example/drivers/nnrt/v1_0/hdi_cpu_service/include/prepared_model_service.h diff --git a/example/drivers/nnrt/hdi_cpu_service/include/shared_buffer_parser.h b/example/drivers/nnrt/v1_0/hdi_cpu_service/include/shared_buffer_parser.h similarity index 100% rename from example/drivers/nnrt/hdi_cpu_service/include/shared_buffer_parser.h rename to example/drivers/nnrt/v1_0/hdi_cpu_service/include/shared_buffer_parser.h diff --git a/example/drivers/nnrt/hdi_cpu_service/include/validation.h b/example/drivers/nnrt/v1_0/hdi_cpu_service/include/validation.h similarity index 100% rename from example/drivers/nnrt/hdi_cpu_service/include/validation.h rename to example/drivers/nnrt/v1_0/hdi_cpu_service/include/validation.h diff --git a/example/drivers/nnrt/hdi_cpu_service/src/nnrt_device_driver.cpp b/example/drivers/nnrt/v1_0/hdi_cpu_service/src/nnrt_device_driver.cpp similarity index 100% rename from example/drivers/nnrt/hdi_cpu_service/src/nnrt_device_driver.cpp rename to example/drivers/nnrt/v1_0/hdi_cpu_service/src/nnrt_device_driver.cpp diff --git a/example/drivers/nnrt/hdi_cpu_service/src/nnrt_device_service.cpp b/example/drivers/nnrt/v1_0/hdi_cpu_service/src/nnrt_device_service.cpp similarity index 100% rename from example/drivers/nnrt/hdi_cpu_service/src/nnrt_device_service.cpp rename to example/drivers/nnrt/v1_0/hdi_cpu_service/src/nnrt_device_service.cpp diff --git a/example/drivers/nnrt/hdi_cpu_service/src/node_functions.cpp b/example/drivers/nnrt/v1_0/hdi_cpu_service/src/node_functions.cpp similarity index 100% rename from example/drivers/nnrt/hdi_cpu_service/src/node_functions.cpp rename to example/drivers/nnrt/v1_0/hdi_cpu_service/src/node_functions.cpp diff --git a/example/drivers/nnrt/hdi_cpu_service/src/node_registry.cpp b/example/drivers/nnrt/v1_0/hdi_cpu_service/src/node_registry.cpp similarity index 100% rename from example/drivers/nnrt/hdi_cpu_service/src/node_registry.cpp rename to example/drivers/nnrt/v1_0/hdi_cpu_service/src/node_registry.cpp diff --git a/example/drivers/nnrt/hdi_cpu_service/src/prepared_model_service.cpp b/example/drivers/nnrt/v1_0/hdi_cpu_service/src/prepared_model_service.cpp similarity index 100% rename from example/drivers/nnrt/hdi_cpu_service/src/prepared_model_service.cpp rename to example/drivers/nnrt/v1_0/hdi_cpu_service/src/prepared_model_service.cpp diff --git a/example/drivers/nnrt/hdi_cpu_service/src/shared_buffer_parser.cpp b/example/drivers/nnrt/v1_0/hdi_cpu_service/src/shared_buffer_parser.cpp similarity index 100% rename from example/drivers/nnrt/hdi_cpu_service/src/shared_buffer_parser.cpp rename to example/drivers/nnrt/v1_0/hdi_cpu_service/src/shared_buffer_parser.cpp diff --git a/example/drivers/nnrt/hdi_cpu_service/src/validation.cpp b/example/drivers/nnrt/v1_0/hdi_cpu_service/src/validation.cpp similarity index 100% rename from 
example/drivers/nnrt/hdi_cpu_service/src/validation.cpp rename to example/drivers/nnrt/v1_0/hdi_cpu_service/src/validation.cpp diff --git a/example/drivers/nnrt/v2_0/BUILD.gn b/example/drivers/nnrt/v2_0/BUILD.gn new file mode 100644 index 0000000..28ca28b --- /dev/null +++ b/example/drivers/nnrt/v2_0/BUILD.gn @@ -0,0 +1,24 @@ +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if (defined(ohos_lite)) { + group("nnrt_entry") { + deps = [ ] + } +} else { + group("nnrt_entry") { + deps = [ + "./hdi_cpu_service:hdf_nnrt_service", + ] + } +} \ No newline at end of file diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/BUILD.gn b/example/drivers/nnrt/v2_0/hdi_cpu_service/BUILD.gn new file mode 100644 index 0000000..003de74 --- /dev/null +++ b/example/drivers/nnrt/v2_0/hdi_cpu_service/BUILD.gn @@ -0,0 +1,90 @@ +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import("//build/ohos.gni") +import("//drivers/hdf_core/adapter/uhdf2/uhdf.gni") + +ohos_prebuilt_shared_library("mindspore_demo") { + source = "//drivers/peripheral/nnrt/v2_0/mindspore/mindspore/libmindspore-lite.huawei.so" + + install_images = [chipset_base_dir] + subsystem_name = "hdf" + part_name = "drivers_peripheral_nnrt" +} + +ohos_shared_library("libnnrt_device_service_2.0") { + include_dirs = [ + "//drivers/peripheral/nnrt/v2_0/hdi_cpu_service/include", + "//drivers/peripheral/nnrt/v2_0/mindspore", + "//third_party/flatbuffers/include", + "//commonlibrary/c_utils/base/include" + ] + sources = [ + "src/nnrt_device_service.cpp", + "src/prepared_model_service.cpp", + "src/node_registry.cpp", + "src/node_functions.cpp", + "src/shared_buffer_parser.cpp", + "src/validation.cpp" + ] + + deps = [ + "//drivers/interface/nnrt/v2_0:nnrt_idl_headers", + "//drivers/interface/nnrt/v2_0:libnnrt_stub_2.0", + ":mindspore_demo" + ] + + external_deps = [ + "hdf_core:libhdf_utils", + "hiviewdfx_hilog_native:libhilog", + "ipc:ipc_core", + "c_utils:utils" + ] + + install_images = [ chipset_base_dir ] + subsystem_name = "hdf" + part_name = "drivers_peripheral_nnrt" +} + +ohos_shared_library("libnnrt_driver") { + include_dirs = [] + sources = [ + "src/nnrt_device_driver.cpp" + ] + deps = [ + "//drivers/peripheral/nnrt/v2_0/hdi_cpu_service:libnnrt_device_service_2.0", + "//drivers/interface/nnrt/v2_0:libnnrt_stub_2.0" + ] + + external_deps = [ + "hdf_core:libhdf_host", + "hdf_core:libhdf_ipc_adapter", + "hdf_core:libhdf_utils", + "hiviewdfx_hilog_native:libhilog", + "ipc:ipc_core", + "c_utils:utils", + "hdf_core:libhdi" + ] + + install_images = [ chipset_base_dir ] + subsystem_name = "hdf" + part_name = "drivers_peripheral_nnrt" +} + +group("hdf_nnrt_service") { + deps = [ + ":mindspore_demo", + ":libnnrt_driver", + ":libnnrt_device_service_2.0", + ] +} \ No newline at end of file diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/include/nnrt_device_service.h b/example/drivers/nnrt/v2_0/hdi_cpu_service/include/nnrt_device_service.h new file mode 100644 index 0000000..9419f40 --- /dev/null +++ b/example/drivers/nnrt/v2_0/hdi_cpu_service/include/nnrt_device_service.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef OHOS_HDI_NNRT_V2_0_NNRTDEVICESERVICE_H +#define OHOS_HDI_NNRT_V2_0_NNRTDEVICESERVICE_H + +#include + +#include "v2_0/innrt_device.h" +#include "ashmem.h" +#include "include/api/model.h" + +#include "mindspore_schema/model_generated.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V2_0 { +class NnrtDeviceService : public INnrtDevice { +public: + NnrtDeviceService() = default; + virtual ~NnrtDeviceService(); + + int32_t GetDeviceName(std::string& name) override; + + int32_t GetVendorName(std::string& name) override; + + int32_t GetDeviceType(DeviceType& deviceType) override; + + int32_t GetDeviceStatus(DeviceStatus& status) override; + + int32_t GetSupportedOperation(const Model& model, std::vector& ops) override; + + int32_t IsFloat16PrecisionSupported(bool& isSupported) override; + + int32_t IsPerformanceModeSupported(bool& isSupported) override; + + int32_t IsPrioritySupported(bool& isSupported) override; + + int32_t IsDynamicInputSupported(bool& isSupported) override; + + int32_t PrepareModel(const Model& model, const ModelConfig& config, sptr& preparedModel) override; + + int32_t IsModelCacheSupported(bool& isSupported) override; + + int32_t PrepareModelFromModelCache(const std::vector& modelCache, const ModelConfig& config, + sptr& preparedModel) override; + + int32_t AllocateBuffer(uint32_t length, SharedBuffer& buffer) override; + + int32_t ReleaseBuffer(const SharedBuffer& buffer) override; + +private: + int32_t ValidateModelConfig(const ModelConfig& config) const; + int32_t ValidateModel(const Model& model) const; + std::shared_ptr TransModelToGraph(const Model& model) const; + std::unique_ptr TransTensor(const Tensor& tensor) const; + std::unique_ptr TransNode(const Node& node) const; + std::unique_ptr TransSubGraph(const SubGraph& graph, const size_t numTensor) const; + std::shared_ptr TransModelConfig(const ModelConfig& config) const; + int32_t ShowCustomAttributes(const std::map>& extensions) const; + int32_t ParseCustomAttributes(const std::map>& extensions, float& attr1, + std::string& attr2) const; + int32_t ConvertVecToFloat(std::vector vecFloat, float& result) const; + int32_t ConvertVecToString(std::vector vecFloat, std::string& result) const; + +private: + std::shared_ptr m_model {nullptr}; + std::unordered_map> m_ashmems; +}; +} // V2_0 +} // Nnrt +} // HDI +} // OHOS + +#endif // OHOS_HDI_NNRT_V2_0_NNRTDEVICESERVICE_H \ No newline at end of file diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/include/node_functions.h b/example/drivers/nnrt/v2_0/hdi_cpu_service/include/node_functions.h new file mode 100644 index 0000000..8e1fbb3 --- /dev/null +++ b/example/drivers/nnrt/v2_0/hdi_cpu_service/include/node_functions.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef OHOS_HDI_NNR_NODE_FUNCTIONS_H +#define OHOS_HDI_NNR_NODE_FUNCTIONS_H + +#include + +#include "hdf_base.h" +#include "hdf_log.h" +#include +#include "node_registry.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V2_0 { +template +int32_t ParsePrimitive(const std::vector& primitive, T& attr, + std::function parseFunc) +{ + if (primitive.empty()) { + HDF_LOGE("Primitive data is empty."); + return HDF_FAILURE; + } + + OHOS::MessageParcel parcelData; + bool ret = parcelData.WriteBuffer(primitive.data(), primitive.size()); + if (!ret) { + HDF_LOGE("Write data to MessageParcel failed."); + return HDF_FAILURE; + } + + ret = parseFunc(parcelData, attr); + if (!ret) { + HDF_LOGE("Unmarshalling data failed."); + return HDF_FAILURE; + } + return HDF_SUCCESS; +} + +PrimUniquePtr GetAddPrimitive(const std::vector& primitive); +PrimUniquePtr GetAvgPoolPrimitive(const std::vector& primitive); +PrimUniquePtr GetConcatPrimitive(const std::vector& primitive); +PrimUniquePtr GetConv2dPrimitive(const std::vector& primitive); +PrimUniquePtr GetFullConnectionPrimitive(const std::vector& primitive); +PrimUniquePtr GetMaxPoolFusionPrimitive(const std::vector& primitive); +PrimUniquePtr GetMatMulFusionPrimitive(const std::vector& primitive); +PrimUniquePtr GetSoftmaxPrimitive(const std::vector& primitive); +PrimUniquePtr GetReshapePrimitive(const std::vector& primitive); +PrimUniquePtr GetScaleFusionPrimitive(const std::vector& primitive); +PrimUniquePtr GetActivationPrimitive(const std::vector& primitive); +PrimUniquePtr GetQuantDTypeCastPrimitive(const std::vector& primitive); +PrimUniquePtr GetMulFusionPrimitive(const std::vector& primitive); +} // namespace V2_0 +} // namespace Nnrt +} // namespace HDI +} // namespace OHOS +#endif // OHOS_HDI_NNR_NODE_FUNCTIONS_H \ No newline at end of file diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/include/node_registry.h b/example/drivers/nnrt/v2_0/hdi_cpu_service/include/node_registry.h new file mode 100644 index 0000000..17d4b51 --- /dev/null +++ b/example/drivers/nnrt/v2_0/hdi_cpu_service/include/node_registry.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef OHOS_HDI_NNR_NODE_REGISTRY_H +#define OHOS_HDI_NNR_NODE_REGISTRY_H + +#include +#include +#include + +#include "v2_0/nnrt_types.h" +#include "mindspore_schema/model_generated.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V2_0 { +using PrimUniquePtr = std::unique_ptr; +class NodeRegistry { +public: + struct Registrar { + Registrar() = delete; + Registrar(NodeType type, std::function&)> nodeFunc); + }; + +public: + static NodeRegistry& GetSingleton(); + std::function&)> GetNodeFunc(NodeType type) const; + bool IsNodeTypeExist(NodeType type) const; + +private: + NodeRegistry() {}; + NodeRegistry(const NodeRegistry&) = delete; + NodeRegistry& operator=(const NodeRegistry&) = delete; + +private: + std::unordered_map&)>> m_nodeRegs; +}; + +#define REGISTER_NODE(nodeName, nodeType, funcPtr) static NodeRegistry::Registrar g_##nodeName(nodeType, funcPtr) +} // namespace V2_0 +} // namespace Nnrt +} // namespace HDI +} // namespace OHOS +#endif // OHOS_HDI_NNR_NODE_REGISTRY_H \ No newline at end of file diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/include/prepared_model_service.h b/example/drivers/nnrt/v2_0/hdi_cpu_service/include/prepared_model_service.h new file mode 100644 index 0000000..c52ed06 --- /dev/null +++ b/example/drivers/nnrt/v2_0/hdi_cpu_service/include/prepared_model_service.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef OHOS_HDI_NNR_V2_0_PREPAREDMODELSERVICE_H +#define OHOS_HDI_NNR_V2_0_PREPAREDMODELSERVICE_H + +#include "v2_0/iprepared_model.h" +#include "include/api/data_type.h" +#include "include/api/context.h" +#include "include/api/types.h" +#include "include/api/model.h" +#include "mindspore_schema/model_generated.h" +#include "ashmem.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V2_0 { +constexpr int DYNAMIC_SHAPE_FLAG = -1; +class PreparedModelService : public IPreparedModel { +public: + PreparedModelService() = default; + + virtual ~PreparedModelService(); + + explicit PreparedModelService(std::shared_ptr context); + + int32_t Compile(std::shared_ptr graph); + + int32_t Compile(const void* modelBuffer, size_t length); + + int32_t ExportModelCache(std::vector& modelCache) override; + + int32_t Run(const std::vector& inputs, const std::vector& outputs, + std::vector>& outputsDims, std::vector& isOutputBufferEnough) override; + + int32_t GetInputDimRanges(std::vector>& minInputDims, + std::vector>& maxInputDims) override; + +private: + int32_t SetInputs(const std::vector& inputs); + int32_t SetOutputs(const std::vector& outputs); + int32_t GetMSInputsAndOutputs(); + int32_t CompareTensor(const IOTensor& tensor, const mindspore::MSTensor& msTensor); + sptr ParseBuffer(const SharedBuffer& buffer); + int32_t UpdateOutput(const std::vector& outputs, + std::vector>& outputsDims, std::vector& isOutputBufferEnough); + void ResetInputAndOutput(); + +private: + std::shared_ptr m_graph {nullptr}; + std::shared_ptr m_context {nullptr}; + flatbuffers::FlatBufferBuilder m_builder; + std::shared_ptr m_model {nullptr}; + sptr m_cacheBuffer {nullptr}; + std::vector> m_inputAshmems; + std::vector m_inputs; + std::vector> m_outputAshmems; + std::vector m_outputs; + std::vector> m_inputDims; + bool m_isDynamicShape {false}; +}; +} // V2_0 +} // Nnrt +} // HDI +} // OHOS + +#endif // OHOS_HDI_NNR_V2_0_PREPAREDMODELSERVICE_H \ No newline at end of file diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/include/shared_buffer_parser.h b/example/drivers/nnrt/v2_0/hdi_cpu_service/include/shared_buffer_parser.h new file mode 100644 index 0000000..8e74154 --- /dev/null +++ b/example/drivers/nnrt/v2_0/hdi_cpu_service/include/shared_buffer_parser.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef OHOS_HDI_NNR_V2_0_SHARED_BUFFER_PARSER_H +#define OHOS_HDI_NNR_V2_0_SHARED_BUFFER_PARSER_H + +#include "ashmem.h" +#include "v2_0/nnrt_types.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V2_0 { +namespace { +const int INVALID_FD = -1; +} + +class SharedBufferParser { +public: + SharedBufferParser() {}; + ~SharedBufferParser(); + + int32_t Init(const SharedBuffer& buffer); + int32_t Init(const std::string& name, int32_t size); + void* GetBufferPtr(); + SharedBuffer GetBuffer(); + +private: + SharedBuffer m_buffer; + sptr m_ashptr {nullptr}; + void* m_bufferAddr {nullptr}; +}; +} // V2_0 +} // Nnrt +} // HDI +} // OHOS +#endif // OHOS_HDI_NNR_V2_0_SHARED_BUFFER_PARSER_H \ No newline at end of file diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/include/validation.h b/example/drivers/nnrt/v2_0/hdi_cpu_service/include/validation.h new file mode 100644 index 0000000..ffcdf50 --- /dev/null +++ b/example/drivers/nnrt/v2_0/hdi_cpu_service/include/validation.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef OHOS_HDI_NNRT_VALIDATION_H +#define OHOS_HDI_NNRT_VALIDATION_H + +#include "v2_0/nnrt_types.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V2_0 { +int32_t ValidatePerformanceMode(PerformanceMode mode); +int32_t ValidatePriority(Priority priority); +int32_t ValidateDataType(DataType dataType); +int32_t ValidateFormat(Format format); +} // namespace V2_0 +} // namespace Nnrt +} // namespace HDI +} // namespace OHOS +#endif // OHOS_HDI_NNRT_VALIDATION_H \ No newline at end of file diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/src/nnrt_device_driver.cpp b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/nnrt_device_driver.cpp new file mode 100644 index 0000000..fab6e89 --- /dev/null +++ b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/nnrt_device_driver.cpp @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include +#include "v2_0/nnrt_device_stub.h" + +using namespace OHOS::HDI::Nnrt::V2_0; + +struct HdfNnrtDeviceHost { + struct IDeviceIoService ioService; + OHOS::sptr stub; +}; + +static int32_t NnrtDeviceDriverDispatch(struct HdfDeviceIoClient *client, int cmdId, struct HdfSBuf *data, + struct HdfSBuf *reply) +{ + auto *hdfNnrtDeviceHost = CONTAINER_OF(client->device->service, struct HdfNnrtDeviceHost, ioService); + + OHOS::MessageParcel *dataParcel = nullptr; + OHOS::MessageParcel *replyParcel = nullptr; + OHOS::MessageOption option; + + if (SbufToParcel(data, &dataParcel) != HDF_SUCCESS) { + HDF_LOGE("%{public}s:invalid data sbuf object to dispatch", __func__); + return HDF_ERR_INVALID_PARAM; + } + if (SbufToParcel(reply, &replyParcel) != HDF_SUCCESS) { + HDF_LOGE("%{public}s:invalid reply sbuf object to dispatch", __func__); + return HDF_ERR_INVALID_PARAM; + } + + return hdfNnrtDeviceHost->stub->SendRequest(cmdId, *dataParcel, *replyParcel, option); +} + +static int HdfNnrtDeviceDriverInit(struct HdfDeviceObject *deviceObject) +{ + HDF_LOGI("HdfNnrtDeviceDriverInit enter"); + return HDF_SUCCESS; +} + +static int HdfNnrtDeviceDriverBind(struct HdfDeviceObject *deviceObject) +{ + HDF_LOGI("HdfNnrtDeviceDriverBind enter"); + + auto *hdfNnrtDeviceHost = new (std::nothrow) HdfNnrtDeviceHost; + if (hdfNnrtDeviceHost == nullptr) { + HDF_LOGE("%{public}s: failed to create create HdfNnrtDeviceHost object", __func__); + return HDF_FAILURE; + } + + hdfNnrtDeviceHost->ioService.Dispatch = NnrtDeviceDriverDispatch; + hdfNnrtDeviceHost->ioService.Open = NULL; + hdfNnrtDeviceHost->ioService.Release = NULL; + + auto serviceImpl = INnrtDevice::Get(true); + if (serviceImpl == nullptr) { + HDF_LOGE("%{public}s: failed to get of implement service", __func__); + delete hdfNnrtDeviceHost; + return HDF_FAILURE; + } + + hdfNnrtDeviceHost->stub = OHOS::HDI::ObjectCollector::GetInstance().GetOrNewObject(serviceImpl, + INnrtDevice::GetDescriptor()); + if (hdfNnrtDeviceHost->stub == nullptr) { + HDF_LOGE("%{public}s: failed to get stub object", __func__); + delete hdfNnrtDeviceHost; + return HDF_FAILURE; + } + + deviceObject->service = &hdfNnrtDeviceHost->ioService; + return HDF_SUCCESS; +} + +static void HdfNnrtDeviceDriverRelease(struct HdfDeviceObject *deviceObject) +{ + HDF_LOGI("HdfNnrtDeviceDriverRelease enter"); + if (deviceObject->service == nullptr) { + HDF_LOGE("HdfNnrtDeviceDriverRelease not initted"); + return; + } + + auto *hdfNnrtDeviceHost = CONTAINER_OF(deviceObject->service, struct HdfNnrtDeviceHost, ioService); + delete hdfNnrtDeviceHost; +} + +struct HdfDriverEntry g_nnrtdeviceDriverEntry = { + .moduleVersion = 2, + .moduleName = "nnrt", + .Bind = HdfNnrtDeviceDriverBind, + .Init = HdfNnrtDeviceDriverInit, + .Release = HdfNnrtDeviceDriverRelease, +}; + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ +HDF_INIT(g_nnrtdeviceDriverEntry); +#ifdef __cplusplus +} +#endif /* __cplusplus */ \ No newline at end of file diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/src/nnrt_device_service.cpp b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/nnrt_device_service.cpp new file mode 100644 index 0000000..77ca239 --- /dev/null +++ b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/nnrt_device_service.cpp @@ -0,0 +1,529 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnrt_device_service.h" + +#include +#include "hdf_log.h" +#include "ashmem.h" +#include "securec.h" + +#include "node_registry.h" +#include "prepared_model_service.h" +#include "shared_buffer_parser.h" +#include "validation.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V2_0 { +extern "C" INnrtDevice *NnrtDeviceImplGetInstance(void) +{ + return new (std::nothrow) NnrtDeviceService(); +} + +NnrtDeviceService::~NnrtDeviceService() +{ + for (auto ash : m_ashmems) { + ash.second->UnmapAshmem(); + ash.second->CloseAshmem(); + } +} + +int32_t NnrtDeviceService::GetDeviceName(std::string& name) +{ + name = "RK3568-CPU"; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::GetVendorName(std::string& name) +{ + name = "Rockchip"; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::GetDeviceType(DeviceType& deviceType) +{ + deviceType = DeviceType::CPU; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::GetDeviceStatus(DeviceStatus& status) +{ + status = DeviceStatus::AVAILABLE; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::GetSupportedOperation(const Model& model, std::vector& ops) +{ + size_t nodeSize = model.nodes.size(); + auto nodes = model.nodes; + ops.resize(nodeSize, false); + auto& regInstance = NodeRegistry::GetSingleton(); + for (size_t i = 0; i < nodeSize; i++) { + ops[i] = regInstance.IsNodeTypeExist(nodes[i].nodeType); + } + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::IsFloat16PrecisionSupported(bool& isSupported) +{ + isSupported = true; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::IsPerformanceModeSupported(bool& isSupported) +{ + isSupported = true; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::IsPrioritySupported(bool& isSupported) +{ + isSupported = false; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::IsDynamicInputSupported(bool& isSupported) +{ + isSupported = true; + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::ShowCustomAttributes(const std::map>& extensions) const +{ + float attr1{0.0}; + std::string attr2; + + auto ret = ParseCustomAttributes(extensions, attr1, attr2); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parsing custom attributes failed."); + return ret; + } + + if (attr1 != 0.0f) { + HDF_LOGI("Set attr1: %f", attr1); + } + + if (!attr2.empty()) { + HDF_LOGI("Set attr2: %s", attr2.c_str()); + } + + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::PrepareModel(const Model& model, const ModelConfig& config, + sptr& preparedModel) +{ + auto ret = ValidateModel(model); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Model is invalid."); + return ret; + } + + auto graph = TransModelToGraph(model); + if (graph == nullptr) { + HDF_LOGE("Transfrom model to graph failed."); + return HDF_ERR_INVALID_PARAM; + } + + ret = ValidateModelConfig(config); + if (ret != HDF_SUCCESS) { + HDF_LOGE("ModelConfig is invalid."); + return ret; + } + + ret = ShowCustomAttributes(config.extensions); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Showing custom attributes failed."); + return ret; + } + + auto context = TransModelConfig(config); + sptr service = new (std::nothrow) 
PreparedModelService(context);
+    if (service == nullptr) {
+        HDF_LOGE("Create new PreparedModelService instance failed.");
+        return HDF_ERR_MALLOC_FAIL;
+    }
+
+    ret = service->Compile(graph);
+    if (ret != HDF_SUCCESS) {
+        HDF_LOGE("Prepared model failed.");
+        return ret;
+    }
+
+    preparedModel = service;
+    return HDF_SUCCESS;
+}
+
+int32_t NnrtDeviceService::IsModelCacheSupported(bool& isSupported)
+{
+    isSupported = true;
+    return HDF_SUCCESS;
+}
+
+int32_t NnrtDeviceService::PrepareModelFromModelCache(const std::vector<SharedBuffer>& modelCache,
+    const ModelConfig& config, sptr<IPreparedModel>& preparedModel)
+{
+    HDF_LOGD("Using cache to prepare model.");
+
+    // The modelCache vector must hold exactly one element, because PreparedModel exports only one cache file.
+    if (modelCache.size() != 1) {
+        HDF_LOGE("The size of the modelCache vector is invalid; it should contain exactly one element.");
+        return HDF_ERR_INVALID_PARAM;
+    }
+
+    SharedBufferParser parser;
+    auto ret = parser.Init(modelCache[0]);
+    if (ret != HDF_SUCCESS) {
+        HDF_LOGE("Parse model buffer failed.");
+        return HDF_ERR_INVALID_PARAM;
+    }
+
+    ret = ValidateModelConfig(config);
+    if (ret != HDF_SUCCESS) {
+        HDF_LOGE("ModelConfig is invalid.");
+        return ret;
+    }
+
+    ret = ShowCustomAttributes(config.extensions);
+    if (ret != HDF_SUCCESS) {
+        HDF_LOGE("Showing custom attributes failed.");
+        return ret;
+    }
+
+    auto context = TransModelConfig(config);
+    sptr<PreparedModelService> service = new (std::nothrow) PreparedModelService(context);
+    if (service == nullptr) {
+        HDF_LOGE("Create new PreparedModelService instance failed.");
+        return HDF_ERR_MALLOC_FAIL;
+    }
+
+    void* modelBuffer = parser.GetBufferPtr();
+    ret = service->Compile(modelBuffer, modelCache[0].dataSize);
+    if (ret != HDF_SUCCESS) {
+        HDF_LOGE("Prepared model failed.");
+        return ret;
+    }
+
+    preparedModel = service;
+    return HDF_SUCCESS;
+}
+
+int32_t NnrtDeviceService::AllocateBuffer(uint32_t length, SharedBuffer& buffer)
+{
+    sptr<Ashmem> ashptr = Ashmem::CreateAshmem("allocateBuffer", length);
+    if (ashptr == nullptr) {
+        HDF_LOGE("Create shared memory failed.");
+        return HDF_FAILURE;
+    }
+
+    if (!ashptr->MapReadAndWriteAshmem()) {
+        HDF_LOGE("Map allocate buffer failed.");
+        return HDF_FAILURE;
+    }
+
+    buffer.fd = ashptr->GetAshmemFd();
+    buffer.bufferSize = ashptr->GetAshmemSize();
+    buffer.offset = 0;
+    buffer.dataSize = length;
+
+    m_ashmems[buffer.fd] = ashptr;
+    return HDF_SUCCESS;
+}
+
+int32_t NnrtDeviceService::ReleaseBuffer(const SharedBuffer& buffer)
+{
+    // The parser will close the current fd.
+    SharedBufferParser parser;
+    auto ret = parser.Init(buffer);
+    if (ret != HDF_SUCCESS) {
+        HDF_LOGE("Parse buffer failed.");
+        return HDF_ERR_INVALID_PARAM;
+    }
+
+    for (auto& ash : m_ashmems) {
+        ash.second->UnmapAshmem();
+        ash.second->CloseAshmem();
+    }
+    m_ashmems.clear();
+    return HDF_SUCCESS;
+}
+
+int32_t NnrtDeviceService::ValidateModelConfig(const ModelConfig& config) const
+{
+    if (!ValidatePerformanceMode(config.mode)) {
+        HDF_LOGE("PerformanceMode is invalid. mode=%d", config.mode);
+        return HDF_ERR_INVALID_PARAM;
+    }
+
+    if (!ValidatePriority(config.priority)) {
+        HDF_LOGE("Priority is invalid.
priority=%d", config.priority); + return HDF_ERR_INVALID_PARAM; + } + + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::ValidateModel(const Model& model) const +{ + if (model.allTensors.empty()) { + HDF_LOGE("Model has no tensors."); + return HDF_ERR_INVALID_PARAM; + } + + if (model.subGraph.empty()) { + HDF_LOGE("Model has no subGraphs."); + return HDF_ERR_INVALID_PARAM; + } + + if (model.nodes.empty()) { + HDF_LOGE("Model has no nodes."); + return HDF_ERR_INVALID_PARAM; + } + + if (model.inputIndex.empty()) { + HDF_LOGE("Model has no input."); + return HDF_ERR_INVALID_PARAM; + } + + if (model.outputIndex.empty()) { + HDF_LOGE("Model has no output."); + return HDF_ERR_INVALID_PARAM; + } + + size_t tensorSize = model.allTensors.size(); + for (auto index : model.inputIndex) { + if (index > tensorSize) { + HDF_LOGE("Input index is invalid, index=%u", index); + return HDF_ERR_INVALID_PARAM; + } + } + + for (auto index : model.outputIndex) { + if (index > tensorSize) { + HDF_LOGE("Output index is invalid, index=%u", index); + return HDF_ERR_INVALID_PARAM; + } + } + + return HDF_SUCCESS; +} + +std::shared_ptr NnrtDeviceService::TransModelToGraph(const Model& model) const +{ + auto metaGraph = std::make_shared(); + metaGraph->name = model.name; + metaGraph->version = mindspore::Version(); + + std::unique_ptr transTensor{nullptr}; + for (auto tensor : model.allTensors) { + transTensor = TransTensor(tensor); + if (transTensor == nullptr) { + HDF_LOGE("Transform tensor failed."); + return nullptr; + } + metaGraph->allTensors.emplace_back(std::move(transTensor)); + } + metaGraph->inputIndex = model.inputIndex; + metaGraph->outputIndex = model.outputIndex; + + // Transform node + std::unique_ptr transNode {nullptr}; + for (auto& node : model.nodes) { + transNode = TransNode(node); + if (transNode == nullptr) { + HDF_LOGE("Transform node failed, node name=%{public}s", node.name.c_str()); + return nullptr; + } + metaGraph->nodes.emplace_back(std::move(transNode)); + } + + // Transform subgraph + const size_t numTensor = model.allTensors.size(); + for (auto graph : model.subGraph) { + metaGraph->subGraph.emplace_back(TransSubGraph(graph, numTensor)); + } + return metaGraph; +} + +std::unique_ptr NnrtDeviceService::TransTensor(const Tensor& tensor) const +{ + if (!ValidateDataType(tensor.dataType)) { + HDF_LOGE("DataType of tensor is invalid. dataType=%d", tensor.dataType); + return nullptr; + } + + if (!ValidateFormat(tensor.format)) { + HDF_LOGE("Format of tensor is invalid. 
format=%d", tensor.format); + return nullptr; + } + + auto schemaTensor = std::make_unique(); + schemaTensor->name = tensor.name; + schemaTensor->dataType = static_cast(tensor.dataType); + schemaTensor->format = static_cast(tensor.format); + schemaTensor->dims = tensor.dims; + for (auto param : tensor.quantParams) { + auto quantParam = std::make_unique(); + quantParam->scale = param.scale; + quantParam->zeroPoint = param.zeroPoint; + quantParam->numBits = param.numBits; + quantParam->inited = true; + schemaTensor->quantParams.emplace_back(std::move(quantParam)); + } + + if (tensor.data.fd != INVALID_FD) { + SharedBufferParser parser; + auto ret = parser.Init(tensor.data); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse tensor data failed."); + return nullptr; + } + + auto data = parser.GetBufferPtr(); + schemaTensor->data.resize(tensor.data.dataSize); + auto memRet = memcpy_s(const_cast(schemaTensor->data.data()), + tensor.data.dataSize, data, tensor.data.dataSize); + if (memRet != EOK) { + HDF_LOGW("Copy tensor data failed."); + return nullptr; + } + } + return schemaTensor; +} + +std::unique_ptr NnrtDeviceService::TransNode(const Node& node) const +{ + auto cnode = std::make_unique(); + cnode->name = node.name; + cnode->inputIndex = node.inputIndex; + cnode->outputIndex = node.outputIndex; + cnode->quantType = static_cast(node.quantType); + + auto& regInstance = NodeRegistry::GetSingleton(); + auto parseFunc = regInstance.GetNodeFunc(node.nodeType); + auto primitive = parseFunc(node.nodeAttr); + if (primitive == nullptr) { + HDF_LOGE("Parse primitve data failed. node name=%{public}s", node.name.c_str()); + return nullptr; + } + + cnode->primitive = std::move(primitive); + return cnode; +} + +std::unique_ptr NnrtDeviceService::TransSubGraph(const SubGraph& graph, + const size_t numTensor) const +{ + auto subGraph = std::make_unique(); + subGraph->name = graph.name; + subGraph->inputIndices = graph.inputIndices; + subGraph->outputIndices = graph.outputIndices; + subGraph->nodeIndices = graph.nodeIndices; + subGraph->tensorIndices.reserve(numTensor); + for (size_t i = 0; i < numTensor; i++) { + subGraph->tensorIndices.emplace_back(static_cast(i)); + } + return subGraph; +} + +std::shared_ptr NnrtDeviceService::TransModelConfig(const ModelConfig& config) const +{ + auto context = std::make_shared(); + const int cpuThreadNum = 2; + const int cpuNoAffinities = 0; + const int cpuBigCore = 1; + const int cpuLittleCore = 2; + context->SetThreadNum(cpuThreadNum); + + int mode = cpuNoAffinities; + switch (config.mode) { + case PerformanceMode::PERFORMANCE_LOW: + case PerformanceMode::PERFORMANCE_MEDIUM: + mode = cpuLittleCore; + break; + case PerformanceMode::PERFORMANCE_HIGH: + case PerformanceMode::PERFORMANCE_EXTREME: + mode = cpuBigCore; + break; + default: + mode = cpuNoAffinities; + } + context->SetThreadAffinity(mode); + + auto cpuInfo = std::make_shared(); + cpuInfo->SetEnableFP16(config.enableFloat16); + auto& deviceInfos = context->MutableDeviceInfo(); + deviceInfos.emplace_back(cpuInfo); + return context; +} + +int32_t NnrtDeviceService::ConvertVecToFloat(std::vector vecFloat, float& result) const +{ + if (vecFloat.size() != sizeof(float)) { + HDF_LOGE("Size of the int8_t vector dose not match a float value."); + return HDF_ERR_INVALID_PARAM; + } + + result = *(reinterpret_cast(vecFloat.data())); + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::ConvertVecToString(std::vector vecFloat, std::string& result) const +{ + if (vecFloat.empty()) { + HDF_LOGE("int8_t vector is empty."); + 
return HDF_ERR_INVALID_PARAM;
+    }
+
+    result = reinterpret_cast<const char*>(vecFloat.data());
+    return HDF_SUCCESS;
+}
+
+int32_t NnrtDeviceService::ParseCustomAttributes(const std::map<std::string, std::vector<int8_t>>& extensions,
+    float& attr1, std::string& attr2) const
+{
+    int32_t ret;
+    for (auto extension : extensions) {
+        if (extension.first == "attr1") {
+            ret = ConvertVecToFloat(extension.second, attr1);
+            if (ret != HDF_SUCCESS) {
+                HDF_LOGE("ConvertVecToFloat failed.");
+                return ret;
+            }
+            if (attr1 <= 0.0f || attr1 > 1.0f) {
+                HDF_LOGE("attr1 is out of range (0,1].");
+                return HDF_ERR_INVALID_PARAM;
+            }
+        } else if (extension.first == "attr2") {
+            ret = ConvertVecToString(extension.second, attr2);
+            if (ret != HDF_SUCCESS) {
+                HDF_LOGE("ConvertVecToString failed.");
+                return ret;
+            }
+            if (attr2 != "LOW" && attr2 != "HIGH") {
+                HDF_LOGE("attr2 is neither LOW nor HIGH.");
+                return HDF_ERR_INVALID_PARAM;
+            }
+        }
+    }
+
+    return HDF_SUCCESS;
+}
+} // V2_0
+} // Nnrt
+} // HDI
+} // OHOS
diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/src/node_functions.cpp b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/node_functions.cpp
new file mode 100644
index 0000000..fb7a701
--- /dev/null
+++ b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/node_functions.cpp
@@ -0,0 +1,373 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include "node_functions.h" + +#include "node_registry.h" +#include +#include + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V2_0 { +PrimUniquePtr GetAddPrimitive(const std::vector& primitive) +{ + AddFusion addAttr; + auto ret = ParsePrimitive(primitive, addAttr, AddFusionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of AddFusion operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_AddFusion; + auto attr = new (std::nothrow) mindspore::schema::AddFusionT; + if (attr == nullptr) { + HDF_LOGE("Create AddFusion primitive failed."); + return nullptr; + } + attr->activation_type = static_cast(addAttr.activationType); + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetAvgPoolPrimitive(const std::vector& primitive) +{ + AvgPoolFusion avgPoolAttr; + auto ret = ParsePrimitive(primitive, avgPoolAttr, AvgPoolFusionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of AvgPoolFusion operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_AvgPoolFusion; + + auto attr = new (std::nothrow) mindspore::schema::AvgPoolFusionT; + if (attr == nullptr) { + HDF_LOGE("Create AvgPoolFusion primitive failed."); + return nullptr; + } + attr->kernel_size = avgPoolAttr.kernelSize; + attr->strides = avgPoolAttr.strides; + attr->pad = avgPoolAttr.pad; + attr->pad_mode = static_cast(avgPoolAttr.padMode); + attr->round_mode = static_cast(avgPoolAttr.roundMode); + attr->format = static_cast(avgPoolAttr.format); + attr->global = avgPoolAttr.global; + attr->activation_type = static_cast(avgPoolAttr.activationType); + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetConcatPrimitive(const std::vector& primitive) +{ + Concat concatAttr; + auto ret = ParsePrimitive(primitive, concatAttr, ConcatBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of Concat operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_Concat; + + auto attr = new (std::nothrow) mindspore::schema::ConcatT; + if (attr == nullptr) { + HDF_LOGE("Create concat primitive failed."); + return nullptr; + } + attr->axis = concatAttr.axis; + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetConv2dPrimitive(const std::vector& primitive) +{ + Conv2DFusion conv2dAttr; + auto ret = ParsePrimitive(primitive, conv2dAttr, Conv2DFusionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of Conv2DFusion operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_Conv2DFusion; + + auto attr = new (std::nothrow) mindspore::schema::Conv2DFusionT; + if (attr == nullptr) { + HDF_LOGE("Create Conv2DFusion primitive failed."); + return nullptr; + } + + attr->kernel_size = conv2dAttr.kernelSize; + attr->stride = conv2dAttr.stride; + attr->dilation = conv2dAttr.dilation; + attr->pad_mode = static_cast(conv2dAttr.padMode); + attr->pad_list = conv2dAttr.padList; + attr->group = conv2dAttr.group; + attr->in_channel = conv2dAttr.inChannel; + attr->out_channel = conv2dAttr.outChannel; + attr->activation_type = static_cast(conv2dAttr.activationType); + + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetFullConnectionPrimitive(const std::vector& primitive) +{ + FullConnection 
fullConnAttr; + auto ret = ParsePrimitive(primitive, fullConnAttr, FullConnectionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of FullConnection operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_FullConnection; + + auto attr = new (std::nothrow) mindspore::schema::FullConnectionT; + if (attr == nullptr) { + HDF_LOGE("Create FullConnection primitive failed."); + return nullptr; + } + + attr->has_bias = fullConnAttr.hasBias; + attr->use_axis = fullConnAttr.useAxis; + attr->axis = fullConnAttr.axis; + attr->activation_type = static_cast(fullConnAttr.activationType); + + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetMaxPoolFusionPrimitive(const std::vector& primitive) +{ + MaxPoolFusion maxPoolAttr; + auto ret = ParsePrimitive(primitive, maxPoolAttr, MaxPoolFusionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of MaxPoolFusion operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_MaxPoolFusion; + + auto attr = new (std::nothrow) mindspore::schema::MaxPoolFusionT; + if (attr == nullptr) { + HDF_LOGE("Create MaxPoolFusion primitive failed."); + return nullptr; + } + + attr->kernel_size = maxPoolAttr.kernelSize; + attr->strides = maxPoolAttr.strides; + attr->pad = maxPoolAttr.pad; + attr->pad_mode = static_cast(maxPoolAttr.padMode); + attr->format = static_cast(maxPoolAttr.format); + attr->global = maxPoolAttr.global; + attr->activation_type = static_cast(maxPoolAttr.activationType); + + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetMatMulFusionPrimitive(const std::vector& primitive) +{ + MatMulFusion matmulAttr; + auto ret = ParsePrimitive(primitive, matmulAttr, MatMulFusionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of MatMulFusion operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_MatMulFusion; + + auto attr = new (std::nothrow) mindspore::schema::MatMulFusionT; + if (attr == nullptr) { + HDF_LOGE("Create MatMulFusion primitive failed."); + return nullptr; + } + + attr->transpose_a = matmulAttr.transposeA; + attr->transpose_b = matmulAttr.transposeB; + attr->activation_type = static_cast(matmulAttr.activationType); + + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetSoftmaxPrimitive(const std::vector& primitive) +{ + Softmax softmaxAttr; + auto ret = ParsePrimitive(primitive, softmaxAttr, SoftmaxBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of Softmax operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_Softmax; + + auto attr = new (std::nothrow) mindspore::schema::SoftmaxT; + if (attr == nullptr) { + HDF_LOGE("Create Softmax primitive failed."); + return nullptr; + } + + attr->axis = softmaxAttr.axis; + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetReshapePrimitive(const std::vector& primitive) +{ + Reshape reshapeAttr; + auto ret = ParsePrimitive(primitive, reshapeAttr, ReshapeBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of Reshape operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_Reshape; + + auto attr = new (std::nothrow) 
mindspore::schema::ReshapeT; + if (attr == nullptr) { + HDF_LOGE("Create Reshape primitive failed."); + return nullptr; + } + + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetScaleFusionPrimitive(const std::vector& primitive) +{ + ScaleFusion scaleAttr; + auto ret = ParsePrimitive(primitive, scaleAttr, ScaleFusionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of ScaleFusion operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_ScaleFusion; + + auto attr = new (std::nothrow) mindspore::schema::ScaleFusionT; + if (attr == nullptr) { + HDF_LOGE("Create ScaleFusion primitive failed."); + return nullptr; + } + + attr->axis = scaleAttr.axis; + attr->activation_type = static_cast(scaleAttr.activationType); + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetActivationPrimitive(const std::vector& primitive) +{ + Activation actAttr; + auto ret = ParsePrimitive(primitive, actAttr, ActivationBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of Activation operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_Activation; + + auto attr = new (std::nothrow) mindspore::schema::ActivationT; + if (attr == nullptr) { + HDF_LOGE("Create Activation primitive failed."); + return nullptr; + } + + attr->alpha = actAttr.alpha; + attr->min_val = actAttr.minVal; + attr->max_val = actAttr.maxVal; + attr->approximate = actAttr.approximate; + attr->activation_type = static_cast(actAttr.activationType); + + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetQuantDTypeCastPrimitive(const std::vector& primitive) +{ + QuantDTypeCast quantAttr; + auto ret = ParsePrimitive(primitive, quantAttr, QuantDTypeCastBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of QuantDTypeCast operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_QuantDTypeCast; + + auto attr = new (std::nothrow) mindspore::schema::QuantDTypeCastT; + if (attr == nullptr) { + HDF_LOGE("Create QuantDTypeCast primitive failed."); + return nullptr; + } + + attr->src_t = quantAttr.srcT; + attr->dst_t = quantAttr.dstT; + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetMulFusionPrimitive(const std::vector& primitive) +{ + MulFusion mulAttr; + auto ret = ParsePrimitive(primitive, mulAttr, MulFusionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of MulFusion operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_MulFusion; + + auto attr = new (std::nothrow) mindspore::schema::MulFusionT; + if (attr == nullptr) { + HDF_LOGE("Create MulFusion primitive failed."); + return nullptr; + } + + attr->activation_type = static_cast(mulAttr.activationType); + prim->value.value = attr; + return prim; +} + +REGISTER_NODE(Activation, NodeType::NODE_TYPE_ACTIVATION, GetActivationPrimitive); +REGISTER_NODE(AddFusion, NodeType::NODE_TYPE_ADD_FUSION, GetAddPrimitive); +REGISTER_NODE(AvgPoolFusion, NodeType::NODE_TYPE_AVGPOOL_FUSION, GetAvgPoolPrimitive); +REGISTER_NODE(Concat, NodeType::NODE_TYPE_CONCAT, GetConcatPrimitive); +REGISTER_NODE(Conv2DFusion, NodeType::NODE_TYPE_CONV2D_FUSION, GetConv2dPrimitive); +REGISTER_NODE(FullConnection, NodeType::NODE_TYPE_FULL_CONNECTION, 
+
+REGISTER_NODE(Activation, NodeType::NODE_TYPE_ACTIVATION, GetActivationPrimitive);
+REGISTER_NODE(AddFusion, NodeType::NODE_TYPE_ADD_FUSION, GetAddPrimitive);
+REGISTER_NODE(AvgPoolFusion, NodeType::NODE_TYPE_AVGPOOL_FUSION, GetAvgPoolPrimitive);
+REGISTER_NODE(Concat, NodeType::NODE_TYPE_CONCAT, GetConcatPrimitive);
+REGISTER_NODE(Conv2DFusion, NodeType::NODE_TYPE_CONV2D_FUSION, GetConv2dPrimitive);
+REGISTER_NODE(FullConnection, NodeType::NODE_TYPE_FULL_CONNECTION, GetFullConnectionPrimitive);
+REGISTER_NODE(MaxPoolFusion, NodeType::NODE_TYPE_MAX_POOL_FUSION, GetMaxPoolFusionPrimitive);
+REGISTER_NODE(MatMulFusion, NodeType::NODE_TYPE_MATMUL_FUSION, GetMatMulFusionPrimitive);
+REGISTER_NODE(Reshape, NodeType::NODE_TYPE_RESHAPE, GetReshapePrimitive);
+REGISTER_NODE(Softmax, NodeType::NODE_TYPE_SOFTMAX, GetSoftmaxPrimitive);
+REGISTER_NODE(ScaleFusion, NodeType::NODE_TYPE_SCALE_FUSION, GetScaleFusionPrimitive);
+REGISTER_NODE(QuantDTypeCast, NodeType::NODE_TYPE_QUANT_DTYPE_CAST, GetQuantDTypeCastPrimitive);
+REGISTER_NODE(MulFusion, NodeType::NODE_TYPE_MUL_FUSION, GetMulFusionPrimitive);
+} // namespace V2_0
+} // namespace Nnrt
+} // namespace HDI
+} // namespace OHOS
\ No newline at end of file
diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/src/node_registry.cpp b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/node_registry.cpp
new file mode 100644
index 0000000..f6537ad
--- /dev/null
+++ b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/node_registry.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "node_registry.h"
+
+#include "hdf_log.h"
+
+namespace OHOS {
+namespace HDI {
+namespace Nnrt {
+namespace V2_0 {
+NodeRegistry& NodeRegistry::GetSingleton()
+{
+    static NodeRegistry registry;
+    return registry;
+}
+
+NodeRegistry::Registrar::Registrar(NodeType type, std::function<PrimUniquePtr(const std::vector<int8_t>&)> nodeFunc)
+{
+    auto& registry = NodeRegistry::GetSingleton();
+    if (registry.m_nodeRegs.find(type) != registry.m_nodeRegs.end()) {
+        HDF_LOGW("Node has been registered. nodeType=%d", type);
+    } else {
+        registry.m_nodeRegs[type] = nodeFunc;
+    }
+}
+
+std::function<PrimUniquePtr(const std::vector<int8_t>&)> NodeRegistry::GetNodeFunc(NodeType type) const
+{
+    if (m_nodeRegs.find(type) == m_nodeRegs.end()) {
+        HDF_LOGW("Node type is not found. nodeType=%d", type);
+        return nullptr;
+    }
+
+    return m_nodeRegs.at(type);
+}
+
+bool NodeRegistry::IsNodeTypeExist(NodeType type) const
+{
+    if (m_nodeRegs.find(type) == m_nodeRegs.end()) {
+        return false;
+    }
+    return true;
+}
+} // namespace V2_0
+} // namespace Nnrt
+} // namespace HDI
+} // namespace OHOS
\ No newline at end of file
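For reference, a minimal sketch of how a device service might consume this registry, assuming the caller already holds a node's type and its serialized attribute block; the variable names nodeType and nodeAttr are placeholders, while the registry API follows the file above:

    // Hypothetical caller-side lookup of a primitive parser registered via REGISTER_NODE.
    auto& registry = OHOS::HDI::Nnrt::V2_0::NodeRegistry::GetSingleton();
    if (!registry.IsNodeTypeExist(nodeType)) {
        HDF_LOGE("Unsupported node type.");
        return HDF_ERR_INVALID_PARAM;
    }
    auto parseFunc = registry.GetNodeFunc(nodeType);  // std::function<PrimUniquePtr(const std::vector<int8_t>&)>
    PrimUniquePtr prim = parseFunc(nodeAttr);         // nodeAttr: std::vector<int8_t> carrying the HDI primitive
    if (prim == nullptr) {
        return HDF_ERR_INVALID_PARAM;
    }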
diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/src/prepared_model_service.cpp b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/prepared_model_service.cpp
new file mode 100644
index 0000000..1d3edf6
--- /dev/null
+++ b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/prepared_model_service.cpp
@@ -0,0 +1,461 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "prepared_model_service.h"
+
+#include <algorithm>
+#include "securec.h"
+#include "hdf_log.h"
+
+#include "shared_buffer_parser.h"
+
+namespace OHOS {
+namespace HDI {
+namespace Nnrt {
+namespace V2_0 {
+PreparedModelService::PreparedModelService(std::shared_ptr<mindspore::Context> context)
+    : m_context(context) {}
+
+PreparedModelService::~PreparedModelService()
+{
+    if (m_cacheBuffer != nullptr) {
+        m_cacheBuffer->CloseAshmem();
+    }
+
+    for (auto& inputAsh : m_inputAshmems) {
+        inputAsh->UnmapAshmem();
+        inputAsh->CloseAshmem();
+    }
+
+    for (auto& outputAsh : m_outputAshmems) {
+        outputAsh->UnmapAshmem();
+        outputAsh->CloseAshmem();
+    }
+}
+
+int32_t PreparedModelService::ExportModelCache(std::vector<SharedBuffer>& modelCache)
+{
+    if (!modelCache.empty()) {
+        HDF_LOGE("The parameter of ExportModelCache should be an empty vector.");
+        return HDF_ERR_INVALID_PARAM;
+    }
+
+    if (m_cacheBuffer != nullptr) {
+        auto fd = m_cacheBuffer->GetAshmemFd();
+        auto size = m_cacheBuffer->GetAshmemSize();
+
+        // SharedBuffer: fd, bufferSize, offset, dataSize
+        modelCache.emplace_back(SharedBuffer{fd, size, 0, size});
+        return HDF_SUCCESS;
+    }
+
+    auto size = m_builder.GetSize();
+    auto buffer = m_builder.GetBufferPointer();
+    const char* name = m_graph != nullptr ? m_graph->name.c_str() : "CacheModel";
+    sptr<Ashmem> cache = Ashmem::CreateAshmem(name, size);
+    if (cache == nullptr) {
+        HDF_LOGE("Create shared memory failed.");
+        return HDF_ERR_MALLOC_FAIL;
+    }
+
+    bool ret = cache->MapReadAndWriteAshmem();
+    if (!ret) {
+        HDF_LOGE("Map fd to write cache failed.");
+        return HDF_FAILURE;
+    }
+
+    ret = cache->WriteToAshmem(buffer, size, 0);
+    cache->UnmapAshmem();
+    if (!ret) {
+        HDF_LOGE("Write cache failed.");
+        return HDF_FAILURE;
+    }
+
+    m_cacheBuffer = cache;
+
+    // SharedBuffer: fd, bufferSize, offset, dataSize
+    modelCache.emplace_back(SharedBuffer {cache->GetAshmemFd(), cache->GetAshmemSize(), 0, cache->GetAshmemSize()});
+    return HDF_SUCCESS;
+}
+
+int32_t PreparedModelService::Run(const std::vector<IOTensor>& inputs, const std::vector<IOTensor>& outputs,
+    std::vector<std::vector<int32_t>>& outputsDims, std::vector<bool>& isOutputBufferEnough)
+{
+    auto ret = SetInputs(inputs);
+    if (ret != HDF_SUCCESS) {
+        HDF_LOGE("Inputs tensor is invalid.");
+        return ret;
+    }
+
+    if (!m_isDynamicShape) {
+        ret = SetOutputs(outputs);
+        if (ret != HDF_SUCCESS) {
+            HDF_LOGE("Output tensor is invalid.");
+            ResetInputAndOutput();
+            return ret;
+        }
+    }
+
+    auto msRet = m_model->Predict(m_inputs, &m_outputs);
+    if (msRet != mindspore::kSuccess) {
+        HDF_LOGE("Run model failed.");
+        ResetInputAndOutput();
+        return HDF_FAILURE;
+    }
+
+    ret = UpdateOutput(outputs, outputsDims, isOutputBufferEnough);
+    if (ret != HDF_SUCCESS) {
+        HDF_LOGE("Update output dimension or data failed.");
+        ResetInputAndOutput();
+        return ret;
+    }
+
+    ResetInputAndOutput();
+
+    return HDF_SUCCESS;
+}
+
+int32_t PreparedModelService::GetInputDimRanges(std::vector<std::vector<uint32_t>>& minInputDims,
+    std::vector<std::vector<uint32_t>>& maxInputDims)
+{
+    if (m_inputDims.empty()) {
+        HDF_LOGE("Model has not been prepared yet.");
+        return HDF_ERR_INVALID_PARAM;
+    }
+
+    minInputDims.clear();
+    maxInputDims.clear();
+
+    for (auto inputShape : m_inputDims) {
+        std::vector<uint32_t> minInputShape;
+        std::vector<uint32_t> maxInputShape;
+        for (auto dim : inputShape) {
+            if (dim != DYNAMIC_SHAPE_FLAG) { // Min and max are the same if the dimension is fixed.
+                if (dim <= 0) {
+                    HDF_LOGE("Dimension value is invalid.");
+                    return HDF_ERR_INVALID_PARAM;
+                }
+                minInputShape.push_back(static_cast<uint32_t>(dim));
+                maxInputShape.push_back(static_cast<uint32_t>(dim));
+            } else { // Dimension range is [1, 10].
+                minInputShape.push_back(1);
+                maxInputShape.push_back(10);
+            }
+        }
+        minInputDims.push_back(std::move(minInputShape));
+        maxInputDims.push_back(std::move(maxInputShape));
+    }
+
+    return HDF_SUCCESS;
+}
+
+int32_t PreparedModelService::UpdateOutput(const std::vector<IOTensor>& outputs,
+    std::vector<std::vector<int32_t>>& outputsDims, std::vector<bool>& isOutputBufferEnough)
+{
+    bool isEnough {true};
+    size_t outputSize = m_outputs.size();
+    isOutputBufferEnough.resize(outputSize, true);
+    for (size_t i = 0; i < outputSize; i++) {
+        auto& msOutput = m_outputs[i];
+        auto& output = outputs[i];
+
+        auto msShape = msOutput.Shape();
+        outputsDims.emplace_back(msShape.begin(), msShape.end());
+
+        auto dataSize = msOutput.DataSize();
+        if (dataSize > output.data.bufferSize) {
+            HDF_LOGE("Output buffer is not enough. actual size %{public}zu, buffer size %{public}u",
+                dataSize, output.data.bufferSize);
+            isOutputBufferEnough[i] = false;
+            isEnough = false;
+        }
+
+        if (isEnough && m_isDynamicShape) {
+            auto msData = msOutput.MutableData();
+            SharedBufferParser parser;
+            auto ret = parser.Init(output.data);
+            if (ret != HDF_SUCCESS) {
+                HDF_LOGE("Parse %zuth output data failed.", i);
+                return HDF_ERR_INVALID_PARAM;
+            }
+
+            auto data = parser.GetBufferPtr();
+            auto memRet = memcpy_s(data, dataSize, msData, dataSize);
+            if (memRet != EOK) {
+                HDF_LOGE("Copy output memory failed.");
+                return HDF_FAILURE;
+            }
+        }
+    }
+
+    return HDF_SUCCESS;
+}
+
+void PreparedModelService::ResetInputAndOutput()
+{
+    for (auto& msInput : m_inputs) {
+        msInput.SetData(nullptr);
+    }
+
+    if (!m_isDynamicShape) {
+        for (auto& msOutput : m_outputs) {
+            msOutput.SetData(nullptr);
+        }
+    }
+}
+
+int32_t PreparedModelService::Compile(std::shared_ptr<mindspore::schema::MetaGraphT> graph)
+{
+    if (graph == nullptr) {
+        HDF_LOGE("Graph cannot be nullptr.");
+        return HDF_ERR_INVALID_PARAM;
+    }
+    for (auto i : graph->inputIndex) {
+        auto inputShape = graph->allTensors[i]->dims;
+        auto iter = std::find(inputShape.begin(), inputShape.end(), DYNAMIC_SHAPE_FLAG);
+        if (iter != inputShape.end()) {
+            m_isDynamicShape = true;
+            break;
+        }
+    }
+    auto offset = mindspore::schema::MetaGraph::Pack(m_builder, graph.get());
+    m_builder.Finish(offset);
+    mindspore::schema::FinishMetaGraphBuffer(m_builder, offset);
+    auto modelSize = m_builder.GetSize();
+    uint8_t* modelBuffer = m_builder.GetBufferPointer();
+    if (modelBuffer == nullptr) {
+        HDF_LOGE("Model is invalid.");
+        return HDF_FAILURE;
+    }
+
+    m_model = std::make_shared<mindspore::Model>();
+    mindspore::Status msRet = m_model->Build(modelBuffer, modelSize, mindspore::kMindIR, m_context);
+    if (msRet != mindspore::kSuccess) {
+        HDF_LOGE("Prepare model failed, please make sure the model is valid.");
+        return HDF_FAILURE;
+    }
+
+    auto ret = GetMSInputsAndOutputs();
+    if (ret != HDF_SUCCESS) {
+        HDF_LOGE("Model without inputs or outputs is invalid.");
+        return ret;
+    }
+
+    for (auto input : m_inputs) {
+        m_inputDims.push_back(input.Shape());
+    }
+
+    return HDF_SUCCESS;
+}
+
+int32_t PreparedModelService::Compile(const void* modelBuffer, size_t length)
+{
+    if (modelBuffer == nullptr || length == 0) {
+        HDF_LOGE("ModelBuffer cannot be nullptr and length cannot be zero.");
+        return HDF_ERR_INVALID_PARAM;
+    }
+
+    m_model = std::make_shared<mindspore::Model>();
+    mindspore::Status msRet = m_model->Build(modelBuffer, length, mindspore::kMindIR, m_context);
+    if (msRet != mindspore::kSuccess) {
+        HDF_LOGE("Prepare model from cache failed, please make sure the model cache is valid.");
+        return HDF_FAILURE;
+    }
+
+    auto ret = GetMSInputsAndOutputs();
+    if (ret != HDF_SUCCESS) {
+        HDF_LOGE("Model without inputs or outputs is invalid.");
+        return ret;
+    }
+
+    for (auto input : m_inputs) {
+        auto shapes = input.Shape();
+        if (std::find(shapes.begin(), shapes.end(), DYNAMIC_SHAPE_FLAG) != shapes.end()) {
+            m_isDynamicShape = true;
+            break;
+        }
+    }
+
+    for (auto input : m_inputs) {
+        m_inputDims.push_back(input.Shape());
+    }
+
+    return HDF_SUCCESS;
+}
+
+int32_t PreparedModelService::SetInputs(const std::vector<IOTensor>& inputs)
+{
+    if (inputs.size() != m_inputs.size()) {
+        HDF_LOGE("Inputs size is invalid. expect: %zu, actual: %zu", m_inputs.size(), inputs.size());
+        return HDF_ERR_INVALID_PARAM;
+    }
+    for (auto& ash : m_inputAshmems) {
+        ash->UnmapAshmem();
+        ash->CloseAshmem();
+    }
+    m_inputAshmems.clear();
+
+    int32_t ret {0};
+    size_t inputSize = m_inputs.size();
+    std::vector<std::vector<int64_t>> tmpAllDims;
+    for (size_t i = 0; i < inputSize; i++) {
+        auto& input = inputs[i];
+        auto& msInput = m_inputs[i];
+        ret = CompareTensor(input, msInput);
+        if (ret != HDF_SUCCESS) {
+            HDF_LOGE("Input tensor does not match that of the model. Please check the input tensor.");
+            return ret;
+        }
+        tmpAllDims.emplace_back(input.dimensions.begin(), input.dimensions.end());
+    }
+
+    if (m_isDynamicShape) {
+        auto msRet = m_model->Resize(m_inputs, tmpAllDims);
+        if (msRet != mindspore::kSuccess) {
+            HDF_LOGE("Resize for dynamic inputs failed.");
+            return HDF_FAILURE;
+        }
+        ret = GetMSInputsAndOutputs();
+        if (ret != HDF_SUCCESS) {
+            HDF_LOGE("Get ms inputs or outputs failed after resize.");
+            return ret;
+        }
+    }
+
+    for (size_t i = 0; i < inputSize; i++) {
+        auto& input = inputs[i];
+        auto& msInput = m_inputs[i];
+        sptr<Ashmem> ashptr = ParseBuffer(input.data);
+        if (ashptr == nullptr) {
+            HDF_LOGE("Parse %zuth input data failed.", i);
+            return HDF_ERR_INVALID_PARAM;
+        }
+
+        auto data = const_cast<void*>(ashptr->ReadFromAshmem(input.data.dataSize, 0));
+        msInput.SetData(data);
+        m_inputAshmems.emplace_back(ashptr);
+    }
+    return HDF_SUCCESS;
+}
+
+int32_t PreparedModelService::SetOutputs(const std::vector<IOTensor>& outputs)
+{
+    HDF_LOGI("Start to set outputs, m_outputs size=%zu", m_outputs.size());
+    if (outputs.size() != m_outputs.size()) {
+        HDF_LOGE("Outputs size is invalid. expect: %{public}zu, actual: %{public}zu", m_outputs.size(), outputs.size());
+        return HDF_ERR_INVALID_PARAM;
+    }
+    for (auto ash : m_outputAshmems) {
+        ash->UnmapAshmem();
+        ash->CloseAshmem();
+    }
+    m_outputAshmems.clear();
+
+    for (size_t i = 0; i < m_outputs.size(); i++) {
+        auto& output = outputs[i];
+        auto& msOutput = m_outputs[i];
+
+        sptr<Ashmem> ashptr = ParseBuffer(output.data);
+        if (ashptr == nullptr) {
+            HDF_LOGE("Parse %{public}zuth output data failed.", i);
+            return HDF_ERR_INVALID_PARAM;
+        }
+
+        auto data = const_cast<void*>(ashptr->ReadFromAshmem(output.data.dataSize, 0));
+        msOutput.SetAllocator(nullptr);
+        msOutput.SetData(data);
+        m_outputAshmems.emplace_back(ashptr);
+    }
+    return HDF_SUCCESS;
+}
+
+int32_t PreparedModelService::GetMSInputsAndOutputs()
+{
+    m_inputs = m_model->GetInputs();
+    if (m_inputs.empty()) {
+        HDF_LOGE("Get inputs failed.");
+        return HDF_FAILURE;
+    }
+
+    m_outputs = m_model->GetOutputs();
+    if (m_outputs.empty()) {
+        HDF_LOGE("Get outputs failed.");
+        return HDF_FAILURE;
+    }
+    return HDF_SUCCESS;
+}
+
+int32_t PreparedModelService::CompareTensor(const IOTensor& tensor, const mindspore::MSTensor& msTensor)
+{
+    auto dataType = static_cast<DataType>(msTensor.DataType());
+    if (tensor.dataType != dataType) {
+        HDF_LOGE("Data type of the tensor does not match that of the model.");
+        return HDF_ERR_INVALID_PARAM;
+    }
+
+    auto format = static_cast<Format>(msTensor.format());
+    if (tensor.format != format) {
+        HDF_LOGE("Format of the tensor does not match that of the model.");
+        return HDF_ERR_INVALID_PARAM;
+    }
+
+    if (tensor.dimensions.size() != msTensor.Shape().size()) {
+        HDF_LOGE("Rank of the tensor does not match that of the model.");
+        return HDF_ERR_INVALID_PARAM;
+    }
+
+    for (size_t i = 0; i < tensor.dimensions.size(); i++) {
+        if (msTensor.Shape()[i] != DYNAMIC_SHAPE_FLAG && tensor.dimensions[i] != msTensor.Shape()[i]) {
+            HDF_LOGE("Shape of the tensor does not match that of the model.");
+            return HDF_ERR_INVALID_PARAM;
+        }
+    }
+
+    return HDF_SUCCESS;
+}
+
+sptr<Ashmem> PreparedModelService::ParseBuffer(const SharedBuffer& buffer)
+{
+    if (buffer.fd == -1) {
+        HDF_LOGE("Invalid buffer fd, it cannot be -1.");
+        return nullptr;
+    }
+
+    HDF_LOGW("NNRT buffer fd=%{public}d, length=%{public}u", buffer.fd, buffer.dataSize);
+
+    sptr<Ashmem> ashptr = new (std::nothrow) Ashmem(buffer.fd, buffer.bufferSize);
+    if (ashptr == nullptr) {
+        HDF_LOGE("Create shared memory failed.");
+        return nullptr;
+    }
+
+    if (!ashptr->MapReadAndWriteAshmem()) {
+        HDF_LOGE("Map buffer fd to address failed.");
+        return nullptr;
+    }
+
+    const void* data = ashptr->ReadFromAshmem(buffer.dataSize, buffer.offset);
+    if (data == nullptr) {
+        HDF_LOGE("Get data address failed.");
+        ashptr->UnmapAshmem();
+        ashptr->CloseAshmem();
+        return nullptr;
+    }
+    return ashptr;
+}
+} // namespace V2_0
+} // namespace Nnrt
+} // namespace HDI
+} // namespace OHOS
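For reference, ParseBuffer above expects every IOTensor passed to Run to carry an Ashmem-backed SharedBuffer. A minimal client-side sketch of preparing such an input, assuming a float32 NHWC tensor; hostData, inputByteSize, the buffer name, and the example shape are placeholders, and the Ashmem calls mirror those used in ExportModelCache:

    // Hypothetical client code that fills one input IOTensor for PreparedModelService::Run().
    sptr<Ashmem> ashmem = Ashmem::CreateAshmem("input_0", inputByteSize);
    if (ashmem != nullptr && ashmem->MapReadAndWriteAshmem() &&
        ashmem->WriteToAshmem(hostData, inputByteSize, 0)) {
        IOTensor tensor;
        tensor.dataType = DataType::DATA_TYPE_FLOAT32;   // assumed value from the NNRT HDI types
        tensor.format = Format::FORMAT_NHWC;
        tensor.dimensions = {1, 28, 28, 1};              // example shape only
        // SharedBuffer: fd, bufferSize, offset, dataSize
        tensor.data = {ashmem->GetAshmemFd(), ashmem->GetAshmemSize(), 0, inputByteSize};
        // tensor is then placed in the inputs vector handed to Run().
    }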
diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/src/shared_buffer_parser.cpp b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/shared_buffer_parser.cpp
new file mode 100644
index 0000000..69416b6
--- /dev/null
+++ b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/shared_buffer_parser.cpp
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OHOS_HDI_NNR_V2_0_UTILS_H
+#define OHOS_HDI_NNR_V2_0_UTILS_H
+
+#include "shared_buffer_parser.h"
+
+#include <sys/mman.h>
+#include "ashmem.h"
+#include "v2_0/nnrt_types.h"
+#include "hdf_log.h"
+
+namespace OHOS {
+namespace HDI {
+namespace Nnrt {
+namespace V2_0 {
+SharedBufferParser::~SharedBufferParser()
+{
+    if (m_ashptr != nullptr) {
+        m_ashptr->UnmapAshmem();
+        m_ashptr->CloseAshmem();
+        m_bufferAddr = nullptr;
+    }
+}
+
+int32_t SharedBufferParser::Init(const std::string& name, int32_t size)
+{
+    HDF_LOGI("Init SharedBufferParser from name and size.");
+    sptr<Ashmem> ashptr = Ashmem::CreateAshmem(name.c_str(), size);
+    if (ashptr == nullptr) {
+        HDF_LOGE("Create ashmem from size failed.");
+        return HDF_FAILURE;
+    }
+
+    SharedBuffer buffer;
+    buffer.fd = ashptr->GetAshmemFd();
+    buffer.bufferSize = ashptr->GetAshmemSize();
+    buffer.offset = 0;
+    buffer.dataSize = size;
+
+    auto ret = Init(buffer);
+    if (ret != HDF_SUCCESS) {
+        HDF_LOGE("Init SharedBufferParser failed.");
+        return ret;
+    }
+    return HDF_SUCCESS;
+}
+
+int32_t SharedBufferParser::Init(const SharedBuffer& buffer)
+{
+    if (buffer.fd == INVALID_FD) {
+        HDF_LOGE("Invalid buffer fd, it cannot be %{public}d.", INVALID_FD);
+        return HDF_ERR_INVALID_PARAM;
+    }
+
+    m_ashptr = new (std::nothrow) Ashmem(buffer.fd, buffer.bufferSize);
+    if (m_ashptr == nullptr) {
+        HDF_LOGE("Create ashmem failed.");
+        return HDF_FAILURE;
+    }
+
+    if (!m_ashptr->MapReadAndWriteAshmem()) {
+        HDF_LOGE("Map buffer fd to address failed.");
+        return HDF_FAILURE;
+    }
+
+    auto bufferAddr = m_ashptr->ReadFromAshmem(buffer.dataSize, buffer.offset);
+    if (bufferAddr == nullptr) {
+        HDF_LOGE("Invalid dataSize or offset of SharedBuffer.");
+        return HDF_ERR_INVALID_PARAM;
+    }
+    m_bufferAddr = const_cast<void*>(bufferAddr);
+
+    m_buffer = buffer;
+    return HDF_SUCCESS;
+}
+
+void* SharedBufferParser::GetBufferPtr()
+{
+    return m_bufferAddr;
+}
+
+SharedBuffer SharedBufferParser::GetBuffer()
+{
+    return m_buffer;
+}
+} // namespace V2_0
+} // namespace Nnrt
+} // namespace HDI
+} // namespace OHOS
+#endif // OHOS_HDI_NNR_V2_0_UTILS_H
\ No newline at end of file
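For reference, this is the helper used by UpdateOutput above. A minimal sketch of both Init overloads, assuming a SharedBuffer named buffer received over HDI; the name "scratch_buffer" and the 1024-byte size are placeholders:

    // Wrap an existing ashmem fd described by a SharedBuffer.
    SharedBufferParser parser;
    if (parser.Init(buffer) == HDF_SUCCESS) {
        void* addr = parser.GetBufferPtr();   // mapped address, unmapped in the destructor
        // read or write up to buffer.dataSize bytes at addr ...
    }

    // Or create a fresh ashmem region and export its descriptor.
    SharedBufferParser scratch;
    if (scratch.Init("scratch_buffer", 1024) == HDF_SUCCESS) {
        SharedBuffer out = scratch.GetBuffer();   // fd/bufferSize/offset/dataSize to return over HDI
    }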
diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/src/validation.cpp b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/validation.cpp
new file mode 100644
index 0000000..03521c7
--- /dev/null
+++ b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/validation.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "validation.h"
+
+namespace OHOS {
+namespace HDI {
+namespace Nnrt {
+namespace V2_0 {
+int32_t ValidatePerformanceMode(PerformanceMode mode)
+{
+    if (mode < PerformanceMode::PERFORMANCE_NONE || mode > PerformanceMode::PERFORMANCE_EXTREME) {
+        return false;
+    }
+
+    return true;
+}
+
+int32_t ValidatePriority(Priority priority)
+{
+    if (priority < Priority::PRIORITY_NONE || priority > Priority::PRIORITY_HIGH) {
+        return false;
+    }
+
+    return true;
+}
+
+int32_t ValidateDataType(DataType dataType)
+{
+    if (dataType < DataType::DATA_TYPE_UNKNOWN || dataType > DataType::DATA_TYPE_FLOAT64) {
+        return false;
+    }
+
+    if (dataType > DataType::DATA_TYPE_UNKNOWN && dataType < DataType::DATA_TYPE_BOOL) {
+        return false;
+    }
+
+    if (dataType > DataType::DATA_TYPE_BOOL && dataType < DataType::DATA_TYPE_INT8) {
+        return false;
+    }
+
+    if (dataType > DataType::DATA_TYPE_UINT64 && dataType < DataType::DATA_TYPE_FLOAT16) {
+        return false;
+    }
+
+    return true;
+}
+
+int32_t ValidateFormat(Format format)
+{
+    if (format < Format::FORMAT_NONE || format > Format::FORMAT_NHWC) {
+        return false;
+    }
+
+    return true;
+}
+} // namespace V2_0
+} // namespace Nnrt
+} // namespace HDI
+} // namespace OHOS
\ No newline at end of file
-- 
Gitee