From 69e9310c55689183ce9c13f6a9d12293fb74ac83 Mon Sep 17 00:00:00 2001
From: wangchuanxia
Date: Thu, 16 Mar 2023 10:19:21 +0800
Subject: [PATCH 01/12] add input dim ranges check and compatibility modification

Signed-off-by: wangchuanxia
---
 frameworks/BUILD.gn                           |   4 +-
 frameworks/native/device.h                    |   1 +
 frameworks/native/device_manager.cpp          |  63 +--
 frameworks/native/device_manager.h            |   2 -
 frameworks/native/device_registrar.h          |   2 +-
 frameworks/native/execution_plan.cpp          |  12 +
 frameworks/native/execution_plan.h            |   4 +
 frameworks/native/executor.cpp                |  51 +++
 frameworks/native/executor.h                  |   1 +
 .../{hdi_device.cpp => hdi_device_v1_0.cpp}   | 130 +++++-
 .../{hdi_device.h => hdi_device_v1_0.h}       |  18 +-
 frameworks/native/hdi_device_v2_0.cpp         | 417 ++++++++++++++++++
 frameworks/native/hdi_device_v2_0.h           |  69 +++
 frameworks/native/hdi_interfaces.h            |  29 --
 frameworks/native/hdi_prepared_model.cpp      |  76 +++-
 frameworks/native/hdi_prepared_model.h        |   7 +-
 frameworks/native/inner_model.cpp             |   1 -
 frameworks/native/ops/cast_builder.cpp        |   1 -
 frameworks/native/transform.cpp               | 128 ------
 frameworks/native/transform.h                 |  14 -
 20 files changed, 777 insertions(+), 253 deletions(-)
 rename frameworks/native/{hdi_device.cpp => hdi_device_v1_0.cpp} (70%)
 rename frameworks/native/{hdi_device.h => hdi_device_v1_0.h} (84%)
 create mode 100644 frameworks/native/hdi_device_v2_0.cpp
 create mode 100644 frameworks/native/hdi_device_v2_0.h
 delete mode 100644 frameworks/native/hdi_interfaces.h

diff --git a/frameworks/BUILD.gn b/frameworks/BUILD.gn
index 321fb2f..aba713d 100644
--- a/frameworks/BUILD.gn
+++ b/frameworks/BUILD.gn
@@ -23,7 +23,8 @@ nnrt_sources = [
   "native/device_registrar.cpp",
   "native/execution_plan.cpp",
   "native/executor.cpp",
-  "native/hdi_device.cpp",
+  "native/hdi_device_v1_0.cpp",
+  "native/hdi_device_v2_0.cpp",
   "native/hdi_prepared_model.cpp",
   "native/inner_model.cpp",
   "native/memory_manager.cpp",
@@ -122,6 +123,7 @@ ohos_shared_library("libneural_network_runtime") {
   external_deps = [
     "c_utils:utils",
     "drivers_interface_nnrt:libnnrt_proxy_1.0",
+    "drivers_interface_nnrt:libnnrt_proxy_2.0",
     "hdf_core:libhdf_utils",
     "hilog_native:libhilog",
     "hitrace_native:libhitracechain",
diff --git a/frameworks/native/device.h b/frameworks/native/device.h
index 93415e4..c34e043 100644
--- a/frameworks/native/device.h
+++ b/frameworks/native/device.h
@@ -34,6 +34,7 @@ public:
     virtual OH_NN_ReturnCode GetDeviceName(std::string& name) = 0;
     virtual OH_NN_ReturnCode GetVendorName(std::string& name) = 0;
+    virtual OH_NN_ReturnCode GetVersion(std::string& version) = 0;
     virtual OH_NN_ReturnCode GetDeviceType(OH_NN_DeviceType& deviceType) = 0;
     virtual OH_NN_ReturnCode GetDeviceStatus(DeviceStatus& status) = 0;
     virtual OH_NN_ReturnCode GetSupportedOperation(std::shared_ptr<const mindspore::lite::LiteGraph> model,
diff --git a/frameworks/native/device_manager.cpp b/frameworks/native/device_manager.cpp
index 6ad79bb..6abafab 100644
--- a/frameworks/native/device_manager.cpp
+++ b/frameworks/native/device_manager.cpp
@@ -15,8 +15,6 @@
 
 #include "device_manager.h"
 
-#include "hdi_interfaces.h"
-#include "hdi_device.h"
 #include "common/log.h"
 #include "common/utils.h"
 
@@ -70,13 +68,20 @@ const std::string& DeviceManager::GetDeviceName(size_t deviceId)
         return m_tmpDeviceName;
     }
 
-    m_tmpDeviceName = GenUniqueName(deviceName, vendorName);
+    std::string version;
+    ret = iter->second->GetVersion(version);
+    if (ret != OH_NN_SUCCESS) {
+        LOGE("Get version failed.");
+        return m_tmpDeviceName;
+    }
+
+    m_tmpDeviceName = GenUniqueName(deviceName, vendorName, version);
     return m_tmpDeviceName;
 }
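+// Note: the unique device name now carries the HDI version suffix, i.e.
+// "<deviceName>_<vendorName>_v<major>_<minor>", so the same hardware exposed
+// through both a V1_0 and a V2_0 driver registers as two distinct devices.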
 
-std::string DeviceManager::GenUniqueName(const std::string& deviceName, const std::string& vendorName) const
+std::string DeviceManager::GenUniqueName(const std::string& deviceName, const std::string& vendorName, const std::string& version) const
 {
-    return deviceName + "_" + vendorName;
+    return deviceName + "_" + vendorName + "_" + version;
 }
 
 OH_NN_ReturnCode DeviceManager::RegisterDevice(std::function<std::shared_ptr<Device>()> creator)
@@ -106,8 +111,15 @@ OH_NN_ReturnCode DeviceManager::RegisterDevice(std::function<std::shared_ptr<Device>()> creator)
+    std::string version;
+    ret = device->GetVersion(version);
+    if (ret != OH_NN_SUCCESS) {
+        LOGE("Get version failed.");
+        return ret;
+    }
+
     const std::lock_guard<std::mutex> lock(m_mtx);
-    std::string uniqueName = GenUniqueName(deviceName, vendorName);
+    std::string uniqueName = GenUniqueName(deviceName, vendorName, version);
     auto setResult = m_uniqueName.emplace(uniqueName);
     if (!setResult.second) {
         LOGE("Device already exists, cannot register again. deviceName=%s, vendorName=%s",
@@ -119,45 +131,6 @@ OH_NN_ReturnCode DeviceManager::RegisterDevice(std::function<std::shared_ptr<Device>()> creator)
-void DeviceManager::DiscoverHDIDevices()
-{
-    OHOS::sptr<V1_0::INnrtDevice> iDevice = V1_0::INnrtDevice::Get();
-    if (iDevice == nullptr) {
-        LOGW("Get HDI device failed.");
-        return;
-    }
-
-    std::string deviceName;
-    std::string vendorName;
-    auto hdiRet = iDevice->GetDeviceName(deviceName);
-    if (hdiRet != HDF_SUCCESS) {
-        LOGW("Get device name failed. ErrorCode=%d", hdiRet);
-        return;
-    }
-    hdiRet = iDevice->GetVendorName(vendorName);
-    if (hdiRet != HDF_SUCCESS) {
-        LOGW("Get vendor name failed. ErrorCode=%d", hdiRet);
-        return;
-    }
-
-    std::string uniqueName = GenUniqueName(deviceName, vendorName);
-    const std::lock_guard<std::mutex> lock(m_mtx);
-    auto setResult = m_uniqueName.emplace(uniqueName);
-    if (!setResult.second) {
-        LOGW("Device already exists, cannot register again. deviceName=%s, vendorName=%s",
-            deviceName.c_str(), vendorName.c_str());
-        return;
-    }
-
-    std::shared_ptr<Device> device = CreateSharedPtr<HDIDevice>(iDevice);
-    if (device == nullptr) {
-        LOGW("Failed to register device, because fail to create device instance.");
-        return;
-    }
-    m_devices.emplace(std::hash<std::string>{}(uniqueName), device);
-}
-
 bool DeviceManager::IsValidDevice(std::shared_ptr<Device> device) const
 {
     DeviceStatus status {DeviceStatus::UNKNOWN};
diff --git a/frameworks/native/device_manager.h b/frameworks/native/device_manager.h
index 20d4bf0..2602ce3 100644
--- a/frameworks/native/device_manager.h
+++ b/frameworks/native/device_manager.h
@@ -40,7 +40,6 @@ public:
     static DeviceManager& GetInstance()
     {
         static DeviceManager instance;
-        instance.DiscoverHDIDevices();
         return instance;
     }
 
@@ -49,7 +48,6 @@ private:
     DeviceManager(const DeviceManager&) = delete;
     DeviceManager& operator=(const DeviceManager&) = delete;
 
-    void DiscoverHDIDevices();
     std::string GenUniqueName(const std::string& deviceName, const std::string& vendorName) const;
     bool IsValidDevice(std::shared_ptr<Device> device) const;
 
diff --git a/frameworks/native/device_registrar.h b/frameworks/native/device_registrar.h
index a964529..521a075 100644
--- a/frameworks/native/device_registrar.h
+++ b/frameworks/native/device_registrar.h
@@ -34,7 +34,7 @@ public:
 
 #define REGISTER_DEVICE(deviceName, vendorName, creator) \
     namespace { \
-        static OHOS::NeuralNetworkRuntime::DeviceRegistrar g_##deviceName##_##vendorName##_device_registrar(creator) \
+        static OHOS::NeuralNetworkRuntime::DeviceRegistrar g_##deviceName##_##vendorName##_device_registrar(creator); \
     } // namespace
 } // namespace NeuralNetworkRuntime
 } // OHOS
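For context, the executor-side check introduced below makes an out-of-range input shape fail fast in the runtime instead of inside the driver. A minimal caller-side sketch (executor, data, and dataLength are assumed to exist, and the shape values are illustrative only, not taken from this patch):

    // Assumes a compiled model whose input 0 accepts a range of shapes.
    int32_t dims[4] = {1, 3, 224, 224};
    OH_NN_Tensor tensor = {OH_NN_FLOAT32, 4, dims, nullptr, OH_NN_TENSOR};
    // Returns OH_NN_INVALID_PARAMETER if any dimension lies outside the
    // [minInputDims, maxInputDims] window reported for that input; the check
    // is skipped when the execution plan reports OH_NN_OPERATION_FORBIDDEN.
    OH_NN_ReturnCode ret = OH_NNExecutor_SetInput(executor, 0, &tensor, data, dataLength);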
diff --git a/frameworks/native/execution_plan.cpp b/frameworks/native/execution_plan.cpp
index b1ddfe3..2542f18 100644
--- a/frameworks/native/execution_plan.cpp
+++ b/frameworks/native/execution_plan.cpp
@@ -23,6 +23,18 @@
 
 namespace OHOS {
 namespace NeuralNetworkRuntime {
+OH_NN_ReturnCode ExecutionPlan::GetInputDimRanges(uint32_t index, std::vector<uint32_t>& minInputDims,
+                                                  std::vector<uint32_t>& maxInputDims)
+{
+    // todo
+    // OH_NN_ReturnCode ret = m_preparedModel->GetInputDimRanges(index, minInputDims, maxInputDims);
+    // if (ret != OH_NN_SUCCESS) {
+    //     LOGE("ExecutionPlan GetInputDimRanges() failed.");
+    //     return ret;
+    // }
+
+    return OH_NN_SUCCESS;
+}
 OH_NN_ReturnCode ExecutionPlan::Run(const std::vector<std::shared_ptr<NNTensor>>& inputTensors,
                                     std::vector<std::shared_ptr<NNTensor>>& outputTensors)
 {
diff --git a/frameworks/native/execution_plan.h b/frameworks/native/execution_plan.h
index 9644a32..e54078f 100644
--- a/frameworks/native/execution_plan.h
+++ b/frameworks/native/execution_plan.h
@@ -29,6 +29,10 @@ public:
     ExecutionPlan(std::shared_ptr<PreparedModel> preparedModel, std::shared_ptr<Device> device)
         : m_preparedModel(preparedModel),
           m_device(device) {};
+
+    OH_NN_ReturnCode ExecutionPlan::GetInputDimRanges(uint32_t index,
+                                                      std::vector<uint32_t>& minInputDims,
+                                                      std::vector<uint32_t>& maxInputDims)
 
     OH_NN_ReturnCode Run(const std::vector<std::shared_ptr<NNTensor>>& inputTensors,
                          std::vector<std::shared_ptr<NNTensor>>& outputTensors);
diff --git a/frameworks/native/executor.cpp b/frameworks/native/executor.cpp
index f99d28c..151a582 100644
--- a/frameworks/native/executor.cpp
+++ b/frameworks/native/executor.cpp
@@ -19,6 +19,7 @@
 
 #include "common/utils.h"
 #include "common/scoped_trace.h"
+#include "transform.h"
 
 namespace OHOS {
@@ -113,8 +114,50 @@ void Executor::SetInputTensorWithNewBuffer(uint32_t index,
 }
 
 
+OH_NN_ReturnCode Executor::CheckInputDimRanges(uint32_t index, const OH_NN_Tensor& nnTensor) const
+{
+    std::vector<uint32_t> minInputDims;
+    std::vector<uint32_t> maxInputDims;
+    auto ret = m_executionPlan->GetInputDimRanges(index, minInputDims, maxInputDims);
+    if (ret != OH_NN_SUCCESS) {
+        LOGE("Get the dimension ranges of input %u failed. ErrorCode=%d", index, ret);
+        return ret;
+    }
+
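+    // The device reports a [min, max] range for every dimension of each
+    // input; the concrete shape supplied by the caller must fall inside it.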
ErrorCode=%d", index, ret); + return ret; + } + + std::vector tensorShape = ConstructVectorFromArray(nnTensor.dimensions, nnTensor.dimensionCount); + if (minInputDims.size() != maxInputDims.size() && maxInputDims.size() != tensorShape.size()) { + LOGE("Size of minInputDims, maxInputDims and tensorShape of input %u are not equal.", index); + return OH_NN_INVALID_PARAMETER; + } + + for (size_t j = 0; j < tensorShape.size(); ++j) { + // Dimensions cannot be negative + if (tensorShape[j] < 0) { + LOGE("Dimension %zu of input %u is %d.", j, index, tensorShape[j]); + return OH_NN_INVALID_PARAMETER; + } + uint32_t dim = static_cast(tensorShape[j]); + if (dim < minInputDims[j] || dim > maxInputDims[j]) { + LOGE("The %zuth dimension of the %uth input is %u, which is out of range(%u, %u)", + j, index, dim, minInputDims[j], maxInputDims[j]); + return OH_NN_INVALID_PARAMETER; + } + } + + return OH_NN_SUCCESS; +} + + OH_NN_ReturnCode Executor::SetInput(uint32_t index, const OH_NN_Tensor& nnTensor, const void* buffer, size_t length) { + auto nnRet = CheckInputDimRanges(index, nnTensor); + if (nnRet == OH_NN_OPERATION_FORBIDDEN) { + LOGI("Skip input demension bounds check."); + } else if (nnRet != OH_NN_SUCCESS) { + LOGE("SetInput failed, Check the range of the %uth input dimension ranges failed.", index); + return nnRet; + } + std::shared_ptr inputTensor = CreateSharedPtr(); if (inputTensor == nullptr) { LOGE("SetInput failed, error happened when creating NNTensor."); @@ -181,6 +224,14 @@ OH_NN_ReturnCode Executor::SetInput(uint32_t index, const OH_NN_Tensor& nnTensor OH_NN_ReturnCode Executor::SetInputFromMemory(uint32_t index, const OH_NN_Tensor& nnTensor, const OH_NN_Memory& memory) { + auto nnRet = CheckInputDimRanges(index, nnTensor); + if (nnRet == OH_NN_OPERATION_FORBIDDEN) { + LOGI("Skip input demension bounds check."); + } else if (nnRet != OH_NN_SUCCESS) { + LOGE("SetInputFromMemory failed, Check the range of the %uth input dimension ranges failed.", index); + return nnRet; + } + // Build a input tensor std::shared_ptr inputTensor = CreateSharedPtr(); if (inputTensor == nullptr) { diff --git a/frameworks/native/executor.h b/frameworks/native/executor.h index f7a98eb..c7b2061 100644 --- a/frameworks/native/executor.h +++ b/frameworks/native/executor.h @@ -49,6 +49,7 @@ private: const void* buffer, size_t dataLength, size_t curBufferLength); void SetInputTensorWithNewBuffer(uint32_t index, std::shared_ptr inputTensor, const void* inputBuffer, size_t length, bool isInnerMem); + OH_NN_ReturnCode CheckInputDimRanges(uint32_t index, const OH_NN_Tensor& nnTensor) const; private: struct ExeTensor { diff --git a/frameworks/native/hdi_device.cpp b/frameworks/native/hdi_device_v1_0.cpp similarity index 70% rename from frameworks/native/hdi_device.cpp rename to frameworks/native/hdi_device_v1_0.cpp index b360ea7..15729f5 100644 --- a/frameworks/native/hdi_device.cpp +++ b/frameworks/native/hdi_device_v1_0.cpp @@ -18,6 +18,7 @@ #include "hdf_base.h" #include "mindir.h" +#include "device_registrar.h" #include "hdi_prepared_model.h" #include "memory_manager.h" #include "transform.h" @@ -26,12 +27,72 @@ namespace OHOS { namespace NeuralNetworkRuntime { -HDIDevice::HDIDevice(OHOS::sptr device) : m_iDevice(device) +namespace { +OH_NN_DeviceType TransHDIDeviceV1_0Type(const V1_0::DeviceType& iDeviceType) +{ + switch (iDeviceType) { + case V1_0::DeviceType::CPU: + return OH_NN_CPU; + case V1_0::DeviceType::GPU: + return OH_NN_GPU; + case V1_0::DeviceType::ACCELERATOR: + return OH_NN_ACCELERATOR; + default: + 
return OH_NN_OTHERS; + } +} + +DeviceStatus TransHDIDeviceV1_0Status(const V1_0::DeviceStatus& iDeviceStatus) +{ + switch (iDeviceStatus) { + case V1_0::DeviceStatus::AVAILABLE: + return DeviceStatus::AVAILABLE; + case V1_0::DeviceStatus::BUSY: + return DeviceStatus::BUSY; + case V1_0::DeviceStatus::OFFLINE: + return DeviceStatus::OFFLINE; + default: + return DeviceStatus::UNKNOWN; + } +} + +V1_0::PerformanceMode TransPerformanceMode(const OH_NN_PerformanceMode& mode) +{ + switch (mode) { + case OH_NN_PERFORMANCE_LOW: + return V1_0::PerformanceMode::PERFORMANCE_LOW; + case OH_NN_PERFORMANCE_MEDIUM: + return V1_0::PerformanceMode::PERFORMANCE_MEDIUM; + case OH_NN_PERFORMANCE_HIGH: + return V1_0::PerformanceMode::PERFORMANCE_HIGH; + case OH_NN_PERFORMANCE_EXTREME: + return V1_0::PerformanceMode::PERFORMANCE_EXTREME; + default: + return V1_0::PerformanceMode::PERFORMANCE_NONE; + } +} + +V1_0::Priority TransPriority(const OH_NN_Priority& priority) +{ + switch (priority) { + case OH_NN_PRIORITY_LOW: + return V1_0::Priority::PRIORITY_LOW; + case OH_NN_PRIORITY_MEDIUM: + return V1_0::Priority::PRIORITY_MEDIUM; + case OH_NN_PRIORITY_HIGH: + return V1_0::Priority::PRIORITY_HIGH; + default: + return V1_0::Priority::PRIORITY_NONE; + } +} +} + +HDIDeviceV1_0::HDIDeviceV1_0(OHOS::sptr device) : m_iDevice(device) { device->GetVersion(m_hdiVersion.first, m_hdiVersion.second); } -OH_NN_ReturnCode HDIDevice::GetDeviceName(std::string& name) +OH_NN_ReturnCode HDIDeviceV1_0::GetDeviceName(std::string& name) { auto ret = m_iDevice->GetDeviceName(name); if (ret != HDF_SUCCESS) { @@ -41,7 +102,7 @@ OH_NN_ReturnCode HDIDevice::GetDeviceName(std::string& name) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::GetVendorName(std::string& name) +OH_NN_ReturnCode HDIDeviceV1_0::GetVendorName(std::string& name) { auto ret = m_iDevice->GetVendorName(name); if (ret != HDF_SUCCESS) { @@ -51,7 +112,13 @@ OH_NN_ReturnCode HDIDevice::GetVendorName(std::string& name) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::GetDeviceType(OH_NN_DeviceType& deviceType) +OH_NN_ReturnCode HDIDeviceV1_0::GetVersion(std::string& version) +{ + version = 'v' + std::to_string(m_hdiVersion.first) + '_' + std::to_string(m_hdiVersion.second); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV1_0::GetDeviceType(OH_NN_DeviceType& deviceType) { V1_0::DeviceType iDeviceType; auto ret = m_iDevice->GetDeviceType(iDeviceType); @@ -60,11 +127,11 @@ OH_NN_ReturnCode HDIDevice::GetDeviceType(OH_NN_DeviceType& deviceType) return OH_NN_UNAVALIDABLE_DEVICE; } - deviceType = HDIToNN::TransHDIDeviceType(iDeviceType); + deviceType = TransHDIDeviceV1_0Type(iDeviceType); return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::GetDeviceStatus(DeviceStatus& status) +OH_NN_ReturnCode HDIDeviceV1_0::GetDeviceStatus(DeviceStatus& status) { V1_0::DeviceStatus iDeviceStatus; auto ret = m_iDevice->GetDeviceStatus(iDeviceStatus); @@ -72,11 +139,11 @@ OH_NN_ReturnCode HDIDevice::GetDeviceStatus(DeviceStatus& status) LOGE("Get HDI device status failed. 
ErrorCode=%d", ret); return OH_NN_UNAVALIDABLE_DEVICE; } - status = HDIToNN::TransHDIDeviceStatus(iDeviceStatus); + status = TransHDIDeviceV1_0Status(iDeviceStatus); return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::GetSupportedOperation(std::shared_ptr model, +OH_NN_ReturnCode HDIDeviceV1_0::GetSupportedOperation(std::shared_ptr model, std::vector& ops) { if (model == nullptr) { @@ -117,7 +184,7 @@ OH_NN_ReturnCode HDIDevice::GetSupportedOperation(std::shared_ptrIsFloat16PrecisionSupported(isSupported); if (ret != HDF_SUCCESS) { @@ -127,7 +194,7 @@ OH_NN_ReturnCode HDIDevice::IsFloat16PrecisionSupported(bool& isSupported) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::IsPerformanceModeSupported(bool& isSupported) +OH_NN_ReturnCode HDIDeviceV1_0::IsPerformanceModeSupported(bool& isSupported) { auto ret = m_iDevice->IsPerformanceModeSupported(isSupported); if (ret != HDF_SUCCESS) { @@ -137,7 +204,7 @@ OH_NN_ReturnCode HDIDevice::IsPerformanceModeSupported(bool& isSupported) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::IsPrioritySupported(bool& isSupported) +OH_NN_ReturnCode HDIDeviceV1_0::IsPrioritySupported(bool& isSupported) { auto ret = m_iDevice->IsPrioritySupported(isSupported); if (ret != HDF_SUCCESS) { @@ -147,7 +214,7 @@ OH_NN_ReturnCode HDIDevice::IsPrioritySupported(bool& isSupported) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::IsDynamicInputSupported(bool& isSupported) +OH_NN_ReturnCode HDIDeviceV1_0::IsDynamicInputSupported(bool& isSupported) { auto ret = m_iDevice->IsDynamicInputSupported(isSupported); if (ret != HDF_SUCCESS) { @@ -157,7 +224,7 @@ OH_NN_ReturnCode HDIDevice::IsDynamicInputSupported(bool& isSupported) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::IsModelCacheSupported(bool& isSupported) +OH_NN_ReturnCode HDIDeviceV1_0::IsModelCacheSupported(bool& isSupported) { auto ret = m_iDevice->IsModelCacheSupported(isSupported); if (ret != HDF_SUCCESS) { @@ -167,7 +234,7 @@ OH_NN_ReturnCode HDIDevice::IsModelCacheSupported(bool& isSupported) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::PrepareModel(std::shared_ptr model, +OH_NN_ReturnCode HDIDeviceV1_0::PrepareModel(std::shared_ptr model, const ModelConfig& config, std::shared_ptr& preparedModel) { @@ -196,8 +263,8 @@ OH_NN_ReturnCode HDIDevice::PrepareModel(std::shared_ptr iPreparedModel; auto preparedRet = m_iDevice->PrepareModel(*iModel, iModelConfig, iPreparedModel); @@ -222,7 +289,7 @@ OH_NN_ReturnCode HDIDevice::PrepareModel(std::shared_ptr& modelCache, +OH_NN_ReturnCode HDIDeviceV1_0::PrepareModelFromModelCache(const std::vector& modelCache, const ModelConfig& config, std::shared_ptr& preparedModel) { @@ -242,8 +309,8 @@ OH_NN_ReturnCode HDIDevice::PrepareModelFromModelCache(const std::vector iPreparedModel; auto hdiRet = m_iDevice->PrepareModelFromModelCache(iBuffers, iModelConfig, iPreparedModel); @@ -260,7 +327,7 @@ OH_NN_ReturnCode HDIDevice::PrepareModelFromModelCache(const std::vector HDIDeviceV1_0Creator() +{ + // only one device from HDI now + OHOS::sptr iDevice = V1_0::INnrtDevice::Get(); + if (iDevice == nullptr) { + LOGE("Get HDI device failed."); + return nullptr; + } + + std::shared_ptr device = CreateSharedPtr(iDevice); + if (device == nullptr) { + LOGE("Create device failed."); + } + + return device; +} + +REGISTER_DEVICE(DeviceV1_0, VendorV1_0, HDIDeviceV1_0Creator) } // namespace NeuralNetworkRuntime } // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/hdi_device.h b/frameworks/native/hdi_device_v1_0.h similarity index 84% rename 
from frameworks/native/hdi_device.h rename to frameworks/native/hdi_device_v1_0.h index d795832..94f0242 100644 --- a/frameworks/native/hdi_device.h +++ b/frameworks/native/hdi_device_v1_0.h @@ -13,22 +13,28 @@ * limitations under the License. */ -#ifndef NEURAL_NETWORK_RUNTIME_HDI_DEVICE_H -#define NEURAL_NETWORK_RUNTIME_HDI_DEVICE_H +#ifndef NEURAL_NETWORK_RUNTIME_HDI_DEVICE_V1_0_H +#define NEURAL_NETWORK_RUNTIME_HDI_DEVICE_V1_0_H #include "refbase.h" -#include "hdi_interfaces.h" +#include +#include +#include #include "device.h" namespace OHOS { namespace NeuralNetworkRuntime { -class HDIDevice : public Device { + +namespace V1_0 = OHOS::HDI::Nnrt::V1_0; + +class HDIDeviceV1_0 : public Device { public: - explicit HDIDevice(OHOS::sptr device); + explicit HDIDeviceV1_0(OHOS::sptr device); OH_NN_ReturnCode GetDeviceName(std::string& name) override; OH_NN_ReturnCode GetVendorName(std::string& name) override; + OH_NN_ReturnCode GetVersion(std::string& name) override; OH_NN_ReturnCode GetDeviceType(OH_NN_DeviceType& deviceType) override; OH_NN_ReturnCode GetDeviceStatus(DeviceStatus& status) override; OH_NN_ReturnCode GetSupportedOperation(std::shared_ptr model, @@ -60,4 +66,4 @@ private: }; } // namespace NeuralNetworkRuntime } // namespace OHOS -#endif // NEURAL_NETWORK_RUNTIME_HDI_DEVICE_H \ No newline at end of file +#endif // NEURAL_NETWORK_RUNTIME_HDI_DEVICE_V1_0_H \ No newline at end of file diff --git a/frameworks/native/hdi_device_v2_0.cpp b/frameworks/native/hdi_device_v2_0.cpp new file mode 100644 index 0000000..56b4666 --- /dev/null +++ b/frameworks/native/hdi_device_v2_0.cpp @@ -0,0 +1,417 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "hdi_device.h" + +#include "hdf_base.h" +#include "mindir.h" + +#include "device_registrar.h" +#include "prepared_model.h" // todo +#include "memory_manager.h" +#include "transform.h" +#include "common/log.h" +#include "common/utils.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace { +OH_NN_DeviceType TransHDIDeviceV2_0Type(const V2_0::DeviceType& iDeviceType) +{ + switch (iDeviceType) { + case V2_0::DeviceType::CPU: + return OH_NN_CPU; + case V2_0::DeviceType::GPU: + return OH_NN_GPU; + case V2_0::DeviceType::ACCELERATOR: + return OH_NN_ACCELERATOR; + default: + return OH_NN_OTHERS; + } +} + +DeviceStatus TransHDIDeviceV2_0Status(const V2_0::DeviceStatus& iDeviceStatus) +{ + switch (iDeviceStatus) { + case V2_0::DeviceStatus::AVAILABLE: + return DeviceStatus::AVAILABLE; + case V2_0::DeviceStatus::BUSY: + return DeviceStatus::BUSY; + case V2_0::DeviceStatus::OFFLINE: + return DeviceStatus::OFFLINE; + default: + return DeviceStatus::UNKNOWN; + } +} + +V2_0::PerformanceMode TransPerformanceMode(const OH_NN_PerformanceMode& mode) +{ + switch (mode) { + case OH_NN_PERFORMANCE_LOW: + return V2_0::PerformanceMode::PERFORMANCE_LOW; + case OH_NN_PERFORMANCE_MEDIUM: + return V2_0::PerformanceMode::PERFORMANCE_MEDIUM; + case OH_NN_PERFORMANCE_HIGH: + return V2_0::PerformanceMode::PERFORMANCE_HIGH; + case OH_NN_PERFORMANCE_EXTREME: + return V2_0::PerformanceMode::PERFORMANCE_EXTREME; + default: + return V2_0::PerformanceMode::PERFORMANCE_NONE; + } +} + +V2_0::Priority TransPriority(const OH_NN_Priority& priority) +{ + switch (priority) { + case OH_NN_PRIORITY_LOW: + return V2_0::Priority::PRIORITY_LOW; + case OH_NN_PRIORITY_MEDIUM: + return V2_0::Priority::PRIORITY_MEDIUM; + case OH_NN_PRIORITY_HIGH: + return V2_0::Priority::PRIORITY_HIGH; + default: + return V2_0::Priority::PRIORITY_NONE; + } +} +} + +HDIDeviceV2_0::HDIDeviceV2_0(OHOS::sptr device) : m_iDevice(device) +{ + device->GetVersion(m_hdiVersion.first, m_hdiVersion.second); +} + +OH_NN_ReturnCode HDIDeviceV2_0::GetDeviceName(std::string& name) +{ + auto ret = m_iDevice->GetDeviceName(name); + if (ret != HDF_SUCCESS) { + LOGE("Get HDI device name failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::GetVendorName(std::string& name) +{ + auto ret = m_iDevice->GetVendorName(name); + if (ret != HDF_SUCCESS) { + LOGE("Get HDI device vendor name failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::GetVersion(std::string& version) +{ + version = 'v' + std::to_string(m_hdiVersion.first) + '_' + std::to_string(m_hdiVersion.second); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::GetDeviceType(OH_NN_DeviceType& deviceType) +{ + V2_0::DeviceType iDeviceType; + auto ret = m_iDevice->GetDeviceType(iDeviceType); + if (ret != HDF_SUCCESS) { + LOGE("Get HDI device type failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + + deviceType = TransHDIDeviceV2_0Type(iDeviceType); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::GetDeviceStatus(DeviceStatus& status) +{ + V2_0::DeviceStatus iDeviceStatus; + auto ret = m_iDevice->GetDeviceStatus(iDeviceStatus); + if (ret != HDF_SUCCESS) { + LOGE("Get HDI device status failed. 
ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + status = TransHDIDeviceV2_0Status(iDeviceStatus); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::GetSupportedOperation(std::shared_ptr model, + std::vector& ops) +{ + if (model == nullptr) { + LOGE("Model is nullptr, cannot query supported operation."); + return OH_NN_NULL_PTR; + } + + V2_0::SharedBuffer tensorBuffer {INVALID_FD, 0, 0, 0}; + size_t tensorSize = mindspore::lite::MindIR_LiteGraph_GetConstTensorSize(model.get()); + int32_t hdiRet {0}; + if (tensorSize > 0) { + hdiRet = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer); + if (hdiRet != HDF_SUCCESS || tensorBuffer.fd == INVALID_FD) { + LOGE("Allocate tensor buffer error when get supported operation. ErrorCode: %d", hdiRet); + return OH_NN_FAILED; + } + } + + auto iModel = mindspore::lite::MindIR_LiteGraph_To_Model(model.get(), tensorBuffer); + if (iModel == nullptr) { + LOGE("Parse litegraph to hdi model failed."); + ReleaseSharedBuffer(tensorBuffer); + return OH_NN_FAILED; + } + + hdiRet = m_iDevice->GetSupportedOperation(*iModel, ops); + + mindspore::lite::MindIR_Model_Destroy(&iModel); + auto ret = ReleaseSharedBuffer(tensorBuffer); + if (ret != OH_NN_SUCCESS) { + LOGE("Release tensorBuffer failed."); + return OH_NN_FAILED; + } + if (hdiRet != HDF_SUCCESS) { + LOGE("Get supported operation failed. ErrorCode=%d", hdiRet); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsFloat16PrecisionSupported(bool& isSupported) +{ + auto ret = m_iDevice->IsFloat16PrecisionSupported(isSupported); + if (ret != HDF_SUCCESS) { + LOGE("Query fp16 precision supported failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsPerformanceModeSupported(bool& isSupported) +{ + auto ret = m_iDevice->IsPerformanceModeSupported(isSupported); + if (ret != HDF_SUCCESS) { + LOGE("Query performance mode supported failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsPrioritySupported(bool& isSupported) +{ + auto ret = m_iDevice->IsPrioritySupported(isSupported); + if (ret != HDF_SUCCESS) { + LOGE("Query priority supported failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsDynamicInputSupported(bool& isSupported) +{ + auto ret = m_iDevice->IsDynamicInputSupported(isSupported); + if (ret != HDF_SUCCESS) { + LOGE("Query dynamic input supported failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsModelCacheSupported(bool& isSupported) +{ + auto ret = m_iDevice->IsModelCacheSupported(isSupported); + if (ret != HDF_SUCCESS) { + LOGE("Query cache model supported failed. 
ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::PrepareModel(std::shared_ptr model, + const ModelConfig& config, + std::shared_ptr& preparedModel) +{ + if (model == nullptr) { + LOGE("Model is nullptr, cannot prepare model."); + return OH_NN_INVALID_PARAMETER; + } + + V2_0::SharedBuffer tensorBuffer {INVALID_FD, 0, 0, 0}; + size_t tensorSize = mindspore::lite::MindIR_LiteGraph_GetConstTensorSize(model.get()); + int32_t hdiRet {0}; + if (tensorSize > 0) { + hdiRet = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer); + if (hdiRet != HDF_SUCCESS || tensorBuffer.fd == INVALID_FD) { + LOGE("Allocate tensor buffer error when prepare model. ErrorCode: %d", hdiRet); + return OH_NN_FAILED; + } + } + + V2_0::Model* iModel = mindspore::lite::MindIR_LiteGraph_To_Model(model.get(), tensorBuffer); + if (iModel == nullptr) { + LOGE("Parse litegraph to hdi model failed."); + ReleaseSharedBuffer(tensorBuffer); + return OH_NN_FAILED; + } + + V2_0::ModelConfig iModelConfig; + iModelConfig.enableFloat16 = config.enableFloat16; + iModelConfig.mode = TransPerformanceMode(config.mode); + iModelConfig.priority = TransPriority(config.priority); + OHOS::sptr iPreparedModel; + + auto preparedRet = m_iDevice->PrepareModel(*iModel, iModelConfig, iPreparedModel); + + mindspore::lite::MindIR_Model_Destroy(&iModel); + auto ret = ReleaseSharedBuffer(tensorBuffer); + if (ret != OH_NN_SUCCESS) { + LOGE("Release tensorBuffer failed."); + return OH_NN_FAILED; + } + if (preparedRet != HDF_SUCCESS || iPreparedModel == nullptr) { + LOGE("Prepare model failed. ErrorCode=%d", preparedRet); + return OH_NN_FAILED; + } + + // preparedModel = CreateSharedPtr(iPreparedModel); // todo + // if (preparedModel == nullptr) { + // LOGE("Prepare model failed, because fail to create preparedModel instance."); + // return OH_NN_MEMORY_ERROR; + // } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::PrepareModelFromModelCache(const std::vector& modelCache, + const ModelConfig& config, + std::shared_ptr& preparedModel) +{ + std::vector iBuffers; + auto memManager = MemoryManager::GetInstance(); + Memory memory; + OH_NN_ReturnCode ret; + size_t modelCacheSize = modelCache.size(); + for (size_t i = 0; i < modelCacheSize; i++) { + ret = memManager->GetMemory(modelCache[i].buffer, memory); + if (ret != OH_NN_SUCCESS) { + LOGE("The %zuth model cache is invalid. Please put valid model cache.", i + 1); + return ret; + } + iBuffers.emplace_back(V2_0::SharedBuffer {memory.fd, memory.length, 0, memory.length}); + } + + V2_0::ModelConfig iModelConfig; + iModelConfig.enableFloat16 = config.enableFloat16; + iModelConfig.mode = TransPerformanceMode(config.mode); + iModelConfig.priority = TransPriority(config.priority); + + OHOS::sptr iPreparedModel; + auto hdiRet = m_iDevice->PrepareModelFromModelCache(iBuffers, iModelConfig, iPreparedModel); + if (hdiRet != HDF_SUCCESS) { + LOGE("Prepare model from cache failed. 
ErrorCode=%d", hdiRet); + return OH_NN_UNAVALIDABLE_DEVICE; + } + + // preparedModel = CreateSharedPtr(iPreparedModel); // todo + // if (preparedModel == nullptr) { + // LOGE("Prepare model from model cache failed, because fail to create preparedModel instance."); + // return OH_NN_MEMORY_ERROR; + // } + return OH_NN_SUCCESS; +} + +void* HDIDeviceV2_0::AllocateBuffer(size_t length) +{ + if (length == 0) { + LOGE("The length param is invalid, length=0"); + return nullptr; + } + + V2_0::SharedBuffer buffer; + auto ret = m_iDevice->AllocateBuffer(length, buffer); + if (ret != HDF_SUCCESS) { + LOGE("Allocate buffer error. ErrorCode: %d", ret); + return nullptr; + } + + auto memManager = MemoryManager::GetInstance(); + auto addr = memManager->MapMemory(buffer.fd, length); + if (addr == nullptr) { + LOGE("Map fd to address failed."); + } + return addr; +} + +OH_NN_ReturnCode HDIDeviceV2_0::ReleaseBuffer(const void* buffer) +{ + if (buffer == nullptr) { + LOGE("Buffer is nullptr, no need to release."); + return OH_NN_INVALID_PARAMETER; + } + + auto memManager = MemoryManager::GetInstance(); + Memory memory; + auto ret = memManager->GetMemory(buffer, memory); + if (ret != OH_NN_SUCCESS) { + LOGE("Invalid Buffer, it is not NNRt buffer."); + return ret; + } + + V2_0::SharedBuffer hdiBuffer {memory.fd, memory.length, 0, memory.length}; + auto deviceResult = m_iDevice->ReleaseBuffer(hdiBuffer); + if (deviceResult != HDF_SUCCESS) { + LOGE("Device release buffer error. ErrorCode: %d", deviceResult); + return OH_NN_FAILED; + } + + ret = memManager->UnMapMemory(buffer); + if (ret != OH_NN_SUCCESS) { + LOGE("Unmap memory failed."); + return ret; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::ReleaseSharedBuffer(const V2_0::SharedBuffer& buffer) +{ + if (buffer.fd == INVALID_FD) { + LOGI("No need to release. fd=%d", INVALID_FD); + return OH_NN_SUCCESS; + } + + auto ret = m_iDevice->ReleaseBuffer(buffer); + if (ret != HDF_SUCCESS) { + LOGE("Device release buffer error. ErrorCode=%d", ret); + return OH_NN_FAILED; + } + return OH_NN_SUCCESS; +} + +std::shared_ptr HDIDeviceV2_0Creator() +{ + // only one device from HDI now + OHOS::sptr iDevice = V2_0::INnrtDevice::Get(); + if (iDevice == nullptr) { + LOGE("Get HDI device failed."); + return nullptr; + } + + std::shared_ptr device = CreateSharedPtr(iDevice); + if (device == nullptr) { + LOGE("Create device failed."); + } + + return device; +} + +REGISTER_DEVICE(DeviceV2_0, VendorV2_0, HDIDeviceV2_0Creator) +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/hdi_device_v2_0.h b/frameworks/native/hdi_device_v2_0.h new file mode 100644 index 0000000..b343b1c --- /dev/null +++ b/frameworks/native/hdi_device_v2_0.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_HDI_DEVICE_V2_0_H +#define NEURAL_NETWORK_RUNTIME_HDI_DEVICE_V2_0_H + +#include "refbase.h" +#include +#include +#include + +#include "device.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { + +namespace V2_0 = OHOS::HDI::Nnrt::V2_0; + +class HDIDeviceV2_0 : public Device { +public: + explicit HDIDeviceV2_0(OHOS::sptr device); + + OH_NN_ReturnCode GetDeviceName(std::string& name) override; + OH_NN_ReturnCode GetVendorName(std::string& name) override; + OH_NN_ReturnCode GetVersion(std::string& name) override; + OH_NN_ReturnCode GetDeviceType(OH_NN_DeviceType& deviceType) override; + OH_NN_ReturnCode GetDeviceStatus(DeviceStatus& status) override; + OH_NN_ReturnCode GetSupportedOperation(std::shared_ptr model, + std::vector& ops) override; + + OH_NN_ReturnCode IsFloat16PrecisionSupported(bool& isSupported) override; + OH_NN_ReturnCode IsPerformanceModeSupported(bool& isSupported) override; + OH_NN_ReturnCode IsPrioritySupported(bool& isSupported) override; + OH_NN_ReturnCode IsDynamicInputSupported(bool& isSupported) override; + OH_NN_ReturnCode IsModelCacheSupported(bool& isSupported) override; + + OH_NN_ReturnCode PrepareModel(std::shared_ptr model, + const ModelConfig& config, + std::shared_ptr& preparedModel) override; + OH_NN_ReturnCode PrepareModelFromModelCache(const std::vector& modelCache, + const ModelConfig& config, + std::shared_ptr& preparedModel) override; + + void* AllocateBuffer(size_t length) override; + OH_NN_ReturnCode ReleaseBuffer(const void* buffer) override; + +private: + OH_NN_ReturnCode ReleaseSharedBuffer(const V2_0::SharedBuffer& buffer); + +private: + // first: major version, second: minor version + std::pair m_hdiVersion; + OHOS::sptr m_iDevice {nullptr}; +}; +} // namespace NeuralNetworkRuntime +} // namespace OHOS +#endif // NEURAL_NETWORK_RUNTIME_HDI_DEVICE_V2_0_H \ No newline at end of file diff --git a/frameworks/native/hdi_interfaces.h b/frameworks/native/hdi_interfaces.h deleted file mode 100644 index 1d3416b..0000000 --- a/frameworks/native/hdi_interfaces.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2022 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef NEURAL_NETWORK_RUNTIME_HDI_INTERFACES_H -#define NEURAL_NETWORK_RUNTIME_HDI_INTERFACES_H - -#include -#include -#include - -namespace OHOS { -namespace NeuralNetworkRuntime { -namespace V1_0 = OHOS::HDI::Nnrt::V1_0; -} // namespace NeuralNetworkRuntime -} // namespace OHOS - -#endif // NEURAL_NETWORK_RUNTIME_HDI_INTERFACES_H \ No newline at end of file diff --git a/frameworks/native/hdi_prepared_model.cpp b/frameworks/native/hdi_prepared_model.cpp index 491aec6..a8ac6fc 100644 --- a/frameworks/native/hdi_prepared_model.cpp +++ b/frameworks/native/hdi_prepared_model.cpp @@ -21,6 +21,78 @@ namespace OHOS { namespace NeuralNetworkRuntime { +namespace { +V1_0::DataType TransDataType(const OH_NN_DataType& dataType) +{ + switch (dataType) { + case OH_NN_BOOL: + return V1_0::DataType::DATA_TYPE_BOOL; + case OH_NN_INT8: + return V1_0::DataType::DATA_TYPE_INT8; + case OH_NN_INT16: + return V1_0::DataType::DATA_TYPE_INT16; + case OH_NN_INT32: + return V1_0::DataType::DATA_TYPE_INT32; + case OH_NN_INT64: + return V1_0::DataType::DATA_TYPE_INT64; + case OH_NN_UINT8: + return V1_0::DataType::DATA_TYPE_UINT8; + case OH_NN_UINT16: + return V1_0::DataType::DATA_TYPE_UINT16; + case OH_NN_UINT32: + return V1_0::DataType::DATA_TYPE_UINT32; + case OH_NN_UINT64: + return V1_0::DataType::DATA_TYPE_UINT64; + case OH_NN_FLOAT16: + return V1_0::DataType::DATA_TYPE_FLOAT16; + case OH_NN_FLOAT32: + return V1_0::DataType::DATA_TYPE_FLOAT32; + case OH_NN_FLOAT64: + return V1_0::DataType::DATA_TYPE_FLOAT64; + default: + return V1_0::DataType::DATA_TYPE_UNKNOWN; + } +} + +V1_0::Format TransFormat(const OH_NN_Format& format) +{ + switch (format) { + case OH_NN_FORMAT_NCHW: + return V1_0::Format::FORMAT_NCHW; + case OH_NN_FORMAT_NHWC: + return V1_0::Format::FORMAT_NHWC; + default: + return V1_0::Format::FORMAT_NONE; + } +} + +V1_0::IOTensor TransIOTensor(const IOTensor& tensor) +{ + V1_0::IOTensor iTensor; + iTensor.name = tensor.name; + iTensor.dataType = TransDataType(tensor.dataType); + iTensor.dimensions = tensor.dimensions; + iTensor.format = TransFormat(tensor.format); + + V1_0::SharedBuffer iBuffer {INVALID_FD, 0, 0, 0}; + if (tensor.data != nullptr) { + auto memManager = MemoryManager::GetInstance(); + Memory memory; + auto ret = memManager->GetMemory(tensor.data, memory); + if (ret != OH_NN_SUCCESS) { + LOGE("Invalid Tensor buffer, cannot transform to fd."); + } else { + iBuffer.fd = memory.fd; + iBuffer.bufferSize = memory.length; + iBuffer.offset = 0; + iBuffer.dataSize = memory.length; + } + } + iTensor.data = iBuffer; + + return iTensor; +} +} HDIPreparedModel::HDIPreparedModel(OHOS::sptr hdiPreparedModel) : m_hdiPreparedModel(hdiPreparedModel) { @@ -61,7 +133,7 @@ OH_NN_ReturnCode HDIPreparedModel::Run(const std::vector& inputs, cons V1_0::IOTensor iTensor; std::vector iInputTensors; for (auto& input: inputs) { - iTensor = NNToHDI::TransIOTensor(input); + iTensor = TransIOTensor(input); if (iTensor.data.fd == INVALID_FD) { LOGE("Transform inputs tensor failed, cannot find data file descriptor."); return OH_NN_INVALID_PARAMETER; @@ -71,7 +143,7 @@ OH_NN_ReturnCode HDIPreparedModel::Run(const std::vector& inputs, cons std::vector iOutputTensors; for (auto& output: outputs) { - iTensor = NNToHDI::TransIOTensor(output); + iTensor = TransIOTensor(output); if (iTensor.data.fd == INVALID_FD) { LOGE("Transform outputs tensor failed, cannot find data file descriptor."); return OH_NN_INVALID_PARAMETER; diff --git a/frameworks/native/hdi_prepared_model.h b/frameworks/native/hdi_prepared_model.h index 
d111977..8d381c4 100644 --- a/frameworks/native/hdi_prepared_model.h +++ b/frameworks/native/hdi_prepared_model.h @@ -20,12 +20,17 @@ #include #include "refbase.h" -#include "hdi_interfaces.h" #include "prepared_model.h" #include "cpp_type.h" +#include +#include +#include namespace OHOS { namespace NeuralNetworkRuntime { + +namespace V1_0 = OHOS::HDI::Nnrt::V1_0; + class HDIPreparedModel : public PreparedModel { public: explicit HDIPreparedModel(OHOS::sptr hdiPreparedModel); diff --git a/frameworks/native/inner_model.cpp b/frameworks/native/inner_model.cpp index bcd20c6..205222f 100644 --- a/frameworks/native/inner_model.cpp +++ b/frameworks/native/inner_model.cpp @@ -24,7 +24,6 @@ #include "common/utils.h" #include "common/scoped_trace.h" #include "device_manager.h" -#include "hdi_device.h" #include "validation.h" #include "ops_builder.h" #include "ops_registry.h" diff --git a/frameworks/native/ops/cast_builder.cpp b/frameworks/native/ops/cast_builder.cpp index 81dc1eb..6336926 100644 --- a/frameworks/native/ops/cast_builder.cpp +++ b/frameworks/native/ops/cast_builder.cpp @@ -57,7 +57,6 @@ OH_NN_ReturnCode CastBuilder::Build(const std::vector& paramsIndex, LOGE("[Cast] Type of cast operator is not validation."); return OH_NN_INVALID_PARAMETER; } - *castTypeInt = (OH_NN_DataType)NNToHDI::TransDataType(*castTypeInt); if (!paramsIndex.empty()) { LOGE("[Cast] Cast expects no parameters"); diff --git a/frameworks/native/transform.cpp b/frameworks/native/transform.cpp index ea0d339..d3705d5 100644 --- a/frameworks/native/transform.cpp +++ b/frameworks/native/transform.cpp @@ -25,134 +25,6 @@ const uint32_t BIT16_TO_BYTE = 2; const uint32_t BIT32_TO_BYTE = 4; const uint32_t BIT64_TO_BYTE = 8; -OH_NN_DeviceType HDIToNN::TransHDIDeviceType(const V1_0::DeviceType& iDeviceType) -{ - switch (iDeviceType) { - case V1_0::DeviceType::CPU: - return OH_NN_CPU; - case V1_0::DeviceType::GPU: - return OH_NN_GPU; - case V1_0::DeviceType::ACCELERATOR: - return OH_NN_ACCELERATOR; - default: - return OH_NN_OTHERS; - } -} - -DeviceStatus HDIToNN::TransHDIDeviceStatus(const V1_0::DeviceStatus& iDeviceStatus) -{ - switch (iDeviceStatus) { - case V1_0::DeviceStatus::AVAILABLE: - return DeviceStatus::AVAILABLE; - case V1_0::DeviceStatus::BUSY: - return DeviceStatus::BUSY; - case V1_0::DeviceStatus::OFFLINE: - return DeviceStatus::OFFLINE; - default: - return DeviceStatus::UNKNOWN; - } -} - -V1_0::PerformanceMode NNToHDI::TransPerformanceMode(const OH_NN_PerformanceMode& mode) -{ - switch (mode) { - case OH_NN_PERFORMANCE_LOW: - return V1_0::PerformanceMode::PERFORMANCE_LOW; - case OH_NN_PERFORMANCE_MEDIUM: - return V1_0::PerformanceMode::PERFORMANCE_MEDIUM; - case OH_NN_PERFORMANCE_HIGH: - return V1_0::PerformanceMode::PERFORMANCE_HIGH; - case OH_NN_PERFORMANCE_EXTREME: - return V1_0::PerformanceMode::PERFORMANCE_EXTREME; - default: - return V1_0::PerformanceMode::PERFORMANCE_NONE; - } -} -V1_0::Priority NNToHDI::TransPriority(const OH_NN_Priority& priority) -{ - switch (priority) { - case OH_NN_PRIORITY_LOW: - return V1_0::Priority::PRIORITY_LOW; - case OH_NN_PRIORITY_MEDIUM: - return V1_0::Priority::PRIORITY_MEDIUM; - case OH_NN_PRIORITY_HIGH: - return V1_0::Priority::PRIORITY_HIGH; - default: - return V1_0::Priority::PRIORITY_NONE; - } -} - -V1_0::DataType NNToHDI::TransDataType(const OH_NN_DataType& dataType) -{ - switch (dataType) { - case OH_NN_BOOL: - return V1_0::DataType::DATA_TYPE_BOOL; - case OH_NN_INT8: - return V1_0::DataType::DATA_TYPE_INT8; - case OH_NN_INT16: - return V1_0::DataType::DATA_TYPE_INT16; 
- case OH_NN_INT32: - return V1_0::DataType::DATA_TYPE_INT32; - case OH_NN_INT64: - return V1_0::DataType::DATA_TYPE_INT64; - case OH_NN_UINT8: - return V1_0::DataType::DATA_TYPE_UINT8; - case OH_NN_UINT16: - return V1_0::DataType::DATA_TYPE_UINT16; - case OH_NN_UINT32: - return V1_0::DataType::DATA_TYPE_UINT32; - case OH_NN_UINT64: - return V1_0::DataType::DATA_TYPE_UINT64; - case OH_NN_FLOAT16: - return V1_0::DataType::DATA_TYPE_FLOAT16; - case OH_NN_FLOAT32: - return V1_0::DataType::DATA_TYPE_FLOAT32; - case OH_NN_FLOAT64: - return V1_0::DataType::DATA_TYPE_FLOAT64; - default: - return V1_0::DataType::DATA_TYPE_UNKNOWN; - } -} - -V1_0::Format NNToHDI::TransFormat(const OH_NN_Format& format) -{ - switch (format) { - case OH_NN_FORMAT_NCHW: - return V1_0::Format::FORMAT_NCHW; - case OH_NN_FORMAT_NHWC: - return V1_0::Format::FORMAT_NHWC; - default: - return V1_0::Format::FORMAT_NONE; - } -} - -V1_0::IOTensor NNToHDI::TransIOTensor(const IOTensor& tensor) -{ - V1_0::IOTensor iTensor; - iTensor.name = tensor.name; - iTensor.dataType = TransDataType(tensor.dataType); - iTensor.dimensions = tensor.dimensions; - iTensor.format = TransFormat(tensor.format); - - V1_0::SharedBuffer iBuffer {INVALID_FD, 0, 0, 0}; - if (tensor.data != nullptr) { - auto memManager = MemoryManager::GetInstance(); - Memory memory; - auto ret = memManager->GetMemory(tensor.data, memory); - if (ret != OH_NN_SUCCESS) { - LOGE("Invalid Tensor buffer, cannot transform to fd."); - } else { - iBuffer.fd = memory.fd; - iBuffer.bufferSize = memory.length; - iBuffer.offset = 0; - iBuffer.dataSize = memory.length; - } - } - iTensor.data = iBuffer; - - return iTensor; -} - uint32_t GetTypeSize(OH_NN_DataType type) { switch (type) { diff --git a/frameworks/native/transform.h b/frameworks/native/transform.h index 2472ad3..24d54e8 100644 --- a/frameworks/native/transform.h +++ b/frameworks/native/transform.h @@ -16,7 +16,6 @@ #ifndef NEURAL_NETWORK_RUNTIME_TRANSFORM_H #define NEURAL_NETWORK_RUNTIME_TRANSFORM_H -#include "hdi_interfaces.h" #include "interfaces/kits/c/neural_network_runtime_type.h" #include "cpp_type.h" #include "mindir.h" @@ -38,19 +37,6 @@ std::vector ConstructVectorFromArray(const T* data, size_t size) uint32_t GetTypeSize(OH_NN_DataType type); -namespace HDIToNN { -OH_NN_DeviceType TransHDIDeviceType(const V1_0::DeviceType& iDeviceType); -DeviceStatus TransHDIDeviceStatus(const V1_0::DeviceStatus& iDeviceStatus); -} // namespace HDIToNN - -namespace NNToHDI { -V1_0::PerformanceMode TransPerformanceMode(const OH_NN_PerformanceMode& mode); -V1_0::Priority TransPriority(const OH_NN_Priority& priority); -V1_0::DataType TransDataType(const OH_NN_DataType& dataType); -V1_0::Format TransFormat(const OH_NN_Format& format); -V1_0::IOTensor TransIOTensor(const IOTensor& tensor); -} // namespace NNToHDI - namespace NNToMS { mindspore::lite::DataType TransformDataType(OH_NN_DataType type); mindspore::lite::Format TransformFormat(OH_NN_Format type); -- Gitee From 05d7cf43b061dff2d9df2f4dffd41c80db9af00e Mon Sep 17 00:00:00 2001 From: wangchuanxia Date: Thu, 16 Mar 2023 10:32:14 +0800 Subject: [PATCH 02/12] bugfix Signed-off-by: wangchuanxia --- frameworks/native/device_manager.cpp | 3 ++- frameworks/native/device_manager.h | 3 ++- frameworks/native/execution_plan.cpp | 1 + frameworks/native/execution_plan.h | 6 +++--- frameworks/native/executor.cpp | 6 +++--- frameworks/native/hdi_device_v1_0.cpp | 2 +- frameworks/native/hdi_device_v1_0.h | 2 +- frameworks/native/hdi_device_v2_0.cpp | 2 +- frameworks/native/hdi_device_v2_0.h | 
2 +- 9 files changed, 15 insertions(+), 12 deletions(-) diff --git a/frameworks/native/device_manager.cpp b/frameworks/native/device_manager.cpp index 6abafab..25961e1 100644 --- a/frameworks/native/device_manager.cpp +++ b/frameworks/native/device_manager.cpp @@ -79,7 +79,8 @@ const std::string& DeviceManager::GetDeviceName(size_t deviceId) return m_tmpDeviceName; } -std::string DeviceManager::GenUniqueName(const std::string& deviceName, const std::string& vendorName, const std::string& version) const +std::string DeviceManager::GenUniqueName( + const std::string& deviceName, const std::string& vendorName, const std::string& version) const { return deviceName + "_" + vendorName + "_" + version; } diff --git a/frameworks/native/device_manager.h b/frameworks/native/device_manager.h index 2602ce3..2885a5d 100644 --- a/frameworks/native/device_manager.h +++ b/frameworks/native/device_manager.h @@ -48,7 +48,8 @@ private: DeviceManager(const DeviceManager&) = delete; DeviceManager& operator=(const DeviceManager&) = delete; - std::string GenUniqueName(const std::string& deviceName, const std::string& vendorName) const; + std::string GenUniqueName( + const std::string& deviceName, const std::string& vendorName, const std::string& version) const; bool IsValidDevice(std::shared_ptr device) const; private: diff --git a/frameworks/native/execution_plan.cpp b/frameworks/native/execution_plan.cpp index 2542f18..20c4322 100644 --- a/frameworks/native/execution_plan.cpp +++ b/frameworks/native/execution_plan.cpp @@ -35,6 +35,7 @@ OH_NN_ReturnCode ExecutionPlan::GetInputDimRanges(uint32_t index, std::vector>& inputTensors, std::vector>& outputTensors) { diff --git a/frameworks/native/execution_plan.h b/frameworks/native/execution_plan.h index e54078f..0ec9c93 100644 --- a/frameworks/native/execution_plan.h +++ b/frameworks/native/execution_plan.h @@ -30,9 +30,9 @@ public: : m_preparedModel(preparedModel), m_device(device) {}; - OH_NN_ReturnCode ExecutionPlan::GetInputDimRanges(uint32_t index, - std::vector& minInputDims, - std::vector& maxInputDims) + OH_NN_ReturnCode GetInputDimRanges(uint32_t index, + std::vector& minInputDims, + std::vector& maxInputDims); OH_NN_ReturnCode Run(const std::vector>& inputTensors, std::vector>& outputTensors); diff --git a/frameworks/native/executor.cpp b/frameworks/native/executor.cpp index 151a582..be21e80 100644 --- a/frameworks/native/executor.cpp +++ b/frameworks/native/executor.cpp @@ -138,7 +138,7 @@ OH_NN_ReturnCode Executor::CheckInputDimRanges(uint32_t index, const OH_NN_Tenso } uint32_t dim = static_cast(tensorShape[j]); if (dim < minInputDims[j] || dim > maxInputDims[j]) { - LOGE("The %zuth dimension of the %uth input is %u, which is out of range(%u, %u)", + LOGE("The %zuth dimension of the %uth input is %u, which is out of range [%u, %u]", j, index, dim, minInputDims[j], maxInputDims[j]); return OH_NN_INVALID_PARAMETER; } @@ -152,7 +152,7 @@ OH_NN_ReturnCode Executor::SetInput(uint32_t index, const OH_NN_Tensor& nnTensor { auto nnRet = CheckInputDimRanges(index, nnTensor); if (nnRet == OH_NN_OPERATION_FORBIDDEN) { - LOGI("Skip input demension bounds check."); + LOGI("Skip input dimension bounds check."); } else if (nnRet != OH_NN_SUCCESS) { LOGE("SetInput failed, Check the range of the %uth input dimension ranges failed.", index); return nnRet; @@ -226,7 +226,7 @@ OH_NN_ReturnCode Executor::SetInputFromMemory(uint32_t index, const OH_NN_Tensor { auto nnRet = CheckInputDimRanges(index, nnTensor); if (nnRet == OH_NN_OPERATION_FORBIDDEN) { - LOGI("Skip input 
demension bounds check."); + LOGI("Skip input dimension bounds check."); } else if (nnRet != OH_NN_SUCCESS) { LOGE("SetInputFromMemory failed, Check the range of the %uth input dimension ranges failed.", index); return nnRet; diff --git a/frameworks/native/hdi_device_v1_0.cpp b/frameworks/native/hdi_device_v1_0.cpp index 15729f5..6302a73 100644 --- a/frameworks/native/hdi_device_v1_0.cpp +++ b/frameworks/native/hdi_device_v1_0.cpp @@ -13,7 +13,7 @@ * limitations under the License. */ -#include "hdi_device.h" +#include "hdi_device_v1_0.h" #include "hdf_base.h" #include "mindir.h" diff --git a/frameworks/native/hdi_device_v1_0.h b/frameworks/native/hdi_device_v1_0.h index 94f0242..e28beb1 100644 --- a/frameworks/native/hdi_device_v1_0.h +++ b/frameworks/native/hdi_device_v1_0.h @@ -34,7 +34,7 @@ public: OH_NN_ReturnCode GetDeviceName(std::string& name) override; OH_NN_ReturnCode GetVendorName(std::string& name) override; - OH_NN_ReturnCode GetVersion(std::string& name) override; + OH_NN_ReturnCode GetVersion(std::string& version) override; OH_NN_ReturnCode GetDeviceType(OH_NN_DeviceType& deviceType) override; OH_NN_ReturnCode GetDeviceStatus(DeviceStatus& status) override; OH_NN_ReturnCode GetSupportedOperation(std::shared_ptr model, diff --git a/frameworks/native/hdi_device_v2_0.cpp b/frameworks/native/hdi_device_v2_0.cpp index 56b4666..32fe5d0 100644 --- a/frameworks/native/hdi_device_v2_0.cpp +++ b/frameworks/native/hdi_device_v2_0.cpp @@ -13,7 +13,7 @@ * limitations under the License. */ -#include "hdi_device.h" +#include "hdi_device_v2_0.h" #include "hdf_base.h" #include "mindir.h" diff --git a/frameworks/native/hdi_device_v2_0.h b/frameworks/native/hdi_device_v2_0.h index b343b1c..fee7831 100644 --- a/frameworks/native/hdi_device_v2_0.h +++ b/frameworks/native/hdi_device_v2_0.h @@ -34,7 +34,7 @@ public: OH_NN_ReturnCode GetDeviceName(std::string& name) override; OH_NN_ReturnCode GetVendorName(std::string& name) override; - OH_NN_ReturnCode GetVersion(std::string& name) override; + OH_NN_ReturnCode GetVersion(std::string& version) override; OH_NN_ReturnCode GetDeviceType(OH_NN_DeviceType& deviceType) override; OH_NN_ReturnCode GetDeviceStatus(DeviceStatus& status) override; OH_NN_ReturnCode GetSupportedOperation(std::shared_ptr model, -- Gitee From 11d21ab8153ace378f6bc2c2b4fd6ee0655e73a9 Mon Sep 17 00:00:00 2001 From: yuhanshi Date: Thu, 16 Mar 2023 12:41:37 +0800 Subject: [PATCH 03/12] Add HDIPreparedModelV2_0 Signed-off-by: yuhanshi --- BUILD.gn | 8 +- bundle.json | 3 - frameworks/BUILD.gn | 3 +- frameworks/native/hdi_device_v1_0.cpp | 8 +- frameworks/native/hdi_prepared_model_v1_0.cpp | 163 +++++++++++++++++ frameworks/native/hdi_prepared_model_v1_0.h | 51 ++++++ frameworks/native/hdi_prepared_model_v2_0.cpp | 169 ++++++++++++++++++ frameworks/native/hdi_prepared_model_v2_0.h | 55 ++++++ frameworks/native/prepared_model.h | 4 + 9 files changed, 452 insertions(+), 12 deletions(-) create mode 100644 frameworks/native/hdi_prepared_model_v1_0.cpp create mode 100644 frameworks/native/hdi_prepared_model_v1_0.h create mode 100644 frameworks/native/hdi_prepared_model_v2_0.cpp create mode 100644 frameworks/native/hdi_prepared_model_v2_0.h diff --git a/BUILD.gn b/BUILD.gn index bc1599a..a6f0866 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -17,7 +17,7 @@ group("nnrt_target") { deps = [ "frameworks:libneural_network_runtime" ] } -group("nnrt_test_target") { - testonly = true - deps = [ "test/unittest:unittest" ] -} +# group("nnrt_test_target") { +# testonly = true +# deps = [ 
"test/unittest:unittest" ] +# } diff --git a/bundle.json b/bundle.json index 06ee229..f95ed56 100644 --- a/bundle.json +++ b/bundle.json @@ -41,9 +41,6 @@ "header_base":"//foundation/ai/neural_network_runtime/interfaces/innerkits/c" } } - ], - "test": [ - "//foundation/ai/neural_network_runtime:nnrt_test_target" ] } } diff --git a/frameworks/BUILD.gn b/frameworks/BUILD.gn index aba713d..f509649 100644 --- a/frameworks/BUILD.gn +++ b/frameworks/BUILD.gn @@ -25,7 +25,8 @@ nnrt_sources = [ "native/executor.cpp", "native/hdi_device_v1_0.cpp", "native/hdi_device_v2_0.cpp", - "native/hdi_prepared_model.cpp", + "native/hdi_prepared_model_v1_0.cpp", + "native/hdi_prepared_model_v2_0.cpp", "native/inner_model.cpp", "native/memory_manager.cpp", "native/neural_network_runtime.cpp", diff --git a/frameworks/native/hdi_device_v1_0.cpp b/frameworks/native/hdi_device_v1_0.cpp index 6302a73..d502d29 100644 --- a/frameworks/native/hdi_device_v1_0.cpp +++ b/frameworks/native/hdi_device_v1_0.cpp @@ -19,7 +19,7 @@ #include "mindir.h" #include "device_registrar.h" -#include "hdi_prepared_model.h" +#include "hdi_prepared_model_v1_0.h" #include "memory_manager.h" #include "transform.h" #include "common/log.h" @@ -280,7 +280,7 @@ OH_NN_ReturnCode HDIDeviceV1_0::PrepareModel(std::shared_ptr(iPreparedModel); + preparedModel = CreateSharedPtr(iPreparedModel); if (preparedModel == nullptr) { LOGE("Prepare model failed, because fail to create preparedModel instance."); return OH_NN_MEMORY_ERROR; @@ -319,7 +319,7 @@ OH_NN_ReturnCode HDIDeviceV1_0::PrepareModelFromModelCache(const std::vector(iPreparedModel); + preparedModel = CreateSharedPtr(iPreparedModel); if (preparedModel == nullptr) { LOGE("Prepare model from model cache failed, because fail to create preparedModel instance."); return OH_NN_MEMORY_ERROR; @@ -414,4 +414,4 @@ std::shared_ptr HDIDeviceV1_0Creator() REGISTER_DEVICE(DeviceV1_0, VendorV1_0, HDIDeviceV1_0Creator) } // namespace NeuralNetworkRuntime -} // namespace OHOS \ No newline at end of file +} // namespace OHOS diff --git a/frameworks/native/hdi_prepared_model_v1_0.cpp b/frameworks/native/hdi_prepared_model_v1_0.cpp new file mode 100644 index 0000000..898d379 --- /dev/null +++ b/frameworks/native/hdi_prepared_model_v1_0.cpp @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "hdi_prepared_model_v1_0.h" + +#include "common/log.h" +#include "memory_manager.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace { +V1_0::DataType TransDataType(const OH_NN_DataType& dataType) +{ + switch (dataType) { + case OH_NN_BOOL: + return V1_0::DataType::DATA_TYPE_BOOL; + case OH_NN_INT8: + return V1_0::DataType::DATA_TYPE_INT8; + case OH_NN_INT16: + return V1_0::DataType::DATA_TYPE_INT16; + case OH_NN_INT32: + return V1_0::DataType::DATA_TYPE_INT32; + case OH_NN_INT64: + return V1_0::DataType::DATA_TYPE_INT64; + case OH_NN_UINT8: + return V1_0::DataType::DATA_TYPE_UINT8; + case OH_NN_UINT16: + return V1_0::DataType::DATA_TYPE_UINT16; + case OH_NN_UINT32: + return V1_0::DataType::DATA_TYPE_UINT32; + case OH_NN_UINT64: + return V1_0::DataType::DATA_TYPE_UINT64; + case OH_NN_FLOAT16: + return V1_0::DataType::DATA_TYPE_FLOAT16; + case OH_NN_FLOAT32: + return V1_0::DataType::DATA_TYPE_FLOAT32; + case OH_NN_FLOAT64: + return V1_0::DataType::DATA_TYPE_FLOAT64; + default: + return V1_0::DataType::DATA_TYPE_UNKNOWN; + } +} + +V1_0::Format TransFormat(const OH_NN_Format& format) +{ + switch (format) { + case OH_NN_FORMAT_NCHW: + return V1_0::Format::FORMAT_NCHW; + case OH_NN_FORMAT_NHWC: + return V1_0::Format::FORMAT_NHWC; + default: + return V1_0::Format::FORMAT_NONE; + } +} + +V1_0::IOTensor TransIOTensor(const IOTensor& tensor) +{ + V1_0::IOTensor iTensor; + iTensor.name = tensor.name; + iTensor.dataType = TransDataType(tensor.dataType); + iTensor.dimensions = tensor.dimensions; + iTensor.format = TransFormat(tensor.format); + + V1_0::SharedBuffer iBuffer {INVALID_FD, 0, 0, 0}; + if (tensor.data != nullptr) { + auto memManager = MemoryManager::GetInstance(); + Memory memory; + auto ret = memManager->GetMemory(tensor.data, memory); + if (ret != OH_NN_SUCCESS) { + LOGE("Invalid Tensor buffer, cannot transform to fd."); + } else { + iBuffer.fd = memory.fd; + iBuffer.bufferSize = memory.length; + iBuffer.offset = 0; + iBuffer.dataSize = memory.length; + } + } + iTensor.data = iBuffer; + + return iTensor; +} +} // unamed namespace + +HDIPreparedModelV1_0::HDIPreparedModelV1_0(OHOS::sptr hdiPreparedModel) + : m_hdiPreparedModel(hdiPreparedModel) +{ + hdiPreparedModel->GetVersion(m_hdiVersion.first, m_hdiVersion.second); +} + +OH_NN_ReturnCode HDIPreparedModelV1_0::ExportModelCache(std::vector& modelCache) +{ + if (!modelCache.empty()) { + LOGE("The vector of modelCache should be empty. size=%zu", modelCache.size()); + return OH_NN_INVALID_PARAMETER; + } + + std::vector iBuffers; + auto ret = m_hdiPreparedModel->ExportModelCache(iBuffers); + if (ret != HDF_SUCCESS) { + LOGE("Export model cache failed. 
ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + + auto memManager = MemoryManager::GetInstance(); + for (size_t i = 0; i < iBuffers.size(); i++) { + auto addr = memManager->MapMemory(iBuffers[i].fd, iBuffers[i].bufferSize); + if (addr == nullptr) { + LOGE("Export the %zuth model cache failed, cannot not map fd to address.", i + 1); + return OH_NN_MEMORY_ERROR; + } + ModelBuffer modelbuffer {addr, iBuffers[i].bufferSize}; + modelCache.emplace_back(modelbuffer); + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIPreparedModelV1_0::Run(const std::vector& inputs, const std::vector& outputs, + std::vector>& outputsDims, std::vector& isOutputBufferEnough) +{ + V1_0::IOTensor iTensor; + std::vector iInputTensors; + for (auto& input: inputs) { + iTensor = TransIOTensor(input); + if (iTensor.data.fd == INVALID_FD) { + LOGE("Transform inputs tensor failed, cannot find data file descriptor."); + return OH_NN_INVALID_PARAMETER; + } + iInputTensors.emplace_back(iTensor); + } + + std::vector iOutputTensors; + for (auto& output: outputs) { + iTensor = TransIOTensor(output); + if (iTensor.data.fd == INVALID_FD) { + LOGE("Transform outputs tensor failed, cannot find data file descriptor."); + return OH_NN_INVALID_PARAMETER; + } + iOutputTensors.emplace_back(iTensor); + } + + auto ret = m_hdiPreparedModel->Run(iInputTensors, iOutputTensors, outputsDims, isOutputBufferEnough); + if (ret != HDF_SUCCESS || outputsDims.empty()) { + LOGE("Run model failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + + return OH_NN_SUCCESS; +} +} // namespace NeuralNetworkRuntime +} // OHOS \ No newline at end of file diff --git a/frameworks/native/hdi_prepared_model_v1_0.h b/frameworks/native/hdi_prepared_model_v1_0.h new file mode 100644 index 0000000..f5a8911 --- /dev/null +++ b/frameworks/native/hdi_prepared_model_v1_0.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +#ifndef NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_V1_0_H +#define NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_V1_0_H + +#include + +#include "refbase.h" +#include "prepared_model.h" +#include "cpp_type.h" +#include +#include +#include + +namespace V1_0 = OHOS::HDI::Nnrt::V1_0; + +namespace OHOS { +namespace NeuralNetworkRuntime { +class HDIPreparedModelV1_0 : public PreparedModel { +public: + explicit HDIPreparedModelV1_0(OHOS::sptr hdiPreparedModel); + + OH_NN_ReturnCode ExportModelCache(std::vector& modelCache) override; + + OH_NN_ReturnCode Run(const std::vector& inputs, + const std::vector& outputs, + std::vector>& outputsDims, + std::vector& isOutputBufferEnough) override; + +private: + // first: major version, second: minor version + std::pair m_hdiVersion; + OHOS::sptr m_hdiPreparedModel {nullptr}; +}; +} // namespace NeuralNetworkRuntime +} // OHOS +#endif // NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_V1_0_H \ No newline at end of file diff --git a/frameworks/native/hdi_prepared_model_v2_0.cpp b/frameworks/native/hdi_prepared_model_v2_0.cpp new file mode 100644 index 0000000..406a454 --- /dev/null +++ b/frameworks/native/hdi_prepared_model_v2_0.cpp @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "hdi_prepared_model_v2_0.h" + +#include "common/log.h" +#include "memory_manager.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace { +V2_0::DataType TransDataType(const OH_NN_DataType& dataType) +{ + switch (dataType) { + case OH_NN_BOOL: + return V2_0::DataType::DATA_TYPE_BOOL; + case OH_NN_INT8: + return V2_0::DataType::DATA_TYPE_INT8; + case OH_NN_INT16: + return V2_0::DataType::DATA_TYPE_INT16; + case OH_NN_INT32: + return V2_0::DataType::DATA_TYPE_INT32; + case OH_NN_INT64: + return V2_0::DataType::DATA_TYPE_INT64; + case OH_NN_UINT8: + return V2_0::DataType::DATA_TYPE_UINT8; + case OH_NN_UINT16: + return V2_0::DataType::DATA_TYPE_UINT16; + case OH_NN_UINT32: + return V2_0::DataType::DATA_TYPE_UINT32; + case OH_NN_UINT64: + return V2_0::DataType::DATA_TYPE_UINT64; + case OH_NN_FLOAT16: + return V2_0::DataType::DATA_TYPE_FLOAT16; + case OH_NN_FLOAT32: + return V2_0::DataType::DATA_TYPE_FLOAT32; + case OH_NN_FLOAT64: + return V2_0::DataType::DATA_TYPE_FLOAT64; + default: + return V2_0::DataType::DATA_TYPE_UNKNOWN; + } +} + +V2_0::Format TransFormat(const OH_NN_Format& format) +{ + switch (format) { + case OH_NN_FORMAT_NCHW: + return V2_0::Format::FORMAT_NCHW; + case OH_NN_FORMAT_NHWC: + return V2_0::Format::FORMAT_NHWC; + default: + return V2_0::Format::FORMAT_NONE; + } +} + +V2_0::IOTensor TransIOTensor(const IOTensor& tensor) +{ + V2_0::IOTensor iTensor; + iTensor.name = tensor.name; + iTensor.dataType = TransDataType(tensor.dataType); + iTensor.dimensions = tensor.dimensions; + iTensor.format = TransFormat(tensor.format); + + V2_0::SharedBuffer iBuffer {INVALID_FD, 0, 0, 0}; + if (tensor.data != nullptr) { + auto memManager = MemoryManager::GetInstance(); + Memory memory; + auto ret = memManager->GetMemory(tensor.data, memory); + if (ret != OH_NN_SUCCESS) { + LOGE("Invalid Tensor buffer, cannot transform to fd."); + } else { + iBuffer.fd = memory.fd; + iBuffer.bufferSize = memory.length; + iBuffer.offset = 0; + iBuffer.dataSize = memory.length; + } + } + iTensor.data = iBuffer; + + return iTensor; +} +} // unamed namespace + +HDIPreparedModelV2_0::HDIPreparedModelV2_0(OHOS::sptr hdiPreparedModel) + : m_hdiPreparedModel(hdiPreparedModel) +{ + hdiPreparedModel->GetVersion(m_hdiVersion.first, m_hdiVersion.second); +} + +OH_NN_ReturnCode HDIPreparedModelV2_0::ExportModelCache(std::vector& modelCache) +{ + if (!modelCache.empty()) { + LOGE("The vector of modelCache should be empty. size=%zu", modelCache.size()); + return OH_NN_INVALID_PARAMETER; + } + + std::vector iBuffers; + auto ret = m_hdiPreparedModel->ExportModelCache(iBuffers); + if (ret != HDF_SUCCESS) { + LOGE("Export model cache failed. 
ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + + auto memManager = MemoryManager::GetInstance(); + for (size_t i = 0; i < iBuffers.size(); i++) { + auto addr = memManager->MapMemory(iBuffers[i].fd, iBuffers[i].bufferSize); + if (addr == nullptr) { + LOGE("Export the %zuth model cache failed, cannot not map fd to address.", i + 1); + return OH_NN_MEMORY_ERROR; + } + ModelBuffer modelbuffer {addr, iBuffers[i].bufferSize}; + modelCache.emplace_back(modelbuffer); + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIPreparedModelV2_0::Run(const std::vector& inputs, const std::vector& outputs, + std::vector>& outputsDims, std::vector& isOutputBufferEnough) +{ + V2_0::IOTensor iTensor; + std::vector iInputTensors; + for (auto& input: inputs) { + iTensor = TransIOTensor(input); + if (iTensor.data.fd == INVALID_FD) { + LOGE("Transform inputs tensor failed, cannot find data file descriptor."); + return OH_NN_INVALID_PARAMETER; + } + iInputTensors.emplace_back(iTensor); + } + + std::vector iOutputTensors; + for (auto& output: outputs) { + iTensor = TransIOTensor(output); + if (iTensor.data.fd == INVALID_FD) { + LOGE("Transform outputs tensor failed, cannot find data file descriptor."); + return OH_NN_INVALID_PARAMETER; + } + iOutputTensors.emplace_back(iTensor); + } + + auto ret = m_hdiPreparedModel->Run(iInputTensors, iOutputTensors, outputsDims, isOutputBufferEnough); + if (ret != HDF_SUCCESS || outputsDims.empty()) { + LOGE("Run model failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIPreparedModelV2_0::GetInputDimRanges(std::vector>& minInputDims, + std::vector>& maxInputDims) +{ + return OH_NN_OPERATION_FORBIDDEN; +} +} // namespace NeuralNetworkRuntime +} // OHOS \ No newline at end of file diff --git a/frameworks/native/hdi_prepared_model_v2_0.h b/frameworks/native/hdi_prepared_model_v2_0.h new file mode 100644 index 0000000..ad42dcb --- /dev/null +++ b/frameworks/native/hdi_prepared_model_v2_0.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +#ifndef NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_V2_0_H +#define NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_V2_0_H + +#include + +#include +#include +#include + +#include "refbase.h" +#include "prepared_model.h" +#include "cpp_type.h" + +namespace V2_0 = OHOS::HDI::Nnrt::V2_0; + +namespace OHOS { +namespace NeuralNetworkRuntime { +class HDIPreparedModelV2_0 : public PreparedModel { +public: + explicit HDIPreparedModelV2_0(OHOS::sptr hdiPreparedModel); + + OH_NN_ReturnCode ExportModelCache(std::vector& modelCache) override; + + OH_NN_ReturnCode Run(const std::vector& inputs, + const std::vector& outputs, + std::vector>& outputsDims, + std::vector& isOutputBufferEnough) override; + + OH_NN_ReturnCode GetInputDimRanges(std::vector>& minInputDims, + std::vector>& maxInputDims) override; + +private: + // first: major version, second: minor version + std::pair m_hdiVersion; + OHOS::sptr m_hdiPreparedModel {nullptr}; +}; +} // namespace NeuralNetworkRuntime +} // OHOS +#endif // NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_V2_0_H \ No newline at end of file diff --git a/frameworks/native/prepared_model.h b/frameworks/native/prepared_model.h index 6574131..2d25f6f 100644 --- a/frameworks/native/prepared_model.h +++ b/frameworks/native/prepared_model.h @@ -34,6 +34,10 @@ public: const std::vector& outputs, std::vector>& outputsDims, std::vector& isOutputBufferEnough) = 0; + + virtual OH_NN_ReturnCode GetInputDimRanges( + std::vector>& minInputDims, + std::vector>& maxInputDims) { return OH_NN_OPERATION_FORBIDDEN; } }; } // OHOS } // namespace NeuralNetworkRuntime -- Gitee From 2af96e79281de14e2b2167839bab43bed0d2a0ed Mon Sep 17 00:00:00 2001 From: wangchuanxia Date: Thu, 16 Mar 2023 15:36:02 +0800 Subject: [PATCH 04/12] device discover and get input dim ranges Signed-off-by: wangchuanxia --- frameworks/BUILD.gn | 2 + ...hdi_prepared_model.h => device_discover.h} | 39 +---- frameworks/native/device_discover_v1_0.cpp | 55 ++++++ frameworks/native/device_discover_v2_0.cpp | 55 ++++++ frameworks/native/device_manager.cpp | 32 ++++ frameworks/native/device_manager.h | 4 + frameworks/native/execution_plan.cpp | 15 +- frameworks/native/execution_plan.h | 5 +- frameworks/native/executor.cpp | 32 +++- frameworks/native/hdi_device_v1_0.cpp | 20 --- frameworks/native/hdi_device_v2_0.cpp | 42 ++--- frameworks/native/hdi_prepared_model.cpp | 163 ------------------ frameworks/native/hdi_prepared_model_v2_0.cpp | 8 +- 13 files changed, 207 insertions(+), 265 deletions(-) rename frameworks/native/{hdi_prepared_model.h => device_discover.h} (39%) create mode 100644 frameworks/native/device_discover_v1_0.cpp create mode 100644 frameworks/native/device_discover_v2_0.cpp delete mode 100644 frameworks/native/hdi_prepared_model.cpp diff --git a/frameworks/BUILD.gn b/frameworks/BUILD.gn index f509649..f677512 100644 --- a/frameworks/BUILD.gn +++ b/frameworks/BUILD.gn @@ -23,6 +23,8 @@ nnrt_sources = [ "native/device_registrar.cpp", "native/execution_plan.cpp", "native/executor.cpp", + "native/device_discover_v1_0.cpp", + "native/device_discover_v2_0.cpp", "native/hdi_device_v1_0.cpp", "native/hdi_device_v2_0.cpp", "native/hdi_prepared_model_v1_0.cpp", diff --git a/frameworks/native/hdi_prepared_model.h b/frameworks/native/device_discover.h similarity index 39% rename from frameworks/native/hdi_prepared_model.h rename to frameworks/native/device_discover.h index 8d381c4..fd86f1f 100644 --- a/frameworks/native/hdi_prepared_model.h +++ b/frameworks/native/device_discover.h @@ -13,40 +13,19 @@ * limitations under the 
License. */ +#ifndef NEURAL_NETWORK_RUNTIME_DEVICE_DISCOVER_H +#define NEURAL_NETWORK_RUNTIME_DEVICE_DISCOVER_H -#ifndef NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_H -#define NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_H +#include +#include -#include - -#include "refbase.h" -#include "prepared_model.h" -#include "cpp_type.h" -#include -#include -#include +#include "device.h" namespace OHOS { namespace NeuralNetworkRuntime { +std::shared_ptr DiscoverHDIDevicesV1_0(std::string& deviceName, std::string& vendorName, std::string& version); +std::shared_ptr DiscoverHDIDevicesV2_0(std::string& deviceName, std::string& vendorName, std::string& version); -namespace V1_0 = OHOS::HDI::Nnrt::V1_0; - -class HDIPreparedModel : public PreparedModel { -public: - explicit HDIPreparedModel(OHOS::sptr hdiPreparedModel); - - OH_NN_ReturnCode ExportModelCache(std::vector& modelCache) override; - - OH_NN_ReturnCode Run(const std::vector& inputs, - const std::vector& outputs, - std::vector>& outputsDims, - std::vector& isOutputBufferEnough) override; - -private: - // first: major version, second: minor version - std::pair m_hdiVersion; - OHOS::sptr m_hdiPreparedModel {nullptr}; -}; } // namespace NeuralNetworkRuntime -} // OHOS -#endif // NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_H \ No newline at end of file +} // namespace OHOS +#endif // NEURAL_NETWORK_RUNTIME_DEVICE_DISCOVER_H \ No newline at end of file diff --git a/frameworks/native/device_discover_v1_0.cpp b/frameworks/native/device_discover_v1_0.cpp new file mode 100644 index 0000000..fded72c --- /dev/null +++ b/frameworks/native/device_discover_v1_0.cpp @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "device_discover.h" +#include "hdi_device_v1_0.h" +#include "common/log.h" +#include "common/utils.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +std::shared_ptr DiscoverHDIDevicesV1_0(std::string& deviceName, std::string& vendorName, std::string& version) +{ + // only one device from HDI now. + OHOS::sptr iDevice = V1_0::INnrtDevice::Get(); + if (iDevice == nullptr) { + LOGW("Get HDI device failed."); + return; + } + + auto hdiRet = iDevice->GetDeviceName(deviceName); + if (hdiRet != HDF_SUCCESS) { + LOGW("Get device name failed. ErrorCode=%d", hdiRet); + return; + } + hdiRet = iDevice->GetVendorName(vendorName); + if (hdiRet != HDF_SUCCESS) { + LOGW("Get vendor name failed. ErrorCode=%d", hdiRet); + return; + } + hdiRet = iDevice->GetVersion(version); + if (hdiRet != HDF_SUCCESS) { + LOGW("Get version failed. 
ErrorCode=%d", hdiRet); + return; + } + + std::shared_ptr device = CreateSharedPtr(iDevice); + if (device == nullptr) { + LOGW("Failed to register device, because fail to create device instance."); + } + return device; +} +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/device_discover_v2_0.cpp b/frameworks/native/device_discover_v2_0.cpp new file mode 100644 index 0000000..36b0871 --- /dev/null +++ b/frameworks/native/device_discover_v2_0.cpp @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "device_discover.h" +#include "hdi_device_v2_0.h" +#include "common/log.h" +#include "common/utils.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +std::shared_ptr DiscoverHDIDevicesV2_0(std::string& deviceName, std::string& vendorName, std::string& version) +{ + // only one device from HDI now. + OHOS::sptr iDevice = V2_0::INnrtDevice::Get(); + if (iDevice == nullptr) { + LOGW("Get HDI device failed."); + return; + } + + auto hdiRet = iDevice->GetDeviceName(deviceName); + if (hdiRet != HDF_SUCCESS) { + LOGW("Get device name failed. ErrorCode=%d", hdiRet); + return; + } + hdiRet = iDevice->GetVendorName(vendorName); + if (hdiRet != HDF_SUCCESS) { + LOGW("Get vendor name failed. ErrorCode=%d", hdiRet); + return; + } + hdiRet = iDevice->GetVersion(version); + if (hdiRet != HDF_SUCCESS) { + LOGW("Get version failed. ErrorCode=%d", hdiRet); + return; + } + + std::shared_ptr device = CreateSharedPtr(iDevice); + if (device == nullptr) { + LOGW("Failed to register device, because fail to create device instance."); + } + return device; +} +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/device_manager.cpp b/frameworks/native/device_manager.cpp index 25961e1..bb45963 100644 --- a/frameworks/native/device_manager.cpp +++ b/frameworks/native/device_manager.cpp @@ -14,6 +14,7 @@ */ #include "device_manager.h" +#include "device_discover.h" #include "common/log.h" #include "common/utils.h" @@ -132,6 +133,37 @@ OH_NN_ReturnCode DeviceManager::RegisterDevice(std::function device) +{ + std::string uniqueName = GenUniqueName(deviceName, vendorName, version); + const std::lock_guard lock(m_mtx); + auto setResult = m_uniqueName.emplace(uniqueName); + if (!setResult.second) { + LOGW("Device already exists, cannot register again. 
deviceName=%s, vendorName=%s", + deviceName.c_str(), vendorName.c_str()); + return; + } + + m_devices.emplace(std::hash{}(uniqueName), device); +} + +void DiscoverHDIDevices() +{ + std::string deviceName; + std::string vendorName; + std::string version; + std::shared_ptr deviceV1_0 = DiscoverHDIDevicesV1_0(deviceName, vendorName, version); + if (deviceV1_0 != nullptr) { + AddDevice(deviceName, vendorName, version, deviceV1_0); + } + + std::shared_ptr deviceV2_0 = DiscoverHDIDevicesV2_0(deviceName, vendorName, version); + if (deviceV2_0 != nullptr) { + AddDevice(deviceName, vendorName, version, deviceV2_0); + } +} + bool DeviceManager::IsValidDevice(std::shared_ptr device) const { DeviceStatus status {DeviceStatus::UNKNOWN}; diff --git a/frameworks/native/device_manager.h b/frameworks/native/device_manager.h index 2885a5d..4d8b9fb 100644 --- a/frameworks/native/device_manager.h +++ b/frameworks/native/device_manager.h @@ -40,6 +40,7 @@ public: static DeviceManager& GetInstance() { static DeviceManager instance; + instance.DiscoverHDIDevices(); return instance; } @@ -48,6 +49,9 @@ private: DeviceManager(const DeviceManager&) = delete; DeviceManager& operator=(const DeviceManager&) = delete; + void AddDevice(const std::string& deviceName, const std::string& vendorName, + const std::string& version, std::shared_ptr device); + void DiscoverHDIDevices(); std::string GenUniqueName( const std::string& deviceName, const std::string& vendorName, const std::string& version) const; bool IsValidDevice(std::shared_ptr device) const; diff --git a/frameworks/native/execution_plan.cpp b/frameworks/native/execution_plan.cpp index 20c4322..5199199 100644 --- a/frameworks/native/execution_plan.cpp +++ b/frameworks/native/execution_plan.cpp @@ -23,15 +23,14 @@ namespace OHOS { namespace NeuralNetworkRuntime { -OH_NN_ReturnCode ExecutionPlan::GetInputDimRanges(uint32_t index, std::vector& minInputDims, - std::vector& maxInputDims) +OH_NN_ReturnCode ExecutionPlan::GetInputDimRanges(std::vector>& minInputDims, + std::vector>& maxInputDims) { - // todo - // OH_NN_ReturnCode ret = m_preparedModel->GetInputDimRanges(index, minInputDims, maxInputDims); - // if (ret != OH_NN_SUCCESS) { - // LOGE("ExecutionPlan GetInputDimRanges() failed."); - // return ret; - // } + OH_NN_ReturnCode ret = m_preparedModel->GetInputDimRanges(minInputDims, maxInputDims); + if (ret != OH_NN_SUCCESS) { + LOGE("ExecutionPlan GetInputDimRanges() failed."); + return ret; + } return OH_NN_SUCCESS; } diff --git a/frameworks/native/execution_plan.h b/frameworks/native/execution_plan.h index 0ec9c93..54f4648 100644 --- a/frameworks/native/execution_plan.h +++ b/frameworks/native/execution_plan.h @@ -30,9 +30,8 @@ public: : m_preparedModel(preparedModel), m_device(device) {}; - OH_NN_ReturnCode GetInputDimRanges(uint32_t index, - std::vector& minInputDims, - std::vector& maxInputDims); + OH_NN_ReturnCode GetInputDimRanges(std::vector>& minInputDims, + std::vector>& maxInputDims); OH_NN_ReturnCode Run(const std::vector>& inputTensors, std::vector>& outputTensors); diff --git a/frameworks/native/executor.cpp b/frameworks/native/executor.cpp index be21e80..93670eb 100644 --- a/frameworks/native/executor.cpp +++ b/frameworks/native/executor.cpp @@ -116,30 +116,44 @@ void Executor::SetInputTensorWithNewBuffer(uint32_t index, OH_NN_ReturnCode Executor::CheckInputDimRanges(uint32_t index, const OH_NN_Tensor& nnTensor) const { - std::vector minInputDims; - std::vector maxInputDims; - auto ret = m_executionPlan->GetInputDimRanges(index, minInputDims, 
maxInputDims); + std::vector> minInputDims; + std::vector> maxInputDims; + auto ret = m_executionPlan->GetInputDimRanges(minInputDims, maxInputDims); if (ret != OH_NN_SUCCESS) { LOGE("Get the dimension ranges of input %u failed. ErrorCode=%d", index, ret); return ret; } + if (index >= minInputDims.size()) { + LOGE("index is %u, which exceeds the size of minInputDims:%zu.", index, minInputDims.size()); + return OH_NN_INVALID_PARAMETER; + } + + if (index >= maxInputDims.size()) { + LOGE("index is %u, which exceeds the size of maxInputDims:%zu.", index, maxInputDims.size()); + return OH_NN_INVALID_PARAMETER; + } + + const std::vector& minSingleInputDims = minInputDims[index]; + const std::vector& maxSingleInputDims = maxInputDims[index]; + std::vector tensorShape = ConstructVectorFromArray(nnTensor.dimensions, nnTensor.dimensionCount); - if (minInputDims.size() != maxInputDims.size() && maxInputDims.size() != tensorShape.size()) { - LOGE("Size of minInputDims, maxInputDims and tensorShape of input %u are not equal.", index); + size_t tensorShapeSize = tensorShape.size(); + if (minSingleInputDims.size() != tensorShapeSize || maxSingleInputDims.size() != tensorShapeSize) { + LOGE("Size of minSingleInputDims, maxSingleInputDims and tensorShape of input %u are not equal.", index); return OH_NN_INVALID_PARAMETER; } - for (size_t j = 0; j < tensorShape.size(); ++j) { + for (size_t j = 0; j < tensorShapeSize; ++j) { // Dimensions cannot be negative if (tensorShape[j] < 0) { LOGE("Dimension %zu of input %u is %d.", j, index, tensorShape[j]); return OH_NN_INVALID_PARAMETER; } uint32_t dim = static_cast(tensorShape[j]); - if (dim < minInputDims[j] || dim > maxInputDims[j]) { - LOGE("The %zuth dimension of the %uth input is %u, which is out of range [%u, %u]", - j, index, dim, minInputDims[j], maxInputDims[j]); + if (dim < minSingleInputDims[j] || dim > maxSingleInputDims[j]) { + LOGE("Dimension %zu of input %u is %u, which is out of range [%u, %u]", + j, index, dim, minSingleInputDims[j], maxSingleInputDims[j]); return OH_NN_INVALID_PARAMETER; } } diff --git a/frameworks/native/hdi_device_v1_0.cpp b/frameworks/native/hdi_device_v1_0.cpp index d502d29..721103b 100644 --- a/frameworks/native/hdi_device_v1_0.cpp +++ b/frameworks/native/hdi_device_v1_0.cpp @@ -18,7 +18,6 @@ #include "hdf_base.h" #include "mindir.h" -#include "device_registrar.h" #include "hdi_prepared_model_v1_0.h" #include "memory_manager.h" #include "transform.h" @@ -394,24 +393,5 @@ OH_NN_ReturnCode HDIDeviceV1_0::ReleaseSharedBuffer(const V1_0::SharedBuffer& bu } return OH_NN_SUCCESS; } - -std::shared_ptr HDIDeviceV1_0Creator() -{ - // only one device from HDI now - OHOS::sptr iDevice = V1_0::INnrtDevice::Get(); - if (iDevice == nullptr) { - LOGE("Get HDI device failed."); - return nullptr; - } - - std::shared_ptr device = CreateSharedPtr(iDevice); - if (device == nullptr) { - LOGE("Create device failed."); - } - - return device; -} - -REGISTER_DEVICE(DeviceV1_0, VendorV1_0, HDIDeviceV1_0Creator) } // namespace NeuralNetworkRuntime } // namespace OHOS diff --git a/frameworks/native/hdi_device_v2_0.cpp b/frameworks/native/hdi_device_v2_0.cpp index 32fe5d0..1ba9f9d 100644 --- a/frameworks/native/hdi_device_v2_0.cpp +++ b/frameworks/native/hdi_device_v2_0.cpp @@ -18,8 +18,7 @@ #include "hdf_base.h" #include "mindir.h" -#include "device_registrar.h" -#include "prepared_model.h" // todo +#include "hdi_prepared_model_v2_0.h" #include "memory_manager.h" #include "transform.h" #include "common/log.h" @@ -280,11 +279,11 @@ OH_NN_ReturnCode 
HDIDeviceV2_0::PrepareModel(std::shared_ptr(iPreparedModel); // todo - // if (preparedModel == nullptr) { - // LOGE("Prepare model failed, because fail to create preparedModel instance."); - // return OH_NN_MEMORY_ERROR; - // } + preparedModel = CreateSharedPtr(iPreparedModel); + if (preparedModel == nullptr) { + LOGE("Prepare model failed, because fail to create preparedModel instance."); + return OH_NN_MEMORY_ERROR; + } return OH_NN_SUCCESS; } @@ -319,11 +318,11 @@ OH_NN_ReturnCode HDIDeviceV2_0::PrepareModelFromModelCache(const std::vector(iPreparedModel); // todo - // if (preparedModel == nullptr) { - // LOGE("Prepare model from model cache failed, because fail to create preparedModel instance."); - // return OH_NN_MEMORY_ERROR; - // } + preparedModel = CreateSharedPtr(iPreparedModel); + if (preparedModel == nullptr) { + LOGE("Prepare model from model cache failed, because fail to create preparedModel instance."); + return OH_NN_MEMORY_ERROR; + } return OH_NN_SUCCESS; } @@ -394,24 +393,5 @@ OH_NN_ReturnCode HDIDeviceV2_0::ReleaseSharedBuffer(const V2_0::SharedBuffer& bu } return OH_NN_SUCCESS; } - -std::shared_ptr HDIDeviceV2_0Creator() -{ - // only one device from HDI now - OHOS::sptr iDevice = V2_0::INnrtDevice::Get(); - if (iDevice == nullptr) { - LOGE("Get HDI device failed."); - return nullptr; - } - - std::shared_ptr device = CreateSharedPtr(iDevice); - if (device == nullptr) { - LOGE("Create device failed."); - } - - return device; -} - -REGISTER_DEVICE(DeviceV2_0, VendorV2_0, HDIDeviceV2_0Creator) } // namespace NeuralNetworkRuntime } // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/hdi_prepared_model.cpp b/frameworks/native/hdi_prepared_model.cpp deleted file mode 100644 index a8ac6fc..0000000 --- a/frameworks/native/hdi_prepared_model.cpp +++ /dev/null @@ -1,163 +0,0 @@ -/* - * Copyright (c) 2022 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "hdi_prepared_model.h" - -#include "common/log.h" -#include "memory_manager.h" -#include "transform.h" - -namespace OHOS { -namespace NeuralNetworkRuntime { -namespace { -V1_0::DataType TransDataType(const OH_NN_DataType& dataType) -{ - switch (dataType) { - case OH_NN_BOOL: - return V1_0::DataType::DATA_TYPE_BOOL; - case OH_NN_INT8: - return V1_0::DataType::DATA_TYPE_INT8; - case OH_NN_INT16: - return V1_0::DataType::DATA_TYPE_INT16; - case OH_NN_INT32: - return V1_0::DataType::DATA_TYPE_INT32; - case OH_NN_INT64: - return V1_0::DataType::DATA_TYPE_INT64; - case OH_NN_UINT8: - return V1_0::DataType::DATA_TYPE_UINT8; - case OH_NN_UINT16: - return V1_0::DataType::DATA_TYPE_UINT16; - case OH_NN_UINT32: - return V1_0::DataType::DATA_TYPE_UINT32; - case OH_NN_UINT64: - return V1_0::DataType::DATA_TYPE_UINT64; - case OH_NN_FLOAT16: - return V1_0::DataType::DATA_TYPE_FLOAT16; - case OH_NN_FLOAT32: - return V1_0::DataType::DATA_TYPE_FLOAT32; - case OH_NN_FLOAT64: - return V1_0::DataType::DATA_TYPE_FLOAT64; - default: - return V1_0::DataType::DATA_TYPE_UNKNOWN; - } -} - -V1_0::Format TransFormat(const OH_NN_Format& format) -{ - switch (format) { - case OH_NN_FORMAT_NCHW: - return V1_0::Format::FORMAT_NCHW; - case OH_NN_FORMAT_NHWC: - return V1_0::Format::FORMAT_NHWC; - default: - return V1_0::Format::FORMAT_NONE; - } -} - -V1_0::IOTensor TransIOTensor(const IOTensor& tensor) -{ - V1_0::IOTensor iTensor; - iTensor.name = tensor.name; - iTensor.dataType = TransDataType(tensor.dataType); - iTensor.dimensions = tensor.dimensions; - iTensor.format = TransFormat(tensor.format); - - V1_0::SharedBuffer iBuffer {INVALID_FD, 0, 0, 0}; - if (tensor.data != nullptr) { - auto memManager = MemoryManager::GetInstance(); - Memory memory; - auto ret = memManager->GetMemory(tensor.data, memory); - if (ret != OH_NN_SUCCESS) { - LOGE("Invalid Tensor buffer, cannot transform to fd."); - } else { - iBuffer.fd = memory.fd; - iBuffer.bufferSize = memory.length; - iBuffer.offset = 0; - iBuffer.dataSize = memory.length; - } - } - iTensor.data = iBuffer; - - return iTensor; -} -} -HDIPreparedModel::HDIPreparedModel(OHOS::sptr hdiPreparedModel) - : m_hdiPreparedModel(hdiPreparedModel) -{ - hdiPreparedModel->GetVersion(m_hdiVersion.first, m_hdiVersion.second); -} - -OH_NN_ReturnCode HDIPreparedModel::ExportModelCache(std::vector& modelCache) -{ - if (!modelCache.empty()) { - LOGE("The vector of modelCache should be empty. size=%zu", modelCache.size()); - return OH_NN_INVALID_PARAMETER; - } - - std::vector iBuffers; - auto ret = m_hdiPreparedModel->ExportModelCache(iBuffers); - if (ret != HDF_SUCCESS) { - LOGE("Export model cache failed. 
ErrorCode=%d", ret);
-        return OH_NN_UNAVALIDABLE_DEVICE;
-    }
-
-    auto memManager = MemoryManager::GetInstance();
-    for (size_t i = 0; i < iBuffers.size(); i++) {
-        auto addr = memManager->MapMemory(iBuffers[i].fd, iBuffers[i].bufferSize);
-        if (addr == nullptr) {
-            LOGE("Export the %zuth model cache failed, cannot not map fd to address.", i + 1);
-            return OH_NN_MEMORY_ERROR;
-        }
-        ModelBuffer modelbuffer {addr, iBuffers[i].bufferSize};
-        modelCache.emplace_back(modelbuffer);
-    }
-
-    return OH_NN_SUCCESS;
-}
-
-OH_NN_ReturnCode HDIPreparedModel::Run(const std::vector& inputs, const std::vector& outputs,
-    std::vector>& outputsDims, std::vector& isOutputBufferEnough)
-{
-    V1_0::IOTensor iTensor;
-    std::vector iInputTensors;
-    for (auto& input: inputs) {
-        iTensor = TransIOTensor(input);
-        if (iTensor.data.fd == INVALID_FD) {
-            LOGE("Transform inputs tensor failed, cannot find data file descriptor.");
-            return OH_NN_INVALID_PARAMETER;
-        }
-        iInputTensors.emplace_back(iTensor);
-    }
-
-    std::vector iOutputTensors;
-    for (auto& output: outputs) {
-        iTensor = TransIOTensor(output);
-        if (iTensor.data.fd == INVALID_FD) {
-            LOGE("Transform outputs tensor failed, cannot find data file descriptor.");
-            return OH_NN_INVALID_PARAMETER;
-        }
-        iOutputTensors.emplace_back(iTensor);
-    }
-
-    auto ret = m_hdiPreparedModel->Run(iInputTensors, iOutputTensors, outputsDims, isOutputBufferEnough);
-    if (ret != HDF_SUCCESS || outputsDims.empty()) {
-        LOGE("Run model failed. ErrorCode=%d", ret);
-        return OH_NN_UNAVALIDABLE_DEVICE;
-    }
-
-    return OH_NN_SUCCESS;
-}
-} // namespace NeuralNetworkRuntime
-} // OHOS
\ No newline at end of file
diff --git a/frameworks/native/hdi_prepared_model_v2_0.cpp b/frameworks/native/hdi_prepared_model_v2_0.cpp
index 406a454..d9c6bc2 100644
--- a/frameworks/native/hdi_prepared_model_v2_0.cpp
+++ b/frameworks/native/hdi_prepared_model_v2_0.cpp
@@ -163,7 +163,13 @@ OH_NN_ReturnCode HDIPreparedModelV2_0::Run(const std::vector& inputs,
 OH_NN_ReturnCode HDIPreparedModelV2_0::GetInputDimRanges(std::vector>& minInputDims,
     std::vector>& maxInputDims)
 {
-    return OH_NN_OPERATION_FORBIDDEN;
+    auto ret = m_hdiPreparedModel->GetInputDimRanges(minInputDims, maxInputDims);
+    if (ret != HDF_SUCCESS) {
+        LOGE("GetInputDimRanges failed. ErrorCode=%d", ret);
+        return OH_NN_UNAVALIDABLE_DEVICE;
+    }
+
+    return OH_NN_SUCCESS;
+}
 } // namespace NeuralNetworkRuntime
 } // OHOS
\ No newline at end of file
-- 
Gitee
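[Editor's note] Patch 04 above adds DiscoverHDIDevicesV1_0 and DiscoverHDIDevicesV2_0 with early exits written as bare "return;", even though both functions are declared to return std::shared_ptr<Device>; that is ill-formed C++, and patch 05 below rewrites every early exit as "return nullptr;". A minimal sketch of the corrected contract, using hypothetical stand-in types rather than the project's real headers:

    // Sketch only: Device and the driver probe below are stand-ins, not the
    // real NeuralNetworkRuntime types.
    #include <memory>
    #include <string>

    struct Device {};  // placeholder for the runtime's Device interface

    std::shared_ptr<Device> DiscoverSketch(std::string& deviceName)
    {
        const bool driverPresent = false;  // imagine INnrtDevice::Get() here
        if (!driverPresent) {
            // A bare "return;" cannot compile in a function returning
            // std::shared_ptr<Device>; nullptr keeps the "no device found"
            // signal that the caller checks.
            return nullptr;
        }
        deviceName = "example_device";  // hypothetical name a driver would fill in
        return std::make_shared<Device>();
    }

The caller side (DeviceManager::DiscoverHDIDevices in patch 04) only registers a device when the returned pointer is non-null, which is why nullptr, not a thrown error, is the right failure value here.
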
From f4eee782a0a95242c465dabf34a230891333b55d Mon Sep 17 00:00:00 2001
From: wangchuanxia
Date: Thu, 16 Mar 2023 15:59:16 +0800
Subject: [PATCH 05/12] bugfix

Signed-off-by: wangchuanxia

---
 frameworks/native/device_discover_v1_0.cpp | 12 +++++++-----
 frameworks/native/device_discover_v2_0.cpp | 12 +++++++-----
 frameworks/native/device_manager.cpp       |  2 +-
 3 files changed, 15 insertions(+), 11 deletions(-)

diff --git a/frameworks/native/device_discover_v1_0.cpp b/frameworks/native/device_discover_v1_0.cpp
index fded72c..e5f802e 100644
--- a/frameworks/native/device_discover_v1_0.cpp
+++ b/frameworks/native/device_discover_v1_0.cpp
@@ -26,24 +26,26 @@ std::shared_ptr DiscoverHDIDevicesV1_0(std::string& deviceName, std::str
     OHOS::sptr iDevice = V1_0::INnrtDevice::Get();
     if (iDevice == nullptr) {
         LOGW("Get HDI device failed.");
-        return;
+        return nullptr;
     }
 
     auto hdiRet = iDevice->GetDeviceName(deviceName);
     if (hdiRet != HDF_SUCCESS) {
         LOGW("Get device name failed. ErrorCode=%d", hdiRet);
-        return;
+        return nullptr;
     }
     hdiRet = iDevice->GetVendorName(vendorName);
     if (hdiRet != HDF_SUCCESS) {
         LOGW("Get vendor name failed. ErrorCode=%d", hdiRet);
-        return;
+        return nullptr;
     }
-    hdiRet = iDevice->GetVersion(version);
+    std::pair hdiVersion;
+    hdiRet = iDevice->GetVersion(hdiVersion.first, hdiVersion.second);
     if (hdiRet != HDF_SUCCESS) {
         LOGW("Get version failed. ErrorCode=%d", hdiRet);
-        return;
+        return nullptr;
     }
+    version = 'v' + std::to_string(hdiVersion.first) + '_' + std::to_string(hdiVersion.second);
 
     std::shared_ptr device = CreateSharedPtr(iDevice);
     if (device == nullptr) {
diff --git a/frameworks/native/device_discover_v2_0.cpp b/frameworks/native/device_discover_v2_0.cpp
index 36b0871..de5e8b7 100644
--- a/frameworks/native/device_discover_v2_0.cpp
+++ b/frameworks/native/device_discover_v2_0.cpp
@@ -26,24 +26,26 @@ std::shared_ptr DiscoverHDIDevicesV2_0(std::string& deviceName, std::str
     OHOS::sptr iDevice = V2_0::INnrtDevice::Get();
     if (iDevice == nullptr) {
         LOGW("Get HDI device failed.");
-        return;
+        return nullptr;
     }
 
     auto hdiRet = iDevice->GetDeviceName(deviceName);
     if (hdiRet != HDF_SUCCESS) {
         LOGW("Get device name failed. ErrorCode=%d", hdiRet);
-        return;
+        return nullptr;
     }
     hdiRet = iDevice->GetVendorName(vendorName);
     if (hdiRet != HDF_SUCCESS) {
         LOGW("Get vendor name failed. ErrorCode=%d", hdiRet);
-        return;
+        return nullptr;
     }
-    hdiRet = iDevice->GetVersion(version);
+    std::pair hdiVersion;
+    hdiRet = iDevice->GetVersion(hdiVersion.first, hdiVersion.second);
     if (hdiRet != HDF_SUCCESS) {
         LOGW("Get version failed. ErrorCode=%d", hdiRet);
-        return;
+        return nullptr;
     }
+    version = 'v' + std::to_string(hdiVersion.first) + '_' + std::to_string(hdiVersion.second);
 
     std::shared_ptr device = CreateSharedPtr(iDevice);
     if (device == nullptr) {
diff --git a/frameworks/native/device_manager.cpp b/frameworks/native/device_manager.cpp
index bb45963..75ac674 100644
--- a/frameworks/native/device_manager.cpp
+++ b/frameworks/native/device_manager.cpp
@@ -148,7 +148,7 @@ void DeviceManager::AddDevice(const std::string& deviceName, const std::string&
     m_devices.emplace(std::hash{}(uniqueName), device);
 }
 
-void DiscoverHDIDevices()
+void DeviceManager::DiscoverHDIDevices()
 {
     std::string deviceName;
     std::string vendorName;
-- 
Gitee
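[Editor's note] With patch 05 in place, GetVersion reports a major/minor pair, the discovery helpers fold it into a "v<major>_<minor>" tag, and DeviceManager::AddDevice hashes the combined device/vendor/version name into its device table. A self-contained sketch of that composition follows; GenUniqueNameSketch and its "_" separator are illustrative assumptions, not taken from the patch:

    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <string>

    // Hypothetical stand-in for DeviceManager::GenUniqueName(); only the
    // three-argument shape is taken from the patch, the separator is assumed.
    std::string GenUniqueNameSketch(const std::string& deviceName,
                                    const std::string& vendorName,
                                    const std::string& version)
    {
        return deviceName + "_" + vendorName + "_" + version;
    }

    int main()
    {
        uint32_t major = 2;
        uint32_t minor = 0;  // as if INnrtDevice::GetVersion(major, minor) succeeded
        const std::string version = 'v' + std::to_string(major) + '_' + std::to_string(minor);

        // AddDevice keys m_devices by std::hash<std::string> of the unique name,
        // so the same device/vendor/version triple always maps to the same id.
        const std::string uniqueName = GenUniqueNameSketch("cpu", "example_vendor", version);
        const std::size_t key = std::hash<std::string>{}(uniqueName);
        std::cout << uniqueName << " -> " << key << std::endl;
        return 0;
    }

Because V1.0 and V2.0 drivers report different versions, the tag keeps both registrations distinct even when device and vendor names collide, which is what lets patch 06 ship side-by-side v1_0 and v2_0 example drivers.
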
From 9522117f5808712f845da3b74f7a776a7834ef46 Mon Sep 17 00:00:00 2001
From: weiwei
Date: Thu, 16 Mar 2023 19:45:51 +0800
Subject: [PATCH 06/12] Add a simulated driver service for HDI V2.0
 1. Move the HDI V1.0 simulated driver service under example/drivers into the
    nnrt/v1_0 subdirectory;
 2. Add a simulated driver service for HDI V2.0 under example/drivers, placed
    in the nnrt/v2_0 subdirectory;
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: weiwei
---
 example/drivers/nnrt/{ => v1_0}/BUILD.gn      |   0
 .../nnrt/{ => v1_0}/hdi_cpu_service/BUILD.gn  |   0
 .../include/nnrt_device_service.h             |   0
 .../hdi_cpu_service/include/node_functions.h  |   0
 .../hdi_cpu_service/include/node_registry.h   |   0
 .../include/prepared_model_service.h          |   0
 .../include/shared_buffer_parser.h            |   0
 .../hdi_cpu_service/include/validation.h      |   0
 .../src/nnrt_device_driver.cpp                |   0
 .../src/nnrt_device_service.cpp               |   0
 .../hdi_cpu_service/src/node_functions.cpp    |   0
 .../hdi_cpu_service/src/node_registry.cpp     |   0
 .../src/prepared_model_service.cpp            |   0
 .../src/shared_buffer_parser.cpp              |   0
 .../hdi_cpu_service/src/validation.cpp        |   0
 example/drivers/nnrt/v2_0/BUILD.gn            |  24 +
 .../nnrt/v2_0/hdi_cpu_service/BUILD.gn        |  90 +++
 .../include/nnrt_device_service.h             |  88 +++
 .../hdi_cpu_service/include/node_functions.h  |  71 +++
 .../hdi_cpu_service/include/node_registry.h   |  57 ++
 .../include/prepared_model_service.h          |  80 +++
 .../include/shared_buffer_parser.h            |  49 ++
 .../v2_0/hdi_cpu_service/include/validation.h |  33 ++
 .../src/nnrt_device_driver.cpp                | 115 ++++
 .../src/nnrt_device_service.cpp               | 529 ++++++++++++++++++
 .../hdi_cpu_service/src/node_functions.cpp    | 373 ++++++++++++
 .../hdi_cpu_service/src/node_registry.cpp     |  60 ++
 .../src/prepared_model_service.cpp            | 461 +++++++++++++++
 .../src/shared_buffer_parser.cpp              | 104 ++++
 .../v2_0/hdi_cpu_service/src/validation.cpp   |  72 +++
 30 files changed, 2206 insertions(+)
 rename example/drivers/nnrt/{ => v1_0}/BUILD.gn (100%)
 rename example/drivers/nnrt/{ => v1_0}/hdi_cpu_service/BUILD.gn (100%)
 rename example/drivers/nnrt/{ => v1_0}/hdi_cpu_service/include/nnrt_device_service.h (100%)
 rename example/drivers/nnrt/{ => v1_0}/hdi_cpu_service/include/node_functions.h (100%)
 rename example/drivers/nnrt/{ => v1_0}/hdi_cpu_service/include/node_registry.h (100%)
 rename example/drivers/nnrt/{ => v1_0}/hdi_cpu_service/include/prepared_model_service.h (100%)
 rename example/drivers/nnrt/{ => v1_0}/hdi_cpu_service/include/shared_buffer_parser.h (100%)
 rename example/drivers/nnrt/{ => v1_0}/hdi_cpu_service/include/validation.h (100%)
 rename example/drivers/nnrt/{ => v1_0}/hdi_cpu_service/src/nnrt_device_driver.cpp (100%)
 rename example/drivers/nnrt/{ => v1_0}/hdi_cpu_service/src/nnrt_device_service.cpp (100%)
 rename example/drivers/nnrt/{ => v1_0}/hdi_cpu_service/src/node_functions.cpp (100%)
 rename example/drivers/nnrt/{ => v1_0}/hdi_cpu_service/src/node_registry.cpp (100%)
 rename example/drivers/nnrt/{ => v1_0}/hdi_cpu_service/src/prepared_model_service.cpp (100%)
 rename example/drivers/nnrt/{ => v1_0}/hdi_cpu_service/src/shared_buffer_parser.cpp (100%)
 rename example/drivers/nnrt/{ => v1_0}/hdi_cpu_service/src/validation.cpp (100%)
 create mode 100644 example/drivers/nnrt/v2_0/BUILD.gn
 create mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/BUILD.gn
 create mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/include/nnrt_device_service.h
 create mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/include/node_functions.h
 create mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/include/node_registry.h
 create mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/include/prepared_model_service.h
 create mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/include/shared_buffer_parser.h
 create mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/include/validation.h
 create mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/src/nnrt_device_driver.cpp
 create mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/src/nnrt_device_service.cpp
 create mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/src/node_functions.cpp
 create mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/src/node_registry.cpp
 create mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/src/prepared_model_service.cpp
 create mode 100644 
example/drivers/nnrt/v2_0/hdi_cpu_service/src/shared_buffer_parser.cpp create mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/src/validation.cpp diff --git a/example/drivers/nnrt/BUILD.gn b/example/drivers/nnrt/v1_0/BUILD.gn similarity index 100% rename from example/drivers/nnrt/BUILD.gn rename to example/drivers/nnrt/v1_0/BUILD.gn diff --git a/example/drivers/nnrt/hdi_cpu_service/BUILD.gn b/example/drivers/nnrt/v1_0/hdi_cpu_service/BUILD.gn similarity index 100% rename from example/drivers/nnrt/hdi_cpu_service/BUILD.gn rename to example/drivers/nnrt/v1_0/hdi_cpu_service/BUILD.gn diff --git a/example/drivers/nnrt/hdi_cpu_service/include/nnrt_device_service.h b/example/drivers/nnrt/v1_0/hdi_cpu_service/include/nnrt_device_service.h similarity index 100% rename from example/drivers/nnrt/hdi_cpu_service/include/nnrt_device_service.h rename to example/drivers/nnrt/v1_0/hdi_cpu_service/include/nnrt_device_service.h diff --git a/example/drivers/nnrt/hdi_cpu_service/include/node_functions.h b/example/drivers/nnrt/v1_0/hdi_cpu_service/include/node_functions.h similarity index 100% rename from example/drivers/nnrt/hdi_cpu_service/include/node_functions.h rename to example/drivers/nnrt/v1_0/hdi_cpu_service/include/node_functions.h diff --git a/example/drivers/nnrt/hdi_cpu_service/include/node_registry.h b/example/drivers/nnrt/v1_0/hdi_cpu_service/include/node_registry.h similarity index 100% rename from example/drivers/nnrt/hdi_cpu_service/include/node_registry.h rename to example/drivers/nnrt/v1_0/hdi_cpu_service/include/node_registry.h diff --git a/example/drivers/nnrt/hdi_cpu_service/include/prepared_model_service.h b/example/drivers/nnrt/v1_0/hdi_cpu_service/include/prepared_model_service.h similarity index 100% rename from example/drivers/nnrt/hdi_cpu_service/include/prepared_model_service.h rename to example/drivers/nnrt/v1_0/hdi_cpu_service/include/prepared_model_service.h diff --git a/example/drivers/nnrt/hdi_cpu_service/include/shared_buffer_parser.h b/example/drivers/nnrt/v1_0/hdi_cpu_service/include/shared_buffer_parser.h similarity index 100% rename from example/drivers/nnrt/hdi_cpu_service/include/shared_buffer_parser.h rename to example/drivers/nnrt/v1_0/hdi_cpu_service/include/shared_buffer_parser.h diff --git a/example/drivers/nnrt/hdi_cpu_service/include/validation.h b/example/drivers/nnrt/v1_0/hdi_cpu_service/include/validation.h similarity index 100% rename from example/drivers/nnrt/hdi_cpu_service/include/validation.h rename to example/drivers/nnrt/v1_0/hdi_cpu_service/include/validation.h diff --git a/example/drivers/nnrt/hdi_cpu_service/src/nnrt_device_driver.cpp b/example/drivers/nnrt/v1_0/hdi_cpu_service/src/nnrt_device_driver.cpp similarity index 100% rename from example/drivers/nnrt/hdi_cpu_service/src/nnrt_device_driver.cpp rename to example/drivers/nnrt/v1_0/hdi_cpu_service/src/nnrt_device_driver.cpp diff --git a/example/drivers/nnrt/hdi_cpu_service/src/nnrt_device_service.cpp b/example/drivers/nnrt/v1_0/hdi_cpu_service/src/nnrt_device_service.cpp similarity index 100% rename from example/drivers/nnrt/hdi_cpu_service/src/nnrt_device_service.cpp rename to example/drivers/nnrt/v1_0/hdi_cpu_service/src/nnrt_device_service.cpp diff --git a/example/drivers/nnrt/hdi_cpu_service/src/node_functions.cpp b/example/drivers/nnrt/v1_0/hdi_cpu_service/src/node_functions.cpp similarity index 100% rename from example/drivers/nnrt/hdi_cpu_service/src/node_functions.cpp rename to example/drivers/nnrt/v1_0/hdi_cpu_service/src/node_functions.cpp diff --git 
a/example/drivers/nnrt/hdi_cpu_service/src/node_registry.cpp b/example/drivers/nnrt/v1_0/hdi_cpu_service/src/node_registry.cpp similarity index 100% rename from example/drivers/nnrt/hdi_cpu_service/src/node_registry.cpp rename to example/drivers/nnrt/v1_0/hdi_cpu_service/src/node_registry.cpp diff --git a/example/drivers/nnrt/hdi_cpu_service/src/prepared_model_service.cpp b/example/drivers/nnrt/v1_0/hdi_cpu_service/src/prepared_model_service.cpp similarity index 100% rename from example/drivers/nnrt/hdi_cpu_service/src/prepared_model_service.cpp rename to example/drivers/nnrt/v1_0/hdi_cpu_service/src/prepared_model_service.cpp diff --git a/example/drivers/nnrt/hdi_cpu_service/src/shared_buffer_parser.cpp b/example/drivers/nnrt/v1_0/hdi_cpu_service/src/shared_buffer_parser.cpp similarity index 100% rename from example/drivers/nnrt/hdi_cpu_service/src/shared_buffer_parser.cpp rename to example/drivers/nnrt/v1_0/hdi_cpu_service/src/shared_buffer_parser.cpp diff --git a/example/drivers/nnrt/hdi_cpu_service/src/validation.cpp b/example/drivers/nnrt/v1_0/hdi_cpu_service/src/validation.cpp similarity index 100% rename from example/drivers/nnrt/hdi_cpu_service/src/validation.cpp rename to example/drivers/nnrt/v1_0/hdi_cpu_service/src/validation.cpp diff --git a/example/drivers/nnrt/v2_0/BUILD.gn b/example/drivers/nnrt/v2_0/BUILD.gn new file mode 100644 index 0000000..28ca28b --- /dev/null +++ b/example/drivers/nnrt/v2_0/BUILD.gn @@ -0,0 +1,24 @@ +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if (defined(ohos_lite)) { + group("nnrt_entry") { + deps = [ ] + } +} else { + group("nnrt_entry") { + deps = [ + "./hdi_cpu_service:hdf_nnrt_service", + ] + } +} \ No newline at end of file diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/BUILD.gn b/example/drivers/nnrt/v2_0/hdi_cpu_service/BUILD.gn new file mode 100644 index 0000000..003de74 --- /dev/null +++ b/example/drivers/nnrt/v2_0/hdi_cpu_service/BUILD.gn @@ -0,0 +1,90 @@ +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import("//build/ohos.gni") +import("//drivers/hdf_core/adapter/uhdf2/uhdf.gni") + +ohos_prebuilt_shared_library("mindspore_demo") { + source = "//drivers/peripheral/nnrt/v2_0/mindspore/mindspore/libmindspore-lite.huawei.so" + + install_images = [chipset_base_dir] + subsystem_name = "hdf" + part_name = "drivers_peripheral_nnrt" +} + +ohos_shared_library("libnnrt_device_service_2.0") { + include_dirs = [ + "//drivers/peripheral/nnrt/v2_0/hdi_cpu_service/include", + "//drivers/peripheral/nnrt/v2_0/mindspore", + "//third_party/flatbuffers/include", + "//commonlibrary/c_utils/base/include" + ] + sources = [ + "src/nnrt_device_service.cpp", + "src/prepared_model_service.cpp", + "src/node_registry.cpp", + "src/node_functions.cpp", + "src/shared_buffer_parser.cpp", + "src/validation.cpp" + ] + + deps = [ + "//drivers/interface/nnrt/v2_0:nnrt_idl_headers", + "//drivers/interface/nnrt/v2_0:libnnrt_stub_2.0", + ":mindspore_demo" + ] + + external_deps = [ + "hdf_core:libhdf_utils", + "hiviewdfx_hilog_native:libhilog", + "ipc:ipc_core", + "c_utils:utils" + ] + + install_images = [ chipset_base_dir ] + subsystem_name = "hdf" + part_name = "drivers_peripheral_nnrt" +} + +ohos_shared_library("libnnrt_driver") { + include_dirs = [] + sources = [ + "src/nnrt_device_driver.cpp" + ] + deps = [ + "//drivers/peripheral/nnrt/v2_0/hdi_cpu_service:libnnrt_device_service_2.0", + "//drivers/interface/nnrt/v2_0:libnnrt_stub_2.0" + ] + + external_deps = [ + "hdf_core:libhdf_host", + "hdf_core:libhdf_ipc_adapter", + "hdf_core:libhdf_utils", + "hiviewdfx_hilog_native:libhilog", + "ipc:ipc_core", + "c_utils:utils", + "hdf_core:libhdi" + ] + + install_images = [ chipset_base_dir ] + subsystem_name = "hdf" + part_name = "drivers_peripheral_nnrt" +} + +group("hdf_nnrt_service") { + deps = [ + ":mindspore_demo", + ":libnnrt_driver", + ":libnnrt_device_service_2.0", + ] +} \ No newline at end of file diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/include/nnrt_device_service.h b/example/drivers/nnrt/v2_0/hdi_cpu_service/include/nnrt_device_service.h new file mode 100644 index 0000000..9419f40 --- /dev/null +++ b/example/drivers/nnrt/v2_0/hdi_cpu_service/include/nnrt_device_service.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef OHOS_HDI_NNRT_V2_0_NNRTDEVICESERVICE_H +#define OHOS_HDI_NNRT_V2_0_NNRTDEVICESERVICE_H + +#include + +#include "v2_0/innrt_device.h" +#include "ashmem.h" +#include "include/api/model.h" + +#include "mindspore_schema/model_generated.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V2_0 { +class NnrtDeviceService : public INnrtDevice { +public: + NnrtDeviceService() = default; + virtual ~NnrtDeviceService(); + + int32_t GetDeviceName(std::string& name) override; + + int32_t GetVendorName(std::string& name) override; + + int32_t GetDeviceType(DeviceType& deviceType) override; + + int32_t GetDeviceStatus(DeviceStatus& status) override; + + int32_t GetSupportedOperation(const Model& model, std::vector& ops) override; + + int32_t IsFloat16PrecisionSupported(bool& isSupported) override; + + int32_t IsPerformanceModeSupported(bool& isSupported) override; + + int32_t IsPrioritySupported(bool& isSupported) override; + + int32_t IsDynamicInputSupported(bool& isSupported) override; + + int32_t PrepareModel(const Model& model, const ModelConfig& config, sptr& preparedModel) override; + + int32_t IsModelCacheSupported(bool& isSupported) override; + + int32_t PrepareModelFromModelCache(const std::vector& modelCache, const ModelConfig& config, + sptr& preparedModel) override; + + int32_t AllocateBuffer(uint32_t length, SharedBuffer& buffer) override; + + int32_t ReleaseBuffer(const SharedBuffer& buffer) override; + +private: + int32_t ValidateModelConfig(const ModelConfig& config) const; + int32_t ValidateModel(const Model& model) const; + std::shared_ptr TransModelToGraph(const Model& model) const; + std::unique_ptr TransTensor(const Tensor& tensor) const; + std::unique_ptr TransNode(const Node& node) const; + std::unique_ptr TransSubGraph(const SubGraph& graph, const size_t numTensor) const; + std::shared_ptr TransModelConfig(const ModelConfig& config) const; + int32_t ShowCustomAttributes(const std::map>& extensions) const; + int32_t ParseCustomAttributes(const std::map>& extensions, float& attr1, + std::string& attr2) const; + int32_t ConvertVecToFloat(std::vector vecFloat, float& result) const; + int32_t ConvertVecToString(std::vector vecFloat, std::string& result) const; + +private: + std::shared_ptr m_model {nullptr}; + std::unordered_map> m_ashmems; +}; +} // V2_0 +} // Nnrt +} // HDI +} // OHOS + +#endif // OHOS_HDI_NNRT_V2_0_NNRTDEVICESERVICE_H \ No newline at end of file diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/include/node_functions.h b/example/drivers/nnrt/v2_0/hdi_cpu_service/include/node_functions.h new file mode 100644 index 0000000..8e1fbb3 --- /dev/null +++ b/example/drivers/nnrt/v2_0/hdi_cpu_service/include/node_functions.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef OHOS_HDI_NNR_NODE_FUNCTIONS_H +#define OHOS_HDI_NNR_NODE_FUNCTIONS_H + +#include + +#include "hdf_base.h" +#include "hdf_log.h" +#include +#include "node_registry.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V2_0 { +template +int32_t ParsePrimitive(const std::vector& primitive, T& attr, + std::function parseFunc) +{ + if (primitive.empty()) { + HDF_LOGE("Primitive data is empty."); + return HDF_FAILURE; + } + + OHOS::MessageParcel parcelData; + bool ret = parcelData.WriteBuffer(primitive.data(), primitive.size()); + if (!ret) { + HDF_LOGE("Write data to MessageParcel failed."); + return HDF_FAILURE; + } + + ret = parseFunc(parcelData, attr); + if (!ret) { + HDF_LOGE("Unmarshalling data failed."); + return HDF_FAILURE; + } + return HDF_SUCCESS; +} + +PrimUniquePtr GetAddPrimitive(const std::vector& primitive); +PrimUniquePtr GetAvgPoolPrimitive(const std::vector& primitive); +PrimUniquePtr GetConcatPrimitive(const std::vector& primitive); +PrimUniquePtr GetConv2dPrimitive(const std::vector& primitive); +PrimUniquePtr GetFullConnectionPrimitive(const std::vector& primitive); +PrimUniquePtr GetMaxPoolFusionPrimitive(const std::vector& primitive); +PrimUniquePtr GetMatMulFusionPrimitive(const std::vector& primitive); +PrimUniquePtr GetSoftmaxPrimitive(const std::vector& primitive); +PrimUniquePtr GetReshapePrimitive(const std::vector& primitive); +PrimUniquePtr GetScaleFusionPrimitive(const std::vector& primitive); +PrimUniquePtr GetActivationPrimitive(const std::vector& primitive); +PrimUniquePtr GetQuantDTypeCastPrimitive(const std::vector& primitive); +PrimUniquePtr GetMulFusionPrimitive(const std::vector& primitive); +} // namespace V2_0 +} // namespace Nnrt +} // namespace HDI +} // namespace OHOS +#endif // OHOS_HDI_NNR_NODE_FUNCTIONS_H \ No newline at end of file diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/include/node_registry.h b/example/drivers/nnrt/v2_0/hdi_cpu_service/include/node_registry.h new file mode 100644 index 0000000..17d4b51 --- /dev/null +++ b/example/drivers/nnrt/v2_0/hdi_cpu_service/include/node_registry.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef OHOS_HDI_NNR_NODE_REGISTRY_H +#define OHOS_HDI_NNR_NODE_REGISTRY_H + +#include +#include +#include + +#include "v2_0/nnrt_types.h" +#include "mindspore_schema/model_generated.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V2_0 { +using PrimUniquePtr = std::unique_ptr; +class NodeRegistry { +public: + struct Registrar { + Registrar() = delete; + Registrar(NodeType type, std::function&)> nodeFunc); + }; + +public: + static NodeRegistry& GetSingleton(); + std::function&)> GetNodeFunc(NodeType type) const; + bool IsNodeTypeExist(NodeType type) const; + +private: + NodeRegistry() {}; + NodeRegistry(const NodeRegistry&) = delete; + NodeRegistry& operator=(const NodeRegistry&) = delete; + +private: + std::unordered_map&)>> m_nodeRegs; +}; + +#define REGISTER_NODE(nodeName, nodeType, funcPtr) static NodeRegistry::Registrar g_##nodeName(nodeType, funcPtr) +} // namespace V2_0 +} // namespace Nnrt +} // namespace HDI +} // namespace OHOS +#endif // OHOS_HDI_NNR_NODE_REGISTRY_H \ No newline at end of file diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/include/prepared_model_service.h b/example/drivers/nnrt/v2_0/hdi_cpu_service/include/prepared_model_service.h new file mode 100644 index 0000000..c52ed06 --- /dev/null +++ b/example/drivers/nnrt/v2_0/hdi_cpu_service/include/prepared_model_service.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef OHOS_HDI_NNR_V2_0_PREPAREDMODELSERVICE_H +#define OHOS_HDI_NNR_V2_0_PREPAREDMODELSERVICE_H + +#include "v2_0/iprepared_model.h" +#include "include/api/data_type.h" +#include "include/api/context.h" +#include "include/api/types.h" +#include "include/api/model.h" +#include "mindspore_schema/model_generated.h" +#include "ashmem.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V2_0 { +constexpr int DYNAMIC_SHAPE_FLAG = -1; +class PreparedModelService : public IPreparedModel { +public: + PreparedModelService() = default; + + virtual ~PreparedModelService(); + + explicit PreparedModelService(std::shared_ptr context); + + int32_t Compile(std::shared_ptr graph); + + int32_t Compile(const void* modelBuffer, size_t length); + + int32_t ExportModelCache(std::vector& modelCache) override; + + int32_t Run(const std::vector& inputs, const std::vector& outputs, + std::vector>& outputsDims, std::vector& isOutputBufferEnough) override; + + int32_t GetInputDimRanges(std::vector>& minInputDims, + std::vector>& maxInputDims) override; + +private: + int32_t SetInputs(const std::vector& inputs); + int32_t SetOutputs(const std::vector& outputs); + int32_t GetMSInputsAndOutputs(); + int32_t CompareTensor(const IOTensor& tensor, const mindspore::MSTensor& msTensor); + sptr ParseBuffer(const SharedBuffer& buffer); + int32_t UpdateOutput(const std::vector& outputs, + std::vector>& outputsDims, std::vector& isOutputBufferEnough); + void ResetInputAndOutput(); + +private: + std::shared_ptr m_graph {nullptr}; + std::shared_ptr m_context {nullptr}; + flatbuffers::FlatBufferBuilder m_builder; + std::shared_ptr m_model {nullptr}; + sptr m_cacheBuffer {nullptr}; + std::vector> m_inputAshmems; + std::vector m_inputs; + std::vector> m_outputAshmems; + std::vector m_outputs; + std::vector> m_inputDims; + bool m_isDynamicShape {false}; +}; +} // V2_0 +} // Nnrt +} // HDI +} // OHOS + +#endif // OHOS_HDI_NNR_V2_0_PREPAREDMODELSERVICE_H \ No newline at end of file diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/include/shared_buffer_parser.h b/example/drivers/nnrt/v2_0/hdi_cpu_service/include/shared_buffer_parser.h new file mode 100644 index 0000000..8e74154 --- /dev/null +++ b/example/drivers/nnrt/v2_0/hdi_cpu_service/include/shared_buffer_parser.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef OHOS_HDI_NNR_V2_0_SHARED_BUFFER_PARSER_H +#define OHOS_HDI_NNR_V2_0_SHARED_BUFFER_PARSER_H + +#include "ashmem.h" +#include "v2_0/nnrt_types.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V2_0 { +namespace { +const int INVALID_FD = -1; +} + +class SharedBufferParser { +public: + SharedBufferParser() {}; + ~SharedBufferParser(); + + int32_t Init(const SharedBuffer& buffer); + int32_t Init(const std::string& name, int32_t size); + void* GetBufferPtr(); + SharedBuffer GetBuffer(); + +private: + SharedBuffer m_buffer; + sptr m_ashptr {nullptr}; + void* m_bufferAddr {nullptr}; +}; +} // V2_0 +} // Nnrt +} // HDI +} // OHOS +#endif // OHOS_HDI_NNR_V2_0_SHARED_BUFFER_PARSER_H \ No newline at end of file diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/include/validation.h b/example/drivers/nnrt/v2_0/hdi_cpu_service/include/validation.h new file mode 100644 index 0000000..ffcdf50 --- /dev/null +++ b/example/drivers/nnrt/v2_0/hdi_cpu_service/include/validation.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef OHOS_HDI_NNRT_VALIDATION_H +#define OHOS_HDI_NNRT_VALIDATION_H + +#include "v2_0/nnrt_types.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V2_0 { +int32_t ValidatePerformanceMode(PerformanceMode mode); +int32_t ValidatePriority(Priority priority); +int32_t ValidateDataType(DataType dataType); +int32_t ValidateFormat(Format format); +} // namespace V2_0 +} // namespace Nnrt +} // namespace HDI +} // namespace OHOS +#endif // OHOS_HDI_NNRT_VALIDATION_H \ No newline at end of file diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/src/nnrt_device_driver.cpp b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/nnrt_device_driver.cpp new file mode 100644 index 0000000..fab6e89 --- /dev/null +++ b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/nnrt_device_driver.cpp @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include <hdf_base.h>
+#include <hdf_device_desc.h>
+#include <hdf_log.h>
+#include <hdf_sbuf_ipc.h>
+#include "v2_0/nnrt_device_stub.h"
+
+using namespace OHOS::HDI::Nnrt::V2_0;
+
+struct HdfNnrtDeviceHost {
+    struct IDeviceIoService ioService;
+    OHOS::sptr<OHOS::IRemoteObject> stub;
+};
+
+static int32_t NnrtDeviceDriverDispatch(struct HdfDeviceIoClient *client, int cmdId, struct HdfSBuf *data,
+    struct HdfSBuf *reply)
+{
+    auto *hdfNnrtDeviceHost = CONTAINER_OF(client->device->service, struct HdfNnrtDeviceHost, ioService);
+
+    OHOS::MessageParcel *dataParcel = nullptr;
+    OHOS::MessageParcel *replyParcel = nullptr;
+    OHOS::MessageOption option;
+
+    if (SbufToParcel(data, &dataParcel) != HDF_SUCCESS) {
+        HDF_LOGE("%{public}s: invalid data sbuf object to dispatch", __func__);
+        return HDF_ERR_INVALID_PARAM;
+    }
+    if (SbufToParcel(reply, &replyParcel) != HDF_SUCCESS) {
+        HDF_LOGE("%{public}s: invalid reply sbuf object to dispatch", __func__);
+        return HDF_ERR_INVALID_PARAM;
+    }
+
+    return hdfNnrtDeviceHost->stub->SendRequest(cmdId, *dataParcel, *replyParcel, option);
+}
+
+static int HdfNnrtDeviceDriverInit(struct HdfDeviceObject *deviceObject)
+{
+    HDF_LOGI("HdfNnrtDeviceDriverInit enter");
+    return HDF_SUCCESS;
+}
+
+static int HdfNnrtDeviceDriverBind(struct HdfDeviceObject *deviceObject)
+{
+    HDF_LOGI("HdfNnrtDeviceDriverBind enter");
+
+    auto *hdfNnrtDeviceHost = new (std::nothrow) HdfNnrtDeviceHost;
+    if (hdfNnrtDeviceHost == nullptr) {
+        HDF_LOGE("%{public}s: failed to create HdfNnrtDeviceHost object", __func__);
+        return HDF_FAILURE;
+    }
+
+    hdfNnrtDeviceHost->ioService.Dispatch = NnrtDeviceDriverDispatch;
+    hdfNnrtDeviceHost->ioService.Open = NULL;
+    hdfNnrtDeviceHost->ioService.Release = NULL;
+
+    auto serviceImpl = INnrtDevice::Get(true);
+    if (serviceImpl == nullptr) {
+        HDF_LOGE("%{public}s: failed to get the implementation of INnrtDevice", __func__);
+        delete hdfNnrtDeviceHost;
+        return HDF_FAILURE;
+    }
+
+    hdfNnrtDeviceHost->stub = OHOS::HDI::ObjectCollector::GetInstance().GetOrNewObject(serviceImpl,
+        INnrtDevice::GetDescriptor());
+    if (hdfNnrtDeviceHost->stub == nullptr) {
+        HDF_LOGE("%{public}s: failed to get stub object", __func__);
+        delete hdfNnrtDeviceHost;
+        return HDF_FAILURE;
+    }
+
+    deviceObject->service = &hdfNnrtDeviceHost->ioService;
+    return HDF_SUCCESS;
+}
+
+static void HdfNnrtDeviceDriverRelease(struct HdfDeviceObject *deviceObject)
+{
+    HDF_LOGI("HdfNnrtDeviceDriverRelease enter");
+    if (deviceObject->service == nullptr) {
+        HDF_LOGE("HdfNnrtDeviceDriverRelease: device service is not initialized");
+        return;
+    }
+
+    auto *hdfNnrtDeviceHost = CONTAINER_OF(deviceObject->service, struct HdfNnrtDeviceHost, ioService);
+    delete hdfNnrtDeviceHost;
+}
+
+struct HdfDriverEntry g_nnrtdeviceDriverEntry = {
+    .moduleVersion = 2,
+    .moduleName = "nnrt",
+    .Bind = HdfNnrtDeviceDriverBind,
+    .Init = HdfNnrtDeviceDriverInit,
+    .Release = HdfNnrtDeviceDriverRelease,
+};
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+HDF_INIT(g_nnrtdeviceDriverEntry);
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
\ No newline at end of file
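For orientation, once this driver is bound and the "nnrt" service is published, a framework-side client reaches it through the generated proxy. A minimal sketch, assuming the v2.0 service implemented in the next file (error handling elided; INnrtDevice::Get() and GetDeviceName() are the interfaces used above, the rest is illustrative):

    sptr<INnrtDevice> device = INnrtDevice::Get();  // obtain a proxy of the published nnrt service
    std::string name;
    if (device != nullptr && device->GetDeviceName(name) == HDF_SUCCESS) {
        // name is "RK3568-CPU" for this sample service
    }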
diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/src/nnrt_device_service.cpp b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/nnrt_device_service.cpp
new file mode 100644
index 0000000..77ca239
--- /dev/null
+++ b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/nnrt_device_service.cpp
@@ -0,0 +1,529 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nnrt_device_service.h"
+
+#include "hdf_log.h"
+#include "ashmem.h"
+#include "securec.h"
+
+#include "node_registry.h"
+#include "prepared_model_service.h"
+#include "shared_buffer_parser.h"
+#include "validation.h"
+
+namespace OHOS {
+namespace HDI {
+namespace Nnrt {
+namespace V2_0 {
+extern "C" INnrtDevice *NnrtDeviceImplGetInstance(void)
+{
+    return new (std::nothrow) NnrtDeviceService();
+}
+
+NnrtDeviceService::~NnrtDeviceService()
+{
+    for (auto& ash : m_ashmems) {
+        ash.second->UnmapAshmem();
+        ash.second->CloseAshmem();
+    }
+}
+
+int32_t NnrtDeviceService::GetDeviceName(std::string& name)
+{
+    name = "RK3568-CPU";
+    return HDF_SUCCESS;
+}
+
+int32_t NnrtDeviceService::GetVendorName(std::string& name)
+{
+    name = "Rockchip";
+    return HDF_SUCCESS;
+}
+
+int32_t NnrtDeviceService::GetDeviceType(DeviceType& deviceType)
+{
+    deviceType = DeviceType::CPU;
+    return HDF_SUCCESS;
+}
+
+int32_t NnrtDeviceService::GetDeviceStatus(DeviceStatus& status)
+{
+    status = DeviceStatus::AVAILABLE;
+    return HDF_SUCCESS;
+}
+
+int32_t NnrtDeviceService::GetSupportedOperation(const Model& model, std::vector<bool>& ops)
+{
+    size_t nodeSize = model.nodes.size();
+    const auto& nodes = model.nodes;
+    ops.resize(nodeSize, false);
+    auto& regInstance = NodeRegistry::GetSingleton();
+    for (size_t i = 0; i < nodeSize; i++) {
+        ops[i] = regInstance.IsNodeTypeExist(nodes[i].nodeType);
+    }
+    return HDF_SUCCESS;
+}
+
+int32_t NnrtDeviceService::IsFloat16PrecisionSupported(bool& isSupported)
+{
+    isSupported = true;
+    return HDF_SUCCESS;
+}
+
+int32_t NnrtDeviceService::IsPerformanceModeSupported(bool& isSupported)
+{
+    isSupported = true;
+    return HDF_SUCCESS;
+}
+
+int32_t NnrtDeviceService::IsPrioritySupported(bool& isSupported)
+{
+    isSupported = false;
+    return HDF_SUCCESS;
+}
+
+int32_t NnrtDeviceService::IsDynamicInputSupported(bool& isSupported)
+{
+    isSupported = true;
+    return HDF_SUCCESS;
+}
+
+int32_t NnrtDeviceService::ShowCustomAttributes(const std::map<std::string, std::vector<int8_t>>& extensions) const
+{
+    float attr1 {0.0f};
+    std::string attr2;
+
+    auto ret = ParseCustomAttributes(extensions, attr1, attr2);
+    if (ret != HDF_SUCCESS) {
+        HDF_LOGE("Parsing custom attributes failed.");
+        return ret;
+    }
+
+    if (attr1 != 0.0f) {
+        HDF_LOGI("Set attr1: %f", attr1);
+    }
+
+    if (!attr2.empty()) {
+        HDF_LOGI("Set attr2: %s", attr2.c_str());
+    }
+
+    return HDF_SUCCESS;
+}
+
+int32_t NnrtDeviceService::PrepareModel(const Model& model, const ModelConfig& config,
+    sptr<IPreparedModel>& preparedModel)
+{
+    auto ret = ValidateModel(model);
+    if (ret != HDF_SUCCESS) {
+        HDF_LOGE("Model is invalid.");
+        return ret;
+    }
+
+    auto graph = TransModelToGraph(model);
+    if (graph == nullptr) {
+        HDF_LOGE("Transform model to graph failed.");
+        return HDF_ERR_INVALID_PARAM;
+    }
+
+    ret = ValidateModelConfig(config);
+    if (ret != HDF_SUCCESS) {
+        HDF_LOGE("ModelConfig is invalid.");
+        return ret;
+    }
+
+    ret = ShowCustomAttributes(config.extensions);
+    if (ret != HDF_SUCCESS) {
+        HDF_LOGE("Showing custom attributes failed.");
+        return ret;
+    }
+
+    auto context = TransModelConfig(config);
+    sptr<PreparedModelService> service = new (std::nothrow) PreparedModelService(context);
+    if (service == nullptr) {
+        HDF_LOGE("Create new PreparedModelService instance failed.");
+        return HDF_ERR_MALLOC_FAIL;
+    }
+
+    ret = service->Compile(graph);
+    if (ret != HDF_SUCCESS) {
+        HDF_LOGE("Prepare model failed.");
+        return ret;
+    }
+
+    preparedModel = service;
+    return HDF_SUCCESS;
+}
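PrepareModel accepts vendor-specific key/value pairs through config.extensions; this sample understands exactly the two keys parsed in ParseCustomAttributes below. A hedged sketch of how a caller could fill them (the key names "attr1" and "attr2" come from this file; the float-to-bytes packing mirrors ConvertVecToFloat):

    std::map<std::string, std::vector<int8_t>> extensions;
    float attr1 = 0.5f;                                        // must fall in (0, 1]
    std::vector<int8_t> attr1Bytes(sizeof(float));
    memcpy_s(attr1Bytes.data(), attr1Bytes.size(), &attr1, sizeof(float));
    extensions["attr1"] = attr1Bytes;
    std::string attr2 = "LOW";                                 // "LOW" or "HIGH"
    extensions["attr2"] = std::vector<int8_t>(attr2.begin(), attr2.end());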
+
+int32_t NnrtDeviceService::IsModelCacheSupported(bool& isSupported)
+{
+    isSupported = true;
+    return HDF_SUCCESS;
+}
+
+int32_t NnrtDeviceService::PrepareModelFromModelCache(const std::vector<SharedBuffer>& modelCache,
+    const ModelConfig& config, sptr<IPreparedModel>& preparedModel)
+{
+    HDF_LOGD("Using cache to prepare model.");
+
+    // The size of modelCache must be 1, because PreparedModel exports exactly one cache file.
+    if (modelCache.size() != 1) {
+        HDF_LOGE("The size of the modelCache vector is invalid; it should contain exactly one element.");
+        return HDF_ERR_INVALID_PARAM;
+    }
+
+    SharedBufferParser parser;
+    auto ret = parser.Init(modelCache[0]);
+    if (ret != HDF_SUCCESS) {
+        HDF_LOGE("Parse model buffer failed.");
+        return HDF_ERR_INVALID_PARAM;
+    }
+
+    ret = ValidateModelConfig(config);
+    if (ret != HDF_SUCCESS) {
+        HDF_LOGE("ModelConfig is invalid.");
+        return ret;
+    }
+
+    ret = ShowCustomAttributes(config.extensions);
+    if (ret != HDF_SUCCESS) {
+        HDF_LOGE("Showing custom attributes failed.");
+        return ret;
+    }
+
+    auto context = TransModelConfig(config);
+    sptr<PreparedModelService> service = new (std::nothrow) PreparedModelService(context);
+    if (service == nullptr) {
+        HDF_LOGE("Create new PreparedModelService instance failed.");
+        return HDF_ERR_MALLOC_FAIL;
+    }
+
+    void* modelBuffer = parser.GetBufferPtr();
+    ret = service->Compile(modelBuffer, modelCache[0].dataSize);
+    if (ret != HDF_SUCCESS) {
+        HDF_LOGE("Prepare model failed.");
+        return ret;
+    }
+
+    preparedModel = service;
+    return HDF_SUCCESS;
+}
+
+int32_t NnrtDeviceService::AllocateBuffer(uint32_t length, SharedBuffer& buffer)
+{
+    sptr<Ashmem> ashptr = Ashmem::CreateAshmem("allocateBuffer", length);
+    if (ashptr == nullptr) {
+        HDF_LOGE("Create shared memory failed.");
+        return HDF_FAILURE;
+    }
+
+    if (!ashptr->MapReadAndWriteAshmem()) {
+        HDF_LOGE("Map allocated buffer failed.");
+        return HDF_FAILURE;
+    }
+
+    buffer.fd = ashptr->GetAshmemFd();
+    buffer.bufferSize = ashptr->GetAshmemSize();
+    buffer.offset = 0;
+    buffer.dataSize = length;
+
+    m_ashmems[buffer.fd] = ashptr;
+    return HDF_SUCCESS;
+}
+
+int32_t NnrtDeviceService::ReleaseBuffer(const SharedBuffer& buffer)
+{
+    // The parser closes the received fd when it goes out of scope.
+    SharedBufferParser parser;
+    auto ret = parser.Init(buffer);
+    if (ret != HDF_SUCCESS) {
+        HDF_LOGE("Parse buffer failed.");
+        return HDF_ERR_INVALID_PARAM;
+    }
+
+    // Note: this demo unmaps and closes every buffer it has allocated, not just the one passed in.
+    for (auto& ash : m_ashmems) {
+        ash.second->UnmapAshmem();
+        ash.second->CloseAshmem();
+    }
+    m_ashmems.clear();
+    return HDF_SUCCESS;
+}
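AllocateBuffer and ReleaseBuffer bracket the shared-memory lifecycle: the service creates an ashmem region and hands the caller a SharedBuffer descriptor (fd, bufferSize, offset, dataSize). A minimal caller-side sketch, assuming a connected device proxy as in the earlier example:

    SharedBuffer buffer;
    if (device->AllocateBuffer(1024, buffer) == HDF_SUCCESS) {
        // map buffer.fd, fill it with input data, reference it from an IOTensor ...
        device->ReleaseBuffer(buffer);  // this demo frees all buffers it has allocated
    }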
priority=%d", config.priority); + return HDF_ERR_INVALID_PARAM; + } + + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::ValidateModel(const Model& model) const +{ + if (model.allTensors.empty()) { + HDF_LOGE("Model has no tensors."); + return HDF_ERR_INVALID_PARAM; + } + + if (model.subGraph.empty()) { + HDF_LOGE("Model has no subGraphs."); + return HDF_ERR_INVALID_PARAM; + } + + if (model.nodes.empty()) { + HDF_LOGE("Model has no nodes."); + return HDF_ERR_INVALID_PARAM; + } + + if (model.inputIndex.empty()) { + HDF_LOGE("Model has no input."); + return HDF_ERR_INVALID_PARAM; + } + + if (model.outputIndex.empty()) { + HDF_LOGE("Model has no output."); + return HDF_ERR_INVALID_PARAM; + } + + size_t tensorSize = model.allTensors.size(); + for (auto index : model.inputIndex) { + if (index > tensorSize) { + HDF_LOGE("Input index is invalid, index=%u", index); + return HDF_ERR_INVALID_PARAM; + } + } + + for (auto index : model.outputIndex) { + if (index > tensorSize) { + HDF_LOGE("Output index is invalid, index=%u", index); + return HDF_ERR_INVALID_PARAM; + } + } + + return HDF_SUCCESS; +} + +std::shared_ptr NnrtDeviceService::TransModelToGraph(const Model& model) const +{ + auto metaGraph = std::make_shared(); + metaGraph->name = model.name; + metaGraph->version = mindspore::Version(); + + std::unique_ptr transTensor{nullptr}; + for (auto tensor : model.allTensors) { + transTensor = TransTensor(tensor); + if (transTensor == nullptr) { + HDF_LOGE("Transform tensor failed."); + return nullptr; + } + metaGraph->allTensors.emplace_back(std::move(transTensor)); + } + metaGraph->inputIndex = model.inputIndex; + metaGraph->outputIndex = model.outputIndex; + + // Transform node + std::unique_ptr transNode {nullptr}; + for (auto& node : model.nodes) { + transNode = TransNode(node); + if (transNode == nullptr) { + HDF_LOGE("Transform node failed, node name=%{public}s", node.name.c_str()); + return nullptr; + } + metaGraph->nodes.emplace_back(std::move(transNode)); + } + + // Transform subgraph + const size_t numTensor = model.allTensors.size(); + for (auto graph : model.subGraph) { + metaGraph->subGraph.emplace_back(TransSubGraph(graph, numTensor)); + } + return metaGraph; +} + +std::unique_ptr NnrtDeviceService::TransTensor(const Tensor& tensor) const +{ + if (!ValidateDataType(tensor.dataType)) { + HDF_LOGE("DataType of tensor is invalid. dataType=%d", tensor.dataType); + return nullptr; + } + + if (!ValidateFormat(tensor.format)) { + HDF_LOGE("Format of tensor is invalid. 
format=%d", tensor.format); + return nullptr; + } + + auto schemaTensor = std::make_unique(); + schemaTensor->name = tensor.name; + schemaTensor->dataType = static_cast(tensor.dataType); + schemaTensor->format = static_cast(tensor.format); + schemaTensor->dims = tensor.dims; + for (auto param : tensor.quantParams) { + auto quantParam = std::make_unique(); + quantParam->scale = param.scale; + quantParam->zeroPoint = param.zeroPoint; + quantParam->numBits = param.numBits; + quantParam->inited = true; + schemaTensor->quantParams.emplace_back(std::move(quantParam)); + } + + if (tensor.data.fd != INVALID_FD) { + SharedBufferParser parser; + auto ret = parser.Init(tensor.data); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse tensor data failed."); + return nullptr; + } + + auto data = parser.GetBufferPtr(); + schemaTensor->data.resize(tensor.data.dataSize); + auto memRet = memcpy_s(const_cast(schemaTensor->data.data()), + tensor.data.dataSize, data, tensor.data.dataSize); + if (memRet != EOK) { + HDF_LOGW("Copy tensor data failed."); + return nullptr; + } + } + return schemaTensor; +} + +std::unique_ptr NnrtDeviceService::TransNode(const Node& node) const +{ + auto cnode = std::make_unique(); + cnode->name = node.name; + cnode->inputIndex = node.inputIndex; + cnode->outputIndex = node.outputIndex; + cnode->quantType = static_cast(node.quantType); + + auto& regInstance = NodeRegistry::GetSingleton(); + auto parseFunc = regInstance.GetNodeFunc(node.nodeType); + auto primitive = parseFunc(node.nodeAttr); + if (primitive == nullptr) { + HDF_LOGE("Parse primitve data failed. node name=%{public}s", node.name.c_str()); + return nullptr; + } + + cnode->primitive = std::move(primitive); + return cnode; +} + +std::unique_ptr NnrtDeviceService::TransSubGraph(const SubGraph& graph, + const size_t numTensor) const +{ + auto subGraph = std::make_unique(); + subGraph->name = graph.name; + subGraph->inputIndices = graph.inputIndices; + subGraph->outputIndices = graph.outputIndices; + subGraph->nodeIndices = graph.nodeIndices; + subGraph->tensorIndices.reserve(numTensor); + for (size_t i = 0; i < numTensor; i++) { + subGraph->tensorIndices.emplace_back(static_cast(i)); + } + return subGraph; +} + +std::shared_ptr NnrtDeviceService::TransModelConfig(const ModelConfig& config) const +{ + auto context = std::make_shared(); + const int cpuThreadNum = 2; + const int cpuNoAffinities = 0; + const int cpuBigCore = 1; + const int cpuLittleCore = 2; + context->SetThreadNum(cpuThreadNum); + + int mode = cpuNoAffinities; + switch (config.mode) { + case PerformanceMode::PERFORMANCE_LOW: + case PerformanceMode::PERFORMANCE_MEDIUM: + mode = cpuLittleCore; + break; + case PerformanceMode::PERFORMANCE_HIGH: + case PerformanceMode::PERFORMANCE_EXTREME: + mode = cpuBigCore; + break; + default: + mode = cpuNoAffinities; + } + context->SetThreadAffinity(mode); + + auto cpuInfo = std::make_shared(); + cpuInfo->SetEnableFP16(config.enableFloat16); + auto& deviceInfos = context->MutableDeviceInfo(); + deviceInfos.emplace_back(cpuInfo); + return context; +} + +int32_t NnrtDeviceService::ConvertVecToFloat(std::vector vecFloat, float& result) const +{ + if (vecFloat.size() != sizeof(float)) { + HDF_LOGE("Size of the int8_t vector dose not match a float value."); + return HDF_ERR_INVALID_PARAM; + } + + result = *(reinterpret_cast(vecFloat.data())); + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::ConvertVecToString(std::vector vecFloat, std::string& result) const +{ + if (vecFloat.empty()) { + HDF_LOGE("int8_t vector is empty."); + 
return HDF_ERR_INVALID_PARAM; + } + + result = reinterpret_cast(vecFloat.data()); + return HDF_SUCCESS; +} + +int32_t NnrtDeviceService::ParseCustomAttributes(const std::map>& extensions, + float& attr1, std::string& attr2) const +{ + int32_t ret; + for (auto extension : extensions) { + if (extension.first == "attr1") { + ret = ConvertVecToFloat(extension.second, attr1); + if (ret != HDF_SUCCESS) { + HDF_LOGE("ConvertVecToFloat failed."); + return ret; + } + if (attr1 <= 0.0f || attr1 > 1.0f) { + HDF_LOGE("attr1 is out of range (0,1]."); + return HDF_ERR_INVALID_PARAM; + } + } else if (extension.first == "attr2") { + ret = ConvertVecToString(extension.second, attr2); + if (ret != HDF_SUCCESS) { + HDF_LOGE("ConvertVecToString failed."); + return ret; + } + if (attr2 != "LOW" || attr2 != "HIGH") { + HDF_LOGE("attr2 is neither LOW nor HIGH."); + return HDF_ERR_INVALID_PARAM; + } + } + } + + return HDF_SUCCESS; +} +} // V2_0 +} // Nnrt +} // HDI +} // OHOS diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/src/node_functions.cpp b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/node_functions.cpp new file mode 100644 index 0000000..fb7a701 --- /dev/null +++ b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/node_functions.cpp @@ -0,0 +1,373 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "node_functions.h" + +#include "node_registry.h" +#include +#include + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V2_0 { +PrimUniquePtr GetAddPrimitive(const std::vector& primitive) +{ + AddFusion addAttr; + auto ret = ParsePrimitive(primitive, addAttr, AddFusionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of AddFusion operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_AddFusion; + auto attr = new (std::nothrow) mindspore::schema::AddFusionT; + if (attr == nullptr) { + HDF_LOGE("Create AddFusion primitive failed."); + return nullptr; + } + attr->activation_type = static_cast(addAttr.activationType); + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetAvgPoolPrimitive(const std::vector& primitive) +{ + AvgPoolFusion avgPoolAttr; + auto ret = ParsePrimitive(primitive, avgPoolAttr, AvgPoolFusionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of AvgPoolFusion operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_AvgPoolFusion; + + auto attr = new (std::nothrow) mindspore::schema::AvgPoolFusionT; + if (attr == nullptr) { + HDF_LOGE("Create AvgPoolFusion primitive failed."); + return nullptr; + } + attr->kernel_size = avgPoolAttr.kernelSize; + attr->strides = avgPoolAttr.strides; + attr->pad = avgPoolAttr.pad; + attr->pad_mode = static_cast(avgPoolAttr.padMode); + attr->round_mode = static_cast(avgPoolAttr.roundMode); + attr->format = static_cast(avgPoolAttr.format); + attr->global = avgPoolAttr.global; + attr->activation_type = static_cast(avgPoolAttr.activationType); + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetConcatPrimitive(const std::vector& primitive) +{ + Concat concatAttr; + auto ret = ParsePrimitive(primitive, concatAttr, ConcatBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of Concat operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_Concat; + + auto attr = new (std::nothrow) mindspore::schema::ConcatT; + if (attr == nullptr) { + HDF_LOGE("Create concat primitive failed."); + return nullptr; + } + attr->axis = concatAttr.axis; + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetConv2dPrimitive(const std::vector& primitive) +{ + Conv2DFusion conv2dAttr; + auto ret = ParsePrimitive(primitive, conv2dAttr, Conv2DFusionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of Conv2DFusion operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_Conv2DFusion; + + auto attr = new (std::nothrow) mindspore::schema::Conv2DFusionT; + if (attr == nullptr) { + HDF_LOGE("Create Conv2DFusion primitive failed."); + return nullptr; + } + + attr->kernel_size = conv2dAttr.kernelSize; + attr->stride = conv2dAttr.stride; + attr->dilation = conv2dAttr.dilation; + attr->pad_mode = static_cast(conv2dAttr.padMode); + attr->pad_list = conv2dAttr.padList; + attr->group = conv2dAttr.group; + attr->in_channel = conv2dAttr.inChannel; + attr->out_channel = conv2dAttr.outChannel; + attr->activation_type = static_cast(conv2dAttr.activationType); + + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetFullConnectionPrimitive(const std::vector& primitive) +{ + FullConnection 
fullConnAttr; + auto ret = ParsePrimitive(primitive, fullConnAttr, FullConnectionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of FullConnection operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_FullConnection; + + auto attr = new (std::nothrow) mindspore::schema::FullConnectionT; + if (attr == nullptr) { + HDF_LOGE("Create FullConnection primitive failed."); + return nullptr; + } + + attr->has_bias = fullConnAttr.hasBias; + attr->use_axis = fullConnAttr.useAxis; + attr->axis = fullConnAttr.axis; + attr->activation_type = static_cast(fullConnAttr.activationType); + + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetMaxPoolFusionPrimitive(const std::vector& primitive) +{ + MaxPoolFusion maxPoolAttr; + auto ret = ParsePrimitive(primitive, maxPoolAttr, MaxPoolFusionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of MaxPoolFusion operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_MaxPoolFusion; + + auto attr = new (std::nothrow) mindspore::schema::MaxPoolFusionT; + if (attr == nullptr) { + HDF_LOGE("Create MaxPoolFusion primitive failed."); + return nullptr; + } + + attr->kernel_size = maxPoolAttr.kernelSize; + attr->strides = maxPoolAttr.strides; + attr->pad = maxPoolAttr.pad; + attr->pad_mode = static_cast(maxPoolAttr.padMode); + attr->format = static_cast(maxPoolAttr.format); + attr->global = maxPoolAttr.global; + attr->activation_type = static_cast(maxPoolAttr.activationType); + + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetMatMulFusionPrimitive(const std::vector& primitive) +{ + MatMulFusion matmulAttr; + auto ret = ParsePrimitive(primitive, matmulAttr, MatMulFusionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of MatMulFusion operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_MatMulFusion; + + auto attr = new (std::nothrow) mindspore::schema::MatMulFusionT; + if (attr == nullptr) { + HDF_LOGE("Create MatMulFusion primitive failed."); + return nullptr; + } + + attr->transpose_a = matmulAttr.transposeA; + attr->transpose_b = matmulAttr.transposeB; + attr->activation_type = static_cast(matmulAttr.activationType); + + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetSoftmaxPrimitive(const std::vector& primitive) +{ + Softmax softmaxAttr; + auto ret = ParsePrimitive(primitive, softmaxAttr, SoftmaxBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of Softmax operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_Softmax; + + auto attr = new (std::nothrow) mindspore::schema::SoftmaxT; + if (attr == nullptr) { + HDF_LOGE("Create Softmax primitive failed."); + return nullptr; + } + + attr->axis = softmaxAttr.axis; + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetReshapePrimitive(const std::vector& primitive) +{ + Reshape reshapeAttr; + auto ret = ParsePrimitive(primitive, reshapeAttr, ReshapeBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of Reshape operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_Reshape; + + auto attr = new (std::nothrow) 
mindspore::schema::ReshapeT; + if (attr == nullptr) { + HDF_LOGE("Create Reshape primitive failed."); + return nullptr; + } + + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetScaleFusionPrimitive(const std::vector& primitive) +{ + ScaleFusion scaleAttr; + auto ret = ParsePrimitive(primitive, scaleAttr, ScaleFusionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of ScaleFusion operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_ScaleFusion; + + auto attr = new (std::nothrow) mindspore::schema::ScaleFusionT; + if (attr == nullptr) { + HDF_LOGE("Create ScaleFusion primitive failed."); + return nullptr; + } + + attr->axis = scaleAttr.axis; + attr->activation_type = static_cast(scaleAttr.activationType); + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetActivationPrimitive(const std::vector& primitive) +{ + Activation actAttr; + auto ret = ParsePrimitive(primitive, actAttr, ActivationBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of Activation operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_Activation; + + auto attr = new (std::nothrow) mindspore::schema::ActivationT; + if (attr == nullptr) { + HDF_LOGE("Create Activation primitive failed."); + return nullptr; + } + + attr->alpha = actAttr.alpha; + attr->min_val = actAttr.minVal; + attr->max_val = actAttr.maxVal; + attr->approximate = actAttr.approximate; + attr->activation_type = static_cast(actAttr.activationType); + + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetQuantDTypeCastPrimitive(const std::vector& primitive) +{ + QuantDTypeCast quantAttr; + auto ret = ParsePrimitive(primitive, quantAttr, QuantDTypeCastBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of QuantDTypeCast operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_QuantDTypeCast; + + auto attr = new (std::nothrow) mindspore::schema::QuantDTypeCastT; + if (attr == nullptr) { + HDF_LOGE("Create QuantDTypeCast primitive failed."); + return nullptr; + } + + attr->src_t = quantAttr.srcT; + attr->dst_t = quantAttr.dstT; + prim->value.value = attr; + return prim; +} + +PrimUniquePtr GetMulFusionPrimitive(const std::vector& primitive) +{ + MulFusion mulAttr; + auto ret = ParsePrimitive(primitive, mulAttr, MulFusionBlockUnmarshalling); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Parse primitive data of MulFusion operator failed."); + return nullptr; + } + + auto prim = std::make_unique(); + prim->value.type = mindspore::schema::PrimitiveType_MulFusion; + + auto attr = new (std::nothrow) mindspore::schema::MulFusionT; + if (attr == nullptr) { + HDF_LOGE("Create MulFusion primitive failed."); + return nullptr; + } + + attr->activation_type = static_cast(mulAttr.activationType); + prim->value.value = attr; + return prim; +} + +REGISTER_NODE(Activation, NodeType::NODE_TYPE_ACTIVATION, GetActivationPrimitive); +REGISTER_NODE(AddFusion, NodeType::NODE_TYPE_ADD_FUSION, GetAddPrimitive); +REGISTER_NODE(AvgPoolFusion, NodeType::NODE_TYPE_AVGPOOL_FUSION, GetAvgPoolPrimitive); +REGISTER_NODE(Concat, NodeType::NODE_TYPE_CONCAT, GetConcatPrimitive); +REGISTER_NODE(Conv2DFusion, NodeType::NODE_TYPE_CONV2D_FUSION, GetConv2dPrimitive); +REGISTER_NODE(FullConnection, NodeType::NODE_TYPE_FULL_CONNECTION, 
GetFullConnectionPrimitive); +REGISTER_NODE(MaxPoolFusion, NodeType::NODE_TYPE_MAX_POOL_FUSION, GetMaxPoolFusionPrimitive); +REGISTER_NODE(MatMulFusion, NodeType::NODE_TYPE_MATMUL_FUSION, GetMatMulFusionPrimitive); +REGISTER_NODE(Reshape, NodeType::NODE_TYPE_RESHAPE, GetReshapePrimitive); +REGISTER_NODE(Softmax, NodeType::NODE_TYPE_SOFTMAX, GetSoftmaxPrimitive); +REGISTER_NODE(ScaleFusion, NodeType::NODE_TYPE_SCALE_FUSION, GetScaleFusionPrimitive); +REGISTER_NODE(QuantDTypeCast, NodeType::NODE_TYPE_QUANT_DTYPE_CAST, GetQuantDTypeCastPrimitive); +REGISTER_NODE(MulFusion, NodeType::NODE_TYPE_MUL_FUSION, GetMulFusionPrimitive); +} // namespace V2_0 +} // namespace Nnrt +} // namespace HDI +} // namespace OHOS \ No newline at end of file diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/src/node_registry.cpp b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/node_registry.cpp new file mode 100644 index 0000000..f6537ad --- /dev/null +++ b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/node_registry.cpp @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "node_registry.h" + +#include "hdf_log.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V2_0 { +NodeRegistry& NodeRegistry::GetSingleton() +{ + static NodeRegistry registry; + return registry; +} + +NodeRegistry::Registrar::Registrar(NodeType type, std::function&)> nodeFunc) +{ + auto& registry = NodeRegistry::GetSingleton(); + if (registry.m_nodeRegs.find(type) != registry.m_nodeRegs.end()) { + HDF_LOGW("Node has been registered. nodeType=%d", type); + } else { + registry.m_nodeRegs[type] = nodeFunc; + } +} + +std::function&)> NodeRegistry::GetNodeFunc(NodeType type) const +{ + if (m_nodeRegs.find(type) == m_nodeRegs.end()) { + HDF_LOGW("Node type is not found. nodeType=%d", type); + return nullptr; + } + + return m_nodeRegs.at(type); +} + +bool NodeRegistry::IsNodeTypeExist(NodeType type) const +{ + if (m_nodeRegs.find(type) == m_nodeRegs.end()) { + return false; + } + return true; +} +} // namespace V2_0 +} // namespace Nnrt +} // namespace HDI +} // namespace OHOS \ No newline at end of file diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/src/prepared_model_service.cpp b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/prepared_model_service.cpp new file mode 100644 index 0000000..1d3edf6 --- /dev/null +++ b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/prepared_model_service.cpp @@ -0,0 +1,461 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "prepared_model_service.h" + +#include +#include "securec.h" +#include "hdf_log.h" + +#include "shared_buffer_parser.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V2_0 { +PreparedModelService::PreparedModelService(std::shared_ptr context) + : m_context(context) {} + +PreparedModelService::~PreparedModelService() +{ + if (m_cacheBuffer != nullptr) { + m_cacheBuffer->CloseAshmem(); + } + + for (auto& inputAsh : m_inputAshmems) { + inputAsh->UnmapAshmem(); + inputAsh->CloseAshmem(); + } + + for (auto& outputAsh : m_outputAshmems) { + outputAsh->UnmapAshmem(); + outputAsh->CloseAshmem(); + } +} + +int32_t PreparedModelService::ExportModelCache(std::vector& modelCache) +{ + if (!modelCache.empty()) { + HDF_LOGE("The parameters of ExportModelCache should be an empty vector."); + return HDF_ERR_INVALID_PARAM; + } + + if (m_cacheBuffer != nullptr) { + auto fd = m_cacheBuffer->GetAshmemFd(); + auto size = m_cacheBuffer->GetAshmemSize(); + + // SharedBuffer: fd, bufferSize, offset, dataSize + modelCache.emplace_back(SharedBuffer{fd, size, 0, size}); + return HDF_SUCCESS; + } + + auto size = m_builder.GetSize(); + auto buffer = m_builder.GetBufferPointer(); + const char* name = m_graph != nullptr ? m_graph->name.c_str() : "CacheModel"; + sptr cache = Ashmem::CreateAshmem(name, size); + if (cache == nullptr) { + HDF_LOGE("Create shared memory failed."); + return HDF_ERR_MALLOC_FAIL; + } + + bool ret = cache->MapReadAndWriteAshmem(); + if (!ret) { + HDF_LOGE("Map fd to write cache failed."); + return HDF_FAILURE; + } + + ret = cache->WriteToAshmem(buffer, size, 0); + cache->UnmapAshmem(); + if (!ret) { + HDF_LOGE("Write cache failed."); + return HDF_FAILURE; + } + + m_cacheBuffer = cache; + + // SharedBuffer: fd, bufferSize, offset, dataSize + modelCache.emplace_back(SharedBuffer {cache->GetAshmemFd(), cache->GetAshmemSize(), 0, cache->GetAshmemSize()}); + return HDF_SUCCESS; +} + +int32_t PreparedModelService::Run(const std::vector& inputs, const std::vector& outputs, + std::vector>& outputsDims, std::vector& isOutputBufferEnough) +{ + auto ret = SetInputs(inputs); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Inputs tensor is invalid."); + return ret; + } + + if (!m_isDynamicShape) { + ret = SetOutputs(outputs); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Output tensor is invalid."); + ResetInputAndOutput(); + return ret; + } + } + + auto msRet = m_model->Predict(m_inputs, &m_outputs); + if (msRet != mindspore::kSuccess) { + HDF_LOGE("Run model failed."); + ResetInputAndOutput(); + return HDF_FAILURE; + } + + ret = UpdateOutput(outputs, outputsDims, isOutputBufferEnough); + if (ret != HDF_SUCCESS) { + HDF_LOGE("Update output dimension or data failed."); + ResetInputAndOutput(); + return ret; + } + + ResetInputAndOutput(); + + return HDF_SUCCESS; +} + +int32_t PreparedModelService::GetInputDimRanges(std::vector>& minInputDims, + std::vector>& maxInputDims) +{ + if (m_inputDims.empty()) { + HDF_LOGE("Model has not been prepared yet."); + return HDF_ERR_INVALID_PARAM; + } + + minInputDims.clear(); + maxInputDims.clear(); + + for (auto inputShape : m_inputDims) { + std::vector minInputShape; + std::vector maxInputShape; + for (auto dim : inputShape) { + if (dim != DYNAMIC_SHAPE_FLAG) { // Min and max are same if the dimension is fixed. 
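+                // Reject non-positive fixed dimensions before reporting them with min == max.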
+                if (dim <= 0) {
+                    HDF_LOGE("Dimension value is invalid.");
+                    return HDF_ERR_INVALID_PARAM;
+                }
+                minInputShape.push_back(static_cast<uint32_t>(dim));
+                maxInputShape.push_back(static_cast<uint32_t>(dim));
+            } else { // The demo reports the fixed range [1, 10] for every dynamic dimension.
+                minInputShape.push_back(1);
+                maxInputShape.push_back(10);
+            }
+        }
+        minInputDims.push_back(std::move(minInputShape));
+        maxInputDims.push_back(std::move(maxInputShape));
+    }
+
+    return HDF_SUCCESS;
+}
+
+int32_t PreparedModelService::UpdateOutput(const std::vector<IOTensor>& outputs,
+    std::vector<std::vector<int32_t>>& outputsDims, std::vector<bool>& isOutputBufferEnough)
+{
+    bool isEnough {true};
+    size_t outputSize = m_outputs.size();
+    isOutputBufferEnough.resize(outputSize, true);
+    for (size_t i = 0; i < outputSize; i++) {
+        auto& msOutput = m_outputs[i];
+        auto& output = outputs[i];
+
+        auto msShape = msOutput.Shape();
+        outputsDims.emplace_back(msShape.begin(), msShape.end());
+
+        auto dataSize = msOutput.DataSize();
+        if (dataSize > output.data.bufferSize) {
+            HDF_LOGE("Output buffer is not enough. actual size %{public}zu, buffer size %{public}u",
+                dataSize, output.data.bufferSize);
+            isOutputBufferEnough[i] = false;
+            isEnough = false;
+        }
+
+        if (isEnough && m_isDynamicShape) {
+            auto msData = msOutput.MutableData();
+            SharedBufferParser parser;
+            auto ret = parser.Init(output.data);
+            if (ret != HDF_SUCCESS) {
+                HDF_LOGE("Parse the %zu-th output data failed.", i);
+                return HDF_ERR_INVALID_PARAM;
+            }
+
+            auto data = parser.GetBufferPtr();
+            auto memRet = memcpy_s(data, dataSize, msData, dataSize);
+            if (memRet != EOK) {
+                HDF_LOGE("Copy output memory failed.");
+                return HDF_FAILURE;
+            }
+        }
+    }
+
+    return HDF_SUCCESS;
+}
+
+void PreparedModelService::ResetInputAndOutput()
+{
+    for (auto& msInput : m_inputs) {
+        msInput.SetData(nullptr);
+    }
+
+    if (!m_isDynamicShape) {
+        for (auto& msOutput : m_outputs) {
+            msOutput.SetData(nullptr);
+        }
+    }
+}
+
+int32_t PreparedModelService::Compile(std::shared_ptr<mindspore::schema::MetaGraphT> graph)
+{
+    if (graph == nullptr) {
+        HDF_LOGE("Graph cannot be nullptr.");
+        return HDF_ERR_INVALID_PARAM;
+    }
+    for (auto i : graph->inputIndex) {
+        auto inputShape = graph->allTensors[i]->dims;
+        auto iter = std::find(inputShape.begin(), inputShape.end(), DYNAMIC_SHAPE_FLAG);
+        if (iter != inputShape.end()) {
+            m_isDynamicShape = true;
+            break;
+        }
+    }
+    auto offset = mindspore::schema::MetaGraph::Pack(m_builder, graph.get());
+    m_builder.Finish(offset);
+    mindspore::schema::FinishMetaGraphBuffer(m_builder, offset);
+    auto modelSize = m_builder.GetSize();
+    uint8_t* modelBuffer = m_builder.GetBufferPointer();
+    if (modelBuffer == nullptr) {
+        HDF_LOGE("Model is invalid.");
+        return HDF_FAILURE;
+    }
+
+    m_model = std::make_shared<mindspore::Model>();
+    mindspore::Status msRet = m_model->Build(modelBuffer, modelSize, mindspore::kMindIR, m_context);
+    if (msRet != mindspore::kSuccess) {
+        HDF_LOGE("Prepare model failed, please make sure the model is valid.");
+        return HDF_FAILURE;
+    }
+
+    auto ret = GetMSInputsAndOutputs();
+    if (ret != HDF_SUCCESS) {
+        HDF_LOGE("Model without inputs or outputs is invalid.");
+        return ret;
+    }
+
+    for (auto input : m_inputs) {
+        m_inputDims.push_back(input.Shape());
+    }
+
+    return HDF_SUCCESS;
+}
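On the framework side, these pieces meet as follows: the NNRT runtime calls GetInputDimRanges on a prepared model to learn which shapes it accepts, then checks user tensors against those bounds before Run. A hedged caller sketch, using only the signatures declared in this file:

    std::vector<std::vector<uint32_t>> minDims;
    std::vector<std::vector<uint32_t>> maxDims;
    if (preparedModel->GetInputDimRanges(minDims, maxDims) == HDF_SUCCESS) {
        // minDims[i][j] and maxDims[i][j] bound dimension j of input i
    }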
+
+int32_t PreparedModelService::Compile(const void* modelBuffer, size_t length)
+{
+    if (modelBuffer == nullptr || length == 0) {
+        HDF_LOGE("ModelBuffer cannot be nullptr and length cannot be zero.");
+        return HDF_ERR_INVALID_PARAM;
+    }
+
+    m_model = std::make_shared<mindspore::Model>();
+    mindspore::Status msRet = m_model->Build(modelBuffer, length, mindspore::kMindIR, m_context);
+    if (msRet != mindspore::kSuccess) {
+        HDF_LOGE("Prepare model from cache failed, please make sure the model cache is valid.");
+        return HDF_FAILURE;
+    }
+
+    auto ret = GetMSInputsAndOutputs();
+    if (ret != HDF_SUCCESS) {
+        HDF_LOGE("Model without inputs or outputs is invalid.");
+        return ret;
+    }
+
+    for (auto input : m_inputs) {
+        auto shapes = input.Shape();
+        if (std::find(shapes.begin(), shapes.end(), DYNAMIC_SHAPE_FLAG) != shapes.end()) {
+            m_isDynamicShape = true;
+            break;
+        }
+    }
+
+    for (auto input : m_inputs) {
+        m_inputDims.push_back(input.Shape());
+    }
+
+    return HDF_SUCCESS;
+}
+
+int32_t PreparedModelService::SetInputs(const std::vector<IOTensor>& inputs)
+{
+    if (inputs.size() != m_inputs.size()) {
+        HDF_LOGE("inputs size is invalid. expect: %zu, actual: %zu", m_inputs.size(), inputs.size());
+        return HDF_ERR_INVALID_PARAM;
+    }
+    for (auto& ash : m_inputAshmems) {
+        ash->UnmapAshmem();
+        ash->CloseAshmem();
+    }
+    m_inputAshmems.clear();
+
+    int32_t ret {0};
+    size_t inputSize = m_inputs.size();
+    std::vector<std::vector<int64_t>> tmpAllDims;
+    for (size_t i = 0; i < inputSize; i++) {
+        auto& input = inputs[i];
+        auto& msInput = m_inputs[i];
+        ret = CompareTensor(input, msInput);
+        if (ret != HDF_SUCCESS) {
+            HDF_LOGE("Input tensor does not match that of the model. Please check the input tensor.");
+            return ret;
+        }
+        tmpAllDims.emplace_back(input.dimensions.begin(), input.dimensions.end());
+    }
+
+    if (m_isDynamicShape) {
+        auto msRet = m_model->Resize(m_inputs, tmpAllDims);
+        if (msRet != mindspore::kSuccess) {
+            HDF_LOGE("Resize for dynamic inputs failed.");
+            return HDF_FAILURE;
+        }
+        ret = GetMSInputsAndOutputs();
+        if (ret != HDF_SUCCESS) {
+            HDF_LOGE("Get MS inputs or outputs failed after resize.");
+            return ret;
+        }
+    }
+
+    for (size_t i = 0; i < inputSize; i++) {
+        auto& input = inputs[i];
+        auto& msInput = m_inputs[i];
+        sptr<Ashmem> ashptr = ParseBuffer(input.data);
+        if (ashptr == nullptr) {
+            HDF_LOGE("Parse the %zu-th input data failed.", i);
+            return HDF_ERR_INVALID_PARAM;
+        }
+
+        auto data = const_cast<void*>(ashptr->ReadFromAshmem(input.data.dataSize, 0));
+        msInput.SetData(data);
+        m_inputAshmems.emplace_back(ashptr);
+    }
+    return HDF_SUCCESS;
+}
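SetInputs binds each caller-provided ashmem region directly to the corresponding MindSpore tensor via SetData, so inference reads the shared memory in place without an extra copy. A hedged sketch of the IOTensor a caller would pass (field names as used in CompareTensor and SetInputs; the enum values and shape are illustrative assumptions):

    IOTensor input;
    input.dataType = DataType::DATA_TYPE_FLOAT32;   // assumed enum value; must match the model
    input.format = Format::FORMAT_NHWC;
    input.dimensions = {1, 224, 224, 3};            // hypothetical shape
    input.data = buffer;                            // SharedBuffer returned by AllocateBuffer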
+
+int32_t PreparedModelService::SetOutputs(const std::vector<IOTensor>& outputs)
+{
+    HDF_LOGI("Start to set outputs, m_outputs size=%zu", m_outputs.size());
+    if (outputs.size() != m_outputs.size()) {
+        HDF_LOGE("outputs size is invalid. expect: %{public}zu, actual: %{public}zu",
+            m_outputs.size(), outputs.size());
+        return HDF_ERR_INVALID_PARAM;
+    }
+    for (auto& ash : m_outputAshmems) {
+        ash->UnmapAshmem();
+        ash->CloseAshmem();
+    }
+    m_outputAshmems.clear();
+
+    for (size_t i = 0; i < m_outputs.size(); i++) {
+        auto& output = outputs[i];
+        auto& msOutput = m_outputs[i];
+
+        sptr<Ashmem> ashptr = ParseBuffer(output.data);
+        if (ashptr == nullptr) {
+            HDF_LOGE("Parse the %{public}zu-th output data failed.", i);
+            return HDF_ERR_INVALID_PARAM;
+        }
+
+        auto data = const_cast<void*>(ashptr->ReadFromAshmem(output.data.dataSize, 0));
+        msOutput.SetAllocator(nullptr);
+        msOutput.SetData(data);
+        m_outputAshmems.emplace_back(ashptr);
+    }
+    return HDF_SUCCESS;
+}
+
+int32_t PreparedModelService::GetMSInputsAndOutputs()
+{
+    m_inputs = m_model->GetInputs();
+    if (m_inputs.empty()) {
+        HDF_LOGE("Get inputs failed.");
+        return HDF_FAILURE;
+    }
+
+    m_outputs = m_model->GetOutputs();
+    if (m_outputs.empty()) {
+        HDF_LOGE("Get outputs failed.");
+        return HDF_FAILURE;
+    }
+    return HDF_SUCCESS;
+}
+
+int32_t PreparedModelService::CompareTensor(const IOTensor& tensor, const mindspore::MSTensor& msTensor)
+{
+    auto dataType = static_cast<DataType>(msTensor.DataType());
+    if (tensor.dataType != dataType) {
+        HDF_LOGE("Data type of tensor does not match that of the model.");
+        return HDF_ERR_INVALID_PARAM;
+    }
+
+    auto format = static_cast<Format>(msTensor.format());
+    if (tensor.format != format) {
+        HDF_LOGE("Format of tensor does not match that of the model.");
+        return HDF_ERR_INVALID_PARAM;
+    }
+
+    if (tensor.dimensions.size() != msTensor.Shape().size()) {
+        HDF_LOGE("Rank of tensor does not match that of the model.");
+        return HDF_ERR_INVALID_PARAM;
+    }
+
+    for (size_t i = 0; i < tensor.dimensions.size(); i++) {
+        if (msTensor.Shape()[i] != DYNAMIC_SHAPE_FLAG && tensor.dimensions[i] != msTensor.Shape()[i]) {
+            HDF_LOGE("Shape of tensor does not match that of the model.");
+            return HDF_ERR_INVALID_PARAM;
+        }
+    }
+
+    return HDF_SUCCESS;
+}
+
+sptr<Ashmem> PreparedModelService::ParseBuffer(const SharedBuffer& buffer)
+{
+    if (buffer.fd == -1) {
+        HDF_LOGE("Invalid buffer fd, it cannot be -1.");
+        return nullptr;
+    }
+
+    HDF_LOGW("NNRT buffer fd=%{public}d, length=%{public}u", buffer.fd, buffer.dataSize);
+
+    sptr<Ashmem> ashptr = new (std::nothrow) Ashmem(buffer.fd, buffer.bufferSize);
+    if (ashptr == nullptr) {
+        HDF_LOGE("Create shared memory failed.");
+        return nullptr;
+    }
+
+    if (!ashptr->MapReadAndWriteAshmem()) {
+        HDF_LOGE("Map buffer fd to address failed.");
+        return nullptr;
+    }
+
+    const void* data = ashptr->ReadFromAshmem(buffer.dataSize, buffer.offset);
+    if (data == nullptr) {
+        HDF_LOGE("Get data address failed.");
+        ashptr->UnmapAshmem();
+        ashptr->CloseAshmem();
+        return nullptr;
+    }
+    return ashptr;
+}
+} // V2_0
+} // Nnrt
+} // HDI
+} // OHOS
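SharedBufferParser, implemented in the next file, wraps the map/validate/unmap sequence shown in ParseBuffer above. Typical use inside the service, per its header:

    SharedBufferParser parser;
    if (parser.Init(buffer) == HDF_SUCCESS) {   // maps the fd and validates offset/dataSize
        void* addr = parser.GetBufferPtr();
        // read or write addr; the mapping is released in the parser's destructor
    }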
diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/src/shared_buffer_parser.cpp b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/shared_buffer_parser.cpp
new file mode 100644
index 0000000..69416b6
--- /dev/null
+++ b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/shared_buffer_parser.cpp
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "shared_buffer_parser.h"
+
+#include "ashmem.h"
+#include "v2_0/nnrt_types.h"
+#include "hdf_log.h"
+
+namespace OHOS {
+namespace HDI {
+namespace Nnrt {
+namespace V2_0 {
+SharedBufferParser::~SharedBufferParser()
+{
+    if (m_ashptr != nullptr) {
+        m_ashptr->UnmapAshmem();
+        m_ashptr->CloseAshmem();
+        m_bufferAddr = nullptr;
+    }
+}
+
+int32_t SharedBufferParser::Init(const std::string& name, int32_t size)
+{
+    HDF_LOGI("Init SharedBufferParser from name and size.");
+    sptr<Ashmem> ashptr = Ashmem::CreateAshmem(name.c_str(), size);
+    if (ashptr == nullptr) {
+        HDF_LOGE("Create ashmem from size failed.");
+        return HDF_FAILURE;
+    }
+
+    SharedBuffer buffer;
+    buffer.fd = ashptr->GetAshmemFd();
+    buffer.bufferSize = ashptr->GetAshmemSize();
+    buffer.offset = 0;
+    buffer.dataSize = size;
+
+    auto ret = Init(buffer);
+    if (ret != HDF_SUCCESS) {
+        HDF_LOGE("Init SharedBufferParser failed.");
+        return ret;
+    }
+    return HDF_SUCCESS;
+}
+
+int32_t SharedBufferParser::Init(const SharedBuffer& buffer)
+{
+    if (buffer.fd == INVALID_FD) {
+        HDF_LOGE("Invalid buffer fd, it cannot be %{public}d.", INVALID_FD);
+        return HDF_ERR_INVALID_PARAM;
+    }
+
+    m_ashptr = new (std::nothrow) Ashmem(buffer.fd, buffer.bufferSize);
+    if (m_ashptr == nullptr) {
+        HDF_LOGE("Create ashmem failed.");
+        return HDF_FAILURE;
+    }
+
+    if (!m_ashptr->MapReadAndWriteAshmem()) {
+        HDF_LOGE("Map buffer fd to address failed.");
+        return HDF_FAILURE;
+    }
+
+    auto bufferAddr = m_ashptr->ReadFromAshmem(buffer.dataSize, buffer.offset);
+    if (bufferAddr == nullptr) {
+        HDF_LOGE("Invalid dataSize or offset of SharedBuffer.");
+        return HDF_ERR_INVALID_PARAM;
+    }
+    m_bufferAddr = const_cast<void*>(bufferAddr);
+
+    m_buffer = buffer;
+    return HDF_SUCCESS;
+}
+
+void* SharedBufferParser::GetBufferPtr()
+{
+    return m_bufferAddr;
+}
+
+SharedBuffer SharedBufferParser::GetBuffer()
+{
+    return m_buffer;
+}
+} // V2_0
+} // Nnrt
+} // HDI
+} // OHOS
\ No newline at end of file
diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/src/validation.cpp b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/validation.cpp
new file mode 100644
index 0000000..03521c7
--- /dev/null
+++ b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/validation.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include "validation.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V2_0 { +int32_t ValidatePerformanceMode(PerformanceMode mode) +{ + if (mode < PerformanceMode::PERFORMANCE_NONE || mode > PerformanceMode::PERFORMANCE_EXTREME) { + return false; + } + + return true; +} + +int32_t ValidatePriority(Priority priority) +{ + if (priority < Priority::PRIORITY_NONE || priority > Priority::PRIORITY_HIGH) { + return false; + } + + return true; +} + +int32_t ValidateDataType(DataType dataType) +{ + if (dataType < DataType::DATA_TYPE_UNKNOWN || dataType > DataType::DATA_TYPE_FLOAT64) { + return false; + } + + if (dataType > DataType::DATA_TYPE_UNKNOWN && dataType < DataType::DATA_TYPE_BOOL) { + return false; + } + + if (dataType > DataType::DATA_TYPE_BOOL && dataType < DataType::DATA_TYPE_INT8) { + return false; + } + + if (dataType > DataType::DATA_TYPE_UINT64 && dataType < DataType::DATA_TYPE_FLOAT16) { + return false; + } + + return true; +} + +int32_t ValidateFormat(Format format) +{ + if (format < Format::FORMAT_NONE || format > Format::FORMAT_NHWC) { + return false; + } + + return true; +} +} // namespace V2_0 +} // namespace Nnrt +} // namespace HDI +} // namespace OHOS \ No newline at end of file -- Gitee From cd22fd7e8df61314c53a78da8126d6bd8f47b6e4 Mon Sep 17 00:00:00 2001 From: wangchuanxia Date: Fri, 7 Apr 2023 10:55:34 +0800 Subject: [PATCH 07/12] add returncode Signed-off-by: wangchuanxia --- BUILD.gn | 8 +- frameworks/BUILD.gn | 1 + frameworks/native/device_discover_v2_0.cpp | 14 ++- frameworks/native/hdi_device_v1_0.cpp | 4 +- frameworks/native/hdi_device_v2_0.cpp | 115 ++++++++++++------ frameworks/native/hdi_prepared_model_v2_0.cpp | 25 ++-- .../native/hdi_returncode_transform.cpp | 67 ++++++++++ frameworks/native/hdi_returncode_transform.h | 29 +++++ 8 files changed, 203 insertions(+), 60 deletions(-) create mode 100644 frameworks/native/hdi_returncode_transform.cpp create mode 100644 frameworks/native/hdi_returncode_transform.h diff --git a/BUILD.gn b/BUILD.gn index a6f0866..bc1599a 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -17,7 +17,7 @@ group("nnrt_target") { deps = [ "frameworks:libneural_network_runtime" ] } -# group("nnrt_test_target") { -# testonly = true -# deps = [ "test/unittest:unittest" ] -# } +group("nnrt_test_target") { + testonly = true + deps = [ "test/unittest:unittest" ] +} diff --git a/frameworks/BUILD.gn b/frameworks/BUILD.gn index f677512..bc88015 100644 --- a/frameworks/BUILD.gn +++ b/frameworks/BUILD.gn @@ -29,6 +29,7 @@ nnrt_sources = [ "native/hdi_device_v2_0.cpp", "native/hdi_prepared_model_v1_0.cpp", "native/hdi_prepared_model_v2_0.cpp", + "native/hdi_returncode_transform.cpp", "native/inner_model.cpp", "native/memory_manager.cpp", "native/neural_network_runtime.cpp", diff --git a/frameworks/native/device_discover_v2_0.cpp b/frameworks/native/device_discover_v2_0.cpp index de5e8b7..d4a1cac 100644 --- a/frameworks/native/device_discover_v2_0.cpp +++ b/frameworks/native/device_discover_v2_0.cpp @@ -15,6 +15,7 @@ #include "device_discover.h" #include "hdi_device_v2_0.h" +#include "hdi_returncode_transform.h" #include "common/log.h" #include "common/utils.h" @@ -29,20 +30,23 @@ std::shared_ptr DiscoverHDIDevicesV2_0(std::string& deviceName, std::str return nullptr; } - auto hdiRet = iDevice->GetDeviceName(deviceName); + V2_0::NNRT_ReturnCode returnCode; + auto hdiRet = iDevice->GetDeviceName(deviceName, returnCode); if (hdiRet != HDF_SUCCESS) { - LOGW("Get device name failed. ErrorCode=%d", hdiRet); + LOGW("Get device name failed. 
ErrorCode=%{public}d, innerHDIRet=%{public}s", + hdiRet, ConverterRetToString(returnCode).c_str()); return nullptr; } - hdiRet = iDevice->GetVendorName(vendorName); + hdiRet = iDevice->GetVendorName(vendorName, returnCode); if (hdiRet != HDF_SUCCESS) { - LOGW("Get vendor name failed. ErrorCode=%d", hdiRet); + LOGW("Get vendor name failed. ErrorCode=%{public}d, innerHDIRet=%{public}s", + hdiRet, ConverterRetToString(returnCode).c_str()); return nullptr; } std::pair hdiVersion; hdiRet = iDevice->GetVersion(hdiVersion.first, hdiVersion.second); if (hdiRet != HDF_SUCCESS) { - LOGW("Get version failed. ErrorCode=%d", hdiRet); + LOGW("Get version failed. ErrorCode=%{public}d", hdiRet); return nullptr; } version = 'v' + std::to_string(hdiVersion.first) + '_' + std::to_string(hdiVersion.second); diff --git a/frameworks/native/hdi_device_v1_0.cpp b/frameworks/native/hdi_device_v1_0.cpp index 721103b..3808364 100644 --- a/frameworks/native/hdi_device_v1_0.cpp +++ b/frameworks/native/hdi_device_v1_0.cpp @@ -150,7 +150,7 @@ OH_NN_ReturnCode HDIDeviceV1_0::GetSupportedOperation(std::shared_ptr 0) { @@ -242,7 +242,7 @@ OH_NN_ReturnCode HDIDeviceV1_0::PrepareModel(std::shared_ptr 0) { diff --git a/frameworks/native/hdi_device_v2_0.cpp b/frameworks/native/hdi_device_v2_0.cpp index 1ba9f9d..5e01560 100644 --- a/frameworks/native/hdi_device_v2_0.cpp +++ b/frameworks/native/hdi_device_v2_0.cpp @@ -19,6 +19,7 @@ #include "mindir.h" #include "hdi_prepared_model_v2_0.h" +#include "hdi_returncode_transform.h" #include "memory_manager.h" #include "transform.h" #include "common/log.h" @@ -93,9 +94,11 @@ HDIDeviceV2_0::HDIDeviceV2_0(OHOS::sptr device) : m_iDevice(d OH_NN_ReturnCode HDIDeviceV2_0::GetDeviceName(std::string& name) { - auto ret = m_iDevice->GetDeviceName(name); + V2_0::NNRT_ReturnCode returnCode; + auto ret = m_iDevice->GetDeviceName(name, returnCode); if (ret != HDF_SUCCESS) { - LOGE("Get HDI device name failed. ErrorCode=%d", ret); + LOGE("Get HDI device name failed. ErrorCode=%{public}d, innerHDIRet=%{public}s", + ret, ConverterRetToString(returnCode).c_str()); return OH_NN_UNAVALIDABLE_DEVICE; } return OH_NN_SUCCESS; @@ -103,9 +106,11 @@ OH_NN_ReturnCode HDIDeviceV2_0::GetDeviceName(std::string& name) OH_NN_ReturnCode HDIDeviceV2_0::GetVendorName(std::string& name) { - auto ret = m_iDevice->GetVendorName(name); + V2_0::NNRT_ReturnCode returnCode; + auto ret = m_iDevice->GetVendorName(name, returnCode); if (ret != HDF_SUCCESS) { - LOGE("Get HDI device vendor name failed. ErrorCode=%d", ret); + LOGE("Get HDI device vendor name failed. ErrorCode=%{public}d, innerHDIRet=%{public}s", + ret, ConverterRetToString(returnCode).c_str()); return OH_NN_UNAVALIDABLE_DEVICE; } return OH_NN_SUCCESS; @@ -120,9 +125,11 @@ OH_NN_ReturnCode HDIDeviceV2_0::GetVersion(std::string& version) OH_NN_ReturnCode HDIDeviceV2_0::GetDeviceType(OH_NN_DeviceType& deviceType) { V2_0::DeviceType iDeviceType; - auto ret = m_iDevice->GetDeviceType(iDeviceType); + V2_0::NNRT_ReturnCode returnCode; + auto ret = m_iDevice->GetDeviceType(iDeviceType, returnCode); if (ret != HDF_SUCCESS) { - LOGE("Get HDI device type failed. ErrorCode=%d", ret); + LOGE("Get HDI device type failed. 
ErrorCode=%{public}d, innerHDIRet=%{public}s", + ret, ConverterRetToString(returnCode).c_str()); return OH_NN_UNAVALIDABLE_DEVICE; } @@ -133,9 +140,11 @@ OH_NN_ReturnCode HDIDeviceV2_0::GetDeviceType(OH_NN_DeviceType& deviceType) OH_NN_ReturnCode HDIDeviceV2_0::GetDeviceStatus(DeviceStatus& status) { V2_0::DeviceStatus iDeviceStatus; - auto ret = m_iDevice->GetDeviceStatus(iDeviceStatus); + V2_0::NNRT_ReturnCode returnCode; + auto ret = m_iDevice->GetDeviceStatus(iDeviceStatus, returnCode); if (ret != HDF_SUCCESS) { - LOGE("Get HDI device status failed. ErrorCode=%d", ret); + LOGE("Get HDI device status failed. ErrorCode=%{public}d, innerHDIRet=%{public}s", + ret, ConverterRetToString(returnCode).c_str()); return OH_NN_UNAVALIDABLE_DEVICE; } status = TransHDIDeviceV2_0Status(iDeviceStatus); @@ -150,25 +159,28 @@ OH_NN_ReturnCode HDIDeviceV2_0::GetSupportedOperation(std::shared_ptr 0) { - hdiRet = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer); + V2_0::NNRT_ReturnCode returnCode; + hdiRet = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer, returnCode); if (hdiRet != HDF_SUCCESS || tensorBuffer.fd == INVALID_FD) { - LOGE("Allocate tensor buffer error when get supported operation. ErrorCode: %d", hdiRet); + LOGE("Allocate tensor buffer error when get supported operation. ErrorCode: %{public}d, \ + innerHDIRet=%{public}s", hdiRet, ConverterRetToString(returnCode).c_str()); return OH_NN_FAILED; } } - auto iModel = mindspore::lite::MindIR_LiteGraph_To_Model(model.get(), tensorBuffer); + auto iModel = mindspore::lite::MindIR_LiteGraph_To_Model_V2_0(model.get(), tensorBuffer); if (iModel == nullptr) { LOGE("Parse litegraph to hdi model failed."); ReleaseSharedBuffer(tensorBuffer); return OH_NN_FAILED; } - hdiRet = m_iDevice->GetSupportedOperation(*iModel, ops); + V2_0::NNRT_ReturnCode returnCode; + hdiRet = m_iDevice->GetSupportedOperation(*iModel, ops, returnCode); mindspore::lite::MindIR_Model_Destroy(&iModel); auto ret = ReleaseSharedBuffer(tensorBuffer); @@ -177,7 +189,8 @@ OH_NN_ReturnCode HDIDeviceV2_0::GetSupportedOperation(std::shared_ptrIsFloat16PrecisionSupported(isSupported); + V2_0::NNRT_ReturnCode returnCode; + auto ret = m_iDevice->IsFloat16PrecisionSupported(isSupported, returnCode); if (ret != HDF_SUCCESS) { - LOGE("Query fp16 precision supported failed. ErrorCode=%d", ret); + LOGE("Query fp16 precision supported failed. ErrorCode=%{public}d, innerHDIRet=%{public}s", + ret, ConverterRetToString(returnCode).c_str()); return OH_NN_UNAVALIDABLE_DEVICE; } return OH_NN_SUCCESS; @@ -195,9 +210,11 @@ OH_NN_ReturnCode HDIDeviceV2_0::IsFloat16PrecisionSupported(bool& isSupported) OH_NN_ReturnCode HDIDeviceV2_0::IsPerformanceModeSupported(bool& isSupported) { - auto ret = m_iDevice->IsPerformanceModeSupported(isSupported); + V2_0::NNRT_ReturnCode returnCode; + auto ret = m_iDevice->IsPerformanceModeSupported(isSupported, returnCode); if (ret != HDF_SUCCESS) { - LOGE("Query performance mode supported failed. ErrorCode=%d", ret); + LOGE("Query performance mode supported failed. 
ErrorCode=%{public}d, innerHDIRet=%{public}s", + ret, ConverterRetToString(returnCode).c_str()); return OH_NN_UNAVALIDABLE_DEVICE; } return OH_NN_SUCCESS; @@ -205,9 +222,11 @@ OH_NN_ReturnCode HDIDeviceV2_0::IsPerformanceModeSupported(bool& isSupported) OH_NN_ReturnCode HDIDeviceV2_0::IsPrioritySupported(bool& isSupported) { - auto ret = m_iDevice->IsPrioritySupported(isSupported); + V2_0::NNRT_ReturnCode returnCode; + auto ret = m_iDevice->IsPrioritySupported(isSupported, returnCode); if (ret != HDF_SUCCESS) { - LOGE("Query priority supported failed. ErrorCode=%d", ret); + LOGE("Query priority supported failed. ErrorCode=%{public}d, innerHDIRet=%{public}s", + ret, ConverterRetToString(returnCode).c_str()); return OH_NN_UNAVALIDABLE_DEVICE; } return OH_NN_SUCCESS; @@ -215,9 +234,11 @@ OH_NN_ReturnCode HDIDeviceV2_0::IsPrioritySupported(bool& isSupported) OH_NN_ReturnCode HDIDeviceV2_0::IsDynamicInputSupported(bool& isSupported) { - auto ret = m_iDevice->IsDynamicInputSupported(isSupported); + V2_0::NNRT_ReturnCode returnCode; + auto ret = m_iDevice->IsDynamicInputSupported(isSupported, returnCode); if (ret != HDF_SUCCESS) { - LOGE("Query dynamic input supported failed. ErrorCode=%d", ret); + LOGE("Query dynamic input supported failed. ErrorCode=%{public}d, innerHDIRet=%{public}s", + ret, ConverterRetToString(returnCode).c_str()); return OH_NN_UNAVALIDABLE_DEVICE; } return OH_NN_SUCCESS; @@ -225,9 +246,11 @@ OH_NN_ReturnCode HDIDeviceV2_0::IsDynamicInputSupported(bool& isSupported) OH_NN_ReturnCode HDIDeviceV2_0::IsModelCacheSupported(bool& isSupported) { - auto ret = m_iDevice->IsModelCacheSupported(isSupported); + V2_0::NNRT_ReturnCode returnCode; + auto ret = m_iDevice->IsModelCacheSupported(isSupported, returnCode); if (ret != HDF_SUCCESS) { - LOGE("Query cache model supported failed. ErrorCode=%d", ret); + LOGE("Query cache model supported failed. ErrorCode=%{public}d, innerHDIRet=%{public}s", + ret, ConverterRetToString(returnCode).c_str()); return OH_NN_UNAVALIDABLE_DEVICE; } return OH_NN_SUCCESS; @@ -242,18 +265,20 @@ OH_NN_ReturnCode HDIDeviceV2_0::PrepareModel(std::shared_ptr 0) { - hdiRet = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer); + V2_0::NNRT_ReturnCode returnCode; + hdiRet = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer, returnCode); if (hdiRet != HDF_SUCCESS || tensorBuffer.fd == INVALID_FD) { - LOGE("Allocate tensor buffer error when prepare model. ErrorCode: %d", hdiRet); + LOGE("Allocate tensor buffer error when prepare model. ErrorCode: %{public}d, innerHDIRet=%{public}s", + hdiRet, ConverterRetToString(returnCode).c_str()); return OH_NN_FAILED; } } - V2_0::Model* iModel = mindspore::lite::MindIR_LiteGraph_To_Model(model.get(), tensorBuffer); + V2_0::Model* iModel = mindspore::lite::MindIR_LiteGraph_To_Model_V2_0(model.get(), tensorBuffer); if (iModel == nullptr) { LOGE("Parse litegraph to hdi model failed."); ReleaseSharedBuffer(tensorBuffer); @@ -266,7 +291,8 @@ OH_NN_ReturnCode HDIDeviceV2_0::PrepareModel(std::shared_ptr iPreparedModel; - auto preparedRet = m_iDevice->PrepareModel(*iModel, iModelConfig, iPreparedModel); + V2_0::NNRT_ReturnCode returnCode; + auto preparedRet = m_iDevice->PrepareModel(*iModel, iModelConfig, iPreparedModel, returnCode); mindspore::lite::MindIR_Model_Destroy(&iModel); auto ret = ReleaseSharedBuffer(tensorBuffer); @@ -275,7 +301,8 @@ OH_NN_ReturnCode HDIDeviceV2_0::PrepareModel(std::shared_ptrGetMemory(modelCache[i].buffer, memory); if (ret != OH_NN_SUCCESS) { - LOGE("The %zuth model cache is invalid. 
Please put valid model cache.", i + 1); + LOGE("The %{public}zuth model cache is invalid. Please put valid model cache.", i + 1); return ret; } iBuffers.emplace_back(V2_0::SharedBuffer {memory.fd, memory.length, 0, memory.length}); @@ -312,9 +339,11 @@ OH_NN_ReturnCode HDIDeviceV2_0::PrepareModelFromModelCache(const std::vector iPreparedModel; - auto hdiRet = m_iDevice->PrepareModelFromModelCache(iBuffers, iModelConfig, iPreparedModel); + V2_0::NNRT_ReturnCode returnCode; + auto hdiRet = m_iDevice->PrepareModelFromModelCache(iBuffers, iModelConfig, iPreparedModel, returnCode); if (hdiRet != HDF_SUCCESS) { - LOGE("Prepare model from cache failed. ErrorCode=%d", hdiRet); + LOGE("Prepare model from cache failed. ErrorCode=%{public}d, innerHDIRet=%{public}s", + hdiRet, ConverterRetToString(returnCode).c_str()); return OH_NN_UNAVALIDABLE_DEVICE; } @@ -334,9 +363,11 @@ void* HDIDeviceV2_0::AllocateBuffer(size_t length) } V2_0::SharedBuffer buffer; - auto ret = m_iDevice->AllocateBuffer(length, buffer); + V2_0::NNRT_ReturnCode returnCode; + auto ret = m_iDevice->AllocateBuffer(length, buffer, returnCode); if (ret != HDF_SUCCESS) { - LOGE("Allocate buffer error. ErrorCode: %d", ret); + LOGE("Allocate buffer error. ErrorCode: %{public}d, innerHDIRet=%{public}s", + ret, ConverterRetToString(returnCode).c_str()); return nullptr; } @@ -364,9 +395,11 @@ OH_NN_ReturnCode HDIDeviceV2_0::ReleaseBuffer(const void* buffer) } V2_0::SharedBuffer hdiBuffer {memory.fd, memory.length, 0, memory.length}; - auto deviceResult = m_iDevice->ReleaseBuffer(hdiBuffer); + V2_0::NNRT_ReturnCode returnCode; + auto deviceResult = m_iDevice->ReleaseBuffer(hdiBuffer, returnCode); if (deviceResult != HDF_SUCCESS) { - LOGE("Device release buffer error. ErrorCode: %d", deviceResult); + LOGE("Device release buffer error. ErrorCode: %{public}d, innerHDIRet=%{public}s", + deviceResult, ConverterRetToString(returnCode).c_str()); return OH_NN_FAILED; } @@ -382,13 +415,15 @@ OH_NN_ReturnCode HDIDeviceV2_0::ReleaseBuffer(const void* buffer) OH_NN_ReturnCode HDIDeviceV2_0::ReleaseSharedBuffer(const V2_0::SharedBuffer& buffer) { if (buffer.fd == INVALID_FD) { - LOGI("No need to release. fd=%d", INVALID_FD); + LOGI("No need to release. fd=%{public}d", INVALID_FD); return OH_NN_SUCCESS; } - auto ret = m_iDevice->ReleaseBuffer(buffer); + V2_0::NNRT_ReturnCode returnCode; + auto ret = m_iDevice->ReleaseBuffer(buffer, returnCode); if (ret != HDF_SUCCESS) { - LOGE("Device release buffer error. ErrorCode=%d", ret); + LOGE("Device release buffer error. ErrorCode=%{public}d, innerHDIRet=%{public}s", + ret, ConverterRetToString(returnCode).c_str()); return OH_NN_FAILED; } return OH_NN_SUCCESS; diff --git a/frameworks/native/hdi_prepared_model_v2_0.cpp b/frameworks/native/hdi_prepared_model_v2_0.cpp index d9c6bc2..c07e7e7 100644 --- a/frameworks/native/hdi_prepared_model_v2_0.cpp +++ b/frameworks/native/hdi_prepared_model_v2_0.cpp @@ -16,6 +16,7 @@ #include "hdi_prepared_model_v2_0.h" #include "common/log.h" +#include "hdi_returncode_transform.h" #include "memory_manager.h" namespace OHOS { @@ -102,14 +103,16 @@ HDIPreparedModelV2_0::HDIPreparedModelV2_0(OHOS::sptr hdiP OH_NN_ReturnCode HDIPreparedModelV2_0::ExportModelCache(std::vector& modelCache) { if (!modelCache.empty()) { - LOGE("The vector of modelCache should be empty. size=%zu", modelCache.size()); + LOGE("The vector of modelCache should be empty. 
size=%{public}zu", modelCache.size());
         return OH_NN_INVALID_PARAMETER;
     }
 
     std::vector<V2_0::SharedBuffer> iBuffers;
-    auto ret = m_hdiPreparedModel->ExportModelCache(iBuffers);
-    if (ret != HDF_SUCCESS) {
-        LOGE("Export model cache failed. ErrorCode=%d", ret);
+    V2_0::NNRT_ReturnCode returnCode;
+    auto ret = m_hdiPreparedModel->ExportModelCache(iBuffers, returnCode);
+    if (ret != HDF_SUCCESS || returnCode != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) {
+        LOGE("Export model cache failed. ErrorCode=%{public}d, innerHDIRet=%{public}s",
+            ret, ConverterRetToString(returnCode).c_str());
         return OH_NN_UNAVALIDABLE_DEVICE;
     }
 
@@ -117,7 +120,7 @@ OH_NN_ReturnCode HDIPreparedModelV2_0::ExportModelCache(std::vector<ModelBuffer>& modelCache)
     for (size_t i = 0; i < iBuffers.size(); i++) {
         auto addr = memManager->MapMemory(iBuffers[i].fd, iBuffers[i].bufferSize);
         if (addr == nullptr) {
-            LOGE("Export the %zuth model cache failed, cannot not map fd to address.", i + 1);
+            LOGE("Export the %{public}zuth model cache failed, cannot map fd to address.", i + 1);
             return OH_NN_MEMORY_ERROR;
         }
         ModelBuffer modelbuffer {addr, iBuffers[i].bufferSize};
@@ -151,9 +154,11 @@ OH_NN_ReturnCode HDIPreparedModelV2_0::Run(const std::vector<IOTensor>& inputs,
         iOutputTensors.emplace_back(iTensor);
     }
 
-    auto ret = m_hdiPreparedModel->Run(iInputTensors, iOutputTensors, outputsDims, isOutputBufferEnough);
+    V2_0::NNRT_ReturnCode returnCode;
+    auto ret = m_hdiPreparedModel->Run(iInputTensors, iOutputTensors, outputsDims, returnCode);
     if (ret != HDF_SUCCESS || outputsDims.empty()) {
-        LOGE("Run model failed. ErrorCode=%d", ret);
+        LOGE("Run model failed. ErrorCode=%{public}d, innerHDIRet=%{public}s",
+            ret, ConverterRetToString(returnCode).c_str());
         return OH_NN_UNAVALIDABLE_DEVICE;
     }
 
@@ -163,9 +168,11 @@ OH_NN_ReturnCode HDIPreparedModelV2_0::Run(const std::vector<IOTensor>& inputs,
 OH_NN_ReturnCode HDIPreparedModelV2_0::GetInputDimRanges(std::vector<std::vector<uint32_t>>& minInputDims,
     std::vector<std::vector<uint32_t>>& maxInputDims)
 {
-    auto ret = m_hdiPreparedModel->GetInputDimRanges(minInputDims, maxInputDims);
+    V2_0::NNRT_ReturnCode returnCode;
+    auto ret = m_hdiPreparedModel->GetInputDimRanges(minInputDims, maxInputDims, returnCode);
     if (ret != HDF_SUCCESS) {
-        LOGE("GetInputDimRanges failed. ErrorCode=%d", ret);
+        LOGE("GetInputDimRanges failed. ErrorCode=%{public}d, innerHDIRet=%{public}s",
+            ret, ConverterRetToString(returnCode).c_str());
         return OH_NN_UNAVALIDABLE_DEVICE;
     }
 
diff --git a/frameworks/native/hdi_returncode_transform.cpp b/frameworks/native/hdi_returncode_transform.cpp
new file mode 100644
index 0000000..43d9c92
--- /dev/null
+++ b/frameworks/native/hdi_returncode_transform.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include "hdi_returncode_transform.h" +#include + +namespace OHOS { +namespace NeuralNetworkRuntime { +const std::unordered_map RET_STRING_MAP{ + {V2_0::NNRT_ReturnCode::NNRT_SUCCESS, "NNRT_SUCCESS"}, + {V2_0::NNRT_ReturnCode::NNRT_FAILED, "NNRT_FAILED"}, + {V2_0::NNRT_ReturnCode::NNRT_NULL_PTR, "NNRT_NULL_PTR"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_PARAMETER, "NNRT_INVALID_PARAMETER"}, + {V2_0::NNRT_ReturnCode::NNRT_MEMORY_ERROR, "NNRT_MEMORY_ERROR"}, + {V2_0::NNRT_ReturnCode::NNRT_OUT_OF_MEMORY, "NNRT_OUT_OF_MEMORY"}, + {V2_0::NNRT_ReturnCode::NNRT_OPERATION_FORBIDDEN, "NNRT_OPERATION_FORBIDDEN"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_FILE, "NNRT_INVALID_FILE"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_PATH, "NNRT_INVALID_PATH"}, + {V2_0::NNRT_ReturnCode::NNRT_INSUFFICIENT_BUFFER, "NNRT_INSUFFICIENT_BUFFER"}, + {V2_0::NNRT_ReturnCode::NNRT_NO_CHANGE, "NNRT_NO_CHANGE"}, + {V2_0::NNRT_ReturnCode::NNRT_NOT_SUPPORT, "NNRT_NOT_SUPPORT"}, + {V2_0::NNRT_ReturnCode::NNRT_SERVICE_ERROR, "NNRT_SERVICE_ERROR"}, + {V2_0::NNRT_ReturnCode::NNRT_DEVICE_ERROR, "NNRT_DEVICE_ERROR"}, + {V2_0::NNRT_ReturnCode::NNRT_DEVICE_BUSY, "NNRT_DEVICE_BUSY"}, + {V2_0::NNRT_ReturnCode::NNRT_CANCELLED, "NNRT_CANCELLED"}, + {V2_0::NNRT_ReturnCode::NNRT_PERMISSION_DENIED, "NNRT_PERMISSION_DENIED"}, + {V2_0::NNRT_ReturnCode::NNRT_TIME_OUT, "NNRT_TIME_OUT"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_TENSOR, "NNRT_INVALID_TENSOR"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_NODE, "NNRT_INVALID_NODE"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_INPUT, "NNRT_INVALID_INPUT"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_OUTPUT, "NNRT_INVALID_OUTPUT"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_DATATYPE, "NNRT_INVALID_DATATYPE"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_FORMAT, "NNRT_INVALID_FORMAT"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_TENSOR_NAME, "NNRT_INVALID_TENSOR_NAME"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_SHAPE, "NNRT_INVALID_SHAPE"}, + {V2_0::NNRT_ReturnCode::NNRT_OUT_OF_DIMENTION_RANGES, "NNRT_OUT_OF_DIMENTION_RANGES"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_BUFFER, "NNRT_INVALID_BUFFER"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_BUFFER_SIZE, "NNRT_INVALID_BUFFER_SIZE"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_PERFORMANCE_MODE, "NNRT_INVALID_PERFORMANCE_MODE"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_PRIORITY, "NNRT_INVALID_PRIORITY"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_MODEL, "NNRT_INVALID_MODEL"}, + {V2_0::NNRT_ReturnCode::NNRT_INVALID_MODEL_CACHE, "NNRT_INVALID_MODEL_CACHE"}, + {V2_0::NNRT_ReturnCode::NNRT_UNSUPPORTED_OP, "NNRT_UNSUPPORTED_OP"} +}; + +std::string ConverterRetToString(V2_0::NNRT_ReturnCode returnCode) +{ + if (RET_STRING_MAP.find(returnCode) == RET_STRING_MAP.end()) { + return ""; + } + + return RET_STRING_MAP.at(returnCode); +} +} +} \ No newline at end of file diff --git a/frameworks/native/hdi_returncode_transform.h b/frameworks/native/hdi_returncode_transform.h new file mode 100644 index 0000000..0d306c7 --- /dev/null +++ b/frameworks/native/hdi_returncode_transform.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef NEURAL_NETWORK_RUNTIME_HDI_RETURNCODE_TRANSFORM_H
+#define NEURAL_NETWORK_RUNTIME_HDI_RETURNCODE_TRANSFORM_H
+
+#include <string>
+#include <v2_0/nnrt_types.h>
+
+namespace V2_0 = OHOS::HDI::Nnrt::V2_0;
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+std::string ConverterRetToString(V2_0::NNRT_ReturnCode returnCode);
+} // namespace NeuralNetworkRuntime
+} // OHOS
+#endif // NEURAL_NETWORK_RUNTIME_HDI_RETURNCODE_TRANSFORM_H
\ No newline at end of file
-- 
Gitee
From 1d9f30fe224e2a95e10214044587cd6cf9679227 Mon Sep 17 00:00:00 2001
From: yuhanshi
Date: Tue, 4 Apr 2023 18:27:36 +0800
Subject: [PATCH 08/12] Support offline model

Signed-off-by: yuhanshi
---
 frameworks/native/compilation.cpp     |  71 +++++++++
 frameworks/native/compilation.h       |   2 +
 frameworks/native/device.h            |   4 +
 frameworks/native/hdi_device_v1_0.cpp |   8 +
 frameworks/native/hdi_device_v1_0.h   |   3 +
 frameworks/native/hdi_device_v2_0.cpp | 205 +++++++++++++++++++++++++-
 frameworks/native/hdi_device_v2_0.h   |  12 ++
 7 files changed, 301 insertions(+), 4 deletions(-)

diff --git a/frameworks/native/compilation.cpp b/frameworks/native/compilation.cpp
index ed6e737..c259932 100644
--- a/frameworks/native/compilation.cpp
+++ b/frameworks/native/compilation.cpp
@@ -607,6 +607,26 @@ OH_NN_ReturnCode Compilation::InnerBuild()
 {
     OH_NN_ReturnCode ret;
     std::shared_ptr<PreparedModel> preparedModel;
+
+    // Prepare from offline model.
+    bool isOfflineModel{false};
+    ret = IsOfflineModel(isOfflineModel);
+    if (ret != OH_NN_SUCCESS) {
+        LOGE("[Compilation] Failed when identifying the offline model.");
+        return ret;
+    }
+
+    if (isOfflineModel) {
+        ret = BuildOfflineModel(preparedModel);
+        if (ret != OH_NN_SUCCESS) {
+            LOGE("[Compilation] Failed to build offline model.");
+            return ret;
+        }
+
+        m_isBuild = true;
+        return OH_NN_SUCCESS;
+    }
+
     if (m_cachePath.empty()) {
         ret = NormalBuild(preparedModel);
         if (ret != OH_NN_SUCCESS) {
@@ -710,5 +730,56 @@ bool Compilation::IsDynamicShape() const
     }
     return false;
 }
+
+OH_NN_ReturnCode Compilation::IsOfflineModel(bool& isOfflineModel) const
+{
+    isOfflineModel = false; // Initialize the returned value
+    if (m_liteGraph == nullptr) {
+        LOGE("[Compilation] LiteGraph is empty when identifying the offline model.");
+        return OH_NN_NULL_PTR;
+    }
+
+    if (m_liteGraph->all_nodes_.size() == 0) {
+        LOGE("[Compilation] The model contains no nodes.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    // If the model consists of more than 1 node, it will not be considered an offline model.
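+    // Layout sketch (assumed from how HDIDeviceV2_0::GetOfflineModelFromLiteGraph consumes the
+    // graph later in this patch): a single NODE_TYPE_CUSTOM node wraps the vendor-compiled model,
+    // and the compiled blobs ride along as the input tensors with index >= 1.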
+ if (m_liteGraph->all_nodes_.size() > 1) { + isOfflineModel = false; + return OH_NN_SUCCESS; + } + + const mindspore::lite::LiteGraph::Node* pNode = m_liteGraph->all_nodes_[0]; + if (pNode == nullptr) { + LOGE("[Compilation] Find invalid node in the model."); + return OH_NN_NULL_PTR; + } + + const mindspore::lite::NodeType& nodeType = mindspore::lite::MindIR_Primitive_GetType(pNode->primitive_); + if (nodeType == mindspore::lite::NodeType::NODE_TYPE_CUSTOM) { + isOfflineModel = true; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode Compilation::BuildOfflineModel(std::shared_ptr& preparedModel) +{ + ModelConfig config {m_enableFp16, m_performance, m_priority}; + OH_NN_ReturnCode ret = m_device->PrepareOfflineModel(m_liteGraph, config, preparedModel); + if (ret != OH_NN_SUCCESS) { + LOGE("[Compilation] Preparing model failed when building from offline model."); + return ret; + } + + m_executionPlan = CreateSharedPtr(preparedModel, m_device); + if (m_executionPlan == nullptr) { + LOGE("[Compilation] Failed to create ExecutionPlan when building from offline model."); + return OH_NN_MEMORY_ERROR; + } + + return OH_NN_SUCCESS; +} } // namespace NeuralNetworkRuntime } // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/compilation.h b/frameworks/native/compilation.h index a85f6a5..3d81fe9 100644 --- a/frameworks/native/compilation.h +++ b/frameworks/native/compilation.h @@ -79,6 +79,8 @@ private: OH_NN_ReturnCode LoadCacheBuild(std::shared_ptr& preparedModel, const ModelCacheInfo& cacheInfo); OH_NN_ReturnCode InnerBuild(); OH_NN_ReturnCode GetCacheFileLength(std::ifstream& ifs, int& fsize) const; + OH_NN_ReturnCode IsOfflineModel(bool& isOfflineModel) const; + OH_NN_ReturnCode BuildOfflineModel(std::shared_ptr& preparedModel); }; } // namespace NeuralNetworkRuntime } // namespace OHOS diff --git a/frameworks/native/device.h b/frameworks/native/device.h index c34e043..bc0989f 100644 --- a/frameworks/native/device.h +++ b/frameworks/native/device.h @@ -52,6 +52,10 @@ public: virtual OH_NN_ReturnCode PrepareModelFromModelCache(const std::vector& modelCache, const ModelConfig& config, std::shared_ptr& preparedModel) = 0; + virtual OH_NN_ReturnCode PrepareOfflineModel(std::shared_ptr model, + const ModelConfig& config, + std::shared_ptr& preparedModel) = 0; + virtual void* AllocateBuffer(size_t length) = 0; virtual OH_NN_ReturnCode ReleaseBuffer(const void* buffer) = 0; diff --git a/frameworks/native/hdi_device_v1_0.cpp b/frameworks/native/hdi_device_v1_0.cpp index 3808364..c87c51a 100644 --- a/frameworks/native/hdi_device_v1_0.cpp +++ b/frameworks/native/hdi_device_v1_0.cpp @@ -393,5 +393,13 @@ OH_NN_ReturnCode HDIDeviceV1_0::ReleaseSharedBuffer(const V1_0::SharedBuffer& bu } return OH_NN_SUCCESS; } + +OH_NN_ReturnCode HDIDeviceV1_0::PrepareOfflineModel(std::shared_ptr model, + const ModelConfig& config, + std::shared_ptr& preparedModel) +{ + LOGE("HDIDeviceV1.0 not support PrepareOfflineModel."); + return OH_NN_OPERATION_FORBIDDEN; +} } // namespace NeuralNetworkRuntime } // namespace OHOS diff --git a/frameworks/native/hdi_device_v1_0.h b/frameworks/native/hdi_device_v1_0.h index e28beb1..9b5647c 100644 --- a/frameworks/native/hdi_device_v1_0.h +++ b/frameworks/native/hdi_device_v1_0.h @@ -52,6 +52,9 @@ public: OH_NN_ReturnCode PrepareModelFromModelCache(const std::vector& modelCache, const ModelConfig& config, std::shared_ptr& preparedModel) override; + OH_NN_ReturnCode PrepareOfflineModel(std::shared_ptr model, + const ModelConfig& config, + std::shared_ptr& 
preparedModel) override; void* AllocateBuffer(size_t length) override; OH_NN_ReturnCode ReleaseBuffer(const void* buffer) override; diff --git a/frameworks/native/hdi_device_v2_0.cpp b/frameworks/native/hdi_device_v2_0.cpp index 5e01560..68c1fe5 100644 --- a/frameworks/native/hdi_device_v2_0.cpp +++ b/frameworks/native/hdi_device_v2_0.cpp @@ -17,6 +17,7 @@ #include "hdf_base.h" #include "mindir.h" +#include "securec.h" #include "hdi_prepared_model_v2_0.h" #include "hdi_returncode_transform.h" @@ -172,7 +173,7 @@ OH_NN_ReturnCode HDIDeviceV2_0::GetSupportedOperation(std::shared_ptr& modelCache, - const ModelConfig& config, - std::shared_ptr& preparedModel) + const ModelConfig& config, + std::shared_ptr& preparedModel) { std::vector iBuffers; auto memManager = MemoryManager::GetInstance(); @@ -428,5 +429,201 @@ OH_NN_ReturnCode HDIDeviceV2_0::ReleaseSharedBuffer(const V2_0::SharedBuffer& bu } return OH_NN_SUCCESS; } + +OH_NN_ReturnCode HDIDeviceV2_0::GetOfflineModelFromLiteGraph(std::shared_ptr graph, + std::vector>& offlineModels) +{ + // graph has been checked in PrepareOfflineModel, no need to check twice. + offlineModels.clear(); + + const size_t inputNum = graph->input_indices_.size(); + if (inputNum < (size_t)2) { + LOGE("LiteGraph with offline model should have at least two input tensors, only get %zu.", inputNum); + return OH_NN_INVALID_PARAMETER; + } + + // The offline model is integrated into input tensors with index larger than 0. + mindspore::lite::TensorPtr pTensor; + std::vector offlineModel; + for (size_t i = 1; i < inputNum; i++) { + pTensor = graph->all_tensors_[i]; + offlineModel = mindspore::lite::MindIR_Tensor_GetData(pTensor); + if (offlineModel.size() == (size_t)0) { + LOGE("Offline model has size of 0, please check the ms model."); + return OH_NN_INVALID_PARAMETER; + } + + offlineModels.emplace_back(std::move(offlineModel)); + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::AllocateDeviceBufferForOfflineModel( + const std::vector>& offlineModels, std::vector& deviceBuffers) +{ + // offlineModels is guaranteed to have at least one element in GetOfflineModelFromLiteGraph, no need to check size. + deviceBuffers.clear(); + + for (const std::vector& offlineModel : offlineModels) { + const size_t offlineModelSize = offlineModel.size(); + + void* newModelBuffer = AllocateBuffer(offlineModelSize); + if (newModelBuffer == nullptr) { + // Release allocated model buffer if error happens. 
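+            // This rollback releases the buffers allocated in earlier loop iterations; on
+            // failure the caller never receives deviceBuffers, so skipping it would leak
+            // the device memory already allocated above.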
+            OH_NN_ReturnCode status {OH_NN_SUCCESS};
+            for (const ModelBuffer& deviceBuffer : deviceBuffers) {
+                status = ReleaseBuffer(deviceBuffer.buffer);
+                if (status != OH_NN_SUCCESS) {
+                    LOGE("Release shared buffer of offline model failed.");
+                    return status;
+                }
+            }
+
+            deviceBuffers.clear();
+            LOGE("Error happened when allocating shared buffer for offline model.");
+            return OH_NN_MEMORY_ERROR;
+        }
+
+        ModelBuffer modelBuffer {nullptr, 0};
+        modelBuffer.buffer = newModelBuffer;
+        modelBuffer.length = offlineModelSize;
+        deviceBuffers.emplace_back(modelBuffer);
+    }
+
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode HDIDeviceV2_0::CopyOfflineModelToDevice(const std::vector<std::vector<uint8_t>>& offlineModels,
+    std::vector<ModelBuffer>& deviceBuffers)
+{
+    if (offlineModels.size() != deviceBuffers.size()) {
+        LOGE("CopyOfflineModelToDevice failed, number of offlineModels not equal to allocated buffers.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    const void* offlineModel {nullptr};
+    size_t offlineModelSize {0};
+    void* deviceBuffer {nullptr};
+    size_t deviceBufferSize {0};
+
+    for (size_t i = 0; i < offlineModels.size(); i++) {
+        offlineModel = offlineModels[i].data();
+        offlineModelSize = offlineModels[i].size();
+        deviceBuffer = deviceBuffers[i].buffer;
+        deviceBufferSize = deviceBuffers[i].length;
+
+        // Copy offline model to shared buffer of device.
+        errno_t errorCode = memcpy_s(deviceBuffer, deviceBufferSize, offlineModel, offlineModelSize);
+        if (errorCode != EOK) {
+            LOGE("Error happened when copying offline model to device buffer. Error code: %d.", errorCode);
+            return OH_NN_MEMORY_ERROR;
+        }
+    }
+
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode HDIDeviceV2_0::PrepareOfflineModel(std::vector<ModelBuffer>& deviceBuffers,
+    const ModelConfig& config,
+    std::shared_ptr<PreparedModel>& preparedModel)
+{
+    V2_0::ModelConfig iModelConfig;
+    iModelConfig.enableFloat16 = config.enableFloat16;
+    iModelConfig.mode = TransPerformanceMode(config.mode);
+    iModelConfig.priority = TransPriority(config.priority);
+    OHOS::sptr<V2_0::IPreparedModel> iPreparedModel;
+
+    std::vector<V2_0::SharedBuffer> iBuffers;
+    auto memManager = MemoryManager::GetInstance();
+    Memory memory;
+    OH_NN_ReturnCode ret;
+    size_t numOfflineModel = deviceBuffers.size();
+    for (size_t i = 0; i < numOfflineModel; i++) {
+        ret = memManager->GetMemory(deviceBuffers[i].buffer, memory);
+        if (ret != OH_NN_SUCCESS) {
+            LOGE("Retrieve the memory of %zuth device buffer failed.", i);
+            return ret;
+        }
+        iBuffers.emplace_back(V2_0::SharedBuffer {memory.fd, memory.length, 0, memory.length});
+    }
+
+    V2_0::NNRT_ReturnCode HDIReturnCode {V2_0::NNRT_ReturnCode::NNRT_SUCCESS};
+    auto preparedRet = m_iDevice->PrepareOfflineModel(iBuffers, iModelConfig, iPreparedModel, HDIReturnCode);
+
+    // Release allocated model buffer after prepare model.
+    OH_NN_ReturnCode status {OH_NN_SUCCESS};
+    for (const ModelBuffer& deviceBuffer : deviceBuffers) {
+        status = ReleaseBuffer(deviceBuffer.buffer);
+        if (status != OH_NN_SUCCESS) {
+            LOGE("Release shared buffer of offline model failed.");
+            return status;
+        }
+    }
+    deviceBuffers.clear();
+
+    if (preparedRet != HDF_SUCCESS || iPreparedModel == nullptr) {
+        LOGE("Prepare model failed.
ErrorCode=%{public}d, innerHDIRet=%{public}s", + preparedRet, ConverterRetToString(HDIReturnCode).c_str()); + return OH_NN_FAILED; + } + + preparedModel = CreateSharedPtr(iPreparedModel); + if (preparedModel == nullptr) { + LOGE("Prepare model failed, because fail to create preparedModel instance."); + return OH_NN_MEMORY_ERROR; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::PrepareOfflineModel(std::shared_ptr model, + const ModelConfig& config, + std::shared_ptr& preparedModel) +{ + if (model == nullptr) { + LOGE("LiteGraph is empty when identifying the offline model."); + return OH_NN_NULL_PTR; + } + + std::vector> offlineModels; + OH_NN_ReturnCode status = GetOfflineModelFromLiteGraph(model, offlineModels); + if (status != OH_NN_SUCCESS) { + LOGE("Error happens when getting offline models from lite graph."); + return status; + } + + std::vector deviceBuffers; + status = AllocateDeviceBufferForOfflineModel(offlineModels, deviceBuffers); + if (status != OH_NN_SUCCESS) { + LOGE("Error happens when allocating device buffers for offline model."); + return status; + } + + status = CopyOfflineModelToDevice(offlineModels, deviceBuffers); + if (status != OH_NN_SUCCESS) { + LOGE("Error happened when copying offline models to device buffers."); + + OH_NN_ReturnCode ret {OH_NN_SUCCESS}; + // Release allocated model buffer if error happens. + for (const ModelBuffer& deviceBuffer : deviceBuffers) { + ret = ReleaseBuffer(deviceBuffer.buffer); + if (ret != OH_NN_SUCCESS) { + LOGE("Releasing device buffer failed after copying offline models to device buffers failed."); + return ret; + } + } + + return status; + } + + status = PrepareOfflineModel(deviceBuffers, config, preparedModel); + if (status != OH_NN_SUCCESS) { + LOGE("PrepareOfflineModel failed."); + return status; + } + + return OH_NN_SUCCESS; +} } // namespace NeuralNetworkRuntime } // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/hdi_device_v2_0.h b/frameworks/native/hdi_device_v2_0.h index fee7831..f8cba17 100644 --- a/frameworks/native/hdi_device_v2_0.h +++ b/frameworks/native/hdi_device_v2_0.h @@ -52,12 +52,24 @@ public: OH_NN_ReturnCode PrepareModelFromModelCache(const std::vector& modelCache, const ModelConfig& config, std::shared_ptr& preparedModel) override; + OH_NN_ReturnCode PrepareOfflineModel(std::shared_ptr model, + const ModelConfig& config, + std::shared_ptr& preparedModel) override; void* AllocateBuffer(size_t length) override; OH_NN_ReturnCode ReleaseBuffer(const void* buffer) override; private: OH_NN_ReturnCode ReleaseSharedBuffer(const V2_0::SharedBuffer& buffer); + OH_NN_ReturnCode GetOfflineModelFromLiteGraph(std::shared_ptr graph, + std::vector>& offlineModels); + OH_NN_ReturnCode AllocateDeviceBufferForOfflineModel(const std::vector>& offlineModels, + std::vector& deviceBuffers); + OH_NN_ReturnCode CopyOfflineModelToDevice(const std::vector>& offlineModels, + std::vector& deviceBuffers); + OH_NN_ReturnCode PrepareOfflineModel(std::vector& deviceBuffers, + const ModelConfig& config, + std::shared_ptr& preparedModel); private: // first: major version, second: minor version -- Gitee From e9aedc1000df99666beb8c665bb2dc8c5156ff1a Mon Sep 17 00:00:00 2001 From: wangchuanxia Date: Mon, 24 Apr 2023 15:03:30 +0800 Subject: [PATCH 09/12] returncode fix Signed-off-by: wangchuanxia --- frameworks/BUILD.gn | 12 +- frameworks/native/compilation.cpp | 12 +- frameworks/native/device_discover.h | 4 - frameworks/native/device_discover_v2_0.cpp | 40 ++-- frameworks/native/execution_plan.cpp | 
13 -- frameworks/native/hdi_device_v1_0.cpp | 17 +- frameworks/native/hdi_device_v1_0.h | 4 +- frameworks/native/hdi_device_v2_0.cpp | 187 +++++++----------- frameworks/native/hdi_device_v2_0.h | 4 +- frameworks/native/hdi_prepared_model_v1_0.cpp | 3 +- frameworks/native/hdi_prepared_model_v1_0.h | 6 +- frameworks/native/hdi_prepared_model_v2_0.cpp | 32 ++- frameworks/native/hdi_prepared_model_v2_0.h | 6 +- .../native/hdi_returncode_transform.cpp | 67 ------- frameworks/native/hdi_returncode_transform.h | 29 --- frameworks/native/hdi_returncode_utils.h | 87 ++++++++ frameworks/native/nn_tensor.cpp | 7 +- frameworks/native/ops/conv2d_builder.cpp | 6 +- .../native/ops/conv2d_transpose_builder.cpp | 8 +- .../ops/depthwise_conv2d_native_builder.cpp | 6 +- frameworks/native/ops/pooling_builder.cpp | 4 +- frameworks/native/ops/top_k_builder.h | 2 +- frameworks/native/prepared_model.h | 8 +- frameworks/native/validation.h | 3 +- test/unittest/BUILD.gn | 2 +- 25 files changed, 248 insertions(+), 321 deletions(-) delete mode 100644 frameworks/native/hdi_returncode_transform.cpp delete mode 100644 frameworks/native/hdi_returncode_transform.h create mode 100644 frameworks/native/hdi_returncode_utils.h diff --git a/frameworks/BUILD.gn b/frameworks/BUILD.gn index bc88015..15e871e 100644 --- a/frameworks/BUILD.gn +++ b/frameworks/BUILD.gn @@ -19,17 +19,16 @@ config("nnrt_config") { nnrt_sources = [ "native/compilation.cpp", + "native/device_discover_v1_0.cpp", + "native/device_discover_v2_0.cpp", "native/device_manager.cpp", "native/device_registrar.cpp", "native/execution_plan.cpp", "native/executor.cpp", - "native/device_discover_v1_0.cpp", - "native/device_discover_v2_0.cpp", "native/hdi_device_v1_0.cpp", "native/hdi_device_v2_0.cpp", "native/hdi_prepared_model_v1_0.cpp", "native/hdi_prepared_model_v2_0.cpp", - "native/hdi_returncode_transform.cpp", "native/inner_model.cpp", "native/memory_manager.cpp", "native/neural_network_runtime.cpp", @@ -105,12 +104,6 @@ ohos_shared_library("libneural_network_runtime") { sources += ops_sources include_dirs = [ "//commonlibrary/c_utils/base/include", - "//drivers/hdf_core/adapter/uhdf/posix/include", - "//drivers/hdf_core/adapter/uhdf2/include/hdi", - "//drivers/hdf_core/adapter/uhdf2/ipc/include", - "//drivers/hdf_core/framework/include/core", - "//drivers/hdf_core/framework/include/utils", - "//drivers/hdf_core/framework/core/common/include/host", "//foundation/ai/neural_network_runtime", "//foundation/communication/ipc/interfaces/innerkits/ipc_core/include", "//third_party/googletest/googletest/include/gtest", @@ -128,7 +121,6 @@ ohos_shared_library("libneural_network_runtime") { "c_utils:utils", "drivers_interface_nnrt:libnnrt_proxy_1.0", "drivers_interface_nnrt:libnnrt_proxy_2.0", - "hdf_core:libhdf_utils", "hilog_native:libhilog", "hitrace_native:libhitracechain", "mindspore:mindir", diff --git a/frameworks/native/compilation.cpp b/frameworks/native/compilation.cpp index c259932..1c06087 100644 --- a/frameworks/native/compilation.cpp +++ b/frameworks/native/compilation.cpp @@ -371,6 +371,7 @@ OH_NN_ReturnCode Compilation::GetCacheFileLength(std::ifstream& ifs, int& fsize) OH_NN_ReturnCode Compilation::ReadCacheModelFile(const std::string& file, ModelBuffer& modelBuffer) const { + // file is validated outside. 
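+    // (It is assumed the caller has already checked that the file exists and is readable, so a
+    // failure below points to a corrupted or concurrently modified cache file.)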
std::ifstream ifs(file.c_str(), std::ios::in | std::ios::binary); if (!ifs) { LOGE("[Compilation] Fail to open cache file."); @@ -410,15 +411,16 @@ OH_NN_ReturnCode Compilation::ReadCacheModelFile(const std::string& file, ModelB ifs.close(); modelBuffer.buffer = ptr; - modelBuffer.length = fsize; + modelBuffer.length = static_cast(fsize); // fsize should be non-negative, safe to cast. return OH_NN_SUCCESS; } OH_NN_ReturnCode Compilation::CheckCacheInfo(ModelCacheInfo& modelCacheInfo, const std::string& cacheInfoPath) const { + // cacheInfoPath is validated outside. std::ifstream infoCacheFile(cacheInfoPath.c_str(), std::ios::in | std::ios::binary); if (!infoCacheFile) { - LOGE("[Compilation] Openning cache info file failed."); + LOGE("[Compilation] Opening cache info file failed."); return OH_NN_INVALID_FILE; } @@ -577,7 +579,8 @@ OH_NN_ReturnCode Compilation::LoadCacheBuild(std::shared_ptr& pre OH_NN_ReturnCode ret = CheckCacheModel(cacheInfo, modelBuffers); if (ret != OH_NN_SUCCESS) { LOGE("[Compilation] Checking cache model failed."); - for (size_t i = 0; i < modelBuffers.size(); ++i) { + size_t modelBuffersSize = modelBuffers.size(); + for (size_t i = 0; i < modelBuffersSize; ++i) { m_device->ReleaseBuffer(modelBuffers[i].buffer); modelBuffers[i].buffer = nullptr; modelBuffers[i].length = 0; @@ -723,7 +726,8 @@ bool Compilation::IsBuild() const bool Compilation::IsDynamicShape() const { - for (size_t i = 0; i < m_inputTensors.size(); ++i) { + size_t inputTensorsSize = m_inputTensors.size(); + for (size_t i = 0; i < inputTensorsSize; ++i) { if (m_inputTensors[i]->IsDynamicShape()) { return true; } diff --git a/frameworks/native/device_discover.h b/frameworks/native/device_discover.h index fd86f1f..b8270f7 100644 --- a/frameworks/native/device_discover.h +++ b/frameworks/native/device_discover.h @@ -16,16 +16,12 @@ #ifndef NEURAL_NETWORK_RUNTIME_DEVICE_DISCOVER_H #define NEURAL_NETWORK_RUNTIME_DEVICE_DISCOVER_H -#include -#include - #include "device.h" namespace OHOS { namespace NeuralNetworkRuntime { std::shared_ptr DiscoverHDIDevicesV1_0(std::string& deviceName, std::string& vendorName, std::string& version); std::shared_ptr DiscoverHDIDevicesV2_0(std::string& deviceName, std::string& vendorName, std::string& version); - } // namespace NeuralNetworkRuntime } // namespace OHOS #endif // NEURAL_NETWORK_RUNTIME_DEVICE_DISCOVER_H \ No newline at end of file diff --git a/frameworks/native/device_discover_v2_0.cpp b/frameworks/native/device_discover_v2_0.cpp index d4a1cac..f338da9 100644 --- a/frameworks/native/device_discover_v2_0.cpp +++ b/frameworks/native/device_discover_v2_0.cpp @@ -15,7 +15,7 @@ #include "device_discover.h" #include "hdi_device_v2_0.h" -#include "hdi_returncode_transform.h" +#include "hdi_returncode_utils.h" #include "common/log.h" #include "common/utils.h" @@ -30,23 +30,37 @@ std::shared_ptr DiscoverHDIDevicesV2_0(std::string& deviceName, std::str return nullptr; } - V2_0::NNRT_ReturnCode returnCode; - auto hdiRet = iDevice->GetDeviceName(deviceName, returnCode); - if (hdiRet != HDF_SUCCESS) { - LOGW("Get device name failed. ErrorCode=%{public}d, innerHDIRet=%{public}s", - hdiRet, ConverterRetToString(returnCode).c_str()); + auto ret = iDevice->GetDeviceName(deviceName); + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + if (ret < 0) { + LOGW("Get device name failed. An error occurred in HDI, errorcode is %{public}d.", ret); + } else if (ret > 0) { + OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode nnrtRet = static_cast(ret); + LOGW("Get device name failed. 
Errorcode is %{public}s.", ConverterRetToString(nnrtRet).c_str()); + } return nullptr; } - hdiRet = iDevice->GetVendorName(vendorName, returnCode); - if (hdiRet != HDF_SUCCESS) { - LOGW("Get vendor name failed. ErrorCode=%{public}d, innerHDIRet=%{public}s", - hdiRet, ConverterRetToString(returnCode).c_str()); + + ret = iDevice->GetVendorName(vendorName); + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + if (ret < 0) { + LOGW("Get vendor name failed. An error occurred in HDI, errorcode is %{public}d.", ret); + } else if (ret > 0) { + OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode nnrtRet = static_cast(ret); + LOGW("Get vendor name failed. Errorcode is %{public}s.", ConverterRetToString(nnrtRet).c_str()); + } return nullptr; } + std::pair hdiVersion; - hdiRet = iDevice->GetVersion(hdiVersion.first, hdiVersion.second); - if (hdiRet != HDF_SUCCESS) { - LOGW("Get version failed. ErrorCode=%{public}d", hdiRet); + ret = iDevice->GetVersion(hdiVersion.first, hdiVersion.second); + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + if (ret < 0) { + LOGW("Get version failed. An error occurred in HDI, errorcode is %{public}d.", ret); + } else if (ret > 0) { + OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode nnrtRet = static_cast(ret); + LOGW("Get version failed. Errorcode is %{public}s.", ConverterRetToString(nnrtRet).c_str()); + } return nullptr; } version = 'v' + std::to_string(hdiVersion.first) + '_' + std::to_string(hdiVersion.second); diff --git a/frameworks/native/execution_plan.cpp b/frameworks/native/execution_plan.cpp index 5199199..a074492 100644 --- a/frameworks/native/execution_plan.cpp +++ b/frameworks/native/execution_plan.cpp @@ -62,19 +62,6 @@ OH_NN_ReturnCode ExecutionPlan::Run(const std::vector> return ret; } - // Check if the output buffer is sufficient - bool bufferFailed {false}; - for (size_t i = 0; i < outputSize; ++i) { - if (!isSufficientDataBuffer[i]) { - // Print all output indices with insufficient buffer, don't return until traversing all outputs. - LOGE("Run failed, Output %zu does not have enough buffer to store the data.", i); - bufferFailed = true; - } - } - if (bufferFailed) { - return OH_NN_FAILED; - } - // Set the output NNTensor's dimensions from output IOTensor if it is dynamic. // NNTensor::SetDimensions will check if the tensor buffer is enough for the new dimensions. for (size_t i = 0; i < outputSize; ++i) { diff --git a/frameworks/native/hdi_device_v1_0.cpp b/frameworks/native/hdi_device_v1_0.cpp index c87c51a..110485e 100644 --- a/frameworks/native/hdi_device_v1_0.cpp +++ b/frameworks/native/hdi_device_v1_0.cpp @@ -87,9 +87,7 @@ V1_0::Priority TransPriority(const OH_NN_Priority& priority) } HDIDeviceV1_0::HDIDeviceV1_0(OHOS::sptr device) : m_iDevice(device) -{ - device->GetVersion(m_hdiVersion.first, m_hdiVersion.second); -} +{} OH_NN_ReturnCode HDIDeviceV1_0::GetDeviceName(std::string& name) { @@ -113,6 +111,11 @@ OH_NN_ReturnCode HDIDeviceV1_0::GetVendorName(std::string& name) OH_NN_ReturnCode HDIDeviceV1_0::GetVersion(std::string& version) { + auto ret = m_iDevice->GetVersion(m_hdiVersion.first, m_hdiVersion.second); + if (ret != HDF_SUCCESS) { + LOGE("Get HDI version failed. 
ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } version = 'v' + std::to_string(m_hdiVersion.first) + '_' + std::to_string(m_hdiVersion.second); return OH_NN_SUCCESS; } @@ -143,7 +146,7 @@ OH_NN_ReturnCode HDIDeviceV1_0::GetDeviceStatus(DeviceStatus& status) } OH_NN_ReturnCode HDIDeviceV1_0::GetSupportedOperation(std::shared_ptr model, - std::vector& ops) + std::vector& ops) { if (model == nullptr) { LOGE("Model is nullptr, cannot query supported operation."); @@ -234,8 +237,7 @@ OH_NN_ReturnCode HDIDeviceV1_0::IsModelCacheSupported(bool& isSupported) } OH_NN_ReturnCode HDIDeviceV1_0::PrepareModel(std::shared_ptr model, - const ModelConfig& config, - std::shared_ptr& preparedModel) + const ModelConfig& config, std::shared_ptr& preparedModel) { if (model == nullptr) { LOGE("Model is nullptr, cannot prepare model."); @@ -289,8 +291,7 @@ OH_NN_ReturnCode HDIDeviceV1_0::PrepareModel(std::shared_ptr& modelCache, - const ModelConfig& config, - std::shared_ptr& preparedModel) + const ModelConfig& config, std::shared_ptr& preparedModel) { std::vector iBuffers; auto memManager = MemoryManager::GetInstance(); diff --git a/frameworks/native/hdi_device_v1_0.h b/frameworks/native/hdi_device_v1_0.h index 9b5647c..740835d 100644 --- a/frameworks/native/hdi_device_v1_0.h +++ b/frameworks/native/hdi_device_v1_0.h @@ -16,18 +16,16 @@ #ifndef NEURAL_NETWORK_RUNTIME_HDI_DEVICE_V1_0_H #define NEURAL_NETWORK_RUNTIME_HDI_DEVICE_V1_0_H -#include "refbase.h" #include #include #include +#include "refbase.h" #include "device.h" namespace OHOS { namespace NeuralNetworkRuntime { - namespace V1_0 = OHOS::HDI::Nnrt::V1_0; - class HDIDeviceV1_0 : public Device { public: explicit HDIDeviceV1_0(OHOS::sptr device); diff --git a/frameworks/native/hdi_device_v2_0.cpp b/frameworks/native/hdi_device_v2_0.cpp index 68c1fe5..7c4794a 100644 --- a/frameworks/native/hdi_device_v2_0.cpp +++ b/frameworks/native/hdi_device_v2_0.cpp @@ -20,7 +20,7 @@ #include "securec.h" #include "hdi_prepared_model_v2_0.h" -#include "hdi_returncode_transform.h" +#include "hdi_returncode_utils.h" #include "memory_manager.h" #include "transform.h" #include "common/log.h" @@ -89,36 +89,32 @@ V2_0::Priority TransPriority(const OH_NN_Priority& priority) } HDIDeviceV2_0::HDIDeviceV2_0(OHOS::sptr device) : m_iDevice(device) -{ - device->GetVersion(m_hdiVersion.first, m_hdiVersion.second); -} +{} OH_NN_ReturnCode HDIDeviceV2_0::GetDeviceName(std::string& name) { - V2_0::NNRT_ReturnCode returnCode; - auto ret = m_iDevice->GetDeviceName(name, returnCode); - if (ret != HDF_SUCCESS) { - LOGE("Get HDI device name failed. ErrorCode=%{public}d, innerHDIRet=%{public}s", - ret, ConverterRetToString(returnCode).c_str()); - return OH_NN_UNAVALIDABLE_DEVICE; + auto ret = m_iDevice->GetDeviceName(name); + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Get HDI device name failed"); } return OH_NN_SUCCESS; } OH_NN_ReturnCode HDIDeviceV2_0::GetVendorName(std::string& name) { - V2_0::NNRT_ReturnCode returnCode; - auto ret = m_iDevice->GetVendorName(name, returnCode); - if (ret != HDF_SUCCESS) { - LOGE("Get HDI device vendor name failed. 
ErrorCode=%{public}d, innerHDIRet=%{public}s", - ret, ConverterRetToString(returnCode).c_str()); - return OH_NN_UNAVALIDABLE_DEVICE; + auto ret = m_iDevice->GetVendorName(name); + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Get HDI vendor name failed"); } return OH_NN_SUCCESS; } OH_NN_ReturnCode HDIDeviceV2_0::GetVersion(std::string& version) { + auto ret = m_iDevice->GetVersion(m_hdiVersion.first, m_hdiVersion.second); + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Get HDI version failed"); + } version = 'v' + std::to_string(m_hdiVersion.first) + '_' + std::to_string(m_hdiVersion.second); return OH_NN_SUCCESS; } @@ -126,12 +122,9 @@ OH_NN_ReturnCode HDIDeviceV2_0::GetVersion(std::string& version) OH_NN_ReturnCode HDIDeviceV2_0::GetDeviceType(OH_NN_DeviceType& deviceType) { V2_0::DeviceType iDeviceType; - V2_0::NNRT_ReturnCode returnCode; - auto ret = m_iDevice->GetDeviceType(iDeviceType, returnCode); - if (ret != HDF_SUCCESS) { - LOGE("Get HDI device type failed. ErrorCode=%{public}d, innerHDIRet=%{public}s", - ret, ConverterRetToString(returnCode).c_str()); - return OH_NN_UNAVALIDABLE_DEVICE; + auto ret = m_iDevice->GetDeviceType(iDeviceType); + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Get HDI device type failed"); } deviceType = TransHDIDeviceV2_0Type(iDeviceType); @@ -141,12 +134,9 @@ OH_NN_ReturnCode HDIDeviceV2_0::GetDeviceType(OH_NN_DeviceType& deviceType) OH_NN_ReturnCode HDIDeviceV2_0::GetDeviceStatus(DeviceStatus& status) { V2_0::DeviceStatus iDeviceStatus; - V2_0::NNRT_ReturnCode returnCode; - auto ret = m_iDevice->GetDeviceStatus(iDeviceStatus, returnCode); - if (ret != HDF_SUCCESS) { - LOGE("Get HDI device status failed. ErrorCode=%{public}d, innerHDIRet=%{public}s", - ret, ConverterRetToString(returnCode).c_str()); - return OH_NN_UNAVALIDABLE_DEVICE; + auto ret = m_iDevice->GetDeviceStatus(iDeviceStatus); + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Get HDI device status failed"); } status = TransHDIDeviceV2_0Status(iDeviceStatus); return OH_NN_SUCCESS; @@ -162,14 +152,11 @@ OH_NN_ReturnCode HDIDeviceV2_0::GetSupportedOperation(std::shared_ptr 0) { - V2_0::NNRT_ReturnCode returnCode; - hdiRet = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer, returnCode); - if (hdiRet != HDF_SUCCESS || tensorBuffer.fd == INVALID_FD) { - LOGE("Allocate tensor buffer error when get supported operation. ErrorCode: %{public}d, \ - innerHDIRet=%{public}s", hdiRet, ConverterRetToString(returnCode).c_str()); - return OH_NN_FAILED; + ret = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer); + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS || tensorBuffer.fd == INVALID_FD) { + return CheckReturnCode(ret, OH_NN_FAILED, "Allocate tensor buffer error when get supported operation"); } } @@ -180,79 +167,61 @@ OH_NN_ReturnCode HDIDeviceV2_0::GetSupportedOperation(std::shared_ptrGetSupportedOperation(*iModel, ops, returnCode); + ret = m_iDevice->GetSupportedOperation(*iModel, ops); mindspore::lite::MindIR_Model_Destroy(&iModel); - auto ret = ReleaseSharedBuffer(tensorBuffer); - if (ret != OH_NN_SUCCESS) { + auto innerRet = ReleaseSharedBuffer(tensorBuffer); + if (innerRet != OH_NN_SUCCESS) { LOGE("Release tensorBuffer failed."); return OH_NN_FAILED; } - if (hdiRet != HDF_SUCCESS) { - LOGE("Get supported operation failed. 
ErrorCode=%{public}d, innerHDIRet=%{public}s", - hdiRet, ConverterRetToString(returnCode).c_str()); - return OH_NN_UNAVALIDABLE_DEVICE; + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Get supported operation failed"); } return OH_NN_SUCCESS; } OH_NN_ReturnCode HDIDeviceV2_0::IsFloat16PrecisionSupported(bool& isSupported) { - V2_0::NNRT_ReturnCode returnCode; - auto ret = m_iDevice->IsFloat16PrecisionSupported(isSupported, returnCode); - if (ret != HDF_SUCCESS) { - LOGE("Query fp16 precision supported failed. ErrorCode=%{public}d, innerHDIRet=%{public}s", - ret, ConverterRetToString(returnCode).c_str()); - return OH_NN_UNAVALIDABLE_DEVICE; + auto ret = m_iDevice->IsFloat16PrecisionSupported(isSupported); + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Query fp16 precision supported failed"); } return OH_NN_SUCCESS; } OH_NN_ReturnCode HDIDeviceV2_0::IsPerformanceModeSupported(bool& isSupported) { - V2_0::NNRT_ReturnCode returnCode; - auto ret = m_iDevice->IsPerformanceModeSupported(isSupported, returnCode); - if (ret != HDF_SUCCESS) { - LOGE("Query performance mode supported failed. ErrorCode=%{public}d, innerHDIRet=%{public}s", - ret, ConverterRetToString(returnCode).c_str()); - return OH_NN_UNAVALIDABLE_DEVICE; + auto ret = m_iDevice->IsPerformanceModeSupported(isSupported); + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Query performance mode supported failed"); } return OH_NN_SUCCESS; } OH_NN_ReturnCode HDIDeviceV2_0::IsPrioritySupported(bool& isSupported) { - V2_0::NNRT_ReturnCode returnCode; - auto ret = m_iDevice->IsPrioritySupported(isSupported, returnCode); - if (ret != HDF_SUCCESS) { - LOGE("Query priority supported failed. ErrorCode=%{public}d, innerHDIRet=%{public}s", - ret, ConverterRetToString(returnCode).c_str()); - return OH_NN_UNAVALIDABLE_DEVICE; + auto ret = m_iDevice->IsPrioritySupported(isSupported); + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Query priority supported failed"); } return OH_NN_SUCCESS; } OH_NN_ReturnCode HDIDeviceV2_0::IsDynamicInputSupported(bool& isSupported) { - V2_0::NNRT_ReturnCode returnCode; - auto ret = m_iDevice->IsDynamicInputSupported(isSupported, returnCode); - if (ret != HDF_SUCCESS) { - LOGE("Query dynamic input supported failed. ErrorCode=%{public}d, innerHDIRet=%{public}s", - ret, ConverterRetToString(returnCode).c_str()); - return OH_NN_UNAVALIDABLE_DEVICE; + auto ret = m_iDevice->IsDynamicInputSupported(isSupported); + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Query dynamic input supported failed"); } return OH_NN_SUCCESS; } OH_NN_ReturnCode HDIDeviceV2_0::IsModelCacheSupported(bool& isSupported) { - V2_0::NNRT_ReturnCode returnCode; - auto ret = m_iDevice->IsModelCacheSupported(isSupported, returnCode); - if (ret != HDF_SUCCESS) { - LOGE("Query cache model supported failed. 
ErrorCode=%{public}d, innerHDIRet=%{public}s", - ret, ConverterRetToString(returnCode).c_str()); - return OH_NN_UNAVALIDABLE_DEVICE; + auto ret = m_iDevice->IsModelCacheSupported(isSupported); + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Query cache model supported failed"); } return OH_NN_SUCCESS; } @@ -268,14 +237,11 @@ OH_NN_ReturnCode HDIDeviceV2_0::PrepareModel(std::shared_ptr 0) { - V2_0::NNRT_ReturnCode returnCode; - hdiRet = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer, returnCode); - if (hdiRet != HDF_SUCCESS || tensorBuffer.fd == INVALID_FD) { - LOGE("Allocate tensor buffer error when prepare model. ErrorCode: %{public}d, innerHDIRet=%{public}s", - hdiRet, ConverterRetToString(returnCode).c_str()); - return OH_NN_FAILED; + ret = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer); + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS || tensorBuffer.fd == INVALID_FD) { + return CheckReturnCode(ret, OH_NN_FAILED, "Allocate tensor buffer error when prepare model"); } } @@ -292,19 +258,16 @@ OH_NN_ReturnCode HDIDeviceV2_0::PrepareModel(std::shared_ptr iPreparedModel; - V2_0::NNRT_ReturnCode returnCode; - auto preparedRet = m_iDevice->PrepareModel(*iModel, iModelConfig, iPreparedModel, returnCode); + ret = m_iDevice->PrepareModel(*iModel, iModelConfig, iPreparedModel); mindspore::lite::MindIR_Model_Destroy(&iModel); - auto ret = ReleaseSharedBuffer(tensorBuffer); - if (ret != OH_NN_SUCCESS) { + auto innerRet = ReleaseSharedBuffer(tensorBuffer); + if (innerRet != OH_NN_SUCCESS) { LOGE("Release tensorBuffer failed."); return OH_NN_FAILED; } - if (preparedRet != HDF_SUCCESS || iPreparedModel == nullptr) { - LOGE("Prepare model failed. ErrorCode=%{public}d, innerHDIRet=%{public}s", - preparedRet, ConverterRetToString(returnCode).c_str()); - return OH_NN_FAILED; + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS || iPreparedModel == nullptr) { + return CheckReturnCode(ret, OH_NN_FAILED, "Prepare model failed"); } preparedModel = CreateSharedPtr(iPreparedModel); @@ -340,12 +303,9 @@ OH_NN_ReturnCode HDIDeviceV2_0::PrepareModelFromModelCache(const std::vector iPreparedModel; - V2_0::NNRT_ReturnCode returnCode; - auto hdiRet = m_iDevice->PrepareModelFromModelCache(iBuffers, iModelConfig, iPreparedModel, returnCode); - if (hdiRet != HDF_SUCCESS) { - LOGE("Prepare model from cache failed. ErrorCode=%{public}d, innerHDIRet=%{public}s", - hdiRet, ConverterRetToString(returnCode).c_str()); - return OH_NN_UNAVALIDABLE_DEVICE; + auto nnrtRet = m_iDevice->PrepareModelFromModelCache(iBuffers, iModelConfig, iPreparedModel); + if (nnrtRet != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + return CheckReturnCode(nnrtRet, OH_NN_FAILED, "Prepare model from cache failed"); } preparedModel = CreateSharedPtr(iPreparedModel); @@ -364,12 +324,9 @@ void* HDIDeviceV2_0::AllocateBuffer(size_t length) } V2_0::SharedBuffer buffer; - V2_0::NNRT_ReturnCode returnCode; - auto ret = m_iDevice->AllocateBuffer(length, buffer, returnCode); - if (ret != HDF_SUCCESS) { - LOGE("Allocate buffer error. 
ErrorCode: %{public}d, innerHDIRet=%{public}s", - ret, ConverterRetToString(returnCode).c_str()); - return nullptr; + auto ret = m_iDevice->AllocateBuffer(length, buffer); + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + return CheckReturnCode(ret, nullptr, "Allocate buffer error"); } auto memManager = MemoryManager::GetInstance(); @@ -396,12 +353,9 @@ OH_NN_ReturnCode HDIDeviceV2_0::ReleaseBuffer(const void* buffer) } V2_0::SharedBuffer hdiBuffer {memory.fd, memory.length, 0, memory.length}; - V2_0::NNRT_ReturnCode returnCode; - auto deviceResult = m_iDevice->ReleaseBuffer(hdiBuffer, returnCode); - if (deviceResult != HDF_SUCCESS) { - LOGE("Device release buffer error. ErrorCode: %{public}d, innerHDIRet=%{public}s", - deviceResult, ConverterRetToString(returnCode).c_str()); - return OH_NN_FAILED; + auto deviceResult = m_iDevice->ReleaseBuffer(hdiBuffer); + if (deviceResult != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + return CheckReturnCode(deviceResult, OH_NN_FAILED, "Device release buffer error"); } ret = memManager->UnMapMemory(buffer); @@ -420,12 +374,9 @@ OH_NN_ReturnCode HDIDeviceV2_0::ReleaseSharedBuffer(const V2_0::SharedBuffer& bu return OH_NN_SUCCESS; } - V2_0::NNRT_ReturnCode returnCode; - auto ret = m_iDevice->ReleaseBuffer(buffer, returnCode); - if (ret != HDF_SUCCESS) { - LOGE("Device release buffer error. ErrorCode=%{public}d, innerHDIRet=%{public}s", - ret, ConverterRetToString(returnCode).c_str()); - return OH_NN_FAILED; + auto ret = m_iDevice->ReleaseBuffer(buffer); + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + return CheckReturnCode(ret, OH_NN_FAILED, "Device release buffer error"); } return OH_NN_SUCCESS; } @@ -507,7 +458,8 @@ OH_NN_ReturnCode HDIDeviceV2_0::CopyOfflineModelToDevice(const std::vector& de iBuffers.emplace_back(V2_0::SharedBuffer {memory.fd, memory.length, 0, memory.length}); } - V2_0::NNRT_ReturnCode HDIReturnCode {V2_0::NNRT_ReturnCode::NNRT_SUCCESS}; - auto preparedRet = m_iDevice->PrepareOfflineModel(iBuffers, iModelConfig, iPreparedModel, HDIReturnCode); + auto preparedRet = m_iDevice->PrepareOfflineModel(iBuffers, iModelConfig, iPreparedModel); // Release allocated model buffer after prepare model. OH_NN_ReturnCode status {OH_NN_SUCCESS}; @@ -562,10 +513,8 @@ OH_NN_ReturnCode HDIDeviceV2_0::PrepareOfflineModel(std::vector& de } deviceBuffers.clear(); - if (preparedRet != HDF_SUCCESS || iPreparedModel == nullptr) { - LOGE("Prepare model failed. 
ErrorCode=%{public}d, innerHDIRet=%{public}s", - preparedRet, ConverterRetToString(HDIReturnCode).c_str()); - return OH_NN_FAILED; + if (preparedRet != V2_0::NNRT_ReturnCode::NNRT_SUCCESS || iPreparedModel == nullptr) { + return CheckReturnCode(preparedRet, OH_NN_FAILED, "Prepare offline model failed"); } preparedModel = CreateSharedPtr(iPreparedModel); diff --git a/frameworks/native/hdi_device_v2_0.h b/frameworks/native/hdi_device_v2_0.h index f8cba17..5f99601 100644 --- a/frameworks/native/hdi_device_v2_0.h +++ b/frameworks/native/hdi_device_v2_0.h @@ -16,18 +16,16 @@ #ifndef NEURAL_NETWORK_RUNTIME_HDI_DEVICE_V2_0_H #define NEURAL_NETWORK_RUNTIME_HDI_DEVICE_V2_0_H -#include "refbase.h" #include #include #include +#include "refbase.h" #include "device.h" namespace OHOS { namespace NeuralNetworkRuntime { - namespace V2_0 = OHOS::HDI::Nnrt::V2_0; - class HDIDeviceV2_0 : public Device { public: explicit HDIDeviceV2_0(OHOS::sptr device); diff --git a/frameworks/native/hdi_prepared_model_v1_0.cpp b/frameworks/native/hdi_prepared_model_v1_0.cpp index 898d379..5df1eee 100644 --- a/frameworks/native/hdi_prepared_model_v1_0.cpp +++ b/frameworks/native/hdi_prepared_model_v1_0.cpp @@ -114,7 +114,8 @@ OH_NN_ReturnCode HDIPreparedModelV1_0::ExportModelCache(std::vector } auto memManager = MemoryManager::GetInstance(); - for (size_t i = 0; i < iBuffers.size(); i++) { + size_t iBuffersSize = iBuffers.size(); + for (size_t i = 0; i < iBuffersSize; i++) { auto addr = memManager->MapMemory(iBuffers[i].fd, iBuffers[i].bufferSize); if (addr == nullptr) { LOGE("Export the %zuth model cache failed, cannot not map fd to address.", i + 1); diff --git a/frameworks/native/hdi_prepared_model_v1_0.h b/frameworks/native/hdi_prepared_model_v1_0.h index f5a8911..4b71c71 100644 --- a/frameworks/native/hdi_prepared_model_v1_0.h +++ b/frameworks/native/hdi_prepared_model_v1_0.h @@ -19,12 +19,12 @@ #include -#include "refbase.h" -#include "prepared_model.h" -#include "cpp_type.h" #include #include #include +#include "refbase.h" +#include "prepared_model.h" +#include "cpp_type.h" namespace V1_0 = OHOS::HDI::Nnrt::V1_0; diff --git a/frameworks/native/hdi_prepared_model_v2_0.cpp b/frameworks/native/hdi_prepared_model_v2_0.cpp index c07e7e7..7f04ed9 100644 --- a/frameworks/native/hdi_prepared_model_v2_0.cpp +++ b/frameworks/native/hdi_prepared_model_v2_0.cpp @@ -16,7 +16,7 @@ #include "hdi_prepared_model_v2_0.h" #include "common/log.h" -#include "hdi_returncode_transform.h" +#include "hdi_returncode_utils.h" #include "memory_manager.h" namespace OHOS { @@ -108,16 +108,14 @@ OH_NN_ReturnCode HDIPreparedModelV2_0::ExportModelCache(std::vector } std::vector iBuffers; - V2_0::NNRT_ReturnCode returnCode; - auto ret = m_hdiPreparedModel->ExportModelCache(iBuffers, returnCode); - if (ret != HDF_SUCCESS || returnCode != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { - LOGE("Export model cache failed. 
ErrorCode=%{public}d, innerHDIRet=%{public}s", - ret, ConverterRetToString(returnCode).c_str()); - return OH_NN_UNAVALIDABLE_DEVICE; + auto ret = m_hdiPreparedModel->ExportModelCache(iBuffers); + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Export model cache failed"); } auto memManager = MemoryManager::GetInstance(); - for (size_t i = 0; i < iBuffers.size(); i++) { + size_t iBuffersSize = iBuffers.size(); + for (size_t i = 0; i < iBuffersSize; i++) { auto addr = memManager->MapMemory(iBuffers[i].fd, iBuffers[i].bufferSize); if (addr == nullptr) { LOGE("Export the %{public}zuth model cache failed, cannot not map fd to address.", i + 1); @@ -154,12 +152,9 @@ OH_NN_ReturnCode HDIPreparedModelV2_0::Run(const std::vector& inputs, iOutputTensors.emplace_back(iTensor); } - V2_0::NNRT_ReturnCode returnCode; - auto ret = m_hdiPreparedModel->Run(iInputTensors, iOutputTensors, outputsDims, returnCode); - if (ret != HDF_SUCCESS || outputsDims.empty()) { - LOGE("Run model failed. ErrorCode=%{public}d, innerHDIRet=%{public}s", - ret, ConverterRetToString(returnCode).c_str()); - return OH_NN_UNAVALIDABLE_DEVICE; + auto ret = m_hdiPreparedModel->Run(iInputTensors, iOutputTensors, outputsDims); + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS || outputsDims.empty()) { + return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Run model failed"); } return OH_NN_SUCCESS; @@ -168,12 +163,9 @@ OH_NN_ReturnCode HDIPreparedModelV2_0::Run(const std::vector& inputs, OH_NN_ReturnCode HDIPreparedModelV2_0::GetInputDimRanges(std::vector>& minInputDims, std::vector>& maxInputDims) { - V2_0::NNRT_ReturnCode returnCode; - auto ret = m_hdiPreparedModel->GetInputDimRanges(minInputDims, maxInputDims, returnCode); - if (ret != HDF_SUCCESS) { - LOGE("GetInputDimRanges failed. ErrorCode=%{public}d, innerHDIRet=%{public}s", - ret, ConverterRetToString(returnCode).c_str()); - return OH_NN_UNAVALIDABLE_DEVICE; + auto ret = m_hdiPreparedModel->GetInputDimRanges(minInputDims, maxInputDims); + if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) { + return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Get input dim ranges failed"); } return OH_NN_SUCCESS; diff --git a/frameworks/native/hdi_prepared_model_v2_0.h b/frameworks/native/hdi_prepared_model_v2_0.h index ad42dcb..3d91523 100644 --- a/frameworks/native/hdi_prepared_model_v2_0.h +++ b/frameworks/native/hdi_prepared_model_v2_0.h @@ -19,13 +19,13 @@ #include -#include #include #include +#include -#include "refbase.h" -#include "prepared_model.h" #include "cpp_type.h" +#include "prepared_model.h" +#include "refbase.h" namespace V2_0 = OHOS::HDI::Nnrt::V2_0; diff --git a/frameworks/native/hdi_returncode_transform.cpp b/frameworks/native/hdi_returncode_transform.cpp deleted file mode 100644 index 43d9c92..0000000 --- a/frameworks/native/hdi_returncode_transform.cpp +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright (c) 2023 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "hdi_returncode_transform.h" -#include - -namespace OHOS { -namespace NeuralNetworkRuntime { -const std::unordered_map RET_STRING_MAP{ - {V2_0::NNRT_ReturnCode::NNRT_SUCCESS, "NNRT_SUCCESS"}, - {V2_0::NNRT_ReturnCode::NNRT_FAILED, "NNRT_FAILED"}, - {V2_0::NNRT_ReturnCode::NNRT_NULL_PTR, "NNRT_NULL_PTR"}, - {V2_0::NNRT_ReturnCode::NNRT_INVALID_PARAMETER, "NNRT_INVALID_PARAMETER"}, - {V2_0::NNRT_ReturnCode::NNRT_MEMORY_ERROR, "NNRT_MEMORY_ERROR"}, - {V2_0::NNRT_ReturnCode::NNRT_OUT_OF_MEMORY, "NNRT_OUT_OF_MEMORY"}, - {V2_0::NNRT_ReturnCode::NNRT_OPERATION_FORBIDDEN, "NNRT_OPERATION_FORBIDDEN"}, - {V2_0::NNRT_ReturnCode::NNRT_INVALID_FILE, "NNRT_INVALID_FILE"}, - {V2_0::NNRT_ReturnCode::NNRT_INVALID_PATH, "NNRT_INVALID_PATH"}, - {V2_0::NNRT_ReturnCode::NNRT_INSUFFICIENT_BUFFER, "NNRT_INSUFFICIENT_BUFFER"}, - {V2_0::NNRT_ReturnCode::NNRT_NO_CHANGE, "NNRT_NO_CHANGE"}, - {V2_0::NNRT_ReturnCode::NNRT_NOT_SUPPORT, "NNRT_NOT_SUPPORT"}, - {V2_0::NNRT_ReturnCode::NNRT_SERVICE_ERROR, "NNRT_SERVICE_ERROR"}, - {V2_0::NNRT_ReturnCode::NNRT_DEVICE_ERROR, "NNRT_DEVICE_ERROR"}, - {V2_0::NNRT_ReturnCode::NNRT_DEVICE_BUSY, "NNRT_DEVICE_BUSY"}, - {V2_0::NNRT_ReturnCode::NNRT_CANCELLED, "NNRT_CANCELLED"}, - {V2_0::NNRT_ReturnCode::NNRT_PERMISSION_DENIED, "NNRT_PERMISSION_DENIED"}, - {V2_0::NNRT_ReturnCode::NNRT_TIME_OUT, "NNRT_TIME_OUT"}, - {V2_0::NNRT_ReturnCode::NNRT_INVALID_TENSOR, "NNRT_INVALID_TENSOR"}, - {V2_0::NNRT_ReturnCode::NNRT_INVALID_NODE, "NNRT_INVALID_NODE"}, - {V2_0::NNRT_ReturnCode::NNRT_INVALID_INPUT, "NNRT_INVALID_INPUT"}, - {V2_0::NNRT_ReturnCode::NNRT_INVALID_OUTPUT, "NNRT_INVALID_OUTPUT"}, - {V2_0::NNRT_ReturnCode::NNRT_INVALID_DATATYPE, "NNRT_INVALID_DATATYPE"}, - {V2_0::NNRT_ReturnCode::NNRT_INVALID_FORMAT, "NNRT_INVALID_FORMAT"}, - {V2_0::NNRT_ReturnCode::NNRT_INVALID_TENSOR_NAME, "NNRT_INVALID_TENSOR_NAME"}, - {V2_0::NNRT_ReturnCode::NNRT_INVALID_SHAPE, "NNRT_INVALID_SHAPE"}, - {V2_0::NNRT_ReturnCode::NNRT_OUT_OF_DIMENTION_RANGES, "NNRT_OUT_OF_DIMENTION_RANGES"}, - {V2_0::NNRT_ReturnCode::NNRT_INVALID_BUFFER, "NNRT_INVALID_BUFFER"}, - {V2_0::NNRT_ReturnCode::NNRT_INVALID_BUFFER_SIZE, "NNRT_INVALID_BUFFER_SIZE"}, - {V2_0::NNRT_ReturnCode::NNRT_INVALID_PERFORMANCE_MODE, "NNRT_INVALID_PERFORMANCE_MODE"}, - {V2_0::NNRT_ReturnCode::NNRT_INVALID_PRIORITY, "NNRT_INVALID_PRIORITY"}, - {V2_0::NNRT_ReturnCode::NNRT_INVALID_MODEL, "NNRT_INVALID_MODEL"}, - {V2_0::NNRT_ReturnCode::NNRT_INVALID_MODEL_CACHE, "NNRT_INVALID_MODEL_CACHE"}, - {V2_0::NNRT_ReturnCode::NNRT_UNSUPPORTED_OP, "NNRT_UNSUPPORTED_OP"} -}; - -std::string ConverterRetToString(V2_0::NNRT_ReturnCode returnCode) -{ - if (RET_STRING_MAP.find(returnCode) == RET_STRING_MAP.end()) { - return ""; - } - - return RET_STRING_MAP.at(returnCode); -} -} -} \ No newline at end of file diff --git a/frameworks/native/hdi_returncode_transform.h b/frameworks/native/hdi_returncode_transform.h deleted file mode 100644 index 0d306c7..0000000 --- a/frameworks/native/hdi_returncode_transform.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2023 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-#ifndef NEURAL_NETWORK_RUNTIME_HDI_RETURNCODE_TRANSFORM_H
-#define NEURAL_NETWORK_RUNTIME_HDI_RETURNCODE_TRANSFORM_H
-
-#include <string>
-#include <v2_0/nnrt_types.h>
-
-namespace V2_0 = OHOS::HDI::Nnrt::V2_0;
-
-namespace OHOS {
-namespace NeuralNetworkRuntime {
-std::string ConverterRetToString(V2_0::NNRT_ReturnCode returnCode);
-} // namespace NeuralNetworkRuntime
-} // OHOS
-#endif // NEURAL_NETWORK_RUNTIME_HDI_RETURNCODE_TRANSFORM_H
\ No newline at end of file
diff --git a/frameworks/native/hdi_returncode_utils.h b/frameworks/native/hdi_returncode_utils.h
new file mode 100644
index 0000000..840ff6a
--- /dev/null
+++ b/frameworks/native/hdi_returncode_utils.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef NEURAL_NETWORK_RUNTIME_HDI_RETURNCODE_UTILS_H
+#define NEURAL_NETWORK_RUNTIME_HDI_RETURNCODE_UTILS_H
+
+#include <string>
+#include <unordered_map>
+#include <v2_0/nnrt_types.h>
+#include "common/log.h"
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace V2_0 = OHOS::HDI::Nnrt::V2_0;
+
+inline std::string ConverterRetToString(OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode returnCode)
+{
+    static std::unordered_map<V2_0::NNRT_ReturnCode, std::string> nnrtRet2StringMap{
+        {V2_0::NNRT_ReturnCode::NNRT_SUCCESS, "NNRT_SUCCESS"},
+        {V2_0::NNRT_ReturnCode::NNRT_FAILED, "NNRT_FAILED"},
+        {V2_0::NNRT_ReturnCode::NNRT_NULL_PTR, "NNRT_NULL_PTR"},
+        {V2_0::NNRT_ReturnCode::NNRT_INVALID_PARAMETER, "NNRT_INVALID_PARAMETER"},
+        {V2_0::NNRT_ReturnCode::NNRT_MEMORY_ERROR, "NNRT_MEMORY_ERROR"},
+        {V2_0::NNRT_ReturnCode::NNRT_OUT_OF_MEMORY, "NNRT_OUT_OF_MEMORY"},
+        {V2_0::NNRT_ReturnCode::NNRT_OPERATION_FORBIDDEN, "NNRT_OPERATION_FORBIDDEN"},
+        {V2_0::NNRT_ReturnCode::NNRT_INVALID_FILE, "NNRT_INVALID_FILE"},
+        {V2_0::NNRT_ReturnCode::NNRT_INVALID_PATH, "NNRT_INVALID_PATH"},
+        {V2_0::NNRT_ReturnCode::NNRT_INSUFFICIENT_BUFFER, "NNRT_INSUFFICIENT_BUFFER"},
+        {V2_0::NNRT_ReturnCode::NNRT_NO_CHANGE, "NNRT_NO_CHANGE"},
+        {V2_0::NNRT_ReturnCode::NNRT_NOT_SUPPORT, "NNRT_NOT_SUPPORT"},
+        {V2_0::NNRT_ReturnCode::NNRT_SERVICE_ERROR, "NNRT_SERVICE_ERROR"},
+        {V2_0::NNRT_ReturnCode::NNRT_DEVICE_ERROR, "NNRT_DEVICE_ERROR"},
+        {V2_0::NNRT_ReturnCode::NNRT_DEVICE_BUSY, "NNRT_DEVICE_BUSY"},
+        {V2_0::NNRT_ReturnCode::NNRT_CANCELLED, "NNRT_CANCELLED"},
+        {V2_0::NNRT_ReturnCode::NNRT_PERMISSION_DENIED, "NNRT_PERMISSION_DENIED"},
+        {V2_0::NNRT_ReturnCode::NNRT_TIME_OUT, "NNRT_TIME_OUT"},
+        {V2_0::NNRT_ReturnCode::NNRT_INVALID_TENSOR, "NNRT_INVALID_TENSOR"},
+        {V2_0::NNRT_ReturnCode::NNRT_INVALID_NODE, "NNRT_INVALID_NODE"},
+        {V2_0::NNRT_ReturnCode::NNRT_INVALID_INPUT, "NNRT_INVALID_INPUT"},
+        {V2_0::NNRT_ReturnCode::NNRT_INVALID_OUTPUT, "NNRT_INVALID_OUTPUT"},
+        {V2_0::NNRT_ReturnCode::NNRT_INVALID_DATATYPE, "NNRT_INVALID_DATATYPE"},
+        {V2_0::NNRT_ReturnCode::NNRT_INVALID_FORMAT, "NNRT_INVALID_FORMAT"},
+        {V2_0::NNRT_ReturnCode::NNRT_INVALID_TENSOR_NAME, "NNRT_INVALID_TENSOR_NAME"},
+        {V2_0::NNRT_ReturnCode::NNRT_INVALID_SHAPE, "NNRT_INVALID_SHAPE"},
+        {V2_0::NNRT_ReturnCode::NNRT_OUT_OF_DIMENTION_RANGES, "NNRT_OUT_OF_DIMENTION_RANGES"},
+        {V2_0::NNRT_ReturnCode::NNRT_INVALID_BUFFER, "NNRT_INVALID_BUFFER"},
+        {V2_0::NNRT_ReturnCode::NNRT_INVALID_BUFFER_SIZE, "NNRT_INVALID_BUFFER_SIZE"},
+        {V2_0::NNRT_ReturnCode::NNRT_INVALID_PERFORMANCE_MODE, "NNRT_INVALID_PERFORMANCE_MODE"},
+        {V2_0::NNRT_ReturnCode::NNRT_INVALID_PRIORITY, "NNRT_INVALID_PRIORITY"},
+        {V2_0::NNRT_ReturnCode::NNRT_INVALID_MODEL, "NNRT_INVALID_MODEL"},
+        {V2_0::NNRT_ReturnCode::NNRT_INVALID_MODEL_CACHE, "NNRT_INVALID_MODEL_CACHE"},
+        {V2_0::NNRT_ReturnCode::NNRT_UNSUPPORTED_OP, "NNRT_UNSUPPORTED_OP"}
+    };
+
+    if (nnrtRet2StringMap.find(returnCode) == nnrtRet2StringMap.end()) {
+        return "";
+    }
+
+    return nnrtRet2StringMap.at(returnCode);
+}
+
+template<typename T>
+T CheckReturnCode(int32_t ret, T funcRet, const std::string& errorInfo)
+{
+    if (ret < 0) {
+        LOGE("%{public}s. An error occurred in HDI, errorcode is %{public}d.", errorInfo.c_str(), ret);
+    } else if (ret > 0) {
+        OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode nnrtRet = static_cast<OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode>(ret);
+        LOGE("%{public}s. Errorcode is %{public}s.", errorInfo.c_str(), ConverterRetToString(nnrtRet).c_str());
+    }
+
+    return funcRet;
+}
+} // namespace NeuralNetworkRuntime
+} // OHOS
+#endif // NEURAL_NETWORK_RUNTIME_HDI_RETURNCODE_UTILS_H
\ No newline at end of file
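[Editor's note] The new header above collapses the V2.0 error-handling boilerplate into one call. A minimal sketch of the intended call-site pattern, mirroring the hdi_device_v2_0.cpp changes earlier in this patch (the GetDeviceName method is illustrative; only CheckReturnCode and ConverterRetToString come from the header above):

    // V2.0 HDI methods return an NNRT_ReturnCode directly, so the caller no
    // longer threads a separate out-parameter through each call.
    OH_NN_ReturnCode HDIDeviceV2_0::GetDeviceName(std::string& name)
    {
        auto ret = m_iDevice->GetDeviceName(name);
        if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) {
            // Negative values are raw HDI errors and are logged numerically;
            // positive values are mapped to readable names via ConverterRetToString.
            // The second argument is simply returned as the fallback result.
            return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Get HDI device name failed");
        }
        return OH_NN_SUCCESS;
    }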
diff --git a/frameworks/native/nn_tensor.cpp b/frameworks/native/nn_tensor.cpp
index 68f392a..3f9abd4 100644
--- a/frameworks/native/nn_tensor.cpp
+++ b/frameworks/native/nn_tensor.cpp
@@ -390,9 +390,10 @@ bool NNTensor::CompareAttribute(const NNTensor& tensor) const
         return false;
     }
 
-    for (auto i = 0; i < dimensions.size(); i++) {
-        if (m_dimensions[i] != -1 && m_dimensions[i] != dimensions[i]) {
-            LOGI("Tensors have different dimension: dimension index: %u, dimension value: %d and %d.",
+    size_t dimensionsSize = dimensions.size();
+    for (size_t i = 0; i < dimensionsSize; i++) {
+        if ((m_dimensions[i] != -1) && (m_dimensions[i] != dimensions[i])) {
+            LOGI("Tensors have different dimension: dimension index: %zu, dimension value: %d and %d.",
                 i, m_dimensions[i], dimensions[i]);
             return false;
         }
diff --git a/frameworks/native/ops/conv2d_builder.cpp b/frameworks/native/ops/conv2d_builder.cpp
index 302f1e4..df23946 100644
--- a/frameworks/native/ops/conv2d_builder.cpp
+++ b/frameworks/native/ops/conv2d_builder.cpp
@@ -96,7 +96,7 @@ OH_NN_ReturnCode Conv2DBuilder::SetStrides(std::shared_ptr<NNTensor> tensor)
         return OH_NN_INVALID_PARAMETER;
     }
     const int64_t* pStrides = reinterpret_cast<const int64_t*>(buffer);
-    int stridesSize = tensor->GetElementCount();
+    uint32_t stridesSize = tensor->GetElementCount();
     m_strides.assign(pStrides, pStrides + stridesSize);
 
     return OH_NN_SUCCESS;
@@ -117,7 +117,7 @@ OH_NN_ReturnCode Conv2DBuilder::SetDilation(std::shared_ptr<NNTensor> tensor)
         return OH_NN_INVALID_PARAMETER;
     }
     const int64_t* pDilation = reinterpret_cast<const int64_t*>(buffer);
-    int dilationSize = tensor->GetElementCount();
+    uint32_t dilationSize = tensor->GetElementCount();
     m_dilation.assign(pDilation, pDilation + dilationSize);
 
     return OH_NN_SUCCESS;
@@ -161,7 +161,7 @@ OH_NN_ReturnCode Conv2DBuilder::SetPad(std::shared_ptr<NNTensor> tensor)
     }
 
     int64_t* pPadList = static_cast<int64_t*>(buffer);
-    int padListSize = tensor->GetElementCount();
+    uint32_t padListSize = tensor->GetElementCount();
     m_pad.assign(pPadList, pPadList + padListSize);
 }
diff --git a/frameworks/native/ops/conv2d_transpose_builder.cpp b/frameworks/native/ops/conv2d_transpose_builder.cpp
index 2e7b8b0..9111b55 100644
--- a/frameworks/native/ops/conv2d_transpose_builder.cpp
+++ b/frameworks/native/ops/conv2d_transpose_builder.cpp
@@ -89,7 +89,7 @@ OH_NN_ReturnCode Conv2DTransposeBuilder::SetStrides(std::shared_ptr<NNTensor> tensor)
         return OH_NN_INVALID_PARAMETER;
     }
     const int64_t* pStrides = reinterpret_cast<const int64_t*>(buffer);
-    int elementSize = tensor->GetElementCount();
+    uint32_t elementSize = tensor->GetElementCount();
     m_strides.assign(pStrides, pStrides + elementSize);
 
     return OH_NN_SUCCESS;
@@ -110,7 +110,7 @@ OH_NN_ReturnCode Conv2DTransposeBuilder::SetDilation(std::shared_ptr<NNTensor> tensor)
         return OH_NN_INVALID_PARAMETER;
     }
     const int64_t* pDilation = reinterpret_cast<const int64_t*>(buffer);
-    int dilationSize = tensor->GetElementCount();
+    uint32_t dilationSize = tensor->GetElementCount();
     m_dilation.assign(pDilation, pDilation + dilationSize);
 
     return OH_NN_SUCCESS;
@@ -154,7 +154,7 @@ OH_NN_ReturnCode Conv2DTransposeBuilder::SetPad(std::shared_ptr<NNTensor> tensor)
     }
 
     const int64_t* pPadList = reinterpret_cast<const int64_t*>(buffer);
-    int padListPadSize = tensor->GetElementCount();
+    uint32_t padListPadSize = tensor->GetElementCount();
     m_padList.assign(pPadList, pPadList + padListPadSize);
 }
@@ -200,7 +200,7 @@ OH_NN_ReturnCode Conv2DTransposeBuilder::SetOutPadding(std::shared_ptr<NNTensor> tensor)
         return OH_NN_INVALID_PARAMETER;
     }
     const int64_t* pOutputPadding = reinterpret_cast<const int64_t*>(buffer);
-    int outputPadSize = tensor->GetElementCount();
+    uint32_t outputPadSize = tensor->GetElementCount();
     m_outputPaddings.assign(pOutputPadding, pOutputPadding + outputPadSize);
 
     return OH_NN_SUCCESS;
diff --git a/frameworks/native/ops/depthwise_conv2d_native_builder.cpp b/frameworks/native/ops/depthwise_conv2d_native_builder.cpp
index 51a2066..d1fbeb8 100644
--- a/frameworks/native/ops/depthwise_conv2d_native_builder.cpp
+++ b/frameworks/native/ops/depthwise_conv2d_native_builder.cpp
@@ -113,7 +113,7 @@ OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetStrides(std::shared_ptr<NNTensor> tensor)
         return OH_NN_INVALID_PARAMETER;
     }
     const int64_t* pStrides = reinterpret_cast<const int64_t*>(buffer);
-    int stridesSize = tensor->GetElementCount();
+    uint32_t stridesSize = tensor->GetElementCount();
     m_strides.assign(pStrides, pStrides + stridesSize);
 
     return OH_NN_SUCCESS;
@@ -132,7 +132,7 @@ OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetDilation(std::shared_ptr<NNTensor> tensor)
         return OH_NN_INVALID_PARAMETER;
     }
     const int64_t* pDilation = reinterpret_cast<const int64_t*>(buffer);
-    int dilationSize = tensor->GetElementCount();
+    uint32_t dilationSize = tensor->GetElementCount();
     m_dilation.assign(pDilation, pDilation + dilationSize);
 
     return OH_NN_SUCCESS;
@@ -174,7 +174,7 @@ OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetPadModeOrPaddings(
     }
 
     const int64_t* pPadList = reinterpret_cast<const int64_t*>(buffer);
-    int padListSize = tensor->GetElementCount();
+    uint32_t padListSize = tensor->GetElementCount();
     m_pad.assign(pPadList, pPadList + padListSize);
     }
     return OH_NN_SUCCESS;
diff --git a/frameworks/native/ops/pooling_builder.cpp b/frameworks/native/ops/pooling_builder.cpp
index 9b52c8f..7338ed4 100644
--- a/frameworks/native/ops/pooling_builder.cpp
+++ b/frameworks/native/ops/pooling_builder.cpp
@@ -113,7 +113,7 @@ OH_NN_ReturnCode PoolingBuilder::SetKernel(std::shared_ptr<NNTensor> tensor)
     }
 
     const int64_t* pKernelSize = reinterpret_cast<const int64_t*>(buffer);
-    int kernelSize = tensor->GetElementCount();
+    uint32_t kernelSize = tensor->GetElementCount();
     m_kernelSize.assign(pKernelSize, pKernelSize + kernelSize);
 
     return OH_NN_SUCCESS;
@@ -135,7 +135,7 @@ OH_NN_ReturnCode PoolingBuilder::SetStrides(std::shared_ptr<NNTensor> tensor)
     }
 
     const int64_t* pStrides = reinterpret_cast<const int64_t*>(buffer);
-    int strideslSize = tensor->GetElementCount();
+    uint32_t strideslSize = tensor->GetElementCount();
     m_strides.assign(pStrides, pStrides + strideslSize);
 
     return OH_NN_SUCCESS;
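[Editor's note] The int → uint32_t changes above all follow one pattern: GetElementCount() yields an unsigned count that feeds pointer arithmetic, so an unsigned local avoids sign-conversion warnings. A generic sketch of the idiom (standalone; names are illustrative, not from this patch):

    #include <cstdint>
    #include <vector>

    // buffer holds elementCount int64_t values, as in the builder setters above.
    void AssignInt64Params(const void* buffer, uint32_t elementCount, std::vector<int64_t>& dst)
    {
        const int64_t* values = static_cast<const int64_t*>(buffer);
        // An unsigned count keeps the bound and the pointer offset in one domain.
        dst.assign(values, values + elementCount);
    }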
diff --git a/frameworks/native/ops/top_k_builder.h b/frameworks/native/ops/top_k_builder.h
index dfd4a6a..69d5080 100644
--- a/frameworks/native/ops/top_k_builder.h
+++ b/frameworks/native/ops/top_k_builder.h
@@ -36,7 +36,7 @@ private:
     OH_NN_ReturnCode SetSorted(std::shared_ptr<NNTensor> tensor);
 
 private:
-    bool m_sorted;
+    bool m_sorted {true}; // true means the output is sorted in descending order.
 };
 } // namespace Ops
 } // namespace NeuralNetworkRuntime
diff --git a/frameworks/native/prepared_model.h b/frameworks/native/prepared_model.h
index 2d25f6f..06ed645 100644
--- a/frameworks/native/prepared_model.h
+++ b/frameworks/native/prepared_model.h
@@ -35,9 +35,11 @@ public:
         std::vector<std::vector<int32_t>>& outputsDims,
         std::vector<bool>& isOutputBufferEnough) = 0;
 
-    virtual OH_NN_ReturnCode GetInputDimRanges(
-        std::vector<std::vector<uint32_t>>& minInputDims,
-        std::vector<std::vector<uint32_t>>& maxInputDims) { return OH_NN_OPERATION_FORBIDDEN; }
+    virtual OH_NN_ReturnCode GetInputDimRanges(std::vector<std::vector<uint32_t>>& minInputDims,
+                                               std::vector<std::vector<uint32_t>>& maxInputDims)
+    {
+        return OH_NN_OPERATION_FORBIDDEN;
+    }
 };
 } // OHOS
 } // namespace NeuralNetworkRuntime
diff --git a/frameworks/native/validation.h b/frameworks/native/validation.h
index 919d4c4..003df7b 100644
--- a/frameworks/native/validation.h
+++ b/frameworks/native/validation.h
@@ -26,7 +26,8 @@ template<typename T>
 OH_NN_ReturnCode ValidateArray(const T* data, size_t size)
 {
     if ((data != nullptr) != (size > 0)) {
-        LOGE("ValidateArray failed, data is %p but the length is %zu", data, size);
+        LOGE("ValidateArray failed, data should be a valid pointer when size is larger than 0, "
+            "otherwise, data should be nullptr when size is 0.");
         return OH_NN_INVALID_PARAMETER;
     }
     return OH_NN_SUCCESS;
diff --git a/test/unittest/BUILD.gn b/test/unittest/BUILD.gn
index cca7052..75d0457 100644
--- a/test/unittest/BUILD.gn
+++ b/test/unittest/BUILD.gn
@@ -16,8 +16,8 @@ import("//build/ohos.gni")
 group("unittest") {
   testonly = true
   deps = [
-    "inner_kits:inner_kits_unittest",
     "components:components_unittest",
+    "inner_kits:inner_kits_unittest",
     "ops:ops_unittest",
   ]
 }
\ No newline at end of file
--
Gitee
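[Editor's note] Before the offline-model patch below: the GetInputDimRanges hook added above is what enables checking user-supplied input shapes against driver-reported ranges. A sketch of how a caller might consume it (the helper name and surrounding checks are assumptions for illustration, not code from this series):

    // Validates user-supplied dimensions against the ranges reported by the
    // prepared model; one min/max vector is expected per model input.
    OH_NN_ReturnCode CheckInputDimRanges(std::shared_ptr<PreparedModel> preparedModel,
                                         const std::vector<std::vector<uint32_t>>& inputDims)
    {
        std::vector<std::vector<uint32_t>> minInputDims;
        std::vector<std::vector<uint32_t>> maxInputDims;
        OH_NN_ReturnCode ret = preparedModel->GetInputDimRanges(minInputDims, maxInputDims);
        if (ret != OH_NN_SUCCESS) {
            // e.g. OH_NN_OPERATION_FORBIDDEN from the base-class default above.
            return ret;
        }

        if (inputDims.size() != minInputDims.size() || inputDims.size() != maxInputDims.size()) {
            return OH_NN_INVALID_PARAMETER;
        }

        for (size_t i = 0; i < inputDims.size(); ++i) {
            if (inputDims[i].size() != minInputDims[i].size() ||
                inputDims[i].size() != maxInputDims[i].size()) {
                return OH_NN_INVALID_PARAMETER;
            }
            for (size_t j = 0; j < inputDims[i].size(); ++j) {
                if (inputDims[i][j] < minInputDims[i][j] || inputDims[i][j] > maxInputDims[i][j]) {
                    return OH_NN_INVALID_PARAMETER; // dimension outside the supported range
                }
            }
        }
        return OH_NN_SUCCESS;
    }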
From 96cee56fe7206c3b46ec56282a7e612cc838bd61 Mon Sep 17 00:00:00 2001
From: yuhanshi
Date: Mon, 24 Apr 2023 16:49:16 +0800
Subject: [PATCH 10/12] support offline model

Signed-off-by: yuhanshi
---
 frameworks/native/hdi_device_v2_0.cpp | 91 ++++++++++++++++++++-----
 frameworks/native/hdi_device_v2_0.h   |  1 +
 2 files changed, 76 insertions(+), 16 deletions(-)

diff --git a/frameworks/native/hdi_device_v2_0.cpp b/frameworks/native/hdi_device_v2_0.cpp
index 7c4794a..b625e69 100644
--- a/frameworks/native/hdi_device_v2_0.cpp
+++ b/frameworks/native/hdi_device_v2_0.cpp
@@ -86,7 +86,40 @@ V2_0::Priority TransPriority(const OH_NN_Priority& priority)
             return V2_0::Priority::PRIORITY_NONE;
     }
 }
+
+OH_NN_ReturnCode IsOfflineModel(std::shared_ptr<mindspore::lite::LiteGraph> liteGraph, bool& isOfflineModel)
+{
+    isOfflineModel = false; // Initialize the returned value
+    if (liteGraph == nullptr) {
+        LOGE("LiteGraph is empty when identifying the offline model.");
+        return OH_NN_NULL_PTR;
+    }
+
+    if (liteGraph->all_nodes_.size() == 0) {
+        LOGE("Find no node in the model.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    // If the model consists of more than 1 node, it will not be considered as an offline model.
+    if (liteGraph->all_nodes_.size() > 1) {
+        isOfflineModel = false;
+        return OH_NN_SUCCESS;
+    }
+
+    const mindspore::lite::LiteGraph::Node* pNode = liteGraph->all_nodes_[0];
+    if (pNode == nullptr) {
+        LOGE("Find invalid node in the model.");
+        return OH_NN_NULL_PTR;
+    }
+
+    const mindspore::lite::NodeType& nodeType = mindspore::lite::MindIR_Primitive_GetType(pNode->primitive_);
+    if (nodeType == mindspore::lite::NodeType::NODE_TYPE_CUSTOM) {
+        isOfflineModel = true;
+    }
+
+    return OH_NN_SUCCESS;
+}
+} // unnamed namespace
 
 HDIDeviceV2_0::HDIDeviceV2_0(OHOS::sptr<V2_0::INnrtDevice> device) : m_iDevice(device) {}
 
@@ -143,13 +176,27 @@ OH_NN_ReturnCode HDIDeviceV2_0::GetDeviceStatus(DeviceStatus& status)
 }
 
 OH_NN_ReturnCode HDIDeviceV2_0::GetSupportedOperation(std::shared_ptr<mindspore::lite::LiteGraph> model,
-    std::vector<bool>& ops)
+                                                      std::vector<bool>& ops)
 {
     if (model == nullptr) {
         LOGE("Model is nullptr, cannot query supported operation.");
         return OH_NN_NULL_PTR;
     }
 
+    bool isOfflineModel {false};
+    OH_NN_ReturnCode innerRet = IsOfflineModel(model, isOfflineModel);
+    if (innerRet != OH_NN_SUCCESS) {
+        LOGE("Check offline model failed.");
+        return innerRet;
+    }
+
+    // Offline models skip the per-operator query: always report a single [true] entry.
+    if (isOfflineModel) {
+        ops.clear();
+        ops.emplace_back(true);
+        return OH_NN_SUCCESS;
+    }
+
     OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {INVALID_FD, 0, 0, 0};
     size_t tensorSize = mindspore::lite::MindIR_LiteGraph_GetConstTensorSize(model.get());
     int32_t ret {0};
@@ -170,7 +217,7 @@ OH_NN_ReturnCode HDIDeviceV2_0::GetSupportedOperation(std::shared_ptr<mindspore::lite::LiteGraph> model,
     ret = m_iDevice->GetSupportedOperation(*iModel, ops);
 
     mindspore::lite::MindIR_Model_Destroy(&iModel);
-    auto innerRet = ReleaseSharedBuffer(tensorBuffer);
+    innerRet = ReleaseSharedBuffer(tensorBuffer);
     if (innerRet != OH_NN_SUCCESS) {
         LOGE("Release tensorBuffer failed.");
         return OH_NN_FAILED;
@@ -387,25 +434,21 @@ OH_NN_ReturnCode HDIDeviceV2_0::GetOfflineModelFromLiteGraph(std::shared_ptr<mindspore::lite::LiteGraph> graph,
-    size_t inputNum = graph->all_nodes_[0]->input_indices_.size();
+    const size_t inputNum = graph->all_nodes_[0]->input_indices_.size();
     if (inputNum < (size_t)2) {
         LOGE("LiteGraph with offline model should have at least two input tensors, only get %zu.", inputNum);
         return OH_NN_INVALID_PARAMETER;
     }
 
-    // The offline model is integrated into input tensors with index larger than 0.
-    mindspore::lite::TensorPtr pTensor;
-    std::vector<uint8_t> offlineModel;
-    for (size_t i = 1; i < inputNum; i++) {
-        pTensor = graph->all_tensors_[i];
-        offlineModel = mindspore::lite::MindIR_Tensor_GetData(pTensor);
-        if (offlineModel.size() == (size_t)0) {
-            LOGE("Offline model has size of 0, please check the ms model.");
-            return OH_NN_INVALID_PARAMETER;
-        }
-
-        offlineModels.emplace_back(std::move(offlineModel));
+    // The offline model is integrated into the last input tensor.
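+    // Editor's note (illustrative, not part of the original change): for a graph whose
+    // single Custom node has inputs [real_input_0, ..., real_input_k, model_blob],
+    // the index computed below selects model_blob, i.e. the vendor-compiled binary
+    // produced by an offline toolchain; the preceding inputs keep their runtime roles.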
+ uint32_t index = graph->all_nodes_[0]->input_indices_[inputNum - 1]; + mindspore::lite::TensorPtr pTensor = graph->all_tensors_[index]; + std::vector offlineModel = mindspore::lite::MindIR_Tensor_GetData(pTensor); + if (offlineModel.size() == (size_t) 0) { + LOGE("Offline model has size of 0, please check the ms model."); + return OH_NN_INVALID_PARAMETER; } + offlineModels.emplace_back(std::move(offlineModel)); return OH_NN_SUCCESS; } @@ -478,12 +521,14 @@ OH_NN_ReturnCode HDIDeviceV2_0::CopyOfflineModelToDevice(const std::vector& deviceBuffers, const ModelConfig& config, + const std::map> extensions, std::shared_ptr& preparedModel) { V2_0::ModelConfig iModelConfig; iModelConfig.enableFloat16 = config.enableFloat16; iModelConfig.mode = TransPerformanceMode(config.mode); iModelConfig.priority = TransPriority(config.priority); + iModelConfig.extensions = extensions; OHOS::sptr iPreparedModel; std::vector iBuffers; @@ -566,7 +611,21 @@ OH_NN_ReturnCode HDIDeviceV2_0::PrepareOfflineModel(std::shared_ptr valueFromCustomPrimitive; + std::vector value; + std::map> extensions; + std::vector attributes =\ + mindspore::lite::MindIR_Custom_GetAttr(model->all_nodes_[0]->primitive_); + for (const auto& attribute : attributes) { + key = mindspore::lite::MindIR_Attribute_GetName(*attribute); + valueFromCustomPrimitive = mindspore::lite::MindIR_Attribute_GetData(*attribute); + value.assign(valueFromCustomPrimitive.begin(), valueFromCustomPrimitive.end()); + extensions.insert(std::pair>(key, value)); + } + + status = PrepareOfflineModel(deviceBuffers, config, extensions, preparedModel); if (status != OH_NN_SUCCESS) { LOGE("PrepareOfflineModel failed."); return status; diff --git a/frameworks/native/hdi_device_v2_0.h b/frameworks/native/hdi_device_v2_0.h index 5f99601..65cb133 100644 --- a/frameworks/native/hdi_device_v2_0.h +++ b/frameworks/native/hdi_device_v2_0.h @@ -67,6 +67,7 @@ private: std::vector& deviceBuffers); OH_NN_ReturnCode PrepareOfflineModel(std::vector& deviceBuffers, const ModelConfig& config, + const std::map> extensions, std::shared_ptr& preparedModel); private: -- Gitee From c851c07b4a51fd657d98cfd2fc7458e74abd092b Mon Sep 17 00:00:00 2001 From: wangchuanxia Date: Thu, 18 May 2023 14:53:18 +0800 Subject: [PATCH 11/12] delete example changes Signed-off-by: wangchuanxia --- example/drivers/nnrt/{v1_0 => }/BUILD.gn | 0 .../nnrt/{v1_0 => }/hdi_cpu_service/BUILD.gn | 0 .../include/nnrt_device_service.h | 0 .../hdi_cpu_service/include/node_functions.h | 0 .../hdi_cpu_service/include/node_registry.h | 0 .../include/prepared_model_service.h | 0 .../include/shared_buffer_parser.h | 0 .../hdi_cpu_service/include/validation.h | 0 .../src/nnrt_device_driver.cpp | 0 .../src/nnrt_device_service.cpp | 0 .../hdi_cpu_service/src/node_functions.cpp | 0 .../hdi_cpu_service/src/node_registry.cpp | 0 .../src/prepared_model_service.cpp | 0 .../src/shared_buffer_parser.cpp | 0 .../hdi_cpu_service/src/validation.cpp | 0 example/drivers/nnrt/v2_0/BUILD.gn | 24 - .../nnrt/v2_0/hdi_cpu_service/BUILD.gn | 90 --- .../include/nnrt_device_service.h | 88 --- .../hdi_cpu_service/include/node_functions.h | 71 --- .../hdi_cpu_service/include/node_registry.h | 57 -- .../include/prepared_model_service.h | 80 --- .../include/shared_buffer_parser.h | 49 -- .../v2_0/hdi_cpu_service/include/validation.h | 33 -- .../src/nnrt_device_driver.cpp | 115 ---- .../src/nnrt_device_service.cpp | 529 ------------------ .../hdi_cpu_service/src/node_functions.cpp | 373 ------------ .../hdi_cpu_service/src/node_registry.cpp | 60 -- 
.../src/prepared_model_service.cpp | 461 --------------- .../src/shared_buffer_parser.cpp | 104 ---- .../v2_0/hdi_cpu_service/src/validation.cpp | 72 --- 30 files changed, 2206 deletions(-) rename example/drivers/nnrt/{v1_0 => }/BUILD.gn (100%) rename example/drivers/nnrt/{v1_0 => }/hdi_cpu_service/BUILD.gn (100%) rename example/drivers/nnrt/{v1_0 => }/hdi_cpu_service/include/nnrt_device_service.h (100%) rename example/drivers/nnrt/{v1_0 => }/hdi_cpu_service/include/node_functions.h (100%) rename example/drivers/nnrt/{v1_0 => }/hdi_cpu_service/include/node_registry.h (100%) rename example/drivers/nnrt/{v1_0 => }/hdi_cpu_service/include/prepared_model_service.h (100%) rename example/drivers/nnrt/{v1_0 => }/hdi_cpu_service/include/shared_buffer_parser.h (100%) rename example/drivers/nnrt/{v1_0 => }/hdi_cpu_service/include/validation.h (100%) rename example/drivers/nnrt/{v1_0 => }/hdi_cpu_service/src/nnrt_device_driver.cpp (100%) rename example/drivers/nnrt/{v1_0 => }/hdi_cpu_service/src/nnrt_device_service.cpp (100%) rename example/drivers/nnrt/{v1_0 => }/hdi_cpu_service/src/node_functions.cpp (100%) rename example/drivers/nnrt/{v1_0 => }/hdi_cpu_service/src/node_registry.cpp (100%) rename example/drivers/nnrt/{v1_0 => }/hdi_cpu_service/src/prepared_model_service.cpp (100%) rename example/drivers/nnrt/{v1_0 => }/hdi_cpu_service/src/shared_buffer_parser.cpp (100%) rename example/drivers/nnrt/{v1_0 => }/hdi_cpu_service/src/validation.cpp (100%) delete mode 100644 example/drivers/nnrt/v2_0/BUILD.gn delete mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/BUILD.gn delete mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/include/nnrt_device_service.h delete mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/include/node_functions.h delete mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/include/node_registry.h delete mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/include/prepared_model_service.h delete mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/include/shared_buffer_parser.h delete mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/include/validation.h delete mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/src/nnrt_device_driver.cpp delete mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/src/nnrt_device_service.cpp delete mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/src/node_functions.cpp delete mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/src/node_registry.cpp delete mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/src/prepared_model_service.cpp delete mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/src/shared_buffer_parser.cpp delete mode 100644 example/drivers/nnrt/v2_0/hdi_cpu_service/src/validation.cpp diff --git a/example/drivers/nnrt/v1_0/BUILD.gn b/example/drivers/nnrt/BUILD.gn similarity index 100% rename from example/drivers/nnrt/v1_0/BUILD.gn rename to example/drivers/nnrt/BUILD.gn diff --git a/example/drivers/nnrt/v1_0/hdi_cpu_service/BUILD.gn b/example/drivers/nnrt/hdi_cpu_service/BUILD.gn similarity index 100% rename from example/drivers/nnrt/v1_0/hdi_cpu_service/BUILD.gn rename to example/drivers/nnrt/hdi_cpu_service/BUILD.gn diff --git a/example/drivers/nnrt/v1_0/hdi_cpu_service/include/nnrt_device_service.h b/example/drivers/nnrt/hdi_cpu_service/include/nnrt_device_service.h similarity index 100% rename from example/drivers/nnrt/v1_0/hdi_cpu_service/include/nnrt_device_service.h rename to example/drivers/nnrt/hdi_cpu_service/include/nnrt_device_service.h diff --git 
a/example/drivers/nnrt/v1_0/hdi_cpu_service/include/node_functions.h b/example/drivers/nnrt/hdi_cpu_service/include/node_functions.h similarity index 100% rename from example/drivers/nnrt/v1_0/hdi_cpu_service/include/node_functions.h rename to example/drivers/nnrt/hdi_cpu_service/include/node_functions.h diff --git a/example/drivers/nnrt/v1_0/hdi_cpu_service/include/node_registry.h b/example/drivers/nnrt/hdi_cpu_service/include/node_registry.h similarity index 100% rename from example/drivers/nnrt/v1_0/hdi_cpu_service/include/node_registry.h rename to example/drivers/nnrt/hdi_cpu_service/include/node_registry.h diff --git a/example/drivers/nnrt/v1_0/hdi_cpu_service/include/prepared_model_service.h b/example/drivers/nnrt/hdi_cpu_service/include/prepared_model_service.h similarity index 100% rename from example/drivers/nnrt/v1_0/hdi_cpu_service/include/prepared_model_service.h rename to example/drivers/nnrt/hdi_cpu_service/include/prepared_model_service.h diff --git a/example/drivers/nnrt/v1_0/hdi_cpu_service/include/shared_buffer_parser.h b/example/drivers/nnrt/hdi_cpu_service/include/shared_buffer_parser.h similarity index 100% rename from example/drivers/nnrt/v1_0/hdi_cpu_service/include/shared_buffer_parser.h rename to example/drivers/nnrt/hdi_cpu_service/include/shared_buffer_parser.h diff --git a/example/drivers/nnrt/v1_0/hdi_cpu_service/include/validation.h b/example/drivers/nnrt/hdi_cpu_service/include/validation.h similarity index 100% rename from example/drivers/nnrt/v1_0/hdi_cpu_service/include/validation.h rename to example/drivers/nnrt/hdi_cpu_service/include/validation.h diff --git a/example/drivers/nnrt/v1_0/hdi_cpu_service/src/nnrt_device_driver.cpp b/example/drivers/nnrt/hdi_cpu_service/src/nnrt_device_driver.cpp similarity index 100% rename from example/drivers/nnrt/v1_0/hdi_cpu_service/src/nnrt_device_driver.cpp rename to example/drivers/nnrt/hdi_cpu_service/src/nnrt_device_driver.cpp diff --git a/example/drivers/nnrt/v1_0/hdi_cpu_service/src/nnrt_device_service.cpp b/example/drivers/nnrt/hdi_cpu_service/src/nnrt_device_service.cpp similarity index 100% rename from example/drivers/nnrt/v1_0/hdi_cpu_service/src/nnrt_device_service.cpp rename to example/drivers/nnrt/hdi_cpu_service/src/nnrt_device_service.cpp diff --git a/example/drivers/nnrt/v1_0/hdi_cpu_service/src/node_functions.cpp b/example/drivers/nnrt/hdi_cpu_service/src/node_functions.cpp similarity index 100% rename from example/drivers/nnrt/v1_0/hdi_cpu_service/src/node_functions.cpp rename to example/drivers/nnrt/hdi_cpu_service/src/node_functions.cpp diff --git a/example/drivers/nnrt/v1_0/hdi_cpu_service/src/node_registry.cpp b/example/drivers/nnrt/hdi_cpu_service/src/node_registry.cpp similarity index 100% rename from example/drivers/nnrt/v1_0/hdi_cpu_service/src/node_registry.cpp rename to example/drivers/nnrt/hdi_cpu_service/src/node_registry.cpp diff --git a/example/drivers/nnrt/v1_0/hdi_cpu_service/src/prepared_model_service.cpp b/example/drivers/nnrt/hdi_cpu_service/src/prepared_model_service.cpp similarity index 100% rename from example/drivers/nnrt/v1_0/hdi_cpu_service/src/prepared_model_service.cpp rename to example/drivers/nnrt/hdi_cpu_service/src/prepared_model_service.cpp diff --git a/example/drivers/nnrt/v1_0/hdi_cpu_service/src/shared_buffer_parser.cpp b/example/drivers/nnrt/hdi_cpu_service/src/shared_buffer_parser.cpp similarity index 100% rename from example/drivers/nnrt/v1_0/hdi_cpu_service/src/shared_buffer_parser.cpp rename to example/drivers/nnrt/hdi_cpu_service/src/shared_buffer_parser.cpp 
diff --git a/example/drivers/nnrt/v1_0/hdi_cpu_service/src/validation.cpp b/example/drivers/nnrt/hdi_cpu_service/src/validation.cpp similarity index 100% rename from example/drivers/nnrt/v1_0/hdi_cpu_service/src/validation.cpp rename to example/drivers/nnrt/hdi_cpu_service/src/validation.cpp diff --git a/example/drivers/nnrt/v2_0/BUILD.gn b/example/drivers/nnrt/v2_0/BUILD.gn deleted file mode 100644 index 28ca28b..0000000 --- a/example/drivers/nnrt/v2_0/BUILD.gn +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (c) 2022 Huawei Device Co., Ltd. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -if (defined(ohos_lite)) { - group("nnrt_entry") { - deps = [ ] - } -} else { - group("nnrt_entry") { - deps = [ - "./hdi_cpu_service:hdf_nnrt_service", - ] - } -} \ No newline at end of file diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/BUILD.gn b/example/drivers/nnrt/v2_0/hdi_cpu_service/BUILD.gn deleted file mode 100644 index 003de74..0000000 --- a/example/drivers/nnrt/v2_0/hdi_cpu_service/BUILD.gn +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright (c) 2022 Huawei Device Co., Ltd. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import("//build/ohos.gni") -import("//drivers/hdf_core/adapter/uhdf2/uhdf.gni") - -ohos_prebuilt_shared_library("mindspore_demo") { - source = "//drivers/peripheral/nnrt/v2_0/mindspore/mindspore/libmindspore-lite.huawei.so" - - install_images = [chipset_base_dir] - subsystem_name = "hdf" - part_name = "drivers_peripheral_nnrt" -} - -ohos_shared_library("libnnrt_device_service_2.0") { - include_dirs = [ - "//drivers/peripheral/nnrt/v2_0/hdi_cpu_service/include", - "//drivers/peripheral/nnrt/v2_0/mindspore", - "//third_party/flatbuffers/include", - "//commonlibrary/c_utils/base/include" - ] - sources = [ - "src/nnrt_device_service.cpp", - "src/prepared_model_service.cpp", - "src/node_registry.cpp", - "src/node_functions.cpp", - "src/shared_buffer_parser.cpp", - "src/validation.cpp" - ] - - deps = [ - "//drivers/interface/nnrt/v2_0:nnrt_idl_headers", - "//drivers/interface/nnrt/v2_0:libnnrt_stub_2.0", - ":mindspore_demo" - ] - - external_deps = [ - "hdf_core:libhdf_utils", - "hiviewdfx_hilog_native:libhilog", - "ipc:ipc_core", - "c_utils:utils" - ] - - install_images = [ chipset_base_dir ] - subsystem_name = "hdf" - part_name = "drivers_peripheral_nnrt" -} - -ohos_shared_library("libnnrt_driver") { - include_dirs = [] - sources = [ - "src/nnrt_device_driver.cpp" - ] - deps = [ - "//drivers/peripheral/nnrt/v2_0/hdi_cpu_service:libnnrt_device_service_2.0", - "//drivers/interface/nnrt/v2_0:libnnrt_stub_2.0" - ] - - external_deps = [ - "hdf_core:libhdf_host", - "hdf_core:libhdf_ipc_adapter", - "hdf_core:libhdf_utils", - "hiviewdfx_hilog_native:libhilog", - "ipc:ipc_core", - "c_utils:utils", - "hdf_core:libhdi" - ] - - install_images = [ chipset_base_dir ] - subsystem_name = "hdf" - part_name = "drivers_peripheral_nnrt" -} - -group("hdf_nnrt_service") { - deps = [ - ":mindspore_demo", - ":libnnrt_driver", - ":libnnrt_device_service_2.0", - ] -} \ No newline at end of file diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/include/nnrt_device_service.h b/example/drivers/nnrt/v2_0/hdi_cpu_service/include/nnrt_device_service.h deleted file mode 100644 index 9419f40..0000000 --- a/example/drivers/nnrt/v2_0/hdi_cpu_service/include/nnrt_device_service.h +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright (c) 2022 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef OHOS_HDI_NNRT_V2_0_NNRTDEVICESERVICE_H -#define OHOS_HDI_NNRT_V2_0_NNRTDEVICESERVICE_H - -#include - -#include "v2_0/innrt_device.h" -#include "ashmem.h" -#include "include/api/model.h" - -#include "mindspore_schema/model_generated.h" - -namespace OHOS { -namespace HDI { -namespace Nnrt { -namespace V2_0 { -class NnrtDeviceService : public INnrtDevice { -public: - NnrtDeviceService() = default; - virtual ~NnrtDeviceService(); - - int32_t GetDeviceName(std::string& name) override; - - int32_t GetVendorName(std::string& name) override; - - int32_t GetDeviceType(DeviceType& deviceType) override; - - int32_t GetDeviceStatus(DeviceStatus& status) override; - - int32_t GetSupportedOperation(const Model& model, std::vector& ops) override; - - int32_t IsFloat16PrecisionSupported(bool& isSupported) override; - - int32_t IsPerformanceModeSupported(bool& isSupported) override; - - int32_t IsPrioritySupported(bool& isSupported) override; - - int32_t IsDynamicInputSupported(bool& isSupported) override; - - int32_t PrepareModel(const Model& model, const ModelConfig& config, sptr& preparedModel) override; - - int32_t IsModelCacheSupported(bool& isSupported) override; - - int32_t PrepareModelFromModelCache(const std::vector& modelCache, const ModelConfig& config, - sptr& preparedModel) override; - - int32_t AllocateBuffer(uint32_t length, SharedBuffer& buffer) override; - - int32_t ReleaseBuffer(const SharedBuffer& buffer) override; - -private: - int32_t ValidateModelConfig(const ModelConfig& config) const; - int32_t ValidateModel(const Model& model) const; - std::shared_ptr TransModelToGraph(const Model& model) const; - std::unique_ptr TransTensor(const Tensor& tensor) const; - std::unique_ptr TransNode(const Node& node) const; - std::unique_ptr TransSubGraph(const SubGraph& graph, const size_t numTensor) const; - std::shared_ptr TransModelConfig(const ModelConfig& config) const; - int32_t ShowCustomAttributes(const std::map>& extensions) const; - int32_t ParseCustomAttributes(const std::map>& extensions, float& attr1, - std::string& attr2) const; - int32_t ConvertVecToFloat(std::vector vecFloat, float& result) const; - int32_t ConvertVecToString(std::vector vecFloat, std::string& result) const; - -private: - std::shared_ptr m_model {nullptr}; - std::unordered_map> m_ashmems; -}; -} // V2_0 -} // Nnrt -} // HDI -} // OHOS - -#endif // OHOS_HDI_NNRT_V2_0_NNRTDEVICESERVICE_H \ No newline at end of file diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/include/node_functions.h b/example/drivers/nnrt/v2_0/hdi_cpu_service/include/node_functions.h deleted file mode 100644 index 8e1fbb3..0000000 --- a/example/drivers/nnrt/v2_0/hdi_cpu_service/include/node_functions.h +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright (c) 2022 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef OHOS_HDI_NNR_NODE_FUNCTIONS_H -#define OHOS_HDI_NNR_NODE_FUNCTIONS_H - -#include - -#include "hdf_base.h" -#include "hdf_log.h" -#include -#include "node_registry.h" - -namespace OHOS { -namespace HDI { -namespace Nnrt { -namespace V2_0 { -template -int32_t ParsePrimitive(const std::vector& primitive, T& attr, - std::function parseFunc) -{ - if (primitive.empty()) { - HDF_LOGE("Primitive data is empty."); - return HDF_FAILURE; - } - - OHOS::MessageParcel parcelData; - bool ret = parcelData.WriteBuffer(primitive.data(), primitive.size()); - if (!ret) { - HDF_LOGE("Write data to MessageParcel failed."); - return HDF_FAILURE; - } - - ret = parseFunc(parcelData, attr); - if (!ret) { - HDF_LOGE("Unmarshalling data failed."); - return HDF_FAILURE; - } - return HDF_SUCCESS; -} - -PrimUniquePtr GetAddPrimitive(const std::vector& primitive); -PrimUniquePtr GetAvgPoolPrimitive(const std::vector& primitive); -PrimUniquePtr GetConcatPrimitive(const std::vector& primitive); -PrimUniquePtr GetConv2dPrimitive(const std::vector& primitive); -PrimUniquePtr GetFullConnectionPrimitive(const std::vector& primitive); -PrimUniquePtr GetMaxPoolFusionPrimitive(const std::vector& primitive); -PrimUniquePtr GetMatMulFusionPrimitive(const std::vector& primitive); -PrimUniquePtr GetSoftmaxPrimitive(const std::vector& primitive); -PrimUniquePtr GetReshapePrimitive(const std::vector& primitive); -PrimUniquePtr GetScaleFusionPrimitive(const std::vector& primitive); -PrimUniquePtr GetActivationPrimitive(const std::vector& primitive); -PrimUniquePtr GetQuantDTypeCastPrimitive(const std::vector& primitive); -PrimUniquePtr GetMulFusionPrimitive(const std::vector& primitive); -} // namespace V2_0 -} // namespace Nnrt -} // namespace HDI -} // namespace OHOS -#endif // OHOS_HDI_NNR_NODE_FUNCTIONS_H \ No newline at end of file diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/include/node_registry.h b/example/drivers/nnrt/v2_0/hdi_cpu_service/include/node_registry.h deleted file mode 100644 index 17d4b51..0000000 --- a/example/drivers/nnrt/v2_0/hdi_cpu_service/include/node_registry.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright (c) 2022 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef OHOS_HDI_NNR_NODE_REGISTRY_H -#define OHOS_HDI_NNR_NODE_REGISTRY_H - -#include -#include -#include - -#include "v2_0/nnrt_types.h" -#include "mindspore_schema/model_generated.h" - -namespace OHOS { -namespace HDI { -namespace Nnrt { -namespace V2_0 { -using PrimUniquePtr = std::unique_ptr; -class NodeRegistry { -public: - struct Registrar { - Registrar() = delete; - Registrar(NodeType type, std::function&)> nodeFunc); - }; - -public: - static NodeRegistry& GetSingleton(); - std::function&)> GetNodeFunc(NodeType type) const; - bool IsNodeTypeExist(NodeType type) const; - -private: - NodeRegistry() {}; - NodeRegistry(const NodeRegistry&) = delete; - NodeRegistry& operator=(const NodeRegistry&) = delete; - -private: - std::unordered_map&)>> m_nodeRegs; -}; - -#define REGISTER_NODE(nodeName, nodeType, funcPtr) static NodeRegistry::Registrar g_##nodeName(nodeType, funcPtr) -} // namespace V2_0 -} // namespace Nnrt -} // namespace HDI -} // namespace OHOS -#endif // OHOS_HDI_NNR_NODE_REGISTRY_H \ No newline at end of file diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/include/prepared_model_service.h b/example/drivers/nnrt/v2_0/hdi_cpu_service/include/prepared_model_service.h deleted file mode 100644 index c52ed06..0000000 --- a/example/drivers/nnrt/v2_0/hdi_cpu_service/include/prepared_model_service.h +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (c) 2022 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef OHOS_HDI_NNR_V2_0_PREPAREDMODELSERVICE_H -#define OHOS_HDI_NNR_V2_0_PREPAREDMODELSERVICE_H - -#include "v2_0/iprepared_model.h" -#include "include/api/data_type.h" -#include "include/api/context.h" -#include "include/api/types.h" -#include "include/api/model.h" -#include "mindspore_schema/model_generated.h" -#include "ashmem.h" - -namespace OHOS { -namespace HDI { -namespace Nnrt { -namespace V2_0 { -constexpr int DYNAMIC_SHAPE_FLAG = -1; -class PreparedModelService : public IPreparedModel { -public: - PreparedModelService() = default; - - virtual ~PreparedModelService(); - - explicit PreparedModelService(std::shared_ptr context); - - int32_t Compile(std::shared_ptr graph); - - int32_t Compile(const void* modelBuffer, size_t length); - - int32_t ExportModelCache(std::vector& modelCache) override; - - int32_t Run(const std::vector& inputs, const std::vector& outputs, - std::vector>& outputsDims, std::vector& isOutputBufferEnough) override; - - int32_t GetInputDimRanges(std::vector>& minInputDims, - std::vector>& maxInputDims) override; - -private: - int32_t SetInputs(const std::vector& inputs); - int32_t SetOutputs(const std::vector& outputs); - int32_t GetMSInputsAndOutputs(); - int32_t CompareTensor(const IOTensor& tensor, const mindspore::MSTensor& msTensor); - sptr ParseBuffer(const SharedBuffer& buffer); - int32_t UpdateOutput(const std::vector& outputs, - std::vector>& outputsDims, std::vector& isOutputBufferEnough); - void ResetInputAndOutput(); - -private: - std::shared_ptr m_graph {nullptr}; - std::shared_ptr m_context {nullptr}; - flatbuffers::FlatBufferBuilder m_builder; - std::shared_ptr m_model {nullptr}; - sptr m_cacheBuffer {nullptr}; - std::vector> m_inputAshmems; - std::vector m_inputs; - std::vector> m_outputAshmems; - std::vector m_outputs; - std::vector> m_inputDims; - bool m_isDynamicShape {false}; -}; -} // V2_0 -} // Nnrt -} // HDI -} // OHOS - -#endif // OHOS_HDI_NNR_V2_0_PREPAREDMODELSERVICE_H \ No newline at end of file diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/include/shared_buffer_parser.h b/example/drivers/nnrt/v2_0/hdi_cpu_service/include/shared_buffer_parser.h deleted file mode 100644 index 8e74154..0000000 --- a/example/drivers/nnrt/v2_0/hdi_cpu_service/include/shared_buffer_parser.h +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright (c) 2022 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-#ifndef OHOS_HDI_NNR_V2_0_SHARED_BUFFER_PARSER_H
-#define OHOS_HDI_NNR_V2_0_SHARED_BUFFER_PARSER_H
-
-#include "ashmem.h"
-#include "v2_0/nnrt_types.h"
-
-namespace OHOS {
-namespace HDI {
-namespace Nnrt {
-namespace V2_0 {
-namespace {
-const int INVALID_FD = -1;
-}
-
-class SharedBufferParser {
-public:
-    SharedBufferParser() {};
-    ~SharedBufferParser();
-
-    int32_t Init(const SharedBuffer& buffer);
-    int32_t Init(const std::string& name, int32_t size);
-    void* GetBufferPtr();
-    SharedBuffer GetBuffer();
-
-private:
-    SharedBuffer m_buffer;
-    sptr<Ashmem> m_ashptr {nullptr};
-    void* m_bufferAddr {nullptr};
-};
-} // V2_0
-} // Nnrt
-} // HDI
-} // OHOS
-#endif // OHOS_HDI_NNR_V2_0_SHARED_BUFFER_PARSER_H
\ No newline at end of file
diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/include/validation.h b/example/drivers/nnrt/v2_0/hdi_cpu_service/include/validation.h
deleted file mode 100644
index ffcdf50..0000000
--- a/example/drivers/nnrt/v2_0/hdi_cpu_service/include/validation.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2022 Huawei Device Co., Ltd.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef OHOS_HDI_NNRT_VALIDATION_H
-#define OHOS_HDI_NNRT_VALIDATION_H
-
-#include "v2_0/nnrt_types.h"
-
-namespace OHOS {
-namespace HDI {
-namespace Nnrt {
-namespace V2_0 {
-int32_t ValidatePerformanceMode(PerformanceMode mode);
-int32_t ValidatePriority(Priority priority);
-int32_t ValidateDataType(DataType dataType);
-int32_t ValidateFormat(Format format);
-} // namespace V2_0
-} // namespace Nnrt
-} // namespace HDI
-} // namespace OHOS
-#endif // OHOS_HDI_NNRT_VALIDATION_H
\ No newline at end of file
diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/src/nnrt_device_driver.cpp b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/nnrt_device_driver.cpp
deleted file mode 100644
index fab6e89..0000000
--- a/example/drivers/nnrt/v2_0/hdi_cpu_service/src/nnrt_device_driver.cpp
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Copyright (c) 2022 Huawei Device Co., Ltd.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <hdf_base.h>
-#include <hdf_device_desc.h>
-#include <hdf_log.h>
-#include <hdf_sbuf_ipc.h>
-#include "v2_0/nnrt_device_stub.h"
-
-using namespace OHOS::HDI::Nnrt::V2_0;
-
-struct HdfNnrtDeviceHost {
-    struct IDeviceIoService ioService;
-    OHOS::sptr<OHOS::IRemoteObject> stub;
-};
-
-static int32_t NnrtDeviceDriverDispatch(struct HdfDeviceIoClient *client, int cmdId, struct HdfSBuf *data,
-    struct HdfSBuf *reply)
-{
-    auto *hdfNnrtDeviceHost = CONTAINER_OF(client->device->service, struct HdfNnrtDeviceHost, ioService);
-
-    OHOS::MessageParcel *dataParcel = nullptr;
-    OHOS::MessageParcel *replyParcel = nullptr;
-    OHOS::MessageOption option;
-
-    if (SbufToParcel(data, &dataParcel) != HDF_SUCCESS) {
-        HDF_LOGE("%{public}s:invalid data sbuf object to dispatch", __func__);
-        return HDF_ERR_INVALID_PARAM;
-    }
-    if (SbufToParcel(reply, &replyParcel) != HDF_SUCCESS) {
-        HDF_LOGE("%{public}s:invalid reply sbuf object to dispatch", __func__);
-        return HDF_ERR_INVALID_PARAM;
-    }
-
-    return hdfNnrtDeviceHost->stub->SendRequest(cmdId, *dataParcel, *replyParcel, option);
-}
-
-static int HdfNnrtDeviceDriverInit(struct HdfDeviceObject *deviceObject)
-{
-    HDF_LOGI("HdfNnrtDeviceDriverInit enter");
-    return HDF_SUCCESS;
-}
-
-static int HdfNnrtDeviceDriverBind(struct HdfDeviceObject *deviceObject)
-{
-    HDF_LOGI("HdfNnrtDeviceDriverBind enter");
-
-    auto *hdfNnrtDeviceHost = new (std::nothrow) HdfNnrtDeviceHost;
-    if (hdfNnrtDeviceHost == nullptr) {
-        HDF_LOGE("%{public}s: failed to create HdfNnrtDeviceHost object", __func__);
-        return HDF_FAILURE;
-    }
-
-    hdfNnrtDeviceHost->ioService.Dispatch = NnrtDeviceDriverDispatch;
-    hdfNnrtDeviceHost->ioService.Open = NULL;
-    hdfNnrtDeviceHost->ioService.Release = NULL;
-
-    auto serviceImpl = INnrtDevice::Get(true);
-    if (serviceImpl == nullptr) {
-        HDF_LOGE("%{public}s: failed to get the service implementation", __func__);
-        delete hdfNnrtDeviceHost;
-        return HDF_FAILURE;
-    }
-
-    hdfNnrtDeviceHost->stub = OHOS::HDI::ObjectCollector::GetInstance().GetOrNewObject(serviceImpl,
-        INnrtDevice::GetDescriptor());
-    if (hdfNnrtDeviceHost->stub == nullptr) {
-        HDF_LOGE("%{public}s: failed to get stub object", __func__);
-        delete hdfNnrtDeviceHost;
-        return HDF_FAILURE;
-    }
-
-    deviceObject->service = &hdfNnrtDeviceHost->ioService;
-    return HDF_SUCCESS;
-}
-
-static void HdfNnrtDeviceDriverRelease(struct HdfDeviceObject *deviceObject)
-{
-    HDF_LOGI("HdfNnrtDeviceDriverRelease enter");
-    if (deviceObject->service == nullptr) {
-        HDF_LOGE("HdfNnrtDeviceDriverRelease not initialized");
-        return;
-    }
-
-    auto *hdfNnrtDeviceHost = CONTAINER_OF(deviceObject->service, struct HdfNnrtDeviceHost, ioService);
-    delete hdfNnrtDeviceHost;
-}
-
-struct HdfDriverEntry g_nnrtdeviceDriverEntry = {
-    .moduleVersion = 2,
-    .moduleName = "nnrt",
-    .Bind = HdfNnrtDeviceDriverBind,
-    .Init = HdfNnrtDeviceDriverInit,
-    .Release = HdfNnrtDeviceDriverRelease,
-};
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-HDF_INIT(g_nnrtdeviceDriverEntry);
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
\ No newline at end of file
diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/src/nnrt_device_service.cpp b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/nnrt_device_service.cpp
deleted file mode 100644
index 77ca239..0000000
--- a/example/drivers/nnrt/v2_0/hdi_cpu_service/src/nnrt_device_service.cpp
+++ /dev/null
@@ -1,529 +0,0 @@
-/*
- * Copyright (c) 2022 Huawei Device Co., Ltd.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "nnrt_device_service.h" - -#include -#include "hdf_log.h" -#include "ashmem.h" -#include "securec.h" - -#include "node_registry.h" -#include "prepared_model_service.h" -#include "shared_buffer_parser.h" -#include "validation.h" - -namespace OHOS { -namespace HDI { -namespace Nnrt { -namespace V2_0 { -extern "C" INnrtDevice *NnrtDeviceImplGetInstance(void) -{ - return new (std::nothrow) NnrtDeviceService(); -} - -NnrtDeviceService::~NnrtDeviceService() -{ - for (auto ash : m_ashmems) { - ash.second->UnmapAshmem(); - ash.second->CloseAshmem(); - } -} - -int32_t NnrtDeviceService::GetDeviceName(std::string& name) -{ - name = "RK3568-CPU"; - return HDF_SUCCESS; -} - -int32_t NnrtDeviceService::GetVendorName(std::string& name) -{ - name = "Rockchip"; - return HDF_SUCCESS; -} - -int32_t NnrtDeviceService::GetDeviceType(DeviceType& deviceType) -{ - deviceType = DeviceType::CPU; - return HDF_SUCCESS; -} - -int32_t NnrtDeviceService::GetDeviceStatus(DeviceStatus& status) -{ - status = DeviceStatus::AVAILABLE; - return HDF_SUCCESS; -} - -int32_t NnrtDeviceService::GetSupportedOperation(const Model& model, std::vector& ops) -{ - size_t nodeSize = model.nodes.size(); - auto nodes = model.nodes; - ops.resize(nodeSize, false); - auto& regInstance = NodeRegistry::GetSingleton(); - for (size_t i = 0; i < nodeSize; i++) { - ops[i] = regInstance.IsNodeTypeExist(nodes[i].nodeType); - } - return HDF_SUCCESS; -} - -int32_t NnrtDeviceService::IsFloat16PrecisionSupported(bool& isSupported) -{ - isSupported = true; - return HDF_SUCCESS; -} - -int32_t NnrtDeviceService::IsPerformanceModeSupported(bool& isSupported) -{ - isSupported = true; - return HDF_SUCCESS; -} - -int32_t NnrtDeviceService::IsPrioritySupported(bool& isSupported) -{ - isSupported = false; - return HDF_SUCCESS; -} - -int32_t NnrtDeviceService::IsDynamicInputSupported(bool& isSupported) -{ - isSupported = true; - return HDF_SUCCESS; -} - -int32_t NnrtDeviceService::ShowCustomAttributes(const std::map>& extensions) const -{ - float attr1{0.0}; - std::string attr2; - - auto ret = ParseCustomAttributes(extensions, attr1, attr2); - if (ret != HDF_SUCCESS) { - HDF_LOGE("Parsing custom attributes failed."); - return ret; - } - - if (attr1 != 0.0f) { - HDF_LOGI("Set attr1: %f", attr1); - } - - if (!attr2.empty()) { - HDF_LOGI("Set attr2: %s", attr2.c_str()); - } - - return HDF_SUCCESS; -} - -int32_t NnrtDeviceService::PrepareModel(const Model& model, const ModelConfig& config, - sptr& preparedModel) -{ - auto ret = ValidateModel(model); - if (ret != HDF_SUCCESS) { - HDF_LOGE("Model is invalid."); - return ret; - } - - auto graph = TransModelToGraph(model); - if (graph == nullptr) { - HDF_LOGE("Transfrom model to graph failed."); - return HDF_ERR_INVALID_PARAM; - } - - ret = ValidateModelConfig(config); - if (ret != HDF_SUCCESS) { - HDF_LOGE("ModelConfig is invalid."); - return ret; - } - - ret = ShowCustomAttributes(config.extensions); - if (ret != HDF_SUCCESS) { - HDF_LOGE("Showing custom attributes failed."); - return ret; - } - - auto context = TransModelConfig(config); - sptr service = new (std::nothrow) 
PreparedModelService(context); - if (service == nullptr) { - HDF_LOGE("Create new PreparedModelService instance failed."); - return HDF_ERR_MALLOC_FAIL; - } - - ret = service->Compile(graph); - if (ret != HDF_SUCCESS) { - HDF_LOGE("Prepared model failed."); - return ret; - } - - preparedModel = service; - return HDF_SUCCESS; -} - -int32_t NnrtDeviceService::IsModelCacheSupported(bool& isSupported) -{ - isSupported = true; - return HDF_SUCCESS; -} - -int32_t NnrtDeviceService::PrepareModelFromModelCache(const std::vector& modelCache, - const ModelConfig& config, sptr& preparedModel) -{ - HDF_LOGD("Using cache to prepare model."); - - // modelCache must be 1, because PreparedModel only export one cache file. - if (modelCache.size() != 1) { - HDF_LOGE("The size of modelCache vector is not valid, it should be one elememt in that vector."); - return HDF_ERR_INVALID_PARAM; - } - - SharedBufferParser parser; - auto ret = parser.Init(modelCache[0]); - if (ret != HDF_SUCCESS) { - HDF_LOGE("Parse modle buffer failed."); - return HDF_ERR_INVALID_PARAM; - } - - ret = ValidateModelConfig(config); - if (ret != HDF_SUCCESS) { - HDF_LOGE("ModelConfig is invalid."); - return ret; - } - - ret = ShowCustomAttributes(config.extensions); - if (ret != HDF_SUCCESS) { - HDF_LOGE("Showing custom attributes failed."); - return ret; - } - - auto context = TransModelConfig(config); - sptr service = new (std::nothrow) PreparedModelService(context); - if (service == nullptr) { - HDF_LOGE("Create new instance PreparedModelService failed."); - return HDF_ERR_MALLOC_FAIL; - } - - void* modelBuffer = parser.GetBufferPtr(); - ret = service->Compile(modelBuffer, modelCache[0].dataSize); - if (ret != HDF_SUCCESS) { - HDF_LOGE("Prepared model failed."); - return ret; - } - - preparedModel = service; - return HDF_SUCCESS; -} - -int32_t NnrtDeviceService::AllocateBuffer(uint32_t length, SharedBuffer& buffer) -{ - sptr ashptr = Ashmem::CreateAshmem("allocateBuffer", length); - if (ashptr == nullptr) { - HDF_LOGE("Create shared memory failed."); - return HDF_FAILURE; - } - - if (!ashptr->MapReadAndWriteAshmem()) { - HDF_LOGE("Map allocate buffer failed."); - return HDF_FAILURE; - } - - buffer.fd = ashptr->GetAshmemFd(); - buffer.bufferSize = ashptr->GetAshmemSize(); - buffer.offset = 0; - buffer.dataSize = length; - - m_ashmems[buffer.fd] = ashptr; - return HDF_SUCCESS; -} - -int32_t NnrtDeviceService::ReleaseBuffer(const SharedBuffer& buffer) -{ - // parser will close current fd. - SharedBufferParser parser; - auto ret = parser.Init(buffer); - if (ret != HDF_SUCCESS) { - HDF_LOGE("Parse buffer failed."); - return HDF_ERR_INVALID_PARAM; - } - - for (auto& ash : m_ashmems) { - ash.second->UnmapAshmem(); - ash.second->CloseAshmem(); - } - m_ashmems.clear(); - return HDF_SUCCESS; -} - -int32_t NnrtDeviceService::ValidateModelConfig(const ModelConfig& config) const -{ - if (!ValidatePerformanceMode(config.mode)) { - HDF_LOGE("PerformanceMode is invalid. mode=%d", config.mode); - return HDF_ERR_INVALID_PARAM; - } - - if (!ValidatePriority(config.priority)) { - HDF_LOGE("Priority is invalid. 
priority=%d", config.priority); - return HDF_ERR_INVALID_PARAM; - } - - return HDF_SUCCESS; -} - -int32_t NnrtDeviceService::ValidateModel(const Model& model) const -{ - if (model.allTensors.empty()) { - HDF_LOGE("Model has no tensors."); - return HDF_ERR_INVALID_PARAM; - } - - if (model.subGraph.empty()) { - HDF_LOGE("Model has no subGraphs."); - return HDF_ERR_INVALID_PARAM; - } - - if (model.nodes.empty()) { - HDF_LOGE("Model has no nodes."); - return HDF_ERR_INVALID_PARAM; - } - - if (model.inputIndex.empty()) { - HDF_LOGE("Model has no input."); - return HDF_ERR_INVALID_PARAM; - } - - if (model.outputIndex.empty()) { - HDF_LOGE("Model has no output."); - return HDF_ERR_INVALID_PARAM; - } - - size_t tensorSize = model.allTensors.size(); - for (auto index : model.inputIndex) { - if (index > tensorSize) { - HDF_LOGE("Input index is invalid, index=%u", index); - return HDF_ERR_INVALID_PARAM; - } - } - - for (auto index : model.outputIndex) { - if (index > tensorSize) { - HDF_LOGE("Output index is invalid, index=%u", index); - return HDF_ERR_INVALID_PARAM; - } - } - - return HDF_SUCCESS; -} - -std::shared_ptr NnrtDeviceService::TransModelToGraph(const Model& model) const -{ - auto metaGraph = std::make_shared(); - metaGraph->name = model.name; - metaGraph->version = mindspore::Version(); - - std::unique_ptr transTensor{nullptr}; - for (auto tensor : model.allTensors) { - transTensor = TransTensor(tensor); - if (transTensor == nullptr) { - HDF_LOGE("Transform tensor failed."); - return nullptr; - } - metaGraph->allTensors.emplace_back(std::move(transTensor)); - } - metaGraph->inputIndex = model.inputIndex; - metaGraph->outputIndex = model.outputIndex; - - // Transform node - std::unique_ptr transNode {nullptr}; - for (auto& node : model.nodes) { - transNode = TransNode(node); - if (transNode == nullptr) { - HDF_LOGE("Transform node failed, node name=%{public}s", node.name.c_str()); - return nullptr; - } - metaGraph->nodes.emplace_back(std::move(transNode)); - } - - // Transform subgraph - const size_t numTensor = model.allTensors.size(); - for (auto graph : model.subGraph) { - metaGraph->subGraph.emplace_back(TransSubGraph(graph, numTensor)); - } - return metaGraph; -} - -std::unique_ptr NnrtDeviceService::TransTensor(const Tensor& tensor) const -{ - if (!ValidateDataType(tensor.dataType)) { - HDF_LOGE("DataType of tensor is invalid. dataType=%d", tensor.dataType); - return nullptr; - } - - if (!ValidateFormat(tensor.format)) { - HDF_LOGE("Format of tensor is invalid. 
format=%d", tensor.format); - return nullptr; - } - - auto schemaTensor = std::make_unique(); - schemaTensor->name = tensor.name; - schemaTensor->dataType = static_cast(tensor.dataType); - schemaTensor->format = static_cast(tensor.format); - schemaTensor->dims = tensor.dims; - for (auto param : tensor.quantParams) { - auto quantParam = std::make_unique(); - quantParam->scale = param.scale; - quantParam->zeroPoint = param.zeroPoint; - quantParam->numBits = param.numBits; - quantParam->inited = true; - schemaTensor->quantParams.emplace_back(std::move(quantParam)); - } - - if (tensor.data.fd != INVALID_FD) { - SharedBufferParser parser; - auto ret = parser.Init(tensor.data); - if (ret != HDF_SUCCESS) { - HDF_LOGE("Parse tensor data failed."); - return nullptr; - } - - auto data = parser.GetBufferPtr(); - schemaTensor->data.resize(tensor.data.dataSize); - auto memRet = memcpy_s(const_cast(schemaTensor->data.data()), - tensor.data.dataSize, data, tensor.data.dataSize); - if (memRet != EOK) { - HDF_LOGW("Copy tensor data failed."); - return nullptr; - } - } - return schemaTensor; -} - -std::unique_ptr NnrtDeviceService::TransNode(const Node& node) const -{ - auto cnode = std::make_unique(); - cnode->name = node.name; - cnode->inputIndex = node.inputIndex; - cnode->outputIndex = node.outputIndex; - cnode->quantType = static_cast(node.quantType); - - auto& regInstance = NodeRegistry::GetSingleton(); - auto parseFunc = regInstance.GetNodeFunc(node.nodeType); - auto primitive = parseFunc(node.nodeAttr); - if (primitive == nullptr) { - HDF_LOGE("Parse primitve data failed. node name=%{public}s", node.name.c_str()); - return nullptr; - } - - cnode->primitive = std::move(primitive); - return cnode; -} - -std::unique_ptr NnrtDeviceService::TransSubGraph(const SubGraph& graph, - const size_t numTensor) const -{ - auto subGraph = std::make_unique(); - subGraph->name = graph.name; - subGraph->inputIndices = graph.inputIndices; - subGraph->outputIndices = graph.outputIndices; - subGraph->nodeIndices = graph.nodeIndices; - subGraph->tensorIndices.reserve(numTensor); - for (size_t i = 0; i < numTensor; i++) { - subGraph->tensorIndices.emplace_back(static_cast(i)); - } - return subGraph; -} - -std::shared_ptr NnrtDeviceService::TransModelConfig(const ModelConfig& config) const -{ - auto context = std::make_shared(); - const int cpuThreadNum = 2; - const int cpuNoAffinities = 0; - const int cpuBigCore = 1; - const int cpuLittleCore = 2; - context->SetThreadNum(cpuThreadNum); - - int mode = cpuNoAffinities; - switch (config.mode) { - case PerformanceMode::PERFORMANCE_LOW: - case PerformanceMode::PERFORMANCE_MEDIUM: - mode = cpuLittleCore; - break; - case PerformanceMode::PERFORMANCE_HIGH: - case PerformanceMode::PERFORMANCE_EXTREME: - mode = cpuBigCore; - break; - default: - mode = cpuNoAffinities; - } - context->SetThreadAffinity(mode); - - auto cpuInfo = std::make_shared(); - cpuInfo->SetEnableFP16(config.enableFloat16); - auto& deviceInfos = context->MutableDeviceInfo(); - deviceInfos.emplace_back(cpuInfo); - return context; -} - -int32_t NnrtDeviceService::ConvertVecToFloat(std::vector vecFloat, float& result) const -{ - if (vecFloat.size() != sizeof(float)) { - HDF_LOGE("Size of the int8_t vector dose not match a float value."); - return HDF_ERR_INVALID_PARAM; - } - - result = *(reinterpret_cast(vecFloat.data())); - return HDF_SUCCESS; -} - -int32_t NnrtDeviceService::ConvertVecToString(std::vector vecFloat, std::string& result) const -{ - if (vecFloat.empty()) { - HDF_LOGE("int8_t vector is empty."); - 
return HDF_ERR_INVALID_PARAM;
-    }
-
-    result = reinterpret_cast<const char*>(vecFloat.data());
-    return HDF_SUCCESS;
-}
-
-int32_t NnrtDeviceService::ParseCustomAttributes(const std::map<std::string, std::vector<int8_t>>& extensions,
-    float& attr1, std::string& attr2) const
-{
-    int32_t ret;
-    for (auto extension : extensions) {
-        if (extension.first == "attr1") {
-            ret = ConvertVecToFloat(extension.second, attr1);
-            if (ret != HDF_SUCCESS) {
-                HDF_LOGE("ConvertVecToFloat failed.");
-                return ret;
-            }
-            if (attr1 <= 0.0f || attr1 > 1.0f) {
-                HDF_LOGE("attr1 is out of range (0,1].");
-                return HDF_ERR_INVALID_PARAM;
-            }
-        } else if (extension.first == "attr2") {
-            ret = ConvertVecToString(extension.second, attr2);
-            if (ret != HDF_SUCCESS) {
-                HDF_LOGE("ConvertVecToString failed.");
-                return ret;
-            }
-            if (attr2 != "LOW" && attr2 != "HIGH") {
-                HDF_LOGE("attr2 is neither LOW nor HIGH.");
-                return HDF_ERR_INVALID_PARAM;
-            }
-        }
-    }
-
-    return HDF_SUCCESS;
-}
-} // V2_0
-} // Nnrt
-} // HDI
-} // OHOS
diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/src/node_functions.cpp b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/node_functions.cpp
deleted file mode 100644
index fb7a701..0000000
--- a/example/drivers/nnrt/v2_0/hdi_cpu_service/src/node_functions.cpp
+++ /dev/null
@@ -1,373 +0,0 @@
-/*
- * Copyright (c) 2022 Huawei Device Co., Ltd.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -#include "node_functions.h" - -#include "node_registry.h" -#include -#include - -namespace OHOS { -namespace HDI { -namespace Nnrt { -namespace V2_0 { -PrimUniquePtr GetAddPrimitive(const std::vector& primitive) -{ - AddFusion addAttr; - auto ret = ParsePrimitive(primitive, addAttr, AddFusionBlockUnmarshalling); - if (ret != HDF_SUCCESS) { - HDF_LOGE("Parse primitive data of AddFusion operator failed."); - return nullptr; - } - - auto prim = std::make_unique(); - prim->value.type = mindspore::schema::PrimitiveType_AddFusion; - auto attr = new (std::nothrow) mindspore::schema::AddFusionT; - if (attr == nullptr) { - HDF_LOGE("Create AddFusion primitive failed."); - return nullptr; - } - attr->activation_type = static_cast(addAttr.activationType); - prim->value.value = attr; - return prim; -} - -PrimUniquePtr GetAvgPoolPrimitive(const std::vector& primitive) -{ - AvgPoolFusion avgPoolAttr; - auto ret = ParsePrimitive(primitive, avgPoolAttr, AvgPoolFusionBlockUnmarshalling); - if (ret != HDF_SUCCESS) { - HDF_LOGE("Parse primitive data of AvgPoolFusion operator failed."); - return nullptr; - } - - auto prim = std::make_unique(); - prim->value.type = mindspore::schema::PrimitiveType_AvgPoolFusion; - - auto attr = new (std::nothrow) mindspore::schema::AvgPoolFusionT; - if (attr == nullptr) { - HDF_LOGE("Create AvgPoolFusion primitive failed."); - return nullptr; - } - attr->kernel_size = avgPoolAttr.kernelSize; - attr->strides = avgPoolAttr.strides; - attr->pad = avgPoolAttr.pad; - attr->pad_mode = static_cast(avgPoolAttr.padMode); - attr->round_mode = static_cast(avgPoolAttr.roundMode); - attr->format = static_cast(avgPoolAttr.format); - attr->global = avgPoolAttr.global; - attr->activation_type = static_cast(avgPoolAttr.activationType); - prim->value.value = attr; - return prim; -} - -PrimUniquePtr GetConcatPrimitive(const std::vector& primitive) -{ - Concat concatAttr; - auto ret = ParsePrimitive(primitive, concatAttr, ConcatBlockUnmarshalling); - if (ret != HDF_SUCCESS) { - HDF_LOGE("Parse primitive data of Concat operator failed."); - return nullptr; - } - - auto prim = std::make_unique(); - prim->value.type = mindspore::schema::PrimitiveType_Concat; - - auto attr = new (std::nothrow) mindspore::schema::ConcatT; - if (attr == nullptr) { - HDF_LOGE("Create concat primitive failed."); - return nullptr; - } - attr->axis = concatAttr.axis; - prim->value.value = attr; - return prim; -} - -PrimUniquePtr GetConv2dPrimitive(const std::vector& primitive) -{ - Conv2DFusion conv2dAttr; - auto ret = ParsePrimitive(primitive, conv2dAttr, Conv2DFusionBlockUnmarshalling); - if (ret != HDF_SUCCESS) { - HDF_LOGE("Parse primitive data of Conv2DFusion operator failed."); - return nullptr; - } - - auto prim = std::make_unique(); - prim->value.type = mindspore::schema::PrimitiveType_Conv2DFusion; - - auto attr = new (std::nothrow) mindspore::schema::Conv2DFusionT; - if (attr == nullptr) { - HDF_LOGE("Create Conv2DFusion primitive failed."); - return nullptr; - } - - attr->kernel_size = conv2dAttr.kernelSize; - attr->stride = conv2dAttr.stride; - attr->dilation = conv2dAttr.dilation; - attr->pad_mode = static_cast(conv2dAttr.padMode); - attr->pad_list = conv2dAttr.padList; - attr->group = conv2dAttr.group; - attr->in_channel = conv2dAttr.inChannel; - attr->out_channel = conv2dAttr.outChannel; - attr->activation_type = static_cast(conv2dAttr.activationType); - - prim->value.value = attr; - return prim; -} - -PrimUniquePtr GetFullConnectionPrimitive(const std::vector& primitive) -{ - FullConnection 
fullConnAttr; - auto ret = ParsePrimitive(primitive, fullConnAttr, FullConnectionBlockUnmarshalling); - if (ret != HDF_SUCCESS) { - HDF_LOGE("Parse primitive data of FullConnection operator failed."); - return nullptr; - } - - auto prim = std::make_unique(); - prim->value.type = mindspore::schema::PrimitiveType_FullConnection; - - auto attr = new (std::nothrow) mindspore::schema::FullConnectionT; - if (attr == nullptr) { - HDF_LOGE("Create FullConnection primitive failed."); - return nullptr; - } - - attr->has_bias = fullConnAttr.hasBias; - attr->use_axis = fullConnAttr.useAxis; - attr->axis = fullConnAttr.axis; - attr->activation_type = static_cast(fullConnAttr.activationType); - - prim->value.value = attr; - return prim; -} - -PrimUniquePtr GetMaxPoolFusionPrimitive(const std::vector& primitive) -{ - MaxPoolFusion maxPoolAttr; - auto ret = ParsePrimitive(primitive, maxPoolAttr, MaxPoolFusionBlockUnmarshalling); - if (ret != HDF_SUCCESS) { - HDF_LOGE("Parse primitive data of MaxPoolFusion operator failed."); - return nullptr; - } - - auto prim = std::make_unique(); - prim->value.type = mindspore::schema::PrimitiveType_MaxPoolFusion; - - auto attr = new (std::nothrow) mindspore::schema::MaxPoolFusionT; - if (attr == nullptr) { - HDF_LOGE("Create MaxPoolFusion primitive failed."); - return nullptr; - } - - attr->kernel_size = maxPoolAttr.kernelSize; - attr->strides = maxPoolAttr.strides; - attr->pad = maxPoolAttr.pad; - attr->pad_mode = static_cast(maxPoolAttr.padMode); - attr->format = static_cast(maxPoolAttr.format); - attr->global = maxPoolAttr.global; - attr->activation_type = static_cast(maxPoolAttr.activationType); - - prim->value.value = attr; - return prim; -} - -PrimUniquePtr GetMatMulFusionPrimitive(const std::vector& primitive) -{ - MatMulFusion matmulAttr; - auto ret = ParsePrimitive(primitive, matmulAttr, MatMulFusionBlockUnmarshalling); - if (ret != HDF_SUCCESS) { - HDF_LOGE("Parse primitive data of MatMulFusion operator failed."); - return nullptr; - } - - auto prim = std::make_unique(); - prim->value.type = mindspore::schema::PrimitiveType_MatMulFusion; - - auto attr = new (std::nothrow) mindspore::schema::MatMulFusionT; - if (attr == nullptr) { - HDF_LOGE("Create MatMulFusion primitive failed."); - return nullptr; - } - - attr->transpose_a = matmulAttr.transposeA; - attr->transpose_b = matmulAttr.transposeB; - attr->activation_type = static_cast(matmulAttr.activationType); - - prim->value.value = attr; - return prim; -} - -PrimUniquePtr GetSoftmaxPrimitive(const std::vector& primitive) -{ - Softmax softmaxAttr; - auto ret = ParsePrimitive(primitive, softmaxAttr, SoftmaxBlockUnmarshalling); - if (ret != HDF_SUCCESS) { - HDF_LOGE("Parse primitive data of Softmax operator failed."); - return nullptr; - } - - auto prim = std::make_unique(); - prim->value.type = mindspore::schema::PrimitiveType_Softmax; - - auto attr = new (std::nothrow) mindspore::schema::SoftmaxT; - if (attr == nullptr) { - HDF_LOGE("Create Softmax primitive failed."); - return nullptr; - } - - attr->axis = softmaxAttr.axis; - prim->value.value = attr; - return prim; -} - -PrimUniquePtr GetReshapePrimitive(const std::vector& primitive) -{ - Reshape reshapeAttr; - auto ret = ParsePrimitive(primitive, reshapeAttr, ReshapeBlockUnmarshalling); - if (ret != HDF_SUCCESS) { - HDF_LOGE("Parse primitive data of Reshape operator failed."); - return nullptr; - } - - auto prim = std::make_unique(); - prim->value.type = mindspore::schema::PrimitiveType_Reshape; - - auto attr = new (std::nothrow) 
mindspore::schema::ReshapeT; - if (attr == nullptr) { - HDF_LOGE("Create Reshape primitive failed."); - return nullptr; - } - - prim->value.value = attr; - return prim; -} - -PrimUniquePtr GetScaleFusionPrimitive(const std::vector& primitive) -{ - ScaleFusion scaleAttr; - auto ret = ParsePrimitive(primitive, scaleAttr, ScaleFusionBlockUnmarshalling); - if (ret != HDF_SUCCESS) { - HDF_LOGE("Parse primitive data of ScaleFusion operator failed."); - return nullptr; - } - - auto prim = std::make_unique(); - prim->value.type = mindspore::schema::PrimitiveType_ScaleFusion; - - auto attr = new (std::nothrow) mindspore::schema::ScaleFusionT; - if (attr == nullptr) { - HDF_LOGE("Create ScaleFusion primitive failed."); - return nullptr; - } - - attr->axis = scaleAttr.axis; - attr->activation_type = static_cast(scaleAttr.activationType); - prim->value.value = attr; - return prim; -} - -PrimUniquePtr GetActivationPrimitive(const std::vector& primitive) -{ - Activation actAttr; - auto ret = ParsePrimitive(primitive, actAttr, ActivationBlockUnmarshalling); - if (ret != HDF_SUCCESS) { - HDF_LOGE("Parse primitive data of Activation operator failed."); - return nullptr; - } - - auto prim = std::make_unique(); - prim->value.type = mindspore::schema::PrimitiveType_Activation; - - auto attr = new (std::nothrow) mindspore::schema::ActivationT; - if (attr == nullptr) { - HDF_LOGE("Create Activation primitive failed."); - return nullptr; - } - - attr->alpha = actAttr.alpha; - attr->min_val = actAttr.minVal; - attr->max_val = actAttr.maxVal; - attr->approximate = actAttr.approximate; - attr->activation_type = static_cast(actAttr.activationType); - - prim->value.value = attr; - return prim; -} - -PrimUniquePtr GetQuantDTypeCastPrimitive(const std::vector& primitive) -{ - QuantDTypeCast quantAttr; - auto ret = ParsePrimitive(primitive, quantAttr, QuantDTypeCastBlockUnmarshalling); - if (ret != HDF_SUCCESS) { - HDF_LOGE("Parse primitive data of QuantDTypeCast operator failed."); - return nullptr; - } - - auto prim = std::make_unique(); - prim->value.type = mindspore::schema::PrimitiveType_QuantDTypeCast; - - auto attr = new (std::nothrow) mindspore::schema::QuantDTypeCastT; - if (attr == nullptr) { - HDF_LOGE("Create QuantDTypeCast primitive failed."); - return nullptr; - } - - attr->src_t = quantAttr.srcT; - attr->dst_t = quantAttr.dstT; - prim->value.value = attr; - return prim; -} - -PrimUniquePtr GetMulFusionPrimitive(const std::vector& primitive) -{ - MulFusion mulAttr; - auto ret = ParsePrimitive(primitive, mulAttr, MulFusionBlockUnmarshalling); - if (ret != HDF_SUCCESS) { - HDF_LOGE("Parse primitive data of MulFusion operator failed."); - return nullptr; - } - - auto prim = std::make_unique(); - prim->value.type = mindspore::schema::PrimitiveType_MulFusion; - - auto attr = new (std::nothrow) mindspore::schema::MulFusionT; - if (attr == nullptr) { - HDF_LOGE("Create MulFusion primitive failed."); - return nullptr; - } - - attr->activation_type = static_cast(mulAttr.activationType); - prim->value.value = attr; - return prim; -} - -REGISTER_NODE(Activation, NodeType::NODE_TYPE_ACTIVATION, GetActivationPrimitive); -REGISTER_NODE(AddFusion, NodeType::NODE_TYPE_ADD_FUSION, GetAddPrimitive); -REGISTER_NODE(AvgPoolFusion, NodeType::NODE_TYPE_AVGPOOL_FUSION, GetAvgPoolPrimitive); -REGISTER_NODE(Concat, NodeType::NODE_TYPE_CONCAT, GetConcatPrimitive); -REGISTER_NODE(Conv2DFusion, NodeType::NODE_TYPE_CONV2D_FUSION, GetConv2dPrimitive); -REGISTER_NODE(FullConnection, NodeType::NODE_TYPE_FULL_CONNECTION, 
GetFullConnectionPrimitive); -REGISTER_NODE(MaxPoolFusion, NodeType::NODE_TYPE_MAX_POOL_FUSION, GetMaxPoolFusionPrimitive); -REGISTER_NODE(MatMulFusion, NodeType::NODE_TYPE_MATMUL_FUSION, GetMatMulFusionPrimitive); -REGISTER_NODE(Reshape, NodeType::NODE_TYPE_RESHAPE, GetReshapePrimitive); -REGISTER_NODE(Softmax, NodeType::NODE_TYPE_SOFTMAX, GetSoftmaxPrimitive); -REGISTER_NODE(ScaleFusion, NodeType::NODE_TYPE_SCALE_FUSION, GetScaleFusionPrimitive); -REGISTER_NODE(QuantDTypeCast, NodeType::NODE_TYPE_QUANT_DTYPE_CAST, GetQuantDTypeCastPrimitive); -REGISTER_NODE(MulFusion, NodeType::NODE_TYPE_MUL_FUSION, GetMulFusionPrimitive); -} // namespace V2_0 -} // namespace Nnrt -} // namespace HDI -} // namespace OHOS \ No newline at end of file diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/src/node_registry.cpp b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/node_registry.cpp deleted file mode 100644 index f6537ad..0000000 --- a/example/drivers/nnrt/v2_0/hdi_cpu_service/src/node_registry.cpp +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright (c) 2022 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "node_registry.h" - -#include "hdf_log.h" - -namespace OHOS { -namespace HDI { -namespace Nnrt { -namespace V2_0 { -NodeRegistry& NodeRegistry::GetSingleton() -{ - static NodeRegistry registry; - return registry; -} - -NodeRegistry::Registrar::Registrar(NodeType type, std::function&)> nodeFunc) -{ - auto& registry = NodeRegistry::GetSingleton(); - if (registry.m_nodeRegs.find(type) != registry.m_nodeRegs.end()) { - HDF_LOGW("Node has been registered. nodeType=%d", type); - } else { - registry.m_nodeRegs[type] = nodeFunc; - } -} - -std::function&)> NodeRegistry::GetNodeFunc(NodeType type) const -{ - if (m_nodeRegs.find(type) == m_nodeRegs.end()) { - HDF_LOGW("Node type is not found. nodeType=%d", type); - return nullptr; - } - - return m_nodeRegs.at(type); -} - -bool NodeRegistry::IsNodeTypeExist(NodeType type) const -{ - if (m_nodeRegs.find(type) == m_nodeRegs.end()) { - return false; - } - return true; -} -} // namespace V2_0 -} // namespace Nnrt -} // namespace HDI -} // namespace OHOS \ No newline at end of file diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/src/prepared_model_service.cpp b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/prepared_model_service.cpp deleted file mode 100644 index 1d3edf6..0000000 --- a/example/drivers/nnrt/v2_0/hdi_cpu_service/src/prepared_model_service.cpp +++ /dev/null @@ -1,461 +0,0 @@ -/* - * Copyright (c) 2022 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "prepared_model_service.h" - -#include -#include "securec.h" -#include "hdf_log.h" - -#include "shared_buffer_parser.h" - -namespace OHOS { -namespace HDI { -namespace Nnrt { -namespace V2_0 { -PreparedModelService::PreparedModelService(std::shared_ptr context) - : m_context(context) {} - -PreparedModelService::~PreparedModelService() -{ - if (m_cacheBuffer != nullptr) { - m_cacheBuffer->CloseAshmem(); - } - - for (auto& inputAsh : m_inputAshmems) { - inputAsh->UnmapAshmem(); - inputAsh->CloseAshmem(); - } - - for (auto& outputAsh : m_outputAshmems) { - outputAsh->UnmapAshmem(); - outputAsh->CloseAshmem(); - } -} - -int32_t PreparedModelService::ExportModelCache(std::vector& modelCache) -{ - if (!modelCache.empty()) { - HDF_LOGE("The parameters of ExportModelCache should be an empty vector."); - return HDF_ERR_INVALID_PARAM; - } - - if (m_cacheBuffer != nullptr) { - auto fd = m_cacheBuffer->GetAshmemFd(); - auto size = m_cacheBuffer->GetAshmemSize(); - - // SharedBuffer: fd, bufferSize, offset, dataSize - modelCache.emplace_back(SharedBuffer{fd, size, 0, size}); - return HDF_SUCCESS; - } - - auto size = m_builder.GetSize(); - auto buffer = m_builder.GetBufferPointer(); - const char* name = m_graph != nullptr ? m_graph->name.c_str() : "CacheModel"; - sptr cache = Ashmem::CreateAshmem(name, size); - if (cache == nullptr) { - HDF_LOGE("Create shared memory failed."); - return HDF_ERR_MALLOC_FAIL; - } - - bool ret = cache->MapReadAndWriteAshmem(); - if (!ret) { - HDF_LOGE("Map fd to write cache failed."); - return HDF_FAILURE; - } - - ret = cache->WriteToAshmem(buffer, size, 0); - cache->UnmapAshmem(); - if (!ret) { - HDF_LOGE("Write cache failed."); - return HDF_FAILURE; - } - - m_cacheBuffer = cache; - - // SharedBuffer: fd, bufferSize, offset, dataSize - modelCache.emplace_back(SharedBuffer {cache->GetAshmemFd(), cache->GetAshmemSize(), 0, cache->GetAshmemSize()}); - return HDF_SUCCESS; -} - -int32_t PreparedModelService::Run(const std::vector& inputs, const std::vector& outputs, - std::vector>& outputsDims, std::vector& isOutputBufferEnough) -{ - auto ret = SetInputs(inputs); - if (ret != HDF_SUCCESS) { - HDF_LOGE("Inputs tensor is invalid."); - return ret; - } - - if (!m_isDynamicShape) { - ret = SetOutputs(outputs); - if (ret != HDF_SUCCESS) { - HDF_LOGE("Output tensor is invalid."); - ResetInputAndOutput(); - return ret; - } - } - - auto msRet = m_model->Predict(m_inputs, &m_outputs); - if (msRet != mindspore::kSuccess) { - HDF_LOGE("Run model failed."); - ResetInputAndOutput(); - return HDF_FAILURE; - } - - ret = UpdateOutput(outputs, outputsDims, isOutputBufferEnough); - if (ret != HDF_SUCCESS) { - HDF_LOGE("Update output dimension or data failed."); - ResetInputAndOutput(); - return ret; - } - - ResetInputAndOutput(); - - return HDF_SUCCESS; -} - -int32_t PreparedModelService::GetInputDimRanges(std::vector>& minInputDims, - std::vector>& maxInputDims) -{ - if (m_inputDims.empty()) { - HDF_LOGE("Model has not been prepared yet."); - return HDF_ERR_INVALID_PARAM; - } - - minInputDims.clear(); - maxInputDims.clear(); - - for (auto inputShape : m_inputDims) { - std::vector minInputShape; - std::vector maxInputShape; - for (auto dim : inputShape) { - if (dim != DYNAMIC_SHAPE_FLAG) { // Min and max are same if the dimension is fixed. 
- if (dim <= 0) { - HDF_LOGE("Dimesion value is invalid."); - return HDF_ERR_INVALID_PARAM; - } - minInputShape.push_back(static_cast(dim)); - maxInputShape.push_back(static_cast(dim)); - } else { // Dimension range is [1, 10]. - minInputShape.push_back(1); - maxInputShape.push_back(10); - } - } - minInputDims.push_back(std::move(minInputShape)); - maxInputDims.push_back(std::move(maxInputShape)); - } - - return HDF_SUCCESS; -} - -int32_t PreparedModelService::UpdateOutput(const std::vector& outputs, - std::vector>& outputsDims, std::vector& isOutputBufferEnough) -{ - bool isEnough {true}; - size_t outputSize = m_outputs.size(); - isOutputBufferEnough.resize(outputSize, true); - for (size_t i = 0; i < outputSize; i++) { - auto& msOutput = m_outputs[i]; - auto& output = outputs[i]; - - auto msShape = msOutput.Shape(); - outputsDims.emplace_back(msShape.begin(), msShape.end()); - - auto dataSize = msOutput.DataSize(); - if (dataSize > output.data.bufferSize) { - HDF_LOGE("Output buffer is not enough. actual size %{public}zu, buffer size %{public}u", - dataSize, output.data.bufferSize); - isOutputBufferEnough[i] = false; - isEnough= false; - } - - if (isEnough && m_isDynamicShape) { - auto msData = msOutput.MutableData(); - SharedBufferParser parser; - auto ret = parser.Init(output.data); - if (ret != HDF_SUCCESS) { - HDF_LOGE("Parse %zu th output data failed.", i); - return HDF_ERR_INVALID_PARAM; - } - - auto data = parser.GetBufferPtr(); - auto memRet = memcpy_s(data, dataSize, msData, dataSize); - if (memRet != EOK) { - HDF_LOGE("Copy output memory failed."); - return HDF_FAILURE; - } - } - } - - return HDF_SUCCESS; -} - -void PreparedModelService::ResetInputAndOutput() -{ - for (auto& msInput : m_inputs) { - msInput.SetData(nullptr); - } - - if (!m_isDynamicShape) { - for (auto& msOutput : m_outputs) { - msOutput.SetData(nullptr); - } - } -} - -int32_t PreparedModelService::Compile(std::shared_ptr graph) -{ - if (graph == nullptr) { - HDF_LOGE("Graph cannot be nullptr"); - return HDF_ERR_INVALID_PARAM; - } - for (auto i : graph->inputIndex) { - auto inputShape = graph->allTensors[i]->dims; - auto iter = std::find(inputShape.begin(), inputShape.end(), DYNAMIC_SHAPE_FLAG); - if (iter != inputShape.end()) { - m_isDynamicShape = true; - break; - } - } - auto offset = mindspore::schema::MetaGraph::Pack(m_builder, graph.get()); - m_builder.Finish(offset); - mindspore::schema::FinishMetaGraphBuffer(m_builder, offset); - auto modelSize = m_builder.GetSize(); - uint8_t* modelBuffer = m_builder.GetBufferPointer(); - if (modelBuffer == nullptr) { - HDF_LOGE("Model is invalid."); - return HDF_FAILURE; - } - - m_model = std::make_shared(); - mindspore::Status msRet = m_model->Build(modelBuffer, modelSize, mindspore::kMindIR, m_context); - if (msRet != mindspore::kSuccess) { - HDF_LOGE("Prepare model failed, please make sure model is validate."); - return HDF_FAILURE; - } - - auto ret = GetMSInputsAndOutputs(); - if (ret != HDF_SUCCESS) { - HDF_LOGE("Model without inputs or outputs is invalid."); - return ret; - } - - for (auto input : m_inputs) { - m_inputDims.push_back(input.Shape()); - } - - return HDF_SUCCESS; -} - -int32_t PreparedModelService::Compile(const void* modelBuffer, size_t length) -{ - if (modelBuffer == nullptr || length == 0) { - HDF_LOGE("ModelBuffer cannot be nullptr and length cannot be zero."); - return HDF_ERR_INVALID_PARAM; - } - - m_model = std::make_shared(); - mindspore::Status msRet = m_model->Build(modelBuffer, length, mindspore::kMindIR, m_context); - if (msRet != 
mindspore::kSuccess) { - HDF_LOGE("Prepare model from cache failed, please make sure model cache is valid."); - return HDF_FAILURE; - } - - auto ret = GetMSInputsAndOutputs(); - if (ret != HDF_SUCCESS) { - HDF_LOGE("Model without inputs or outputs is invalid."); - return ret; - } - - for (auto input : m_inputs) { - auto shapes = input.Shape(); - if (std::find(shapes.begin(), shapes.end(), DYNAMIC_SHAPE_FLAG) != shapes.end()) { - m_isDynamicShape = true; - break; - } - } - - for (auto input : m_inputs) { - m_inputDims.push_back(input.Shape()); - } - - return HDF_SUCCESS; -} - -int32_t PreparedModelService::SetInputs(const std::vector& inputs) -{ - if (inputs.size() != m_inputs.size()) { - HDF_LOGE("inputs size is invalid. expect: %zu, actual: %zu", m_inputs.size(), inputs.size()); - return HDF_ERR_INVALID_PARAM; - } - for (auto& ash : m_inputAshmems) { - ash->UnmapAshmem(); - ash->CloseAshmem(); - } - m_inputAshmems.clear(); - - int32_t ret {0}; - size_t inputSize = m_inputs.size(); - std::vector> tmpAllDims; - for (size_t i = 0; i < inputSize; i++) { - auto& input = inputs[i]; - auto& msInput = m_inputs[i]; - ret = CompareTensor(input, msInput); - if (ret != HDF_SUCCESS) { - HDF_LOGE("Inputs tensor is not match that of model. Please check input tensor."); - return ret; - } - tmpAllDims.emplace_back(input.dimensions.begin(), input.dimensions.end()); - } - - if (m_isDynamicShape) { - auto msRet = m_model->Resize(m_inputs, tmpAllDims); - if (msRet != mindspore::kSuccess) { - HDF_LOGE("Resize for dynamic inputs failed."); - return HDF_FAILURE; - } - ret = GetMSInputsAndOutputs(); - if (ret != HDF_SUCCESS) { - HDF_LOGE("Get ms inputs or outputs failed after resize."); - return ret; - } - } - - for (size_t i = 0; i < inputSize; i++) { - auto& input = inputs[i]; - auto& msInput = m_inputs[i]; - sptr ashptr = ParseBuffer(input.data); - if (ashptr == nullptr) { - HDF_LOGE("Parse %zuth input data failed.", i); - return HDF_ERR_INVALID_PARAM; - } - - auto data = const_cast(ashptr->ReadFromAshmem(input.data.dataSize, 0)); - msInput.SetData(data); - m_inputAshmems.emplace_back(ashptr); - } - return HDF_SUCCESS; -} - -int32_t PreparedModelService::SetOutputs(const std::vector& outputs) -{ - HDF_LOGI("Start Set outputs, m_outputs size=%zu", m_outputs.size()); - if (outputs.size() != m_outputs.size()) { - HDF_LOGE("outputs size is invalid. 
expect: %{public}zu, actual: %{public}zu", m_outputs.size(), outputs.size()); - return HDF_ERR_INVALID_PARAM; - } - for (auto ash : m_outputAshmems) { - ash->UnmapAshmem(); - ash->CloseAshmem(); - } - m_outputAshmems.clear(); - - for (size_t i = 0; i < m_outputs.size(); i++) { - auto& output = outputs[i]; - auto& msOutput = m_outputs[i]; - - sptr ashptr = ParseBuffer(output.data); - if (ashptr == nullptr) { - HDF_LOGE("Parse %{public}zu th output data failed.", i); - return HDF_ERR_INVALID_PARAM; - } - - auto data = const_cast(ashptr->ReadFromAshmem(output.data.dataSize, 0)); - msOutput.SetAllocator(nullptr); - msOutput.SetData(data); - m_outputAshmems.emplace_back(ashptr); - } - return HDF_SUCCESS; -} - -int32_t PreparedModelService::GetMSInputsAndOutputs() -{ - m_inputs = m_model->GetInputs(); - if (m_inputs.empty()) { - HDF_LOGE("Get inputs failed."); - return HDF_FAILURE; - } - - m_outputs = m_model->GetOutputs(); - if (m_outputs.empty()) { - HDF_LOGE("Get outputs failed."); - return HDF_FAILURE; - } - return HDF_SUCCESS; -} - -int32_t PreparedModelService::CompareTensor(const IOTensor& tensor, const mindspore::MSTensor& msTensor) -{ - auto dataType = static_cast(msTensor.DataType()); - if (tensor.dataType != dataType) { - HDF_LOGE("Data type of tensor dose not match that of model."); - return HDF_ERR_INVALID_PARAM; - } - - auto format = static_cast(msTensor.format()); - if (tensor.format != format) { - HDF_LOGE("Format of tensor dose not match that of model."); - return HDF_ERR_INVALID_PARAM; - } - - if (tensor.dimensions.size() != msTensor.Shape().size()) { - HDF_LOGE("Rank of tensor dose not match that of model."); - return HDF_ERR_INVALID_PARAM; - } - - for (size_t i = 0; i < tensor.dimensions.size(); i++) { - if (msTensor.Shape()[i] != DYNAMIC_SHAPE_FLAG && tensor.dimensions[i] != msTensor.Shape()[i]) { - HDF_LOGE("The Shape of tensor dose not match that of model."); - return HDF_ERR_INVALID_PARAM; - } - } - - return HDF_SUCCESS; -} - -sptr PreparedModelService::ParseBuffer(const SharedBuffer& buffer) -{ - if (buffer.fd == -1) { - HDF_LOGE("Invalid buffer fd, it cannot be -1."); - return nullptr; - } - - HDF_LOGW("NNRT buffer fd=%{public}d, length=%{public}u", buffer.fd, buffer.dataSize); - - sptr ashptr = new (std::nothrow) Ashmem(buffer.fd, buffer.bufferSize); - if (ashptr == nullptr) { - HDF_LOGE("Create shared memory failed."); - return nullptr; - } - - if (!ashptr->MapReadAndWriteAshmem()) { - HDF_LOGE("Map buffer fd to address failed."); - return nullptr; - } - - const void* data = ashptr->ReadFromAshmem(buffer.dataSize, buffer.offset); - if (data == nullptr) { - HDF_LOGE("Get data address failed."); - ashptr->UnmapAshmem(); - ashptr->CloseAshmem(); - return nullptr; - } - return ashptr; -} -} // V2_0 -} // Nnrt -} // HDI -} // OHOS diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/src/shared_buffer_parser.cpp b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/shared_buffer_parser.cpp deleted file mode 100644 index 69416b6..0000000 --- a/example/drivers/nnrt/v2_0/hdi_cpu_service/src/shared_buffer_parser.cpp +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright (c) 2022 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef OHOS_HDI_NNR_V2_0_UTILS_H -#define OHOS_HDI_NNR_V2_0_UTILS_H - -#include "shared_buffer_parser.h" - -#include -#include "ashmem.h" -#include "v2_0/nnrt_types.h" -#include "hdf_log.h" - -namespace OHOS { -namespace HDI { -namespace Nnrt { -namespace V2_0 { -SharedBufferParser::~SharedBufferParser() -{ - if (m_ashptr != nullptr) { - m_ashptr->UnmapAshmem(); - m_ashptr->CloseAshmem(); - m_bufferAddr = nullptr; - } -} - -int32_t SharedBufferParser::Init(const std::string& name, int32_t size) -{ - HDF_LOGI("Init SharedBufferParser from name and size."); - sptr ashptr = Ashmem::CreateAshmem(name.c_str(), size); - if (ashptr == nullptr) { - HDF_LOGE("Create ashmen from size failed."); - return HDF_FAILURE; - } - - SharedBuffer buffer; - buffer.fd = ashptr->GetAshmemFd(); - buffer.bufferSize = ashptr->GetAshmemSize(); - buffer.offset = 0; - buffer.dataSize = size; - - auto ret = Init(buffer); - if (ret != HDF_SUCCESS) { - HDF_LOGE("Init SharedBufferParser failed."); - return ret; - } - return HDF_SUCCESS; -} - -int32_t SharedBufferParser::Init(const SharedBuffer& buffer) -{ - if (buffer.fd == INVALID_FD) { - HDF_LOGE("Invalid buffer fd, it cannot be %{public}d.", INVALID_FD); - return HDF_ERR_INVALID_PARAM; - } - - m_ashptr = new (std::nothrow) Ashmem(buffer.fd, buffer.bufferSize); - if (m_ashptr == nullptr) { - HDF_LOGE("Create ashmem failed."); - return HDF_FAILURE; - } - - if (!m_ashptr->MapReadAndWriteAshmem()) { - HDF_LOGE("Map buffer fd to address failed."); - return HDF_FAILURE; - } - - auto bufferAddr = m_ashptr->ReadFromAshmem(buffer.dataSize, buffer.offset); - if (bufferAddr == nullptr) { - HDF_LOGE("Invalid dataSize or offset of SharedBuffer."); - return HDF_ERR_INVALID_PARAM; - } - m_bufferAddr = const_cast(bufferAddr); - - m_buffer = buffer; - return HDF_SUCCESS; -} - -void* SharedBufferParser::GetBufferPtr() -{ - return m_bufferAddr; -} - -SharedBuffer SharedBufferParser::GetBuffer() -{ - return m_buffer; -} -} // V2_0 -} // Nnrt -} // HDI -} // OHOS -#endif // OHOS_HDI_NNR_V2_0_UTILS_H \ No newline at end of file diff --git a/example/drivers/nnrt/v2_0/hdi_cpu_service/src/validation.cpp b/example/drivers/nnrt/v2_0/hdi_cpu_service/src/validation.cpp deleted file mode 100644 index 03521c7..0000000 --- a/example/drivers/nnrt/v2_0/hdi_cpu_service/src/validation.cpp +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (c) 2022 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-#include "validation.h"
-
-namespace OHOS {
-namespace HDI {
-namespace Nnrt {
-namespace V2_0 {
-int32_t ValidatePerformanceMode(PerformanceMode mode)
-{
-    if (mode < PerformanceMode::PERFORMANCE_NONE || mode > PerformanceMode::PERFORMANCE_EXTREME) {
-        return false;
-    }
-
-    return true;
-}
-
-int32_t ValidatePriority(Priority priority)
-{
-    if (priority < Priority::PRIORITY_NONE || priority > Priority::PRIORITY_HIGH) {
-        return false;
-    }
-
-    return true;
-}
-
-int32_t ValidateDataType(DataType dataType)
-{
-    if (dataType < DataType::DATA_TYPE_UNKNOWN || dataType > DataType::DATA_TYPE_FLOAT64) {
-        return false;
-    }
-
-    if (dataType > DataType::DATA_TYPE_UNKNOWN && dataType < DataType::DATA_TYPE_BOOL) {
-        return false;
-    }
-
-    if (dataType > DataType::DATA_TYPE_BOOL && dataType < DataType::DATA_TYPE_INT8) {
-        return false;
-    }
-
-    if (dataType > DataType::DATA_TYPE_UINT64 && dataType < DataType::DATA_TYPE_FLOAT16) {
-        return false;
-    }
-
-    return true;
-}
-
-int32_t ValidateFormat(Format format)
-{
-    if (format < Format::FORMAT_NONE || format > Format::FORMAT_NHWC) {
-        return false;
-    }
-
-    return true;
-}
-} // namespace V2_0
-} // namespace Nnrt
-} // namespace HDI
-} // namespace OHOS
\ No newline at end of file
-- 
Gitee

From b365b7469d9e1b3881d651d41452226bc8464c60 Mon Sep 17 00:00:00 2001
From: wangchuanxia
Date: Wed, 24 May 2023 10:38:55 +0800
Subject: [PATCH 12/12] codedex

Signed-off-by: wangchuanxia
---
 frameworks/native/compilation.cpp             | 79 +++++++++++--------
 frameworks/native/compilation.h               |  1 +
 frameworks/native/device.h                    |  1 -
 frameworks/native/device_discover_v2_0.cpp    | 12 +--
 frameworks/native/hdi_device_v2_0.cpp         |  6 +-
 frameworks/native/hdi_prepared_model_v2_0.cpp |  6 +-
 frameworks/native/hdi_returncode_utils.h      |  8 +-
 7 files changed, 66 insertions(+), 47 deletions(-)

diff --git a/frameworks/native/compilation.cpp b/frameworks/native/compilation.cpp
index 1c06087..1d54550 100644
--- a/frameworks/native/compilation.cpp
+++ b/frameworks/native/compilation.cpp
@@ -606,41 +606,9 @@ OH_NN_ReturnCode Compilation::LoadCacheBuild(std::shared_ptr<PreparedModel>& pre
     return OH_NN_SUCCESS;
 }
 
-OH_NN_ReturnCode Compilation::InnerBuild()
+OH_NN_ReturnCode Compilation::BuildCacheModel(std::shared_ptr<PreparedModel>& preparedModel)
 {
     OH_NN_ReturnCode ret;
-
     std::string cacheInfoPath = m_cachePath + "cache_info.nncache";
     if (access(cacheInfoPath.c_str(), 0) != 0) {
         ret = GenCacheBuild(preparedModel);
@@ -680,6 +648,51 @@
     }
 
     m_isBuild = true;
+
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode Compilation::InnerBuild()
+{
+    OH_NN_ReturnCode ret;
+    std::shared_ptr<PreparedModel> preparedModel;
+
+    // Prepare from offline model.
+    bool isOfflineModel {false};
+    ret = IsOfflineModel(isOfflineModel);
+    if (ret != OH_NN_SUCCESS) {
+        LOGE("[Compilation] Failed when identifying the offline model.");
+        return ret;
+    }
+
+    if (isOfflineModel) {
+        ret = BuildOfflineModel(preparedModel);
+        if (ret != OH_NN_SUCCESS) {
+            LOGE("[Compilation] Failed to build offline model.");
+            return ret;
+        }
+
+        m_isBuild = true;
+        return OH_NN_SUCCESS;
+    }
+
+    if (m_cachePath.empty()) {
+        ret = NormalBuild(preparedModel);
+        if (ret != OH_NN_SUCCESS) {
+            LOGE("Fail to normally build.");
+            return ret;
+        }
+
+        m_isBuild = true;
+        return OH_NN_SUCCESS;
+    }
+
+    ret = BuildCacheModel(preparedModel);
+    if (ret != OH_NN_SUCCESS) {
+        LOGE("Fail to build cache model.");
+        return ret;
+    }
+
     return OH_NN_SUCCESS;
 }
diff --git a/frameworks/native/compilation.h b/frameworks/native/compilation.h
index 3d81fe9..5fa3099 100644
--- a/frameworks/native/compilation.h
+++ b/frameworks/native/compilation.h
@@ -74,6 +74,7 @@ private:
     OH_NN_ReturnCode CheckCacheModel(const ModelCacheInfo& modelCacheInfo,
         std::vector<ModelBuffer>& modelBuffers) const;
     OH_NN_ReturnCode NormalBuild(std::shared_ptr<PreparedModel>& preparedModel);
+    OH_NN_ReturnCode BuildCacheModel(std::shared_ptr<PreparedModel>& preparedModel);
     OH_NN_ReturnCode GenCacheBuild(std::shared_ptr<PreparedModel>& preparedModel);
     OH_NN_ReturnCode ReGenCacheBuild(uint32_t fileNumber, std::shared_ptr<PreparedModel>& preparedModel);
     OH_NN_ReturnCode LoadCacheBuild(std::shared_ptr<PreparedModel>& preparedModel, const ModelCacheInfo& cacheInfo);
diff --git a/frameworks/native/device.h b/frameworks/native/device.h
index bc0989f..e4ae277 100644
--- a/frameworks/native/device.h
+++ b/frameworks/native/device.h
@@ -56,7 +56,6 @@ public:
         const ModelConfig& config,
         std::shared_ptr<PreparedModel>& preparedModel) = 0;
 
-    virtual void* AllocateBuffer(size_t length) = 0;
     virtual OH_NN_ReturnCode ReleaseBuffer(const void* buffer) = 0;
 };
diff --git a/frameworks/native/device_discover_v2_0.cpp b/frameworks/native/device_discover_v2_0.cpp
index fca55c3..7b0ad86 100644
--- a/frameworks/native/device_discover_v2_0.cpp
+++ b/frameworks/native/device_discover_v2_0.cpp
@@ -32,9 +32,9 @@ std::shared_ptr<Device> DiscoverHDIDevicesV2_0(std::string& deviceName, std::str
     auto ret = iDevice->GetDeviceName(deviceName);
     if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) {
-        if (ret < 0) {
+        if (ret < V2_0::NNRT_ReturnCode::NNRT_SUCCESS) {
             LOGW("Get device name failed. An error occurred in HDI, errorcode is %{public}d.", ret);
-        } else if (ret > 0) {
+        } else {
             OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode nnrtRet = static_cast<V2_0::NNRT_ReturnCode>(ret);
             LOGW("Get device name failed. Errorcode is %{public}s.", ConverterRetToString(nnrtRet).c_str());
         }
@@ -43,9 +43,9 @@ std::shared_ptr<Device> DiscoverHDIDevicesV2_0(std::string& deviceName, std::str
     ret = iDevice->GetVendorName(vendorName);
     if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) {
-        if (ret < 0) {
+        if (ret < V2_0::NNRT_ReturnCode::NNRT_SUCCESS) {
             LOGW("Get vendor name failed. An error occurred in HDI, errorcode is %{public}d.", ret);
-        } else if (ret > 0) {
+        } else {
             OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode nnrtRet = static_cast<V2_0::NNRT_ReturnCode>(ret);
             LOGW("Get vendor name failed. Errorcode is %{public}s.", ConverterRetToString(nnrtRet).c_str());
         }
@@ -55,9 +55,9 @@ std::shared_ptr<Device> DiscoverHDIDevicesV2_0(std::string& deviceName, std::str
     std::pair<uint32_t, uint32_t> hdiVersion;
     ret = iDevice->GetVersion(hdiVersion.first, hdiVersion.second);
     if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) {
-        if (ret < 0) {
+        if (ret < V2_0::NNRT_ReturnCode::NNRT_SUCCESS) {
             LOGW("Get version failed. An error occurred in HDI, errorcode is %{public}d.", ret);
-        } else if (ret > 0) {
+        } else {
             OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode nnrtRet = static_cast<OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode>(ret);
             LOGW("Get version failed. Errorcode is %{public}s.", ConverterRetToString(nnrtRet).c_str());
         }
diff --git a/frameworks/native/hdi_device_v2_0.cpp b/frameworks/native/hdi_device_v2_0.cpp
index 2c236be..f2a8865 100644
--- a/frameworks/native/hdi_device_v2_0.cpp
+++ b/frameworks/native/hdi_device_v2_0.cpp
@@ -28,6 +28,8 @@
 
 namespace OHOS {
 namespace NeuralNetworkRuntime {
+const size_t OFFLINE_MODEL_MINIMUM_INPUT_SIZE = 2;
+
 namespace {
 OH_NN_DeviceType TransHDIDeviceV2_0Type(const V2_0::DeviceType& iDeviceType)
 {
@@ -433,7 +435,7 @@ OH_NN_ReturnCode HDIDeviceV2_0::GetOfflineModelFromLiteGraph(std::shared_ptr<mindspore::lite::LiteGraph>
 {
     size_t inputNum = model->all_nodes_[0]->input_indices_.size();
-    if (inputNum < (size_t)2) {
+    if (inputNum < OFFLINE_MODEL_MINIMUM_INPUT_SIZE) {
         LOGE("LiteGraph with offline model should have at least two input tensors, only get %zu.", inputNum);
         return OH_NN_INVALID_PARAMETER;
     }
@@ -614,7 +616,7 @@ OH_NN_ReturnCode HDIDeviceV2_0::PrepareOfflineModel(std::shared_ptr<mindspore::lite::LiteGraph>
     std::vector<uint8_t> valueFromCustomPrimitive;
     std::vector<uint8_t> value;
     std::map<std::string, std::vector<uint8_t>> extensions;
-    std::vector<const mindspore::schema::Attribute*> attributes =\
+    std::vector<const mindspore::schema::Attribute*> attributes =
         mindspore::lite::MindIR_Custom_GetAttr(model->all_nodes_[0]->primitive_);
     for (const auto& attribute : attributes) {
         key = mindspore::lite::MindIR_Attribute_GetName(*attribute);
diff --git a/frameworks/native/hdi_prepared_model_v2_0.cpp b/frameworks/native/hdi_prepared_model_v2_0.cpp
index 1fce29d..37777f9 100644
--- a/frameworks/native/hdi_prepared_model_v2_0.cpp
+++ b/frameworks/native/hdi_prepared_model_v2_0.cpp
@@ -153,9 +153,13 @@ OH_NN_ReturnCode HDIPreparedModelV2_0::Run(const std::vector<IOTensor>& inputs,
     }
 
     auto ret = m_hdiPreparedModel->Run(iInputTensors, iOutputTensors, outputsDims);
-    if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS || outputsDims.empty()) {
+    if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) {
         return CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Run model failed");
     }
+    if (outputsDims.empty()) {
+        LOGE("Run failed, outputsDims is empty.");
+        return OH_NN_UNAVALIDABLE_DEVICE;
+    }
 
     return OH_NN_SUCCESS;
 }
diff --git a/frameworks/native/hdi_returncode_utils.h b/frameworks/native/hdi_returncode_utils.h
index 840ff6a..7d82e96 100644
--- a/frameworks/native/hdi_returncode_utils.h
+++ b/frameworks/native/hdi_returncode_utils.h
@@ -26,7 +26,7 @@ namespace OHOS {
 namespace NeuralNetworkRuntime {
 inline std::string ConverterRetToString(OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode returnCode)
 {
-    static std::unordered_map<V2_0::NNRT_ReturnCode, std::string> nnrtRet2StringMap{
+    static std::unordered_map<V2_0::NNRT_ReturnCode, std::string> nnrtRet2StringMap {
         {V2_0::NNRT_ReturnCode::NNRT_SUCCESS, "NNRT_SUCCESS"},
         {V2_0::NNRT_ReturnCode::NNRT_FAILED, "NNRT_FAILED"},
         {V2_0::NNRT_ReturnCode::NNRT_NULL_PTR, "NNRT_NULL_PTR"},
@@ -64,7 +64,7 @@ inline std::string ConverterRetToString(OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode r
     };
 
     if (nnrtRet2StringMap.find(returnCode) == nnrtRet2StringMap.end()) {
-        return "";
+        return "ConverterRetToString failed, returnCode is invalid.";
     }
 
     return nnrtRet2StringMap.at(returnCode);
@@ -73,9 +73,9 @@ inline std::string ConverterRetToString(OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode r
 template<typename T>
 T CheckReturnCode(int32_t ret, T funcRet, const std::string& errorInfo)
 {
-    if (ret < 0) {
+    if (ret < V2_0::NNRT_ReturnCode::NNRT_SUCCESS) {
         LOGE("%{public}s. An error occurred in HDI, errorcode is %{public}d.", errorInfo.c_str(), ret);
-    } else if (ret > 0) {
+    } else if (ret > V2_0::NNRT_ReturnCode::NNRT_SUCCESS) {
         OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode nnrtRet = static_cast<OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode>(ret);
         LOGE("%{public}s. Errorcode is %{public}s.", errorInfo.c_str(), ConverterRetToString(nnrtRet).c_str());
     }
--
Gitee
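
Reviewer note: the hunks above consistently replace bare comparisons against 0 with V2_0::NNRT_ReturnCode::NNRT_SUCCESS. The convention they encode is that a negative HDI return value is a transport-level (IPC/driver) failure, for which only the raw code can be logged, while a positive value is an NNRt service error whose name ConverterRetToString can look up. The following is a minimal, self-contained sketch of that dispatch pattern; the enum values, RetToString helper, and printf logging are illustrative stand-ins for the real V2_0::NNRT_ReturnCode and HiLog macros, and the funcRet fallback behavior is inferred from the call site CheckReturnCode(ret, OH_NN_UNAVALIDABLE_DEVICE, "Run model failed").

#include <cstdint>
#include <cstdio>
#include <string>
#include <unordered_map>

// Simplified stand-in for OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode (illustrative values only).
enum NNRT_ReturnCode : int32_t { NNRT_SUCCESS = 0, NNRT_FAILED = 1 };

std::string RetToString(NNRT_ReturnCode code)
{
    static const std::unordered_map<NNRT_ReturnCode, std::string> retToString {
        {NNRT_SUCCESS, "NNRT_SUCCESS"},
        {NNRT_FAILED, "NNRT_FAILED"},
    };
    auto it = retToString.find(code);
    return (it == retToString.end()) ? "invalid return code" : it->second;
}

// Same dispatch as CheckReturnCode in hdi_returncode_utils.h: negative -> HDI/IPC-layer
// error (log the raw code), positive -> NNRt service error (log its name), and in either
// case hand back the caller-chosen fallback return value.
template<typename T>
T CheckReturnCode(int32_t ret, T funcRet, const std::string& errorInfo)
{
    if (ret < NNRT_SUCCESS) {
        std::printf("%s. An error occurred in HDI, errorcode is %d.\n", errorInfo.c_str(), ret);
    } else if (ret > NNRT_SUCCESS) {
        std::printf("%s. Errorcode is %s.\n", errorInfo.c_str(),
                    RetToString(static_cast<NNRT_ReturnCode>(ret)).c_str());
    }
    return funcRet;
}

int main()
{
    // Mirrors the usage in HDIPreparedModelV2_0::Run, with -1 as an assumed fallback value.
    return CheckReturnCode(NNRT_FAILED, -1, "Run model failed") == -1 ? 0 : 1;
}

One practical consequence of this split, visible in the device_discover_v2_0.cpp hunks, is that once the success case has been excluded, a non-negative code must be a service error, so the old "else if (ret > 0)" branch can safely collapse to a plain "else".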