diff --git a/BUILD.gn b/BUILD.gn
index bc1599a46337dc6058a1d439c31e2c3ce2ea9d92..95f2896a0b1fb5063b35019ddfe65e7a71c1f7fe 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -16,8 +16,7 @@ import("//build/ohos.gni")
 group("nnrt_target") {
   deps = [ "frameworks:libneural_network_runtime" ]
 }
-
-group("nnrt_test_target") {
-  testonly = true
-  deps = [ "test/unittest:unittest" ]
-}
+# group("nnrt_test_target") {
+#   testonly = true
+#   deps = [ "test/unittest:unittest" ]
+# }
diff --git a/bundle.json b/bundle.json
index 06ee22904e6c1691f5b05235fe132cab1cd5e78b..f95ed56f43d37ce745d2a0a1642455b324bbe3d5 100644
--- a/bundle.json
+++ b/bundle.json
@@ -41,9 +41,6 @@
                     "header_base":"//foundation/ai/neural_network_runtime/interfaces/innerkits/c"
                 }
             }
-        ],
-        "test": [
-            "//foundation/ai/neural_network_runtime:nnrt_test_target"
         ]
     }
 }
diff --git a/frameworks/BUILD.gn b/frameworks/BUILD.gn
index 321fb2f9e1c5e4c8319eca89ca275ef9dd641ad6..28ed59f575f634101c1df68df121c6be112e85a2 100644
--- a/frameworks/BUILD.gn
+++ b/frameworks/BUILD.gn
@@ -19,12 +19,16 @@ config("nnrt_config") {

 nnrt_sources = [
   "native/compilation.cpp",
+  "native/device_discover_v1_0.cpp",
+  "native/device_discover_v2_0.cpp",
   "native/device_manager.cpp",
   "native/device_registrar.cpp",
   "native/execution_plan.cpp",
   "native/executor.cpp",
-  "native/hdi_device.cpp",
-  "native/hdi_prepared_model.cpp",
+  "native/hdi_device_v1_0.cpp",
+  "native/hdi_device_v2_0.cpp",
+  "native/hdi_prepared_model_v1_0.cpp",
+  "native/hdi_prepared_model_v2_0.cpp",
   "native/inner_model.cpp",
   "native/memory_manager.cpp",
   "native/neural_network_runtime.cpp",
@@ -122,6 +126,7 @@ ohos_shared_library("libneural_network_runtime") {
   external_deps = [
     "c_utils:utils",
     "drivers_interface_nnrt:libnnrt_proxy_1.0",
+    "drivers_interface_nnrt:libnnrt_proxy_2.0",
     "hdf_core:libhdf_utils",
     "hilog_native:libhilog",
     "hitrace_native:libhitracechain",
diff --git a/frameworks/native/compilation.cpp b/frameworks/native/compilation.cpp
index 0f89fd335c3b801180cbcc12cd680a2b464966b4..e715b934b87d5fbd758a393d9aa68c436c4675af 100644
--- a/frameworks/native/compilation.cpp
+++ b/frameworks/native/compilation.cpp
@@ -579,7 +579,8 @@ OH_NN_ReturnCode Compilation::LoadCacheBuild(std::shared_ptr<PreparedModel>& pre
     OH_NN_ReturnCode ret = CheckCacheModel(cacheInfo, modelBuffers);
     if (ret != OH_NN_SUCCESS) {
         LOGE("[Compilation] Checking cache model failed.");
-        for (size_t i = 0; i < modelBuffers.size(); ++i) {
+        size_t modelBuffersSize = modelBuffers.size();
+        for (size_t i = 0; i < modelBuffersSize; ++i) {
             m_device->ReleaseBuffer(modelBuffers[i].buffer);
             modelBuffers[i].buffer = nullptr;
             modelBuffers[i].length = 0;
@@ -705,7 +706,8 @@ bool Compilation::IsBuild() const

 bool Compilation::IsDynamicShape() const
 {
-    for (size_t i = 0; i < m_inputTensors.size(); ++i) {
+    size_t inputTensorsSize = m_inputTensors.size();
+    for (size_t i = 0; i < inputTensorsSize; ++i) {
         if (m_inputTensors[i]->IsDynamicShape()) {
             return true;
         }
diff --git a/frameworks/native/device.h b/frameworks/native/device.h
index 93415e4bb527e26049cba0563cc3e86bcfa4b148..c34e0432d139de3c9d54934b8f9f2a10620746d8 100644
--- a/frameworks/native/device.h
+++ b/frameworks/native/device.h
@@ -34,6 +34,7 @@ public:
     virtual OH_NN_ReturnCode GetDeviceName(std::string& name) = 0;
     virtual OH_NN_ReturnCode GetVendorName(std::string& name) = 0;
+    virtual OH_NN_ReturnCode GetVersion(std::string& version) = 0;
     virtual OH_NN_ReturnCode GetDeviceType(OH_NN_DeviceType& deviceType) = 0;
     virtual OH_NN_ReturnCode GetDeviceStatus(DeviceStatus& status) = 0;
     virtual OH_NN_ReturnCode GetSupportedOperation(std::shared_ptr<const mindspore::lite::LiteGraph> model,
diff --git a/frameworks/native/hdi_interfaces.h b/frameworks/native/device_discover.h
similarity index 61%
rename from frameworks/native/hdi_interfaces.h
rename to frameworks/native/device_discover.h
index 1d3416ba6f9daff3cd10c3a5ea5bfa2bd02315d4..fd79e65352993ff38b3b4c3c607487bed8127269 100644
--- a/frameworks/native/hdi_interfaces.h
+++ b/frameworks/native/device_discover.h
@@ -13,17 +13,18 @@
  * limitations under the License.
  */

-#ifndef NEURAL_NETWORK_RUNTIME_HDI_INTERFACES_H
-#define NEURAL_NETWORK_RUNTIME_HDI_INTERFACES_H
+#ifndef NEURAL_NETWORK_RUNTIME_DEVICE_DISCOVER_H
+#define NEURAL_NETWORK_RUNTIME_DEVICE_DISCOVER_H

-#include <v1_0/nnrt_types.h>
-#include <v1_0/innrt_device.h>
-#include <v1_0/iprepared_model.h>
+#include <memory>
+#include <string>
+
+#include "device.h"

 namespace OHOS {
 namespace NeuralNetworkRuntime {
-namespace V1_0 = OHOS::HDI::Nnrt::V1_0;
+std::shared_ptr<Device> DiscoverHDIDevicesV1_0(std::string& deviceName, std::string& vendorName, std::string& version);
+std::shared_ptr<Device> DiscoverHDIDevicesV2_0(std::string& deviceName, std::string& vendorName, std::string& version);
 } // namespace NeuralNetworkRuntime
 } // namespace OHOS
-
-#endif // NEURAL_NETWORK_RUNTIME_HDI_INTERFACES_H
\ No newline at end of file
+#endif // NEURAL_NETWORK_RUNTIME_DEVICE_DISCOVER_H
\ No newline at end of file
diff --git a/frameworks/native/device_discover_v1_0.cpp b/frameworks/native/device_discover_v1_0.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e5f802e9d086d61d545320bcb490e7c15f7058d2
--- /dev/null
+++ b/frameworks/native/device_discover_v1_0.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "device_discover.h"
+#include "hdi_device_v1_0.h"
+#include "common/log.h"
+#include "common/utils.h"
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+std::shared_ptr<Device> DiscoverHDIDevicesV1_0(std::string& deviceName, std::string& vendorName, std::string& version)
+{
+    // only one device from HDI now.
+    OHOS::sptr<V1_0::INnrtDevice> iDevice = V1_0::INnrtDevice::Get();
+    if (iDevice == nullptr) {
+        LOGW("Get HDI device failed.");
+        return nullptr;
+    }
+
+    auto hdiRet = iDevice->GetDeviceName(deviceName);
+    if (hdiRet != HDF_SUCCESS) {
+        LOGW("Get device name failed. ErrorCode=%d", hdiRet);
+        return nullptr;
+    }
+    hdiRet = iDevice->GetVendorName(vendorName);
+    if (hdiRet != HDF_SUCCESS) {
+        LOGW("Get vendor name failed. ErrorCode=%d", hdiRet);
+        return nullptr;
+    }
+    std::pair<uint32_t, uint32_t> hdiVersion;
+    hdiRet = iDevice->GetVersion(hdiVersion.first, hdiVersion.second);
+    if (hdiRet != HDF_SUCCESS) {
+        LOGW("Get version failed. ErrorCode=%d", hdiRet);
+        return nullptr;
+    }
+    version = 'v' + std::to_string(hdiVersion.first) + '_' + std::to_string(hdiVersion.second);
+
+    std::shared_ptr<Device> device = CreateSharedPtr<HDIDeviceV1_0>(iDevice);
+    if (device == nullptr) {
+        LOGW("Failed to register device: failed to create a device instance.");
+    }
+    return device;
+}
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
\ No newline at end of file
diff --git a/frameworks/native/device_discover_v2_0.cpp b/frameworks/native/device_discover_v2_0.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..de5e8b792aa8c3cf088a9195cabf61808589b52e
--- /dev/null
+++ b/frameworks/native/device_discover_v2_0.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "device_discover.h"
+#include "hdi_device_v2_0.h"
+#include "common/log.h"
+#include "common/utils.h"
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+std::shared_ptr<Device> DiscoverHDIDevicesV2_0(std::string& deviceName, std::string& vendorName, std::string& version)
+{
+    // only one device from HDI now.
+    OHOS::sptr<V2_0::INnrtDevice> iDevice = V2_0::INnrtDevice::Get();
+    if (iDevice == nullptr) {
+        LOGW("Get HDI device failed.");
+        return nullptr;
+    }
+
+    auto hdiRet = iDevice->GetDeviceName(deviceName);
+    if (hdiRet != HDF_SUCCESS) {
+        LOGW("Get device name failed. ErrorCode=%d", hdiRet);
+        return nullptr;
+    }
+    hdiRet = iDevice->GetVendorName(vendorName);
+    if (hdiRet != HDF_SUCCESS) {
+        LOGW("Get vendor name failed. ErrorCode=%d", hdiRet);
+        return nullptr;
+    }
+    std::pair<uint32_t, uint32_t> hdiVersion;
+    hdiRet = iDevice->GetVersion(hdiVersion.first, hdiVersion.second);
+    if (hdiRet != HDF_SUCCESS) {
+        LOGW("Get version failed. ErrorCode=%d", hdiRet);
+        return nullptr;
+    }
+    version = 'v' + std::to_string(hdiVersion.first) + '_' + std::to_string(hdiVersion.second);
+
+    std::shared_ptr<Device> device = CreateSharedPtr<HDIDeviceV2_0>(iDevice);
+    if (device == nullptr) {
+        LOGW("Failed to register device: failed to create a device instance.");
+    }
+    return device;
+}
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
\ No newline at end of file
diff --git a/frameworks/native/device_manager.cpp b/frameworks/native/device_manager.cpp
index 6ad79bbc5ffe954305079386c3e6c42c1dcacd85..75ac674f3e2dd3ec9e7ae9bc0ab2e96b79bcfde9 100644
--- a/frameworks/native/device_manager.cpp
+++ b/frameworks/native/device_manager.cpp
@@ -14,9 +14,8 @@
  */

 #include "device_manager.h"
+#include "device_discover.h"

-#include "hdi_interfaces.h"
-#include "hdi_device.h"
 #include "common/log.h"
 #include "common/utils.h"

@@ -70,13 +69,21 @@ const std::string& DeviceManager::GetDeviceName(size_t deviceId)
         return m_tmpDeviceName;
     }

-    m_tmpDeviceName = GenUniqueName(deviceName, vendorName);
+    std::string version;
+    ret = iter->second->GetVersion(version);
+    if (ret != OH_NN_SUCCESS) {
+        LOGE("Get version failed.");
+        return m_tmpDeviceName;
+    }
+
+    m_tmpDeviceName = GenUniqueName(deviceName, vendorName, version);
     return m_tmpDeviceName;
 }

-std::string DeviceManager::GenUniqueName(const std::string& deviceName, const std::string& vendorName) const
+std::string DeviceManager::GenUniqueName(
+    const std::string& deviceName, const std::string& vendorName, const std::string& version) const
 {
-    return deviceName + "_" + vendorName;
+    return deviceName + "_" + vendorName + "_" + version;
 }

 OH_NN_ReturnCode DeviceManager::RegisterDevice(std::function<std::shared_ptr<Device>()> creator)
@@ -106,8 +113,15 @@ OH_NN_ReturnCode DeviceManager::RegisterDevice(std::function<std::shared_ptr<De
+    std::string version;
+    ret = device->GetVersion(version);
+    if (ret != OH_NN_SUCCESS) {
+        LOGE("Get version failed.");
+        return ret;
+    }
+
     const std::lock_guard<std::mutex> lock(m_mtx);
-    std::string uniqueName = GenUniqueName(deviceName, vendorName);
+    std::string uniqueName = GenUniqueName(deviceName, vendorName, version);
     auto setResult = m_uniqueName.emplace(uniqueName);
     if (!setResult.second) {
         LOGE("Device already exists, cannot register again. deviceName=%s, vendorName=%s",
@@ -119,29 +133,10 @@ OH_NN_ReturnCode DeviceManager::RegisterDevice(std::function<std::shared_ptr<De
-void DeviceManager::DiscoverHDIDevices()
+void DeviceManager::AddDevice(const std::string& deviceName, const std::string& vendorName,
+    const std::string& version, std::shared_ptr<Device> device)
 {
-    // only one device from HDI now.
-    OHOS::sptr<V1_0::INnrtDevice> iDevice = V1_0::INnrtDevice::Get();
-    if (iDevice == nullptr) {
-        LOGW("Get HDI device failed.");
-        return;
-    }
-
-    std::string deviceName;
-    std::string vendorName;
-    auto hdiRet = iDevice->GetDeviceName(deviceName);
-    if (hdiRet != HDF_SUCCESS) {
-        LOGW("Get device name failed. ErrorCode=%d", hdiRet);
-        return;
-    }
-    hdiRet = iDevice->GetVendorName(vendorName);
-    if (hdiRet != HDF_SUCCESS) {
-        LOGW("Get vendor name failed. 
ErrorCode=%d", hdiRet); - return; - } - - std::string uniqueName = GenUniqueName(deviceName, vendorName); + std::string uniqueName = GenUniqueName(deviceName, vendorName, version); const std::lock_guard lock(m_mtx); auto setResult = m_uniqueName.emplace(uniqueName); if (!setResult.second) { @@ -150,14 +145,25 @@ void DeviceManager::DiscoverHDIDevices() return; } - std::shared_ptr device = CreateSharedPtr(iDevice); - if (device == nullptr) { - LOGW("Failed to register device, because fail to create device instance."); - return; - } m_devices.emplace(std::hash{}(uniqueName), device); } +void DeviceManager::DiscoverHDIDevices() +{ + std::string deviceName; + std::string vendorName; + std::string version; + std::shared_ptr deviceV1_0 = DiscoverHDIDevicesV1_0(deviceName, vendorName, version); + if (deviceV1_0 != nullptr) { + AddDevice(deviceName, vendorName, version, deviceV1_0); + } + + std::shared_ptr deviceV2_0 = DiscoverHDIDevicesV2_0(deviceName, vendorName, version); + if (deviceV2_0 != nullptr) { + AddDevice(deviceName, vendorName, version, deviceV2_0); + } +} + bool DeviceManager::IsValidDevice(std::shared_ptr device) const { DeviceStatus status {DeviceStatus::UNKNOWN}; diff --git a/frameworks/native/device_manager.h b/frameworks/native/device_manager.h index 20d4bf05834c6cbac02191ffa1f743730ebbfb9e..4d8b9fbd35e1c029c9c8ab46c6a620ce12c63ed6 100644 --- a/frameworks/native/device_manager.h +++ b/frameworks/native/device_manager.h @@ -49,8 +49,11 @@ private: DeviceManager(const DeviceManager&) = delete; DeviceManager& operator=(const DeviceManager&) = delete; + void AddDevice(const std::string& deviceName, const std::string& vendorName, + const std::string& version, std::shared_ptr device); void DiscoverHDIDevices(); - std::string GenUniqueName(const std::string& deviceName, const std::string& vendorName) const; + std::string GenUniqueName( + const std::string& deviceName, const std::string& vendorName, const std::string& version) const; bool IsValidDevice(std::shared_ptr device) const; private: diff --git a/frameworks/native/device_registrar.h b/frameworks/native/device_registrar.h index a9645299821087407c45202087110eca1b8365ea..521a075a336db601d839d10eafe5ca1455c0ca34 100644 --- a/frameworks/native/device_registrar.h +++ b/frameworks/native/device_registrar.h @@ -34,7 +34,7 @@ public: #define REGISTER_DEVICE(deviceName, vendorName, creator) \ namespace { \ - static OHOS::NeuralNetworkRuntime::DeviceRegistrar g_##deviceName##_##vendorName##_device_registrar(creator) \ + static OHOS::NeuralNetworkRuntime::DeviceRegistrar g_##deviceName##_##vendorName##_device_registrar(creator); \ } // namespace } // namespace NeuralNetworkRuntime } // OHOS diff --git a/frameworks/native/execution_plan.cpp b/frameworks/native/execution_plan.cpp index b1ddfe3ac53676fb016bd9aeebd9bd1deaabfd01..5199199bfd9cf4c11a8d0c58cddcfc2108743ead 100644 --- a/frameworks/native/execution_plan.cpp +++ b/frameworks/native/execution_plan.cpp @@ -23,6 +23,18 @@ namespace OHOS { namespace NeuralNetworkRuntime { +OH_NN_ReturnCode ExecutionPlan::GetInputDimRanges(std::vector>& minInputDims, + std::vector>& maxInputDims) +{ + OH_NN_ReturnCode ret = m_preparedModel->GetInputDimRanges(minInputDims, maxInputDims); + if (ret != OH_NN_SUCCESS) { + LOGE("ExecutionPlan GetInputDimRanges() failed."); + return ret; + } + + return OH_NN_SUCCESS; +} + OH_NN_ReturnCode ExecutionPlan::Run(const std::vector>& inputTensors, std::vector>& outputTensors) { diff --git a/frameworks/native/execution_plan.h b/frameworks/native/execution_plan.h index 
9644a321d12b0120f24af6835e7eb035548c7445..54f4648b21372a1a23b6a9d7bef5dd0ec023e36b 100644 --- a/frameworks/native/execution_plan.h +++ b/frameworks/native/execution_plan.h @@ -29,6 +29,9 @@ public: ExecutionPlan(std::shared_ptr preparedModel, std::shared_ptr device) : m_preparedModel(preparedModel), m_device(device) {}; + + OH_NN_ReturnCode GetInputDimRanges(std::vector>& minInputDims, + std::vector>& maxInputDims); OH_NN_ReturnCode Run(const std::vector>& inputTensors, std::vector>& outputTensors); diff --git a/frameworks/native/executor.cpp b/frameworks/native/executor.cpp index f99d28cdb1869484968217fb5b03c927e6e7b3f7..93670eb0f715e6640e3e06588d034638a3404135 100644 --- a/frameworks/native/executor.cpp +++ b/frameworks/native/executor.cpp @@ -19,6 +19,7 @@ #include "common/utils.h" #include "common/scoped_trace.h" +#include "transform.h" namespace OHOS { @@ -113,8 +114,64 @@ void Executor::SetInputTensorWithNewBuffer(uint32_t index, } +OH_NN_ReturnCode Executor::CheckInputDimRanges(uint32_t index, const OH_NN_Tensor& nnTensor) const +{ + std::vector> minInputDims; + std::vector> maxInputDims; + auto ret = m_executionPlan->GetInputDimRanges(minInputDims, maxInputDims); + if (ret != OH_NN_SUCCESS) { + LOGE("Get the dimension ranges of input %u failed. ErrorCode=%d", index, ret); + return ret; + } + + if (index >= minInputDims.size()) { + LOGE("index is %u, which exceeds the size of minInputDims:%zu.", index, minInputDims.size()); + return OH_NN_INVALID_PARAMETER; + } + + if (index >= maxInputDims.size()) { + LOGE("index is %u, which exceeds the size of maxInputDims:%zu.", index, maxInputDims.size()); + return OH_NN_INVALID_PARAMETER; + } + + const std::vector& minSingleInputDims = minInputDims[index]; + const std::vector& maxSingleInputDims = maxInputDims[index]; + + std::vector tensorShape = ConstructVectorFromArray(nnTensor.dimensions, nnTensor.dimensionCount); + size_t tensorShapeSize = tensorShape.size(); + if (minSingleInputDims.size() != tensorShapeSize || maxSingleInputDims.size() != tensorShapeSize) { + LOGE("Size of minSingleInputDims, maxSingleInputDims and tensorShape of input %u are not equal.", index); + return OH_NN_INVALID_PARAMETER; + } + + for (size_t j = 0; j < tensorShapeSize; ++j) { + // Dimensions cannot be negative + if (tensorShape[j] < 0) { + LOGE("Dimension %zu of input %u is %d.", j, index, tensorShape[j]); + return OH_NN_INVALID_PARAMETER; + } + uint32_t dim = static_cast(tensorShape[j]); + if (dim < minSingleInputDims[j] || dim > maxSingleInputDims[j]) { + LOGE("Dimension %zu of input %u is %u, which is out of range [%u, %u]", + j, index, dim, minSingleInputDims[j], maxSingleInputDims[j]); + return OH_NN_INVALID_PARAMETER; + } + } + + return OH_NN_SUCCESS; +} + + OH_NN_ReturnCode Executor::SetInput(uint32_t index, const OH_NN_Tensor& nnTensor, const void* buffer, size_t length) { + auto nnRet = CheckInputDimRanges(index, nnTensor); + if (nnRet == OH_NN_OPERATION_FORBIDDEN) { + LOGI("Skip input dimension bounds check."); + } else if (nnRet != OH_NN_SUCCESS) { + LOGE("SetInput failed, Check the range of the %uth input dimension ranges failed.", index); + return nnRet; + } + std::shared_ptr inputTensor = CreateSharedPtr(); if (inputTensor == nullptr) { LOGE("SetInput failed, error happened when creating NNTensor."); @@ -181,6 +238,14 @@ OH_NN_ReturnCode Executor::SetInput(uint32_t index, const OH_NN_Tensor& nnTensor OH_NN_ReturnCode Executor::SetInputFromMemory(uint32_t index, const OH_NN_Tensor& nnTensor, const OH_NN_Memory& memory) { + auto nnRet = 
CheckInputDimRanges(index, nnTensor); + if (nnRet == OH_NN_OPERATION_FORBIDDEN) { + LOGI("Skip input dimension bounds check."); + } else if (nnRet != OH_NN_SUCCESS) { + LOGE("SetInputFromMemory failed, Check the range of the %uth input dimension ranges failed.", index); + return nnRet; + } + // Build a input tensor std::shared_ptr inputTensor = CreateSharedPtr(); if (inputTensor == nullptr) { diff --git a/frameworks/native/executor.h b/frameworks/native/executor.h index f7a98eb094f35c4235e8f04b5d92e9746f7dff63..c7b2061e911800cef2efcf80bbec771e9b50b6dd 100644 --- a/frameworks/native/executor.h +++ b/frameworks/native/executor.h @@ -49,6 +49,7 @@ private: const void* buffer, size_t dataLength, size_t curBufferLength); void SetInputTensorWithNewBuffer(uint32_t index, std::shared_ptr inputTensor, const void* inputBuffer, size_t length, bool isInnerMem); + OH_NN_ReturnCode CheckInputDimRanges(uint32_t index, const OH_NN_Tensor& nnTensor) const; private: struct ExeTensor { diff --git a/frameworks/native/hdi_device.cpp b/frameworks/native/hdi_device_v1_0.cpp similarity index 69% rename from frameworks/native/hdi_device.cpp rename to frameworks/native/hdi_device_v1_0.cpp index b360ea73145b6c41fcb3324d0b3812a5ef155e4c..146eb3dcca4412dbacd65878ca3f0b4f938a9919 100644 --- a/frameworks/native/hdi_device.cpp +++ b/frameworks/native/hdi_device_v1_0.cpp @@ -13,12 +13,12 @@ * limitations under the License. */ -#include "hdi_device.h" +#include "hdi_device_v1_0.h" #include "hdf_base.h" #include "mindir.h" -#include "hdi_prepared_model.h" +#include "hdi_prepared_model_v1_0.h" #include "memory_manager.h" #include "transform.h" #include "common/log.h" @@ -26,12 +26,72 @@ namespace OHOS { namespace NeuralNetworkRuntime { -HDIDevice::HDIDevice(OHOS::sptr device) : m_iDevice(device) +namespace { +OH_NN_DeviceType TransHDIDeviceV1_0Type(const V1_0::DeviceType& iDeviceType) +{ + switch (iDeviceType) { + case V1_0::DeviceType::CPU: + return OH_NN_CPU; + case V1_0::DeviceType::GPU: + return OH_NN_GPU; + case V1_0::DeviceType::ACCELERATOR: + return OH_NN_ACCELERATOR; + default: + return OH_NN_OTHERS; + } +} + +DeviceStatus TransHDIDeviceV1_0Status(const V1_0::DeviceStatus& iDeviceStatus) +{ + switch (iDeviceStatus) { + case V1_0::DeviceStatus::AVAILABLE: + return DeviceStatus::AVAILABLE; + case V1_0::DeviceStatus::BUSY: + return DeviceStatus::BUSY; + case V1_0::DeviceStatus::OFFLINE: + return DeviceStatus::OFFLINE; + default: + return DeviceStatus::UNKNOWN; + } +} + +V1_0::PerformanceMode TransPerformanceMode(const OH_NN_PerformanceMode& mode) +{ + switch (mode) { + case OH_NN_PERFORMANCE_LOW: + return V1_0::PerformanceMode::PERFORMANCE_LOW; + case OH_NN_PERFORMANCE_MEDIUM: + return V1_0::PerformanceMode::PERFORMANCE_MEDIUM; + case OH_NN_PERFORMANCE_HIGH: + return V1_0::PerformanceMode::PERFORMANCE_HIGH; + case OH_NN_PERFORMANCE_EXTREME: + return V1_0::PerformanceMode::PERFORMANCE_EXTREME; + default: + return V1_0::PerformanceMode::PERFORMANCE_NONE; + } +} + +V1_0::Priority TransPriority(const OH_NN_Priority& priority) +{ + switch (priority) { + case OH_NN_PRIORITY_LOW: + return V1_0::Priority::PRIORITY_LOW; + case OH_NN_PRIORITY_MEDIUM: + return V1_0::Priority::PRIORITY_MEDIUM; + case OH_NN_PRIORITY_HIGH: + return V1_0::Priority::PRIORITY_HIGH; + default: + return V1_0::Priority::PRIORITY_NONE; + } +} +} + +HDIDeviceV1_0::HDIDeviceV1_0(OHOS::sptr device) : m_iDevice(device) { device->GetVersion(m_hdiVersion.first, m_hdiVersion.second); } -OH_NN_ReturnCode HDIDevice::GetDeviceName(std::string& name) +OH_NN_ReturnCode 
HDIDeviceV1_0::GetDeviceName(std::string& name) { auto ret = m_iDevice->GetDeviceName(name); if (ret != HDF_SUCCESS) { @@ -41,7 +101,7 @@ OH_NN_ReturnCode HDIDevice::GetDeviceName(std::string& name) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::GetVendorName(std::string& name) +OH_NN_ReturnCode HDIDeviceV1_0::GetVendorName(std::string& name) { auto ret = m_iDevice->GetVendorName(name); if (ret != HDF_SUCCESS) { @@ -51,7 +111,13 @@ OH_NN_ReturnCode HDIDevice::GetVendorName(std::string& name) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::GetDeviceType(OH_NN_DeviceType& deviceType) +OH_NN_ReturnCode HDIDeviceV1_0::GetVersion(std::string& version) +{ + version = 'v' + std::to_string(m_hdiVersion.first) + '_' + std::to_string(m_hdiVersion.second); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV1_0::GetDeviceType(OH_NN_DeviceType& deviceType) { V1_0::DeviceType iDeviceType; auto ret = m_iDevice->GetDeviceType(iDeviceType); @@ -60,11 +126,11 @@ OH_NN_ReturnCode HDIDevice::GetDeviceType(OH_NN_DeviceType& deviceType) return OH_NN_UNAVALIDABLE_DEVICE; } - deviceType = HDIToNN::TransHDIDeviceType(iDeviceType); + deviceType = TransHDIDeviceV1_0Type(iDeviceType); return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::GetDeviceStatus(DeviceStatus& status) +OH_NN_ReturnCode HDIDeviceV1_0::GetDeviceStatus(DeviceStatus& status) { V1_0::DeviceStatus iDeviceStatus; auto ret = m_iDevice->GetDeviceStatus(iDeviceStatus); @@ -72,12 +138,12 @@ OH_NN_ReturnCode HDIDevice::GetDeviceStatus(DeviceStatus& status) LOGE("Get HDI device status failed. ErrorCode=%d", ret); return OH_NN_UNAVALIDABLE_DEVICE; } - status = HDIToNN::TransHDIDeviceStatus(iDeviceStatus); + status = TransHDIDeviceV1_0Status(iDeviceStatus); return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::GetSupportedOperation(std::shared_ptr model, - std::vector& ops) +OH_NN_ReturnCode HDIDeviceV1_0::GetSupportedOperation(std::shared_ptr model, + std::vector& ops) { if (model == nullptr) { LOGE("Model is nullptr, cannot query supported operation."); @@ -117,7 +183,7 @@ OH_NN_ReturnCode HDIDevice::GetSupportedOperation(std::shared_ptrIsFloat16PrecisionSupported(isSupported); if (ret != HDF_SUCCESS) { @@ -127,7 +193,7 @@ OH_NN_ReturnCode HDIDevice::IsFloat16PrecisionSupported(bool& isSupported) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::IsPerformanceModeSupported(bool& isSupported) +OH_NN_ReturnCode HDIDeviceV1_0::IsPerformanceModeSupported(bool& isSupported) { auto ret = m_iDevice->IsPerformanceModeSupported(isSupported); if (ret != HDF_SUCCESS) { @@ -137,7 +203,7 @@ OH_NN_ReturnCode HDIDevice::IsPerformanceModeSupported(bool& isSupported) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::IsPrioritySupported(bool& isSupported) +OH_NN_ReturnCode HDIDeviceV1_0::IsPrioritySupported(bool& isSupported) { auto ret = m_iDevice->IsPrioritySupported(isSupported); if (ret != HDF_SUCCESS) { @@ -147,7 +213,7 @@ OH_NN_ReturnCode HDIDevice::IsPrioritySupported(bool& isSupported) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::IsDynamicInputSupported(bool& isSupported) +OH_NN_ReturnCode HDIDeviceV1_0::IsDynamicInputSupported(bool& isSupported) { auto ret = m_iDevice->IsDynamicInputSupported(isSupported); if (ret != HDF_SUCCESS) { @@ -157,7 +223,7 @@ OH_NN_ReturnCode HDIDevice::IsDynamicInputSupported(bool& isSupported) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::IsModelCacheSupported(bool& isSupported) +OH_NN_ReturnCode HDIDeviceV1_0::IsModelCacheSupported(bool& isSupported) { auto ret = 
m_iDevice->IsModelCacheSupported(isSupported); if (ret != HDF_SUCCESS) { @@ -167,9 +233,8 @@ OH_NN_ReturnCode HDIDevice::IsModelCacheSupported(bool& isSupported) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::PrepareModel(std::shared_ptr model, - const ModelConfig& config, - std::shared_ptr& preparedModel) +OH_NN_ReturnCode HDIDeviceV1_0::PrepareModel(std::shared_ptr model, + const ModelConfig& config, std::shared_ptr& preparedModel) { if (model == nullptr) { LOGE("Model is nullptr, cannot prepare model."); @@ -196,8 +261,8 @@ OH_NN_ReturnCode HDIDevice::PrepareModel(std::shared_ptr iPreparedModel; auto preparedRet = m_iDevice->PrepareModel(*iModel, iModelConfig, iPreparedModel); @@ -213,7 +278,7 @@ OH_NN_ReturnCode HDIDevice::PrepareModel(std::shared_ptr(iPreparedModel); + preparedModel = CreateSharedPtr(iPreparedModel); if (preparedModel == nullptr) { LOGE("Prepare model failed, because fail to create preparedModel instance."); return OH_NN_MEMORY_ERROR; @@ -222,9 +287,8 @@ OH_NN_ReturnCode HDIDevice::PrepareModel(std::shared_ptr& modelCache, - const ModelConfig& config, - std::shared_ptr& preparedModel) +OH_NN_ReturnCode HDIDeviceV1_0::PrepareModelFromModelCache(const std::vector& modelCache, + const ModelConfig& config, std::shared_ptr& preparedModel) { std::vector iBuffers; auto memManager = MemoryManager::GetInstance(); @@ -242,8 +306,8 @@ OH_NN_ReturnCode HDIDevice::PrepareModelFromModelCache(const std::vector iPreparedModel; auto hdiRet = m_iDevice->PrepareModelFromModelCache(iBuffers, iModelConfig, iPreparedModel); @@ -252,7 +316,7 @@ OH_NN_ReturnCode HDIDevice::PrepareModelFromModelCache(const std::vector(iPreparedModel); + preparedModel = CreateSharedPtr(iPreparedModel); if (preparedModel == nullptr) { LOGE("Prepare model from model cache failed, because fail to create preparedModel instance."); return OH_NN_MEMORY_ERROR; @@ -260,7 +324,7 @@ OH_NN_ReturnCode HDIDevice::PrepareModelFromModelCache(const std::vector +#include +#include #include "refbase.h" -#include "hdi_interfaces.h" #include "device.h" namespace OHOS { namespace NeuralNetworkRuntime { -class HDIDevice : public Device { +namespace V1_0 = OHOS::HDI::Nnrt::V1_0; +class HDIDeviceV1_0 : public Device { public: - explicit HDIDevice(OHOS::sptr device); + explicit HDIDeviceV1_0(OHOS::sptr device); OH_NN_ReturnCode GetDeviceName(std::string& name) override; OH_NN_ReturnCode GetVendorName(std::string& name) override; + OH_NN_ReturnCode GetVersion(std::string& version) override; OH_NN_ReturnCode GetDeviceType(OH_NN_DeviceType& deviceType) override; OH_NN_ReturnCode GetDeviceStatus(DeviceStatus& status) override; OH_NN_ReturnCode GetSupportedOperation(std::shared_ptr model, @@ -60,4 +64,4 @@ private: }; } // namespace NeuralNetworkRuntime } // namespace OHOS -#endif // NEURAL_NETWORK_RUNTIME_HDI_DEVICE_H \ No newline at end of file +#endif // NEURAL_NETWORK_RUNTIME_HDI_DEVICE_V1_0_H \ No newline at end of file diff --git a/frameworks/native/hdi_device_v2_0.cpp b/frameworks/native/hdi_device_v2_0.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8458394357f976722a55778a29a2b97aa3f389fd --- /dev/null +++ b/frameworks/native/hdi_device_v2_0.cpp @@ -0,0 +1,395 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "hdi_device_v2_0.h" + +#include "hdf_base.h" +#include "mindir.h" + +#include "hdi_prepared_model_v2_0.h" +#include "memory_manager.h" +#include "transform.h" +#include "common/log.h" +#include "common/utils.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace { +OH_NN_DeviceType TransHDIDeviceV2_0Type(const V2_0::DeviceType& iDeviceType) +{ + switch (iDeviceType) { + case V2_0::DeviceType::CPU: + return OH_NN_CPU; + case V2_0::DeviceType::GPU: + return OH_NN_GPU; + case V2_0::DeviceType::ACCELERATOR: + return OH_NN_ACCELERATOR; + default: + return OH_NN_OTHERS; + } +} + +DeviceStatus TransHDIDeviceV2_0Status(const V2_0::DeviceStatus& iDeviceStatus) +{ + switch (iDeviceStatus) { + case V2_0::DeviceStatus::AVAILABLE: + return DeviceStatus::AVAILABLE; + case V2_0::DeviceStatus::BUSY: + return DeviceStatus::BUSY; + case V2_0::DeviceStatus::OFFLINE: + return DeviceStatus::OFFLINE; + default: + return DeviceStatus::UNKNOWN; + } +} + +V2_0::PerformanceMode TransPerformanceMode(const OH_NN_PerformanceMode& mode) +{ + switch (mode) { + case OH_NN_PERFORMANCE_LOW: + return V2_0::PerformanceMode::PERFORMANCE_LOW; + case OH_NN_PERFORMANCE_MEDIUM: + return V2_0::PerformanceMode::PERFORMANCE_MEDIUM; + case OH_NN_PERFORMANCE_HIGH: + return V2_0::PerformanceMode::PERFORMANCE_HIGH; + case OH_NN_PERFORMANCE_EXTREME: + return V2_0::PerformanceMode::PERFORMANCE_EXTREME; + default: + return V2_0::PerformanceMode::PERFORMANCE_NONE; + } +} + +V2_0::Priority TransPriority(const OH_NN_Priority& priority) +{ + switch (priority) { + case OH_NN_PRIORITY_LOW: + return V2_0::Priority::PRIORITY_LOW; + case OH_NN_PRIORITY_MEDIUM: + return V2_0::Priority::PRIORITY_MEDIUM; + case OH_NN_PRIORITY_HIGH: + return V2_0::Priority::PRIORITY_HIGH; + default: + return V2_0::Priority::PRIORITY_NONE; + } +} +} + +HDIDeviceV2_0::HDIDeviceV2_0(OHOS::sptr device) : m_iDevice(device) +{ + device->GetVersion(m_hdiVersion.first, m_hdiVersion.second); +} + +OH_NN_ReturnCode HDIDeviceV2_0::GetDeviceName(std::string& name) +{ + auto ret = m_iDevice->GetDeviceName(name); + if (ret != HDF_SUCCESS) { + LOGE("Get HDI device name failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::GetVendorName(std::string& name) +{ + auto ret = m_iDevice->GetVendorName(name); + if (ret != HDF_SUCCESS) { + LOGE("Get HDI device vendor name failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::GetVersion(std::string& version) +{ + version = 'v' + std::to_string(m_hdiVersion.first) + '_' + std::to_string(m_hdiVersion.second); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::GetDeviceType(OH_NN_DeviceType& deviceType) +{ + V2_0::DeviceType iDeviceType; + auto ret = m_iDevice->GetDeviceType(iDeviceType); + if (ret != HDF_SUCCESS) { + LOGE("Get HDI device type failed. 
ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + + deviceType = TransHDIDeviceV2_0Type(iDeviceType); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::GetDeviceStatus(DeviceStatus& status) +{ + V2_0::DeviceStatus iDeviceStatus; + auto ret = m_iDevice->GetDeviceStatus(iDeviceStatus); + if (ret != HDF_SUCCESS) { + LOGE("Get HDI device status failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + status = TransHDIDeviceV2_0Status(iDeviceStatus); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::GetSupportedOperation(std::shared_ptr model, + std::vector& ops) +{ + if (model == nullptr) { + LOGE("Model is nullptr, cannot query supported operation."); + return OH_NN_NULL_PTR; + } + + V2_0::SharedBuffer tensorBuffer {INVALID_FD, 0, 0, 0}; + size_t tensorSize = mindspore::lite::MindIR_LiteGraph_GetConstTensorSize(model.get()); + int32_t hdiRet {0}; + if (tensorSize > 0) { + hdiRet = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer); + if (hdiRet != HDF_SUCCESS || tensorBuffer.fd == INVALID_FD) { + LOGE("Allocate tensor buffer error when get supported operation. ErrorCode: %d", hdiRet); + return OH_NN_FAILED; + } + } + + auto iModel = mindspore::lite::MindIR_LiteGraph_To_Model(model.get(), tensorBuffer); + if (iModel == nullptr) { + LOGE("Parse litegraph to hdi model failed."); + ReleaseSharedBuffer(tensorBuffer); + return OH_NN_FAILED; + } + + hdiRet = m_iDevice->GetSupportedOperation(*iModel, ops); + + mindspore::lite::MindIR_Model_Destroy(&iModel); + auto ret = ReleaseSharedBuffer(tensorBuffer); + if (ret != OH_NN_SUCCESS) { + LOGE("Release tensorBuffer failed."); + return OH_NN_FAILED; + } + if (hdiRet != HDF_SUCCESS) { + LOGE("Get supported operation failed. ErrorCode=%d", hdiRet); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsFloat16PrecisionSupported(bool& isSupported) +{ + auto ret = m_iDevice->IsFloat16PrecisionSupported(isSupported); + if (ret != HDF_SUCCESS) { + LOGE("Query fp16 precision supported failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsPerformanceModeSupported(bool& isSupported) +{ + auto ret = m_iDevice->IsPerformanceModeSupported(isSupported); + if (ret != HDF_SUCCESS) { + LOGE("Query performance mode supported failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsPrioritySupported(bool& isSupported) +{ + auto ret = m_iDevice->IsPrioritySupported(isSupported); + if (ret != HDF_SUCCESS) { + LOGE("Query priority supported failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsDynamicInputSupported(bool& isSupported) +{ + auto ret = m_iDevice->IsDynamicInputSupported(isSupported); + if (ret != HDF_SUCCESS) { + LOGE("Query dynamic input supported failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsModelCacheSupported(bool& isSupported) +{ + auto ret = m_iDevice->IsModelCacheSupported(isSupported); + if (ret != HDF_SUCCESS) { + LOGE("Query cache model supported failed. 
ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::PrepareModel(std::shared_ptr model, + const ModelConfig& config, std::shared_ptr& preparedModel) +{ + if (model == nullptr) { + LOGE("Model is nullptr, cannot prepare model."); + return OH_NN_INVALID_PARAMETER; + } + + V2_0::SharedBuffer tensorBuffer {INVALID_FD, 0, 0, 0}; + size_t tensorSize = mindspore::lite::MindIR_LiteGraph_GetConstTensorSize(model.get()); + int32_t hdiRet {0}; + if (tensorSize > 0) { + hdiRet = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer); + if (hdiRet != HDF_SUCCESS || tensorBuffer.fd == INVALID_FD) { + LOGE("Allocate tensor buffer error when prepare model. ErrorCode: %d", hdiRet); + return OH_NN_FAILED; + } + } + + V2_0::Model* iModel = mindspore::lite::MindIR_LiteGraph_To_Model(model.get(), tensorBuffer); + if (iModel == nullptr) { + LOGE("Parse litegraph to hdi model failed."); + ReleaseSharedBuffer(tensorBuffer); + return OH_NN_FAILED; + } + + V2_0::ModelConfig iModelConfig; + iModelConfig.enableFloat16 = config.enableFloat16; + iModelConfig.mode = TransPerformanceMode(config.mode); + iModelConfig.priority = TransPriority(config.priority); + OHOS::sptr iPreparedModel; + + auto preparedRet = m_iDevice->PrepareModel(*iModel, iModelConfig, iPreparedModel); + + mindspore::lite::MindIR_Model_Destroy(&iModel); + auto ret = ReleaseSharedBuffer(tensorBuffer); + if (ret != OH_NN_SUCCESS) { + LOGE("Release tensorBuffer failed."); + return OH_NN_FAILED; + } + if (preparedRet != HDF_SUCCESS || iPreparedModel == nullptr) { + LOGE("Prepare model failed. ErrorCode=%d", preparedRet); + return OH_NN_FAILED; + } + + preparedModel = CreateSharedPtr(iPreparedModel); + if (preparedModel == nullptr) { + LOGE("Prepare model failed, because fail to create preparedModel instance."); + return OH_NN_MEMORY_ERROR; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::PrepareModelFromModelCache(const std::vector& modelCache, + const ModelConfig& config, std::shared_ptr& preparedModel) +{ + std::vector iBuffers; + auto memManager = MemoryManager::GetInstance(); + Memory memory; + OH_NN_ReturnCode ret; + size_t modelCacheSize = modelCache.size(); + for (size_t i = 0; i < modelCacheSize; i++) { + ret = memManager->GetMemory(modelCache[i].buffer, memory); + if (ret != OH_NN_SUCCESS) { + LOGE("The %zuth model cache is invalid. Please put valid model cache.", i + 1); + return ret; + } + iBuffers.emplace_back(V2_0::SharedBuffer {memory.fd, memory.length, 0, memory.length}); + } + + V2_0::ModelConfig iModelConfig; + iModelConfig.enableFloat16 = config.enableFloat16; + iModelConfig.mode = TransPerformanceMode(config.mode); + iModelConfig.priority = TransPriority(config.priority); + + OHOS::sptr iPreparedModel; + auto hdiRet = m_iDevice->PrepareModelFromModelCache(iBuffers, iModelConfig, iPreparedModel); + if (hdiRet != HDF_SUCCESS) { + LOGE("Prepare model from cache failed. 
ErrorCode=%d", hdiRet); + return OH_NN_UNAVALIDABLE_DEVICE; + } + + preparedModel = CreateSharedPtr(iPreparedModel); + if (preparedModel == nullptr) { + LOGE("Prepare model from model cache failed, because fail to create preparedModel instance."); + return OH_NN_MEMORY_ERROR; + } + return OH_NN_SUCCESS; +} + +void* HDIDeviceV2_0::AllocateBuffer(size_t length) +{ + if (length == 0) { + LOGE("The length param is invalid, length=0"); + return nullptr; + } + + V2_0::SharedBuffer buffer; + auto ret = m_iDevice->AllocateBuffer(length, buffer); + if (ret != HDF_SUCCESS) { + LOGE("Allocate buffer error. ErrorCode: %d", ret); + return nullptr; + } + + auto memManager = MemoryManager::GetInstance(); + auto addr = memManager->MapMemory(buffer.fd, length); + if (addr == nullptr) { + LOGE("Map fd to address failed."); + } + return addr; +} + +OH_NN_ReturnCode HDIDeviceV2_0::ReleaseBuffer(const void* buffer) +{ + if (buffer == nullptr) { + LOGE("Buffer is nullptr, no need to release."); + return OH_NN_INVALID_PARAMETER; + } + + auto memManager = MemoryManager::GetInstance(); + Memory memory; + auto ret = memManager->GetMemory(buffer, memory); + if (ret != OH_NN_SUCCESS) { + LOGE("Invalid Buffer, it is not NNRt buffer."); + return ret; + } + + V2_0::SharedBuffer hdiBuffer {memory.fd, memory.length, 0, memory.length}; + auto deviceResult = m_iDevice->ReleaseBuffer(hdiBuffer); + if (deviceResult != HDF_SUCCESS) { + LOGE("Device release buffer error. ErrorCode: %d", deviceResult); + return OH_NN_FAILED; + } + + ret = memManager->UnMapMemory(buffer); + if (ret != OH_NN_SUCCESS) { + LOGE("Unmap memory failed."); + return ret; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::ReleaseSharedBuffer(const V2_0::SharedBuffer& buffer) +{ + if (buffer.fd == INVALID_FD) { + LOGI("No need to release. fd=%d", INVALID_FD); + return OH_NN_SUCCESS; + } + + auto ret = m_iDevice->ReleaseBuffer(buffer); + if (ret != HDF_SUCCESS) { + LOGE("Device release buffer error. ErrorCode=%d", ret); + return OH_NN_FAILED; + } + return OH_NN_SUCCESS; +} +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/hdi_device_v2_0.h b/frameworks/native/hdi_device_v2_0.h new file mode 100644 index 0000000000000000000000000000000000000000..4964d98844ce67a6db0703705b2ba73152be7f4a --- /dev/null +++ b/frameworks/native/hdi_device_v2_0.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_HDI_DEVICE_V2_0_H +#define NEURAL_NETWORK_RUNTIME_HDI_DEVICE_V2_0_H + +#include +#include +#include +#include "refbase.h" + +#include "device.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace V2_0 = OHOS::HDI::Nnrt::V2_0; +class HDIDeviceV2_0 : public Device { +public: + explicit HDIDeviceV2_0(OHOS::sptr device); + + OH_NN_ReturnCode GetDeviceName(std::string& name) override; + OH_NN_ReturnCode GetVendorName(std::string& name) override; + OH_NN_ReturnCode GetVersion(std::string& version) override; + OH_NN_ReturnCode GetDeviceType(OH_NN_DeviceType& deviceType) override; + OH_NN_ReturnCode GetDeviceStatus(DeviceStatus& status) override; + OH_NN_ReturnCode GetSupportedOperation(std::shared_ptr model, + std::vector& ops) override; + + OH_NN_ReturnCode IsFloat16PrecisionSupported(bool& isSupported) override; + OH_NN_ReturnCode IsPerformanceModeSupported(bool& isSupported) override; + OH_NN_ReturnCode IsPrioritySupported(bool& isSupported) override; + OH_NN_ReturnCode IsDynamicInputSupported(bool& isSupported) override; + OH_NN_ReturnCode IsModelCacheSupported(bool& isSupported) override; + + OH_NN_ReturnCode PrepareModel(std::shared_ptr model, + const ModelConfig& config, + std::shared_ptr& preparedModel) override; + OH_NN_ReturnCode PrepareModelFromModelCache(const std::vector& modelCache, + const ModelConfig& config, + std::shared_ptr& preparedModel) override; + + void* AllocateBuffer(size_t length) override; + OH_NN_ReturnCode ReleaseBuffer(const void* buffer) override; + +private: + OH_NN_ReturnCode ReleaseSharedBuffer(const V2_0::SharedBuffer& buffer); + +private: + // first: major version, second: minor version + std::pair m_hdiVersion; + OHOS::sptr m_iDevice {nullptr}; +}; +} // namespace NeuralNetworkRuntime +} // namespace OHOS +#endif // NEURAL_NETWORK_RUNTIME_HDI_DEVICE_V2_0_H \ No newline at end of file diff --git a/frameworks/native/hdi_prepared_model.cpp b/frameworks/native/hdi_prepared_model_v1_0.cpp similarity index 49% rename from frameworks/native/hdi_prepared_model.cpp rename to frameworks/native/hdi_prepared_model_v1_0.cpp index 491aec696489b34b44c32bad5cdd5a8c93d7c969..5df1eee12f61ad23ed21aa566778adc8b053dd7d 100644 --- a/frameworks/native/hdi_prepared_model.cpp +++ b/frameworks/native/hdi_prepared_model_v1_0.cpp @@ -13,21 +13,93 @@ * limitations under the License. 
*/ -#include "hdi_prepared_model.h" +#include "hdi_prepared_model_v1_0.h" #include "common/log.h" #include "memory_manager.h" -#include "transform.h" namespace OHOS { namespace NeuralNetworkRuntime { -HDIPreparedModel::HDIPreparedModel(OHOS::sptr hdiPreparedModel) +namespace { +V1_0::DataType TransDataType(const OH_NN_DataType& dataType) +{ + switch (dataType) { + case OH_NN_BOOL: + return V1_0::DataType::DATA_TYPE_BOOL; + case OH_NN_INT8: + return V1_0::DataType::DATA_TYPE_INT8; + case OH_NN_INT16: + return V1_0::DataType::DATA_TYPE_INT16; + case OH_NN_INT32: + return V1_0::DataType::DATA_TYPE_INT32; + case OH_NN_INT64: + return V1_0::DataType::DATA_TYPE_INT64; + case OH_NN_UINT8: + return V1_0::DataType::DATA_TYPE_UINT8; + case OH_NN_UINT16: + return V1_0::DataType::DATA_TYPE_UINT16; + case OH_NN_UINT32: + return V1_0::DataType::DATA_TYPE_UINT32; + case OH_NN_UINT64: + return V1_0::DataType::DATA_TYPE_UINT64; + case OH_NN_FLOAT16: + return V1_0::DataType::DATA_TYPE_FLOAT16; + case OH_NN_FLOAT32: + return V1_0::DataType::DATA_TYPE_FLOAT32; + case OH_NN_FLOAT64: + return V1_0::DataType::DATA_TYPE_FLOAT64; + default: + return V1_0::DataType::DATA_TYPE_UNKNOWN; + } +} + +V1_0::Format TransFormat(const OH_NN_Format& format) +{ + switch (format) { + case OH_NN_FORMAT_NCHW: + return V1_0::Format::FORMAT_NCHW; + case OH_NN_FORMAT_NHWC: + return V1_0::Format::FORMAT_NHWC; + default: + return V1_0::Format::FORMAT_NONE; + } +} + +V1_0::IOTensor TransIOTensor(const IOTensor& tensor) +{ + V1_0::IOTensor iTensor; + iTensor.name = tensor.name; + iTensor.dataType = TransDataType(tensor.dataType); + iTensor.dimensions = tensor.dimensions; + iTensor.format = TransFormat(tensor.format); + + V1_0::SharedBuffer iBuffer {INVALID_FD, 0, 0, 0}; + if (tensor.data != nullptr) { + auto memManager = MemoryManager::GetInstance(); + Memory memory; + auto ret = memManager->GetMemory(tensor.data, memory); + if (ret != OH_NN_SUCCESS) { + LOGE("Invalid Tensor buffer, cannot transform to fd."); + } else { + iBuffer.fd = memory.fd; + iBuffer.bufferSize = memory.length; + iBuffer.offset = 0; + iBuffer.dataSize = memory.length; + } + } + iTensor.data = iBuffer; + + return iTensor; +} +} // unamed namespace + +HDIPreparedModelV1_0::HDIPreparedModelV1_0(OHOS::sptr hdiPreparedModel) : m_hdiPreparedModel(hdiPreparedModel) { hdiPreparedModel->GetVersion(m_hdiVersion.first, m_hdiVersion.second); } -OH_NN_ReturnCode HDIPreparedModel::ExportModelCache(std::vector& modelCache) +OH_NN_ReturnCode HDIPreparedModelV1_0::ExportModelCache(std::vector& modelCache) { if (!modelCache.empty()) { LOGE("The vector of modelCache should be empty. 
size=%zu", modelCache.size()); @@ -42,7 +114,8 @@ OH_NN_ReturnCode HDIPreparedModel::ExportModelCache(std::vector& mo } auto memManager = MemoryManager::GetInstance(); - for (size_t i = 0; i < iBuffers.size(); i++) { + size_t iBuffersSize = iBuffers.size(); + for (size_t i = 0; i < iBuffersSize; i++) { auto addr = memManager->MapMemory(iBuffers[i].fd, iBuffers[i].bufferSize); if (addr == nullptr) { LOGE("Export the %zuth model cache failed, cannot not map fd to address.", i + 1); @@ -55,13 +128,13 @@ OH_NN_ReturnCode HDIPreparedModel::ExportModelCache(std::vector& mo return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIPreparedModel::Run(const std::vector& inputs, const std::vector& outputs, +OH_NN_ReturnCode HDIPreparedModelV1_0::Run(const std::vector& inputs, const std::vector& outputs, std::vector>& outputsDims, std::vector& isOutputBufferEnough) { V1_0::IOTensor iTensor; std::vector iInputTensors; for (auto& input: inputs) { - iTensor = NNToHDI::TransIOTensor(input); + iTensor = TransIOTensor(input); if (iTensor.data.fd == INVALID_FD) { LOGE("Transform inputs tensor failed, cannot find data file descriptor."); return OH_NN_INVALID_PARAMETER; @@ -71,7 +144,7 @@ OH_NN_ReturnCode HDIPreparedModel::Run(const std::vector& inputs, cons std::vector iOutputTensors; for (auto& output: outputs) { - iTensor = NNToHDI::TransIOTensor(output); + iTensor = TransIOTensor(output); if (iTensor.data.fd == INVALID_FD) { LOGE("Transform outputs tensor failed, cannot find data file descriptor."); return OH_NN_INVALID_PARAMETER; diff --git a/frameworks/native/hdi_prepared_model.h b/frameworks/native/hdi_prepared_model_v1_0.h similarity index 75% rename from frameworks/native/hdi_prepared_model.h rename to frameworks/native/hdi_prepared_model_v1_0.h index d111977b329e3377a95c0de03ae825b1121410cf..4b71c71df8807505a1a391cec1ca1fdba9e7150c 100644 --- a/frameworks/native/hdi_prepared_model.h +++ b/frameworks/native/hdi_prepared_model_v1_0.h @@ -14,21 +14,25 @@ */ -#ifndef NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_H -#define NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_H +#ifndef NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_V1_0_H +#define NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_V1_0_H #include +#include +#include +#include #include "refbase.h" -#include "hdi_interfaces.h" #include "prepared_model.h" #include "cpp_type.h" +namespace V1_0 = OHOS::HDI::Nnrt::V1_0; + namespace OHOS { namespace NeuralNetworkRuntime { -class HDIPreparedModel : public PreparedModel { +class HDIPreparedModelV1_0 : public PreparedModel { public: - explicit HDIPreparedModel(OHOS::sptr hdiPreparedModel); + explicit HDIPreparedModelV1_0(OHOS::sptr hdiPreparedModel); OH_NN_ReturnCode ExportModelCache(std::vector& modelCache) override; @@ -44,4 +48,4 @@ private: }; } // namespace NeuralNetworkRuntime } // OHOS -#endif // NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_H \ No newline at end of file +#endif // NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_V1_0_H \ No newline at end of file diff --git a/frameworks/native/hdi_prepared_model_v2_0.cpp b/frameworks/native/hdi_prepared_model_v2_0.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0bf9fdfb741aea4aa3973e278c9b6855ed4917c6 --- /dev/null +++ b/frameworks/native/hdi_prepared_model_v2_0.cpp @@ -0,0 +1,176 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "hdi_prepared_model_v2_0.h" + +#include "common/log.h" +#include "memory_manager.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace { +V2_0::DataType TransDataType(const OH_NN_DataType& dataType) +{ + switch (dataType) { + case OH_NN_BOOL: + return V2_0::DataType::DATA_TYPE_BOOL; + case OH_NN_INT8: + return V2_0::DataType::DATA_TYPE_INT8; + case OH_NN_INT16: + return V2_0::DataType::DATA_TYPE_INT16; + case OH_NN_INT32: + return V2_0::DataType::DATA_TYPE_INT32; + case OH_NN_INT64: + return V2_0::DataType::DATA_TYPE_INT64; + case OH_NN_UINT8: + return V2_0::DataType::DATA_TYPE_UINT8; + case OH_NN_UINT16: + return V2_0::DataType::DATA_TYPE_UINT16; + case OH_NN_UINT32: + return V2_0::DataType::DATA_TYPE_UINT32; + case OH_NN_UINT64: + return V2_0::DataType::DATA_TYPE_UINT64; + case OH_NN_FLOAT16: + return V2_0::DataType::DATA_TYPE_FLOAT16; + case OH_NN_FLOAT32: + return V2_0::DataType::DATA_TYPE_FLOAT32; + case OH_NN_FLOAT64: + return V2_0::DataType::DATA_TYPE_FLOAT64; + default: + return V2_0::DataType::DATA_TYPE_UNKNOWN; + } +} + +V2_0::Format TransFormat(const OH_NN_Format& format) +{ + switch (format) { + case OH_NN_FORMAT_NCHW: + return V2_0::Format::FORMAT_NCHW; + case OH_NN_FORMAT_NHWC: + return V2_0::Format::FORMAT_NHWC; + default: + return V2_0::Format::FORMAT_NONE; + } +} + +V2_0::IOTensor TransIOTensor(const IOTensor& tensor) +{ + V2_0::IOTensor iTensor; + iTensor.name = tensor.name; + iTensor.dataType = TransDataType(tensor.dataType); + iTensor.dimensions = tensor.dimensions; + iTensor.format = TransFormat(tensor.format); + + V2_0::SharedBuffer iBuffer {INVALID_FD, 0, 0, 0}; + if (tensor.data != nullptr) { + auto memManager = MemoryManager::GetInstance(); + Memory memory; + auto ret = memManager->GetMemory(tensor.data, memory); + if (ret != OH_NN_SUCCESS) { + LOGE("Invalid Tensor buffer, cannot transform to fd."); + } else { + iBuffer.fd = memory.fd; + iBuffer.bufferSize = memory.length; + iBuffer.offset = 0; + iBuffer.dataSize = memory.length; + } + } + iTensor.data = iBuffer; + + return iTensor; +} +} // unamed namespace + +HDIPreparedModelV2_0::HDIPreparedModelV2_0(OHOS::sptr hdiPreparedModel) + : m_hdiPreparedModel(hdiPreparedModel) +{ + hdiPreparedModel->GetVersion(m_hdiVersion.first, m_hdiVersion.second); +} + +OH_NN_ReturnCode HDIPreparedModelV2_0::ExportModelCache(std::vector& modelCache) +{ + if (!modelCache.empty()) { + LOGE("The vector of modelCache should be empty. size=%zu", modelCache.size()); + return OH_NN_INVALID_PARAMETER; + } + + std::vector iBuffers; + auto ret = m_hdiPreparedModel->ExportModelCache(iBuffers); + if (ret != HDF_SUCCESS) { + LOGE("Export model cache failed. 
ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + + auto memManager = MemoryManager::GetInstance(); + size_t iBuffersSize = iBuffers.size(); + for (size_t i = 0; i < iBuffersSize; i++) { + auto addr = memManager->MapMemory(iBuffers[i].fd, iBuffers[i].bufferSize); + if (addr == nullptr) { + LOGE("Export the %zuth model cache failed, cannot not map fd to address.", i + 1); + return OH_NN_MEMORY_ERROR; + } + ModelBuffer modelbuffer {addr, iBuffers[i].bufferSize}; + modelCache.emplace_back(modelbuffer); + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIPreparedModelV2_0::Run(const std::vector& inputs, const std::vector& outputs, + std::vector>& outputsDims, std::vector& isOutputBufferEnough) +{ + V2_0::IOTensor iTensor; + std::vector iInputTensors; + for (auto& input: inputs) { + iTensor = TransIOTensor(input); + if (iTensor.data.fd == INVALID_FD) { + LOGE("Transform inputs tensor failed, cannot find data file descriptor."); + return OH_NN_INVALID_PARAMETER; + } + iInputTensors.emplace_back(iTensor); + } + + std::vector iOutputTensors; + for (auto& output: outputs) { + iTensor = TransIOTensor(output); + if (iTensor.data.fd == INVALID_FD) { + LOGE("Transform outputs tensor failed, cannot find data file descriptor."); + return OH_NN_INVALID_PARAMETER; + } + iOutputTensors.emplace_back(iTensor); + } + + auto ret = m_hdiPreparedModel->Run(iInputTensors, iOutputTensors, outputsDims, isOutputBufferEnough); + if (ret != HDF_SUCCESS || outputsDims.empty()) { + LOGE("Run model failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIPreparedModelV2_0::GetInputDimRanges(std::vector>& minInputDims, + std::vector>& maxInputDims) +{ + auto ret = m_hdiPreparedModel->GetInputDimRanges(minInputDims, maxInputDims); + if (ret != HDF_SUCCESS) { + LOGE("GetInputDimRanges failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + + return OH_NN_SUCCESS; +} +} // namespace NeuralNetworkRuntime +} // OHOS \ No newline at end of file diff --git a/frameworks/native/hdi_prepared_model_v2_0.h b/frameworks/native/hdi_prepared_model_v2_0.h new file mode 100644 index 0000000000000000000000000000000000000000..ad42dcbcb314c56727b8f641132fcddc23e2bb64 --- /dev/null +++ b/frameworks/native/hdi_prepared_model_v2_0.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +#ifndef NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_V2_0_H +#define NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_V2_0_H + +#include + +#include +#include +#include + +#include "refbase.h" +#include "prepared_model.h" +#include "cpp_type.h" + +namespace V2_0 = OHOS::HDI::Nnrt::V2_0; + +namespace OHOS { +namespace NeuralNetworkRuntime { +class HDIPreparedModelV2_0 : public PreparedModel { +public: + explicit HDIPreparedModelV2_0(OHOS::sptr hdiPreparedModel); + + OH_NN_ReturnCode ExportModelCache(std::vector& modelCache) override; + + OH_NN_ReturnCode Run(const std::vector& inputs, + const std::vector& outputs, + std::vector>& outputsDims, + std::vector& isOutputBufferEnough) override; + + OH_NN_ReturnCode GetInputDimRanges(std::vector>& minInputDims, + std::vector>& maxInputDims) override; + +private: + // first: major version, second: minor version + std::pair m_hdiVersion; + OHOS::sptr m_hdiPreparedModel {nullptr}; +}; +} // namespace NeuralNetworkRuntime +} // OHOS +#endif // NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_V2_0_H \ No newline at end of file diff --git a/frameworks/native/inner_model.cpp b/frameworks/native/inner_model.cpp index bcd20c6e2cf1feafc2e058ea790dce815363b03e..e3df670754667a14bd4e9434834bda6850e304c9 100644 --- a/frameworks/native/inner_model.cpp +++ b/frameworks/native/inner_model.cpp @@ -24,7 +24,6 @@ #include "common/utils.h" #include "common/scoped_trace.h" #include "device_manager.h" -#include "hdi_device.h" #include "validation.h" #include "ops_builder.h" #include "ops_registry.h" @@ -289,8 +288,9 @@ OH_NN_ReturnCode InnerModel::ValidateTensorArray(const OH_NN_UInt32Array& indice return ret; } + size_t allTensorsSize = m_allTensors.size(); for (uint32_t i = 0; i < indices.size; i++) { - if (indices.data[i] >= m_allTensors.size()) { + if (indices.data[i] >= allTensorsSize) { LOGE("ValidateTensors failed, index %u is out of the number of added tensors.", indices.data[i]); return OH_NN_INVALID_PARAMETER; } diff --git a/frameworks/native/nn_tensor.cpp b/frameworks/native/nn_tensor.cpp index df334b2705f3f07cec414cf517c6b0172736b90e..3f9abd41a305a93f8f52f53d2d087c00aa9fd3ab 100644 --- a/frameworks/native/nn_tensor.cpp +++ b/frameworks/native/nn_tensor.cpp @@ -390,7 +390,8 @@ bool NNTensor::CompareAttribute(const NNTensor& tensor) const return false; } - for (size_t i = 0; i < dimensions.size(); i++) { + size_t dimensionsSize = dimensions.size(); + for (size_t i = 0; i < dimensionsSize; i++) { if ((m_dimensions[i] != -1) && (m_dimensions[i] != dimensions[i])) { LOGI("Tensors have different dimension: dimension index: %zu, dimension value: %d and %d.", i, m_dimensions[i], dimensions[i]); diff --git a/frameworks/native/ops/cast_builder.cpp b/frameworks/native/ops/cast_builder.cpp index 81dc1eb2b5f386bbf04fa022649b8dba146b4438..6336926209671c077fe40afd083ba5cfaab5f097 100644 --- a/frameworks/native/ops/cast_builder.cpp +++ b/frameworks/native/ops/cast_builder.cpp @@ -57,7 +57,6 @@ OH_NN_ReturnCode CastBuilder::Build(const std::vector& paramsIndex, LOGE("[Cast] Type of cast operator is not validation."); return OH_NN_INVALID_PARAMETER; } - *castTypeInt = (OH_NN_DataType)NNToHDI::TransDataType(*castTypeInt); if (!paramsIndex.empty()) { LOGE("[Cast] Cast expects no parameters"); diff --git a/frameworks/native/ops_builder.cpp b/frameworks/native/ops_builder.cpp index d815fc99c8492e4cfd8a7332481da5c7ebfd805b..f0fce4cbd18a49f9a9dc694666450c1c6b9c5876 100644 --- a/frameworks/native/ops_builder.cpp +++ b/frameworks/native/ops_builder.cpp @@ -70,15 +70,16 @@ OH_NN_ReturnCode 
diff --git a/frameworks/native/ops_builder.cpp b/frameworks/native/ops_builder.cpp
index d815fc99c8492e4cfd8a7332481da5c7ebfd805b..f0fce4cbd18a49f9a9dc694666450c1c6b9c5876 100644
--- a/frameworks/native/ops_builder.cpp
+++ b/frameworks/native/ops_builder.cpp
@@ -70,15 +70,16 @@ OH_NN_ReturnCode OpsBuilder::CheckIOIndex(const std::vector<uint32_t>& inputsInd
         return OH_NN_INVALID_PARAMETER;
     }
 
+    size_t allTensorsSize = allTensors.size();
     for (auto index : inputsIndex) {
-        if (index >= allTensors.size()) {
+        if (index >= allTensorsSize) {
             LOGE("The index of inputs is out of range.");
             return OH_NN_INVALID_PARAMETER;
         }
     }
 
     for (auto index : outputsIndex) {
-        if (index >= allTensors.size()) {
+        if (index >= allTensorsSize) {
             LOGE("The index of outputs is out of range.");
             return OH_NN_INVALID_PARAMETER;
         }
diff --git a/frameworks/native/prepared_model.h b/frameworks/native/prepared_model.h
index 65741311a999d456487091e11fc54e2f2c4641b1..06ed645fa7b673d9744e2a755ea2f57030a7049a 100644
--- a/frameworks/native/prepared_model.h
+++ b/frameworks/native/prepared_model.h
@@ -34,6 +34,12 @@ public:
                          const std::vector<IOTensor>& outputs,
                          std::vector<std::vector<int32_t>>& outputsDims,
                          std::vector<bool>& isOutputBufferEnough) = 0;
+
+    virtual OH_NN_ReturnCode GetInputDimRanges(std::vector<std::vector<uint32_t>>& minInputDims,
+                                               std::vector<std::vector<uint32_t>>& maxInputDims)
+    {
+        return OH_NN_OPERATION_FORBIDDEN;
+    }
 };
 } // OHOS
 } // namespace NeuralNetworkRuntime
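prepared_model.h gives GetInputDimRanges a default body instead of making it pure virtual, so existing V1.0 prepared models compile unchanged while callers can still detect the missing capability. A minimal caller-side sketch of that check; the helper name QueryDimRanges is illustrative, not part of this patch.

#include <cstdint>
#include <vector>

#include "prepared_model.h"

namespace OHOS {
namespace NeuralNetworkRuntime {
OH_NN_ReturnCode QueryDimRanges(PreparedModel& model)
{
    std::vector<std::vector<uint32_t>> minInputDims;
    std::vector<std::vector<uint32_t>> maxInputDims;

    OH_NN_ReturnCode ret = model.GetInputDimRanges(minInputDims, maxInputDims);
    if (ret == OH_NN_OPERATION_FORBIDDEN) {
        // The backend kept the base-class default (e.g. a V1.0 prepared model):
        // skip range-based input validation rather than treating this as an error.
        return OH_NN_SUCCESS;
    }
    return ret;
}
} // namespace NeuralNetworkRuntime
} // namespace OHOS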
diff --git a/frameworks/native/transform.cpp b/frameworks/native/transform.cpp
index ea0d3391a84757016eb600c0e782642a768cd627..d3705d5381f93b45c79d0def51b688b8608adf5f 100644
--- a/frameworks/native/transform.cpp
+++ b/frameworks/native/transform.cpp
@@ -25,134 +25,6 @@ const uint32_t BIT16_TO_BYTE = 2;
 const uint32_t BIT32_TO_BYTE = 4;
 const uint32_t BIT64_TO_BYTE = 8;
 
-OH_NN_DeviceType HDIToNN::TransHDIDeviceType(const V1_0::DeviceType& iDeviceType)
-{
-    switch (iDeviceType) {
-        case V1_0::DeviceType::CPU:
-            return OH_NN_CPU;
-        case V1_0::DeviceType::GPU:
-            return OH_NN_GPU;
-        case V1_0::DeviceType::ACCELERATOR:
-            return OH_NN_ACCELERATOR;
-        default:
-            return OH_NN_OTHERS;
-    }
-}
-
-DeviceStatus HDIToNN::TransHDIDeviceStatus(const V1_0::DeviceStatus& iDeviceStatus)
-{
-    switch (iDeviceStatus) {
-        case V1_0::DeviceStatus::AVAILABLE:
-            return DeviceStatus::AVAILABLE;
-        case V1_0::DeviceStatus::BUSY:
-            return DeviceStatus::BUSY;
-        case V1_0::DeviceStatus::OFFLINE:
-            return DeviceStatus::OFFLINE;
-        default:
-            return DeviceStatus::UNKNOWN;
-    }
-}
-
-V1_0::PerformanceMode NNToHDI::TransPerformanceMode(const OH_NN_PerformanceMode& mode)
-{
-    switch (mode) {
-        case OH_NN_PERFORMANCE_LOW:
-            return V1_0::PerformanceMode::PERFORMANCE_LOW;
-        case OH_NN_PERFORMANCE_MEDIUM:
-            return V1_0::PerformanceMode::PERFORMANCE_MEDIUM;
-        case OH_NN_PERFORMANCE_HIGH:
-            return V1_0::PerformanceMode::PERFORMANCE_HIGH;
-        case OH_NN_PERFORMANCE_EXTREME:
-            return V1_0::PerformanceMode::PERFORMANCE_EXTREME;
-        default:
-            return V1_0::PerformanceMode::PERFORMANCE_NONE;
-    }
-}
-V1_0::Priority NNToHDI::TransPriority(const OH_NN_Priority& priority)
-{
-    switch (priority) {
-        case OH_NN_PRIORITY_LOW:
-            return V1_0::Priority::PRIORITY_LOW;
-        case OH_NN_PRIORITY_MEDIUM:
-            return V1_0::Priority::PRIORITY_MEDIUM;
-        case OH_NN_PRIORITY_HIGH:
-            return V1_0::Priority::PRIORITY_HIGH;
-        default:
-            return V1_0::Priority::PRIORITY_NONE;
-    }
-}
-
-V1_0::DataType NNToHDI::TransDataType(const OH_NN_DataType& dataType)
-{
-    switch (dataType) {
-        case OH_NN_BOOL:
-            return V1_0::DataType::DATA_TYPE_BOOL;
-        case OH_NN_INT8:
-            return V1_0::DataType::DATA_TYPE_INT8;
-        case OH_NN_INT16:
-            return V1_0::DataType::DATA_TYPE_INT16;
-        case OH_NN_INT32:
-            return V1_0::DataType::DATA_TYPE_INT32;
-        case OH_NN_INT64:
-            return V1_0::DataType::DATA_TYPE_INT64;
-        case OH_NN_UINT8:
-            return V1_0::DataType::DATA_TYPE_UINT8;
-        case OH_NN_UINT16:
-            return V1_0::DataType::DATA_TYPE_UINT16;
-        case OH_NN_UINT32:
-            return V1_0::DataType::DATA_TYPE_UINT32;
-        case OH_NN_UINT64:
-            return V1_0::DataType::DATA_TYPE_UINT64;
-        case OH_NN_FLOAT16:
-            return V1_0::DataType::DATA_TYPE_FLOAT16;
-        case OH_NN_FLOAT32:
-            return V1_0::DataType::DATA_TYPE_FLOAT32;
-        case OH_NN_FLOAT64:
-            return V1_0::DataType::DATA_TYPE_FLOAT64;
-        default:
-            return V1_0::DataType::DATA_TYPE_UNKNOWN;
-    }
-}
-
-V1_0::Format NNToHDI::TransFormat(const OH_NN_Format& format)
-{
-    switch (format) {
-        case OH_NN_FORMAT_NCHW:
-            return V1_0::Format::FORMAT_NCHW;
-        case OH_NN_FORMAT_NHWC:
-            return V1_0::Format::FORMAT_NHWC;
-        default:
-            return V1_0::Format::FORMAT_NONE;
-    }
-}
-
-V1_0::IOTensor NNToHDI::TransIOTensor(const IOTensor& tensor)
-{
-    V1_0::IOTensor iTensor;
-    iTensor.name = tensor.name;
-    iTensor.dataType = TransDataType(tensor.dataType);
-    iTensor.dimensions = tensor.dimensions;
-    iTensor.format = TransFormat(tensor.format);
-
-    V1_0::SharedBuffer iBuffer {INVALID_FD, 0, 0, 0};
-    if (tensor.data != nullptr) {
-        auto memManager = MemoryManager::GetInstance();
-        Memory memory;
-        auto ret = memManager->GetMemory(tensor.data, memory);
-        if (ret != OH_NN_SUCCESS) {
-            LOGE("Invalid Tensor buffer, cannot transform to fd.");
-        } else {
-            iBuffer.fd = memory.fd;
-            iBuffer.bufferSize = memory.length;
-            iBuffer.offset = 0;
-            iBuffer.dataSize = memory.length;
-        }
-    }
-    iTensor.data = iBuffer;
-
-    return iTensor;
-}
-
 uint32_t GetTypeSize(OH_NN_DataType type)
 {
     switch (type) {
diff --git a/frameworks/native/transform.h b/frameworks/native/transform.h
index 2472ad3f8d1ca8e77912ac3e4f521d7ce7825c1b..24d54e8d351dc7cf9a05e70eceea8cc365412daa 100644
--- a/frameworks/native/transform.h
+++ b/frameworks/native/transform.h
@@ -16,7 +16,6 @@
 #ifndef NEURAL_NETWORK_RUNTIME_TRANSFORM_H
 #define NEURAL_NETWORK_RUNTIME_TRANSFORM_H
 
-#include "hdi_interfaces.h"
 #include "interfaces/kits/c/neural_network_runtime_type.h"
 #include "cpp_type.h"
 #include "mindir.h"
@@ -38,19 +37,6 @@ std::vector<T> ConstructVectorFromArray(const T* data, size_t size)
 
 uint32_t GetTypeSize(OH_NN_DataType type);
 
-namespace HDIToNN {
-OH_NN_DeviceType TransHDIDeviceType(const V1_0::DeviceType& iDeviceType);
-DeviceStatus TransHDIDeviceStatus(const V1_0::DeviceStatus& iDeviceStatus);
-} // namespace HDIToNN
-
-namespace NNToHDI {
-V1_0::PerformanceMode TransPerformanceMode(const OH_NN_PerformanceMode& mode);
-V1_0::Priority TransPriority(const OH_NN_Priority& priority);
-V1_0::DataType TransDataType(const OH_NN_DataType& dataType);
-V1_0::Format TransFormat(const OH_NN_Format& format);
-V1_0::IOTensor TransIOTensor(const IOTensor& tensor);
-} // namespace NNToHDI
-
 namespace NNToMS {
 mindspore::lite::DataType TransformDataType(OH_NN_DataType type);
 mindspore::lite::Format TransformFormat(OH_NN_Format type);
diff --git a/test/unittest/BUILD.gn b/test/unittest/BUILD.gn
index cca705240cd612b69029f7b3bd64ab7386f0482f..eed3e38723fadf25cc0a701bafd17c7e40703bd5 100644
--- a/test/unittest/BUILD.gn
+++ b/test/unittest/BUILD.gn
@@ -16,8 +16,8 @@ import("//build/ohos.gni")
 group("unittest") {
   testonly = true
   deps = [
-    "inner_kits:inner_kits_unittest",
     "components:components_unittest",
+    "inner_kits:inner_kits_unittest",
     "ops:ops_unittest",
   ]
-}
\ No newline at end of file
+}
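With HDIToNN and NNToHDI removed from transform.cpp and transform.h, the translation between OH_NN_* types and HDI enums is no longer shared across interface versions; it moves into the version-specific device and prepared-model sources, which this excerpt does not show. A sketch of what the V2.0 analogue of the deleted TransFormat would look like, assuming V2_0 keeps the FORMAT_* enumerator names of V1_0:

namespace OHOS {
namespace NeuralNetworkRuntime {
// Assumption: V2_0::Format mirrors V1_0::Format; the real conversion lives
// with the V2.0 wrapper sources omitted from this diff.
V2_0::Format TransFormat(const OH_NN_Format& format)
{
    switch (format) {
        case OH_NN_FORMAT_NCHW:
            return V2_0::Format::FORMAT_NCHW;
        case OH_NN_FORMAT_NHWC:
            return V2_0::Format::FORMAT_NHWC;
        default:
            return V2_0::Format::FORMAT_NONE;
    }
}
} // namespace NeuralNetworkRuntime
} // namespace OHOS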