diff --git a/BUILD.gn b/BUILD.gn index d5186b941cd5af83de40e2c710da1b4023fb1cc5..fbdb7fbf4468a168a7ddc29daa2f822dc9d1f903 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -9,7 +9,7 @@ # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and -# limitations under the License. +# limitations under the License. import("//build/ohos.gni") @@ -17,10 +17,11 @@ group("nnrt_target") { deps = [ "frameworks:libneural_network_runtime" ] } -# group("nnrt_test_target") { -# testonly = true -# deps = [ "test/unittest:unittest" ] -# } +group("nnrt_test_target") { + testonly = true + deps = [ "test/unittest:unittest" ] +} + group("nnrt_fuzztest") { testonly = true deps = [ "test/fuzztest:fuzztest" ] diff --git a/test/unittest/common/compilation_mock_idevice.cpp b/test/unittest/common/v1_0/compilation_mock_idevice.cpp similarity index 75% rename from test/unittest/common/compilation_mock_idevice.cpp rename to test/unittest/common/v1_0/compilation_mock_idevice.cpp index 52f647d0af64ec0f253ad5121d18158b5bb111ce..4e2d34aacd26206457cb3aeaf0ac37560c755f47 100644 --- a/test/unittest/common/compilation_mock_idevice.cpp +++ b/test/unittest/common/v1_0/compilation_mock_idevice.cpp @@ -15,9 +15,9 @@ #include "common/utils.h" #include "frameworks/native/device_manager.h" -#include "frameworks/native/hdi_device.h" +#include "frameworks/native/hdi_device_v1_0.h" #include "frameworks/native/nn_tensor.h" -#include "test/unittest/common/mock_idevice.h" +#include "test/unittest/common/v1_0/mock_idevice.h" OH_NN_ReturnCode OHOS::HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; @@ -32,7 +32,7 @@ std::shared_ptr DeviceManager::GetDevice(size_t deviceId) const return nullptr; } - std::shared_ptr device = CreateSharedPtr(idevice); + std::shared_ptr device = CreateSharedPtr(idevice); if (device == nullptr) { LOGE("DeviceManager 
mock GetDevice failed, the device is nullptr"); return nullptr; @@ -46,7 +46,7 @@ std::shared_ptr DeviceManager::GetDevice(size_t deviceId) const } } -OH_NN_ReturnCode HDIDevice::IsModelCacheSupported(bool& isSupported) +OH_NN_ReturnCode HDIDeviceV1_0::IsModelCacheSupported(bool& isSupported) { // isSupported is false when expecting to return success if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_SUCCESS) { @@ -66,8 +66,8 @@ OH_NN_ReturnCode HDIDevice::IsModelCacheSupported(bool& isSupported) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::GetSupportedOperation(std::shared_ptr model, - std::vector& ops) +OH_NN_ReturnCode HDIDeviceV1_0::GetSupportedOperation(std::shared_ptr model, + std::vector& ops) { if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_INVALID_FILE) { HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; @@ -76,7 +76,7 @@ OH_NN_ReturnCode HDIDevice::GetSupportedOperation(std::shared_ptr model, - const ModelConfig& config, - std::shared_ptr& preparedModel) +OH_NN_ReturnCode HDIDeviceV1_0::PrepareModel(std::shared_ptr model, + const ModelConfig& config, std::shared_ptr& preparedModel) { if (model == nullptr) { - LOGE("HDIDevice mock PrepareModel failed, the model is nullptr"); + LOGE("HDIDeviceV1_0 mock PrepareModel failed, the model is nullptr"); return OH_NN_INVALID_PARAMETER; } if (config.enableFloat16 == false) { - LOGE("HDIDevice mock PrepareModel failed, the enableFloat16 is false"); + LOGE("HDIDeviceV1_0 mock PrepareModel failed, the enableFloat16 is false"); return OH_NN_FAILED; } sptr hdiPreparedModel = sptr(new (std::nothrow) OHOS::HDI::Nnrt::V1_0::MockIPreparedModel()); if (hdiPreparedModel == nullptr) { - LOGE("HDIDevice mock PrepareModel failed, error happened when new sptr"); + LOGE("HDIDeviceV1_0 mock PrepareModel failed, error happened when new sptr"); return OH_NN_NULL_PTR; } - preparedModel = CreateSharedPtr(hdiPreparedModel); + preparedModel = 
CreateSharedPtr(hdiPreparedModel); return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIPreparedModel::ExportModelCache(std::vector& modelCache) +OH_NN_ReturnCode HDIPreparedModelV1_0::ExportModelCache(std::vector& modelCache) { if (!modelCache.empty()) { - LOGE("HDIPreparedModel mock ExportModelCache failed, the modelCache is not empty"); + LOGE("HDIPreparedModelV1_0 mock ExportModelCache failed, the modelCache is not empty"); return OH_NN_INVALID_PARAMETER; } @@ -215,10 +214,10 @@ OH_NN_ReturnCode HDIPreparedModel::ExportModelCache(std::vector& mo return OH_NN_SUCCESS; } -void* HDIDevice::AllocateBuffer(size_t length) +void* HDIDeviceV1_0::AllocateBuffer(size_t length) { if (length == 0) { - LOGE("HDIDevice mock AllocateBuffer failed, the length param is invalid"); + LOGE("HDIDeviceV1_0 mock AllocateBuffer failed, the length param is invalid"); return nullptr; } @@ -229,16 +228,16 @@ void* HDIDevice::AllocateBuffer(size_t length) void* buffer = (void*)malloc(length); if (buffer == nullptr) { - LOGE("HDIDevice mock AllocateBuffer failed, the buffer is nullptr"); + LOGE("HDIDeviceV1_0 mock AllocateBuffer failed, the buffer is nullptr"); return nullptr; } return buffer; } -OH_NN_ReturnCode HDIDevice::ReleaseBuffer(const void* buffer) +OH_NN_ReturnCode HDIDeviceV1_0::ReleaseBuffer(const void* buffer) { if (buffer == nullptr) { - LOGE("HDIDevice mock ReleaseBuffer failed, the buffer is nullptr"); + LOGE("HDIDeviceV1_0 mock ReleaseBuffer failed, the buffer is nullptr"); return OH_NN_NULL_PTR; } @@ -247,9 +246,8 @@ OH_NN_ReturnCode HDIDevice::ReleaseBuffer(const void* buffer) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::PrepareModelFromModelCache(const std::vector& modelCache, - const ModelConfig& config, - std::shared_ptr& preparedModel) +OH_NN_ReturnCode HDIDeviceV1_0::PrepareModelFromModelCache(const std::vector& modelCache, + const ModelConfig& config, std::shared_ptr& preparedModel) { if (HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) { 
HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; @@ -257,18 +255,18 @@ OH_NN_ReturnCode HDIDevice::PrepareModelFromModelCache(const std::vector hdiPreparedModel = sptr(new (std::nothrow) OHOS::HDI::Nnrt::V1_0::MockIPreparedModel()); if (hdiPreparedModel == nullptr) { - LOGE("HDIDevice mock PrepareModelFromModelCache failed, error happened when new sptr"); + LOGE("HDIDeviceV1_0 mock PrepareModelFromModelCache failed, error happened when new sptr"); return OH_NN_NULL_PTR; } - preparedModel = CreateSharedPtr(hdiPreparedModel); + preparedModel = CreateSharedPtr(hdiPreparedModel); return OH_NN_SUCCESS; } diff --git a/test/unittest/common/executor_mock_device.cpp b/test/unittest/common/v1_0/executor_mock_device.cpp similarity index 84% rename from test/unittest/common/executor_mock_device.cpp rename to test/unittest/common/v1_0/executor_mock_device.cpp index 47934e0e3c2ccee30cc6e0643e558adef38e9bce..51b795086f5b319b812ef16336e1bf6ab96909fd 100644 --- a/test/unittest/common/executor_mock_device.cpp +++ b/test/unittest/common/v1_0/executor_mock_device.cpp @@ -15,8 +15,8 @@ #include "frameworks/native/compilation.h" #include "frameworks/native/execution_plan.h" -#include "frameworks/native/hdi_device.h" -#include "test/unittest/common/mock_idevice.h" +#include "frameworks/native/hdi_device_v1_0.h" +#include "test/unittest/common/v1_0/mock_idevice.h" OH_NN_ReturnCode OHOS::HDI::Nnrt::V1_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; @@ -26,7 +26,7 @@ std::shared_ptr ExecutionPlan::GetInputDevice() const { sptr idevice = sptr(new (std::nothrow) OHOS::HDI::Nnrt::V1_0::MockIDevice()); - std::shared_ptr device = std::make_shared(idevice); + std::shared_ptr device = std::make_shared(idevice); return device; } @@ -34,11 +34,11 @@ std::shared_ptr ExecutionPlan::GetOutputDevice() const { sptr idevice = sptr(new (std::nothrow) OHOS::HDI::Nnrt::V1_0::MockIDevice()); - std::shared_ptr device = std::make_shared(idevice); + 
std::shared_ptr device = std::make_shared(idevice); return device; } -void* HDIDevice::AllocateBuffer(size_t length) +void* HDIDeviceV1_0::AllocateBuffer(size_t length) { if (length == 0) { LOGE("The length param is invalid, length=0"); @@ -58,7 +58,7 @@ void* HDIDevice::AllocateBuffer(size_t length) return buffer; } -OH_NN_ReturnCode HDIDevice::ReleaseBuffer(const void* buffer) +OH_NN_ReturnCode HDIDeviceV1_0::ReleaseBuffer(const void* buffer) { if (buffer == nullptr) { LOGE("alloct buffer failed"); @@ -69,7 +69,7 @@ OH_NN_ReturnCode HDIDevice::ReleaseBuffer(const void* buffer) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIPreparedModel::Run(const std::vector& inputs, const std::vector& outputs, +OH_NN_ReturnCode HDIPreparedModelV1_0::Run(const std::vector& inputs, const std::vector& outputs, std::vector>& outputsDims, std::vector& isOutputBufferEnough) { if (inputs.empty() || outputs.empty()) { @@ -92,10 +92,10 @@ std::shared_ptr Compilation::GetExecutionPlan() const sptr hdiPreparedModel = OHOS::sptr(new (std::nothrow) OHOS::HDI::Nnrt::V1_0::HDI::Nnrt::V1_0::MockIPreparedModel()); - std::shared_ptr preparedModel = std::make_shared(hdiPreparedModel); + std::shared_ptr preparedModel = std::make_shared(hdiPreparedModel); sptr idevice = OHOS::sptr(new (std::nothrow) OHOS::HDI::Nnrt::V1_0::MockIDevice()); - std::shared_ptr device = std::make_shared(idevice); + std::shared_ptr device = std::make_shared(idevice); ExecutionPlan executor(preparedModel, device); std::shared_ptr pExcutor = std::make_shared(executor); return pExcutor; diff --git a/test/unittest/common/inner_model_mock_device.cpp b/test/unittest/common/v1_0/inner_model_mock_device.cpp similarity index 92% rename from test/unittest/common/inner_model_mock_device.cpp rename to test/unittest/common/v1_0/inner_model_mock_device.cpp index 386ee5ba60e5ad17f44d8168ff3badb88c051d22..d9be0bd16419f8618e69444a7abab1b59e1f71ba 100644 --- a/test/unittest/common/inner_model_mock_device.cpp +++ 
b/test/unittest/common/v1_0/inner_model_mock_device.cpp @@ -18,7 +18,7 @@ #include "common/utils.h" #include "frameworks/native/inner_model.h" -#include "frameworks/native/hdi_device.h" +#include "frameworks/native/hdi_device_v1_0.h" #include "frameworks/native/device_manager.h" #include "frameworks/native/ops/div_builder.h" #include "mock_idevice.h" @@ -35,7 +35,7 @@ std::shared_ptr DeviceManager::GetDevice(size_t deviceId) const LOGE("DeviceManager mock GetDevice failed, error happened when new sptr"); return nullptr; } else { - std::shared_ptr device = CreateSharedPtr(idevice); + std::shared_ptr device = CreateSharedPtr(idevice); if (device == nullptr) { LOGE("DeviceManager mock GetDevice failed, device is nullptr"); return nullptr; @@ -57,7 +57,7 @@ Ops::LiteGraphPrimitvePtr Ops::DivBuilder::GetPrimitive() } // Mock the palce where the device GetSupportedOperation is called in inner_model build function. -OH_NN_ReturnCode HDIDevice::GetSupportedOperation(std::shared_ptr model, +OH_NN_ReturnCode HDIDeviceV1_0::GetSupportedOperation(std::shared_ptr model, std::vector& supportedOperations) { supportedOperations = {true, true, true}; diff --git a/test/unittest/common/mock_idevice.cpp b/test/unittest/common/v1_0/mock_idevice.cpp similarity index 100% rename from test/unittest/common/mock_idevice.cpp rename to test/unittest/common/v1_0/mock_idevice.cpp diff --git a/test/unittest/common/mock_idevice.h b/test/unittest/common/v1_0/mock_idevice.h similarity index 97% rename from test/unittest/common/mock_idevice.h rename to test/unittest/common/v1_0/mock_idevice.h index 64e8231c331b3bdfac76a9e1df8a391036518056..2d871d6bdc81774f5f01804c9044395e0f639b4e 100644 --- a/test/unittest/common/mock_idevice.h +++ b/test/unittest/common/v1_0/mock_idevice.h @@ -18,7 +18,7 @@ #include -#include "frameworks/native/hdi_prepared_model.h" +#include "frameworks/native/hdi_prepared_model_v1_0.h" #include "frameworks/native/memory_manager.h" #include "frameworks/native/transform.h" diff 
--git a/test/unittest/common/v2_0/compilation_mock_idevice.cpp b/test/unittest/common/v2_0/compilation_mock_idevice.cpp new file mode 100644 index 0000000000000000000000000000000000000000..eccc730fff76610daf2a373fadbf4c779f0575eb --- /dev/null +++ b/test/unittest/common/v2_0/compilation_mock_idevice.cpp @@ -0,0 +1,284 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "common/utils.h" +#include "frameworks/native/device_manager.h" +#include "frameworks/native/hdi_device_v2_0.h" +#include "frameworks/native/nn_tensor.h" +#include "test/unittest/common/v2_0/mock_idevice.h" + +OH_NN_ReturnCode OHOS::HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + +namespace OHOS { +namespace NeuralNetworkRuntime { +std::shared_ptr DeviceManager::GetDevice(size_t deviceId) const +{ + sptr idevice + = sptr(new (std::nothrow) OHOS::HDI::Nnrt::V2_0::MockIDevice()); + if (idevice == nullptr) { + LOGE("DeviceManager mock GetDevice failed, error happened when new sptr"); + return nullptr; + } + + std::shared_ptr device = CreateSharedPtr(idevice); + if (device == nullptr) { + LOGE("DeviceManager mock GetDevice failed, the device is nullptr"); + return nullptr; + } + + if (deviceId == 0) { + LOGE("DeviceManager mock GetDevice failed, the passed parameter deviceId is 0"); + return nullptr; + } else { + return device; + } +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsModelCacheSupported(bool& 
isSupported) +{ + // isSupported is false when expecting to return success + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_SUCCESS) { + // In order not to affect other use cases, set to the OH_NN_OPERATION_FORBIDDEN + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_SUCCESS; + } + + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) { + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_FAILED; + } + + isSupported = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::GetSupportedOperation(std::shared_ptr model, + std::vector& ops) +{ + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_INVALID_FILE) { + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + ops.emplace_back(true); + return OH_NN_SUCCESS; + } + + if (model == nullptr) { + LOGE("HDIDeviceV2_0 mock GetSupportedOperation failed, Model is nullptr, cannot query supported operation."); + return OH_NN_NULL_PTR; + } + + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_SUCCESS) { + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + ops.emplace_back(false); + return OH_NN_SUCCESS; + } + + ops.emplace_back(true); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsDynamicInputSupported(bool& isSupported) +{ + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) { + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_FAILED; + } + + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_INVALID_PATH) { + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_SUCCESS; + } + + isSupported = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode 
HDIDeviceV2_0::IsPerformanceModeSupported(bool& isSupported) +{ + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) { + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_FAILED; + } + + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_SUCCESS) { + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_SUCCESS; + } + + isSupported = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsPrioritySupported(bool& isSupported) +{ + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_INVALID_PARAMETER) { + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_INVALID_PARAMETER; + } + + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_SUCCESS) { + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_SUCCESS; + } + + isSupported = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsFloat16PrecisionSupported(bool& isSupported) +{ + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_SUCCESS) { + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_SUCCESS; + } + + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_MEMORY_ERROR) { + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + isSupported = false; + return OH_NN_MEMORY_ERROR; + } + + isSupported = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::PrepareModel(std::shared_ptr model, + const ModelConfig& config, std::shared_ptr& preparedModel) +{ + if (model == nullptr) { + LOGE("HDIDeviceV2_0 mock PrepareModel failed, the model is nullptr"); + return OH_NN_INVALID_PARAMETER; + } + + if (config.enableFloat16 == 
false) { + LOGE("HDIDeviceV2_0 mock PrepareModel failed, the enableFloat16 is false"); + return OH_NN_FAILED; + } + + sptr hdiPreparedModel = sptr(new (std::nothrow) OHOS::HDI::Nnrt::V2_0::MockIPreparedModel()); + if (hdiPreparedModel == nullptr) { + LOGE("HDIDeviceV2_0 mock PrepareModel failed, error happened when new sptr"); + return OH_NN_NULL_PTR; + } + + preparedModel = CreateSharedPtr(hdiPreparedModel); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIPreparedModelV2_0::ExportModelCache(std::vector& modelCache) +{ + if (!modelCache.empty()) { + LOGE("HDIPreparedModelV2_0 mock ExportModelCache failed, the modelCache is not empty"); + return OH_NN_INVALID_PARAMETER; + } + + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) { + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + return OH_NN_FAILED; + } + + int bufferSize = 13; + ModelBuffer modelBuffer; + std::string aBuffer = "mock_buffer_a"; + modelBuffer.buffer = (void*)aBuffer.c_str(); + modelBuffer.length = bufferSize; + modelCache.emplace_back(modelBuffer); + + ModelBuffer modelBuffer2; + std::string bBuffer = "mock_buffer_b"; + modelBuffer2.buffer = (void*)bBuffer.c_str(); + modelBuffer2.length = bufferSize; + modelCache.emplace_back(modelBuffer2); + + return OH_NN_SUCCESS; +} + +void* HDIDeviceV2_0::AllocateBuffer(size_t length) +{ + if (length == 0) { + LOGE("HDIDeviceV2_0 mock AllocateBuffer failed, the length param is invalid"); + return nullptr; + } + + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_NULL_PTR) { + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + return nullptr; + } + + void* buffer = (void*)malloc(length); + if (buffer == nullptr) { + LOGE("HDIDeviceV2_0 mock AllocateBuffer failed, the buffer is nullptr"); + return nullptr; + } + return buffer; +} + +OH_NN_ReturnCode HDIDeviceV2_0::ReleaseBuffer(const void* buffer) +{ + if (buffer == nullptr) { + LOGE("HDIDeviceV2_0 mock 
ReleaseBuffer failed, the buffer is nullptr"); + return OH_NN_NULL_PTR; + } + + free(const_cast(buffer)); + buffer = nullptr; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::PrepareModelFromModelCache(const std::vector& modelCache, + const ModelConfig& config, std::shared_ptr& preparedModel) +{ + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) { + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + return OH_NN_FAILED; + } + + if (modelCache.size() == 0 || config.enableFloat16 == false) { + LOGE("HDIDeviceV2_0 mock PrepareModel failed, the modelCache size equals 0 or enableFloat16 is false"); + return OH_NN_FAILED; + } + + sptr hdiPreparedModel = sptr(new (std::nothrow) OHOS::HDI::Nnrt::V2_0::MockIPreparedModel()); + if (hdiPreparedModel == nullptr) { + LOGE("HDIDeviceV2_0 mock PrepareModelFromModelCache failed, error happened when new sptr"); + return OH_NN_NULL_PTR; + } + + preparedModel = CreateSharedPtr(hdiPreparedModel); + + return OH_NN_SUCCESS; +} + +bool NNTensor::IsDynamicShape() const +{ + if (HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) { + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + return false; + } + + return true; +} +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/common/v2_0/executor_mock_device.cpp b/test/unittest/common/v2_0/executor_mock_device.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b5b2049c4273bc8374b928b9eb53a681f5d4abda --- /dev/null +++ b/test/unittest/common/v2_0/executor_mock_device.cpp @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/compilation.h" +#include "frameworks/native/execution_plan.h" +#include "frameworks/native/hdi_device_v2_0.h" +#include "test/unittest/common/v2_0/mock_idevice.h" + +OH_NN_ReturnCode OHOS::HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + +namespace OHOS { +namespace NeuralNetworkRuntime { +std::shared_ptr ExecutionPlan::GetInputDevice() const +{ + sptr idevice + = sptr(new (std::nothrow) OHOS::HDI::Nnrt::V2_0::MockIDevice()); + std::shared_ptr device = std::make_shared(idevice); + return device; +} + +std::shared_ptr ExecutionPlan::GetOutputDevice() const +{ + sptr idevice + = sptr(new (std::nothrow) OHOS::HDI::Nnrt::V2_0::MockIDevice()); + std::shared_ptr device = std::make_shared(idevice); + return device; +} + +void* HDIDeviceV2_0::AllocateBuffer(size_t length) +{ + if (length == 0) { + LOGE("The length param is invalid, length=0"); + return nullptr; + } + + void* buffer = (void*)malloc(length); + if (buffer == nullptr) { + LOGE("alloct buffer failed"); + return nullptr; + } + + if (OHOS::HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_INVALID_PARAMETER) { + OHOS::HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + return nullptr; + } + return buffer; +} + +OH_NN_ReturnCode HDIDeviceV2_0::ReleaseBuffer(const void* buffer) +{ + if (buffer == nullptr) { + LOGE("alloct buffer failed"); + return OH_NN_FAILED; + } + free(const_cast(buffer)); + buffer = nullptr; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode 
HDIPreparedModelV2_0::Run(const std::vector& inputs, const std::vector& outputs, + std::vector>& outputsDims, std::vector& isOutputBufferEnough) +{ + if (inputs.empty() || outputs.empty()) { + return OH_NN_INVALID_PARAMETER; + } + + if (OHOS::HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) { + OHOS::HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + return OH_NN_INVALID_PARAMETER; + } + + isOutputBufferEnough.emplace_back(true); + outputsDims.emplace_back(outputs[0].dimensions); + + return OH_NN_SUCCESS; +} + +std::shared_ptr Compilation::GetExecutionPlan() const +{ + sptr hdiPreparedModel = OHOS::sptr(new (std::nothrow) OHOS::HDI::Nnrt::V2_0::HDI::Nnrt::V2_0::MockIPreparedModel()); + + std::shared_ptr preparedModel = std::make_shared(hdiPreparedModel); + sptr idevice + = OHOS::sptr(new (std::nothrow) OHOS::HDI::Nnrt::V2_0::MockIDevice()); + std::shared_ptr device = std::make_shared(idevice); + ExecutionPlan executor(preparedModel, device); + std::shared_ptr pExcutor = std::make_shared(executor); + return pExcutor; +} +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/common/v2_0/inner_model_mock_device.cpp b/test/unittest/common/v2_0/inner_model_mock_device.cpp new file mode 100644 index 0000000000000000000000000000000000000000..cffa3ccf82b94e36005218720209bd7eef3f9928 --- /dev/null +++ b/test/unittest/common/v2_0/inner_model_mock_device.cpp @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +#include "common/utils.h" +#include "frameworks/native/inner_model.h" +#include "frameworks/native/hdi_device_v2_0.h" +#include "frameworks/native/device_manager.h" +#include "frameworks/native/ops/div_builder.h" +#include "mock_idevice.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +// Mock the palce where the devicemanager GetDevice is called in inner_model build function. +std::shared_ptr DeviceManager::GetDevice(size_t deviceId) const +{ + sptr idevice = + sptr(new (std::nothrow) OHOS::HDI::Nnrt::V2_0::MockIDevice()); + + if (idevice == nullptr) { + LOGE("DeviceManager mock GetDevice failed, error happened when new sptr"); + return nullptr; + } else { + std::shared_ptr device = CreateSharedPtr(idevice); + if (device == nullptr) { + LOGE("DeviceManager mock GetDevice failed, device is nullptr"); + return nullptr; + } + + if (deviceId == 0) { + return nullptr; + } else { + return device; + } + } +} + +// Mock the palce where the operator GetPrimitive is called in inner_model build function. +Ops::LiteGraphPrimitvePtr Ops::DivBuilder::GetPrimitive() +{ + Ops::LiteGraphPrimitvePtr primitive = {nullptr, DestroyLiteGraphTensor}; + return primitive; +} + +// Mock the palce where the device GetSupportedOperation is called in inner_model build function. 
+OH_NN_ReturnCode HDIDeviceV2_0::GetSupportedOperation(std::shared_ptr model, + std::vector& supportedOperations) +{ + supportedOperations = {true, true, true}; + + if (model->name_ == "Loaded_NNR_Model") { + return OH_NN_UNAVALIDABLE_DEVICE; + } else { + return OH_NN_SUCCESS; + } +} +} // NeuralNetworkRuntime +} // OHOS diff --git a/test/unittest/common/v2_0/mock_idevice.cpp b/test/unittest/common/v2_0/mock_idevice.cpp new file mode 100644 index 0000000000000000000000000000000000000000..17561c43c5176cbc70c7b9ccedb196d44abaaa4c --- /dev/null +++ b/test/unittest/common/v2_0/mock_idevice.cpp @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "mock_idevice.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V2_0 { +sptr INnrtDevice::Get(bool isStub) +{ + return INnrtDevice::Get("device_service", isStub); +} + +sptr INnrtDevice::Get(const std::string& serviceName, bool isStub) +{ + if (isStub) { + return nullptr; + } + + sptr mockIDevice = sptr(new (std::nothrow) MockIDevice()); + if (mockIDevice == nullptr) { + return nullptr; + } + std::string deviceName = "MockDevice"; + EXPECT_CALL(*((V2_0::MockIDevice*)mockIDevice.GetRefPtr()), GetDeviceName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceName), ::testing::Return(HDF_SUCCESS))); + + std::string vendorName = "MockVendor"; + EXPECT_CALL(*((V2_0::MockIDevice*)mockIDevice.GetRefPtr()), GetVendorName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(vendorName), ::testing::Return(HDF_SUCCESS))); + + V2_0::DeviceStatus deviceStatus = V2_0::DeviceStatus::AVAILABLE; + EXPECT_CALL(*((V2_0::MockIDevice*)mockIDevice.GetRefPtr()), GetDeviceStatus(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceStatus), ::testing::Return(HDF_SUCCESS))); + + return mockIDevice; +} +} // V2_0 +} // Nnrt +} // HDI +} // OHOS \ No newline at end of file diff --git a/test/unittest/common/v2_0/mock_idevice.h b/test/unittest/common/v2_0/mock_idevice.h new file mode 100644 index 0000000000000000000000000000000000000000..7ad846e79d757727df911f6ea1a67e5278eaebe4 --- /dev/null +++ b/test/unittest/common/v2_0/mock_idevice.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_MOCK_IDEVICE_H +#define NEURAL_NETWORK_RUNTIME_MOCK_IDEVICE_H + +#include + +#include "frameworks/native/hdi_prepared_model_v2_0.h" +#include "frameworks/native/memory_manager.h" +#include "frameworks/native/transform.h" + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V2_0 { +class MockIDevice : public INnrtDevice { +public: + MOCK_METHOD1(GetDeviceName, int32_t(std::string&)); + MOCK_METHOD1(GetVendorName, int32_t(std::string&)); + MOCK_METHOD1(GetDeviceType, int32_t(DeviceType&)); + MOCK_METHOD1(GetDeviceStatus, int32_t(DeviceStatus&)); + MOCK_METHOD2(GetSupportedOperation, int32_t(const Model&, std::vector&)); + MOCK_METHOD1(IsFloat16PrecisionSupported, int32_t(bool&)); + MOCK_METHOD1(IsPerformanceModeSupported, int32_t(bool&)); + MOCK_METHOD1(IsPrioritySupported, int32_t(bool&)); + MOCK_METHOD1(IsDynamicInputSupported, int32_t(bool&)); + MOCK_METHOD3(PrepareModel, int32_t(const Model&, const ModelConfig&, OHOS::sptr&)); + MOCK_METHOD1(IsModelCacheSupported, int32_t(bool&)); + MOCK_METHOD3(PrepareModelFromModelCache, int32_t(const std::vector&, const ModelConfig&, + OHOS::sptr&)); + MOCK_METHOD2(AllocateBuffer, int32_t(uint32_t, SharedBuffer&)); + MOCK_METHOD1(ReleaseBuffer, int32_t(const SharedBuffer&)); + MOCK_METHOD2(GetVersion, int32_t(uint32_t&, uint32_t&)); +}; + +class MockIPreparedModel : public IPreparedModel { +public: + MOCK_METHOD1(ExportModelCache, int32_t(std::vector&)); + MOCK_METHOD4(Run, int32_t(const std::vector&, const std::vector&, + std::vector>&, std::vector&)); + 
MOCK_METHOD2(GetInputDimRanges, int32_t(std::vector>&, std::vector>&)); + MOCK_METHOD2(GetVersion, int32_t(uint32_t&, uint32_t&)); + + static OH_NN_ReturnCode m_ExpectRetCode; +}; +} // V2_0 +} // Nnrt +} // HDI +} // OHOS +#endif // NEURAL_NETWORK_RUNTIME_MOCK_IDEVICE_H diff --git a/test/unittest/components/BUILD.gn b/test/unittest/components/BUILD.gn index 5b37d98d22fea6db43f4401271b675a6614105b1..a62c7e21192fb2d3b6f3f7962fb4cccbf50563e1 100644 --- a/test/unittest/components/BUILD.gn +++ b/test/unittest/components/BUILD.gn @@ -20,21 +20,42 @@ config("module_private_config") { include_dirs = [ "//third_party/googletest/googlemock/include", - "//foundation/ai/neural_network_runtime", + "../../../", "//foundation/communication/ipc/interfaces/innerkits/ipc_core/include", "//third_party/mindspore/mindspore-src/source/mindspore/lite/mindir/include", ] } -ohos_unittest("CompilationTest") { +ohos_unittest("MemoryManagerTest") { module_out_path = module_output_path - sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/compilation/compilation_test.cpp" ] - sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/compilation_mock_idevice.cpp" ] + sources = [ "./memory_manager/memory_manager_test.cpp" ] + sources += [ "../common/file_utils.cpp" ] configs = [ ":module_private_config" ] deps = [ - "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "../../../frameworks:libneural_network_runtime", + "//third_party/googletest:gmock_main", + "//third_party/googletest:gtest_main", + ] + + external_deps = [ + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "mindspore:mindir", + ] +} + +ohos_unittest("CompilationV1_0Test") { + module_out_path = module_output_path + + sources = [ "./v1_0/compilation/compilation_test.cpp" ] + sources += [ "../common/v1_0/compilation_mock_idevice.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + 
"../../../frameworks:libneural_network_runtime", "//third_party/googletest:gmock_main", "//third_party/googletest:gtest_main", ] @@ -49,15 +70,15 @@ ohos_unittest("CompilationTest") { ] } -ohos_unittest("ExecutorTest") { +ohos_unittest("ExecutorV1_0Test") { module_out_path = module_output_path - sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/executor/executor_test.cpp" ] - sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/executor_mock_device.cpp" ] + sources = [ "./v1_0/executor/executor_test.cpp" ] + sources += [ "../common/v1_0/executor_mock_device.cpp" ] configs = [ ":module_private_config" ] deps = [ - "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "../../../frameworks:libneural_network_runtime", "//third_party/googletest:gmock_main", "//third_party/googletest:gtest_main", ] @@ -72,15 +93,15 @@ ohos_unittest("ExecutorTest") { ] } -ohos_unittest("DeviceManagerTest") { +ohos_unittest("DeviceManagerV1_0Test") { module_out_path = module_output_path - sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/device_manager/device_manager_test.cpp" ] - sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/mock_idevice.cpp" ] + sources = [ "./v1_0/device_manager/device_manager_test.cpp" ] + sources += [ "../common/v1_0/mock_idevice.cpp" ] configs = [ ":module_private_config" ] deps = [ - "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "../../../frameworks:libneural_network_runtime", "//third_party/googletest:gmock_main", "//third_party/googletest:gtest_main", ] @@ -94,15 +115,15 @@ ohos_unittest("DeviceManagerTest") { ] } -ohos_unittest("DeviceRegistrarTest") { +ohos_unittest("DeviceRegistrarV1_0Test") { module_out_path = module_output_path - sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/device_registrar/device_registrar_test.cpp" ] - sources += [ 
"//foundation/ai/neural_network_runtime/test/unittest/common/mock_idevice.cpp" ] + sources = [ "./v1_0/device_registrar/device_registrar_test.cpp" ] + sources += [ "../common/v1_0/mock_idevice.cpp" ] configs = [ ":module_private_config" ] deps = [ - "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "../../../frameworks:libneural_network_runtime", "//third_party/googletest:gmock_main", "//third_party/googletest:gtest_main", ] @@ -116,16 +137,16 @@ ohos_unittest("DeviceRegistrarTest") { ] } -ohos_unittest("HDIDeviceTest") { +ohos_unittest("HDIDeviceV1_0Test") { module_out_path = module_output_path - sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/hdi_device/hdi_device_test.cpp" ] - sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/mock_idevice.cpp" ] - sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/file_utils.cpp" ] + sources = [ "./v1_0/hdi_device/hdi_device_test.cpp" ] + sources += [ "../common/v1_0/mock_idevice.cpp" ] + sources += [ "../common/file_utils.cpp" ] configs = [ ":module_private_config" ] deps = [ - "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "../../../frameworks:libneural_network_runtime", "//third_party/googletest:gmock_main", "//third_party/googletest:gtest_main", ] @@ -139,16 +160,16 @@ ohos_unittest("HDIDeviceTest") { ] } -ohos_unittest("HDIPreparedModelTest") { +ohos_unittest("HDIPreparedModelV1_0Test") { module_out_path = module_output_path - sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/hdi_prepared_model/hdi_prepared_model_test.cpp" ] - sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/mock_idevice.cpp" ] - sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/file_utils.cpp" ] + sources = [ "./v1_0/hdi_prepared_model/hdi_prepared_model_test.cpp" ] + sources += [ "../common/v1_0/mock_idevice.cpp" ] + sources += [ 
"../common/file_utils.cpp" ] configs = [ ":module_private_config" ] deps = [ - "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "../../../frameworks:libneural_network_runtime", "//third_party/googletest:gmock_main", "//third_party/googletest:gtest_main", ] @@ -162,15 +183,14 @@ ohos_unittest("HDIPreparedModelTest") { ] } -ohos_unittest("MemoryManagerTest") { +ohos_unittest("TransformV1_0Test") { module_out_path = module_output_path - sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/memory_manager/memory_manager_test.cpp" ] - sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/file_utils.cpp" ] + sources = [ "./v1_0/transform/transform_test.cpp" ] configs = [ ":module_private_config" ] deps = [ - "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "../../../frameworks:libneural_network_runtime", "//third_party/googletest:gmock_main", "//third_party/googletest:gtest_main", ] @@ -183,35 +203,37 @@ ohos_unittest("MemoryManagerTest") { ] } -ohos_unittest("TransformTest") { +ohos_unittest("InnerModelV1_0Test") { module_out_path = module_output_path - sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/transform/transform_test.cpp" ] + sources = [ "./v1_0/inner_model/inner_model_test.cpp" ] + sources += [ "../common/v1_0/inner_model_mock_device.cpp" ] configs = [ ":module_private_config" ] deps = [ - "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "../../../frameworks:libneural_network_runtime", "//third_party/googletest:gmock_main", "//third_party/googletest:gtest_main", ] external_deps = [ + "c_utils:utils", "drivers_interface_nnrt:libnnrt_proxy_1.0", + "hdf_core:libhdf_utils", "hitrace_native:libhitracechain", "hiviewdfx_hilog_native:libhilog", "mindspore:mindir", ] } -ohos_unittest("InnerModelTest") { +ohos_unittest("NnTensorV1_0Test") { module_out_path = module_output_path - sources = [ 
"//foundation/ai/neural_network_runtime/test/unittest/components/inner_model/inner_model_test.cpp" ] - sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/inner_model_mock_device.cpp" ] + sources = [ "./v1_0/inner_model/nn_tensor_test.cpp" ] configs = [ ":module_private_config" ] deps = [ - "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "../../../frameworks:libneural_network_runtime", "//third_party/googletest:gmock_main", "//third_party/googletest:gtest_main", ] @@ -226,14 +248,14 @@ ohos_unittest("InnerModelTest") { ] } -ohos_unittest("NnTensorTest") { +ohos_unittest("NnValidationV1_0Test") { module_out_path = module_output_path - sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/inner_model/nn_tensor_test.cpp" ] + sources = [ "./v1_0/inner_model/nn_validation_test.cpp" ] configs = [ ":module_private_config" ] deps = [ - "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "../../../frameworks:libneural_network_runtime", "//third_party/googletest:gmock_main", "//third_party/googletest:gtest_main", ] @@ -248,14 +270,14 @@ ohos_unittest("NnTensorTest") { ] } -ohos_unittest("NnValidationTest") { +ohos_unittest("OpsRegistryV1_0Test") { module_out_path = module_output_path - sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/inner_model/nn_validation_test.cpp" ] + sources = [ "./v1_0/inner_model/ops_regitstry_test.cpp" ] configs = [ ":module_private_config" ] deps = [ - "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "../../../frameworks:libneural_network_runtime", "//third_party/googletest:gmock_main", "//third_party/googletest:gtest_main", ] @@ -270,14 +292,17 @@ ohos_unittest("NnValidationTest") { ] } -ohos_unittest("OpsRegistryTest") { +ohos_unittest("NeuralNetworkRuntimeV1_0Test") { module_out_path = module_output_path - sources = [ 
"//foundation/ai/neural_network_runtime/test/unittest/components/inner_model/ops_regitstry_test.cpp" ] + sources = + [ "./v1_0/neural_network_runtime_test/neural_network_runtime_test.cpp" ] + sources += [ "../common/v1_0/executor_mock_device.cpp" ] + configs = [ ":module_private_config" ] deps = [ - "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "../../../frameworks:libneural_network_runtime", "//third_party/googletest:gmock_main", "//third_party/googletest:gtest_main", ] @@ -292,23 +317,269 @@ ohos_unittest("OpsRegistryTest") { ] } -ohos_unittest("NeuralNetworkRuntimeTest") { +ohos_unittest("CompilationV2_0Test") { module_out_path = module_output_path - sources = [ "//foundation/ai/neural_network_runtime/test/unittest/components/neural_network_runtime_test/neural_network_runtime_test.cpp" ] - sources += [ "//foundation/ai/neural_network_runtime/test/unittest/common/executor_mock_device.cpp" ] + sources = [ "./v2_0/compilation/compilation_test.cpp" ] + sources += [ "../common/v2_0/compilation_mock_idevice.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "../../../frameworks:libneural_network_runtime", + "//third_party/googletest:gmock_main", + "//third_party/googletest:gtest_main", + ] + + external_deps = [ + "c_utils:utils", + "drivers_interface_nnrt:libnnrt_proxy_2.0", + "hdf_core:libhdf_utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "mindspore:mindir", + ] +} + +ohos_unittest("ExecutorV2_0Test") { + module_out_path = module_output_path + sources = [ "./v2_0/executor/executor_test.cpp" ] + sources += [ "../common/v2_0/executor_mock_device.cpp" ] configs = [ ":module_private_config" ] deps = [ - "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime", + "../../../frameworks:libneural_network_runtime", "//third_party/googletest:gmock_main", "//third_party/googletest:gtest_main", ] external_deps = [ "c_utils:utils", - "drivers_interface_nnrt:libnnrt_proxy_1.0", 
+ "drivers_interface_nnrt:libnnrt_proxy_2.0", + "hdf_core:libhdf_utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "mindspore:mindir", + ] +} + +ohos_unittest("DeviceManagerV2_0Test") { + module_out_path = module_output_path + + sources = [ "./v2_0/device_manager/device_manager_test.cpp" ] + sources += [ "../common/v2_0/mock_idevice.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "../../../frameworks:libneural_network_runtime", + "//third_party/googletest:gmock_main", + "//third_party/googletest:gtest_main", + ] + + external_deps = [ + "c_utils:utils", + "drivers_interface_nnrt:libnnrt_proxy_2.0", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "mindspore:mindir", + ] +} + +ohos_unittest("DeviceRegistrarV2_0Test") { + module_out_path = module_output_path + + sources = [ "./v2_0/device_registrar/device_registrar_test.cpp" ] + sources += [ "../common/v2_0/mock_idevice.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "../../../frameworks:libneural_network_runtime", + "//third_party/googletest:gmock_main", + "//third_party/googletest:gtest_main", + ] + + external_deps = [ + "c_utils:utils", + "drivers_interface_nnrt:libnnrt_proxy_2.0", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "mindspore:mindir", + ] +} + +ohos_unittest("HDIDeviceV2_0Test") { + module_out_path = module_output_path + + sources = [ "./v2_0/hdi_device/hdi_device_test.cpp" ] + sources += [ "../common/v2_0/mock_idevice.cpp" ] + sources += [ "../common/file_utils.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "../../../frameworks:libneural_network_runtime", + "//third_party/googletest:gmock_main", + "//third_party/googletest:gtest_main", + ] + + external_deps = [ + "c_utils:utils", + "drivers_interface_nnrt:libnnrt_proxy_2.0", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "mindspore:mindir", + ] +} + +ohos_unittest("HDIPreparedModelV2_0Test") { 
+ module_out_path = module_output_path + + sources = [ "./v2_0/hdi_prepared_model/hdi_prepared_model_test.cpp" ] + sources += [ "../common/v2_0/mock_idevice.cpp" ] + sources += [ "../common/file_utils.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "../../../frameworks:libneural_network_runtime", + "//third_party/googletest:gmock_main", + "//third_party/googletest:gtest_main", + ] + + external_deps = [ + "c_utils:utils", + "drivers_interface_nnrt:libnnrt_proxy_2.0", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "mindspore:mindir", + ] +} + +ohos_unittest("TransformV2_0Test") { + module_out_path = module_output_path + + sources = [ "./v2_0/transform/transform_test.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "../../../frameworks:libneural_network_runtime", + "//third_party/googletest:gmock_main", + "//third_party/googletest:gtest_main", + ] + + external_deps = [ + "drivers_interface_nnrt:libnnrt_proxy_2.0", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "mindspore:mindir", + ] +} + +ohos_unittest("InnerModelV2_0Test") { + module_out_path = module_output_path + + sources = [ "./v2_0/inner_model/inner_model_test.cpp" ] + sources += [ "../common/v2_0/inner_model_mock_device.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "../../../frameworks:libneural_network_runtime", + "//third_party/googletest:gmock_main", + "//third_party/googletest:gtest_main", + ] + + external_deps = [ + "c_utils:utils", + "drivers_interface_nnrt:libnnrt_proxy_2.0", + "hdf_core:libhdf_utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "mindspore:mindir", + ] +} + +ohos_unittest("NnTensorV2_0Test") { + module_out_path = module_output_path + + sources = [ "./v2_0/inner_model/nn_tensor_test.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "../../../frameworks:libneural_network_runtime", + "//third_party/googletest:gmock_main", + 
"//third_party/googletest:gtest_main", + ] + + external_deps = [ + "c_utils:utils", + "drivers_interface_nnrt:libnnrt_proxy_2.0", + "hdf_core:libhdf_utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "mindspore:mindir", + ] +} + +ohos_unittest("NnValidationV2_0Test") { + module_out_path = module_output_path + + sources = [ "./v2_0/inner_model/nn_validation_test.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "../../../frameworks:libneural_network_runtime", + "//third_party/googletest:gmock_main", + "//third_party/googletest:gtest_main", + ] + + external_deps = [ + "c_utils:utils", + "drivers_interface_nnrt:libnnrt_proxy_2.0", + "hdf_core:libhdf_utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "mindspore:mindir", + ] +} + +ohos_unittest("OpsRegistryV2_0Test") { + module_out_path = module_output_path + + sources = [ "./v2_0/inner_model/ops_regitstry_test.cpp" ] + configs = [ ":module_private_config" ] + + deps = [ + "../../../frameworks:libneural_network_runtime", + "//third_party/googletest:gmock_main", + "//third_party/googletest:gtest_main", + ] + + external_deps = [ + "c_utils:utils", + "drivers_interface_nnrt:libnnrt_proxy_2.0", + "hdf_core:libhdf_utils", + "hitrace_native:libhitracechain", + "hiviewdfx_hilog_native:libhilog", + "mindspore:mindir", + ] +} + +ohos_unittest("NeuralNetworkRuntimeV2_0Test") { + module_out_path = module_output_path + + sources = + [ "./v2_0/neural_network_runtime_test/neural_network_runtime_test.cpp" ] + sources += [ "../common/v2_0/executor_mock_device.cpp" ] + + configs = [ ":module_private_config" ] + + deps = [ + "../../../frameworks:libneural_network_runtime", + "//third_party/googletest:gmock_main", + "//third_party/googletest:gtest_main", + ] + + external_deps = [ + "c_utils:utils", + "drivers_interface_nnrt:libnnrt_proxy_2.0", "hdf_core:libhdf_utils", "hitrace_native:libhitracechain", "hiviewdfx_hilog_native:libhilog", @@ -319,18 +590,30 @@ 
ohos_unittest("NeuralNetworkRuntimeTest") { group("components_unittest") { testonly = true deps = [ - ":CompilationTest", - ":DeviceManagerTest", - ":DeviceRegistrarTest", - ":ExecutorTest", - ":HDIDeviceTest", - ":HDIPreparedModelTest", - ":InnerModelTest", + ":CompilationV1_0Test", + ":CompilationV2_0Test", + ":DeviceManagerV1_0Test", + ":DeviceManagerV2_0Test", + ":DeviceRegistrarV1_0Test", + ":DeviceRegistrarV2_0Test", + ":ExecutorV1_0Test", + ":ExecutorV2_0Test", + ":HDIDeviceV1_0Test", + ":HDIDeviceV2_0Test", + ":HDIPreparedModelV1_0Test", + ":HDIPreparedModelV2_0Test", + ":InnerModelV1_0Test", + ":InnerModelV2_0Test", ":MemoryManagerTest", - ":NeuralNetworkRuntimeTest", - ":NnTensorTest", - ":NnValidationTest", - ":OpsRegistryTest", - ":TransformTest", + ":NeuralNetworkRuntimeV1_0Test", + ":NeuralNetworkRuntimeV2_0Test", + ":NnTensorV1_0Test", + ":NnTensorV2_0Test", + ":NnValidationV1_0Test", + ":NnValidationV2_0Test", + ":OpsRegistryV1_0Test", + ":OpsRegistryV2_0Test", + ":TransformV1_0Test", + ":TransformV2_0Test", ] } diff --git a/test/unittest/components/memory_manager/memory_manager_test.cpp b/test/unittest/components/memory_manager/memory_manager_test.cpp index 293aca3c98fadb19d3187bfd50cc14e409ae35c1..2a9f9b0116afb387cf08ed2dc230b89cfc824b0a 100644 --- a/test/unittest/components/memory_manager/memory_manager_test.cpp +++ b/test/unittest/components/memory_manager/memory_manager_test.cpp @@ -119,7 +119,7 @@ HWTEST_F(MemoryManagerTest, memorymanagertest_unmapmemory_001, TestSize.Level0) { const auto& memoryManager = MemoryManager::GetInstance(); void* memory = nullptr; - EXPECT_EQ(OH_NN_INVALID_PARAMETER, memoryManager->UnMapMemory(memory)); + memoryManager->UnMapMemory(memory); } /** @@ -131,7 +131,7 @@ HWTEST_F(MemoryManagerTest, memorymanagertest_unmapmemory_002, TestSize.Level0) { const auto& memoryManager = MemoryManager::GetInstance(); void* memory = malloc(10); - EXPECT_EQ(OH_NN_INVALID_PARAMETER, memoryManager->UnMapMemory(memory)); + 
memoryManager->UnMapMemory(memory); free(memory); } diff --git a/test/unittest/components/compilation/compilation_test.cpp b/test/unittest/components/v1_0/compilation/compilation_test.cpp similarity index 99% rename from test/unittest/components/compilation/compilation_test.cpp rename to test/unittest/components/v1_0/compilation/compilation_test.cpp index 8529ccb2ed4fd0eadf897a26c0aa7c77e73dad14..25cc81c3eb8bb0943cc3bd52694102a577d2627d 100644 --- a/test/unittest/components/compilation/compilation_test.cpp +++ b/test/unittest/components/v1_0/compilation/compilation_test.cpp @@ -19,7 +19,7 @@ #include "mindir.h" -#include "test/unittest/common/mock_idevice.h" +#include "test/unittest/common/v1_0/mock_idevice.h" using namespace OHOS::NeuralNetworkRuntime; using namespace OHOS::HDI::Nnrt::V1_0; diff --git a/test/unittest/components/compilation/compilation_test.h b/test/unittest/components/v1_0/compilation/compilation_test.h similarity index 100% rename from test/unittest/components/compilation/compilation_test.h rename to test/unittest/components/v1_0/compilation/compilation_test.h diff --git a/test/unittest/components/device_manager/device_manager_test.cpp b/test/unittest/components/v1_0/device_manager/device_manager_test.cpp similarity index 89% rename from test/unittest/components/device_manager/device_manager_test.cpp rename to test/unittest/components/v1_0/device_manager/device_manager_test.cpp index 3e5068962be3e663510bb60607c2656db7769921..2c1480f9dc19990ead8ed073f9de7dc71f38d65e 100644 --- a/test/unittest/components/device_manager/device_manager_test.cpp +++ b/test/unittest/components/v1_0/device_manager/device_manager_test.cpp @@ -18,8 +18,8 @@ #include "common/log.h" #include "frameworks/native/device_manager.h" -#include "frameworks/native/hdi_device.h" -#include "test/unittest/common/mock_idevice.h" +#include "frameworks/native/hdi_device_v1_0.h" +#include "test/unittest/common/v1_0/mock_idevice.h" using namespace testing; using namespace testing::ext; @@ 
-70,7 +70,7 @@ HWTEST_F(DeviceManagerTest, devicemanager_getalldeviceid_001, TestSize.Level0) std::vector idVect = deviceManager.GetAllDeviceId(); EXPECT_NE((size_t)0, idVect.size()); - const size_t expectDeviceId {std::hash{}("MockDevice_MockVendor")}; + const size_t expectDeviceId {std::hash {} ("MockDevice_MockVendor")}; EXPECT_EQ(expectDeviceId, idVect[0]); const std::string expectDeviceName = "MockDevice"; @@ -128,8 +128,8 @@ HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_001, TestSize.Level0) std::string vendorName = "MockVendor"; MockInit(device, typeVect, deviceName, vendorName); - std::function()> creator = - [&device]()->std::shared_ptr {return std::make_shared(device);}; + std::function()> creator = + [&device]()->std::shared_ptr {return std::make_shared(device);}; auto& deviceManager = DeviceManager::GetInstance(); OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator); EXPECT_EQ(OH_NN_FAILED, result); @@ -142,8 +142,8 @@ HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_001, TestSize.Level0) */ HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_002, TestSize.Level0) { - std::function()> creator = - []()->std::shared_ptr {return nullptr;}; + std::function()> creator = + []()->std::shared_ptr {return nullptr;}; auto& deviceManager = DeviceManager::GetInstance(); OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator); EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); @@ -164,8 +164,8 @@ HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_003, TestSize.Level0) std::string vendorName = "MockVendor"; MockInit(device, typeVect, deviceName, vendorName); - std::function()> creator = - [&device]()->std::shared_ptr {return std::make_shared(device);}; + std::function()> creator = + [&device]()->std::shared_ptr {return std::make_shared(device);}; auto& deviceManager = DeviceManager::GetInstance(); OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator); EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); @@ -186,8 +186,8 @@ 
HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_004, TestSize.Level0) std::string vendorName = "MockVendor"; MockInit(device, typeVect, deviceName, vendorName); - std::function()> creator = - [&device]()->std::shared_ptr {return std::make_shared(device);}; + std::function()> creator = + [&device]()->std::shared_ptr {return std::make_shared(device);}; auto& deviceManager = DeviceManager::GetInstance(); OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator); EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); @@ -208,8 +208,8 @@ HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_005, TestSize.Level0) std::string vendorName = "MockVendorA"; MockInit(device, typeVect, deviceName, vendorName); - std::function()> creator = - [&device]()->std::shared_ptr {return std::make_shared(device);}; + std::function()> creator = + [&device]()->std::shared_ptr {return std::make_shared(device);}; auto& deviceManager = DeviceManager::GetInstance(); OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator); EXPECT_EQ(OH_NN_SUCCESS, result); @@ -217,7 +217,7 @@ HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_005, TestSize.Level0) std::vector idVect = deviceManager.GetAllDeviceId(); EXPECT_NE((size_t)0, idVect.size()); - const size_t expectDeviceId {std::hash{}("MockDeviceA_MockVendorA")}; + const size_t expectDeviceId {std::hash {} ("MockDeviceA_MockVendorA")}; EXPECT_EQ(expectDeviceId, idVect[0]); const std::string expectDeviceName = "MockDeviceA_MockVendorA"; diff --git a/test/unittest/components/device_registrar/device_registrar_test.cpp b/test/unittest/components/v1_0/device_registrar/device_registrar_test.cpp similarity index 95% rename from test/unittest/components/device_registrar/device_registrar_test.cpp rename to test/unittest/components/v1_0/device_registrar/device_registrar_test.cpp index 8b8449f87877bf021bbaa00b129d128e4bc09885..95b3183369c59a079f622972d319e06d6d941aa1 100644 --- 
a/test/unittest/components/device_registrar/device_registrar_test.cpp +++ b/test/unittest/components/v1_0/device_registrar/device_registrar_test.cpp @@ -14,16 +14,15 @@ */ #include - -#include #include +#include #include #include "common/log.h" #include "frameworks/native/device_registrar.h" -#include "frameworks/native/hdi_device.h" +#include "frameworks/native/hdi_device_v1_0.h" #include "frameworks/native/device_manager.h" -#include "test/unittest/common/mock_idevice.h" +#include "test/unittest/common/v1_0/mock_idevice.h" using namespace testing; using namespace testing::ext; @@ -72,8 +71,8 @@ public: virtual int32_t GetVersion(uint32_t& majorVer, uint32_t& minorVer) { - majorVer = 1; - minorVer = 0; + majorVer = INNRT_DEVICE_MAJOR_VERSION; + minorVer = INNRT_DEVICE_MINOR_VERSION; return HDF_SUCCESS; } }; @@ -92,6 +91,11 @@ public: name = "MockVendorA"; return OH_NN_SUCCESS; }; + OH_NN_ReturnCode GetVersion(std::string& version) override + { + version = "MockVersionA"; + return OH_NN_SUCCESS; + } OH_NN_ReturnCode GetDeviceType(OH_NN_DeviceType& deviceType) override { return OH_NN_SUCCESS; @@ -211,7 +215,7 @@ std::shared_ptr CreateDeviceObjectCallback() { OHOS::sptr device = IRegisterDevice::Get(false); EXPECT_NE(device, nullptr); - std::shared_ptr m_mockDevice = std::make_unique(device); + std::shared_ptr m_mockDevice = std::make_shared(device); return m_mockDevice; } @@ -234,7 +238,7 @@ HWTEST_F(DeviceRegistrarTest, deviceregistrar_constructor_001, TestSize.Level0) std::vector idVect = deviceManager.GetAllDeviceId(); EXPECT_EQ((size_t)2, idVect.size()); - const size_t expectDeviceId {std::hash{}("MockDevice_MockVendor")}; + const size_t expectDeviceId {std::hash {} ("MockDevice_MockVendor")}; EXPECT_EQ(expectDeviceId, idVect[1]); const std::string expectDeviceNameA = "MockDevice"; diff --git a/test/unittest/components/executor/executor_test.cpp b/test/unittest/components/v1_0/executor/executor_test.cpp similarity index 99% rename from 
test/unittest/components/executor/executor_test.cpp rename to test/unittest/components/v1_0/executor/executor_test.cpp index 5d13e51600f36395b41fff685076199b18e03673..6e8397889d7f30d5e3d9d459c2fcd32b11a70718 100644 --- a/test/unittest/components/executor/executor_test.cpp +++ b/test/unittest/components/v1_0/executor/executor_test.cpp @@ -18,7 +18,7 @@ #include "common/scoped_trace.h" #include "frameworks/native/compilation.h" #include "frameworks/native/inner_model.h" -#include "test/unittest/common/mock_idevice.h" +#include "test/unittest/common/v1_0/mock_idevice.h" using namespace OHOS::NeuralNetworkRuntime; using namespace OHOS::NeuralNetworkRuntime::Ops; @@ -75,7 +75,7 @@ OH_NN_Tensor ExecutorTest::SetTensor(OH_NN_DataType dataType, uint32_t dimension void ExecutorTest::SetMermory(OH_NN_Memory** &memory) { float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; - void* const data = dataArry; + void * const data = dataArry; OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)}; OH_NN_Memory* ptr = &memoryPtr; memory = &ptr; diff --git a/test/unittest/components/executor/executor_test.h b/test/unittest/components/v1_0/executor/executor_test.h similarity index 100% rename from test/unittest/components/executor/executor_test.h rename to test/unittest/components/v1_0/executor/executor_test.h diff --git a/test/unittest/components/hdi_device/hdi_device_test.cpp b/test/unittest/components/v1_0/hdi_device/hdi_device_test.cpp similarity index 90% rename from test/unittest/components/hdi_device/hdi_device_test.cpp rename to test/unittest/components/v1_0/hdi_device/hdi_device_test.cpp index 07925bf69aa8d1dd60eed179007006240fe43a05..2c1be5a32a797fe3793447c418e725cc99ca1411 100644 --- a/test/unittest/components/hdi_device/hdi_device_test.cpp +++ b/test/unittest/components/v1_0/hdi_device/hdi_device_test.cpp @@ -24,8 +24,8 @@ #include #include -#include "frameworks/native/hdi_device.h" -#include "test/unittest/common/mock_idevice.h" +#include "frameworks/native/hdi_device_v1_0.h" 
+#include "test/unittest/common/v1_0/mock_idevice.h" #include "test/unittest/common/file_utils.h" using namespace testing; @@ -95,7 +95,7 @@ OH_NN_ReturnCode HDIDeviceTest::PrepareModel(int32_t allocBufferType, int32_t pr OHOS::sptr sp = OHOS::sptr(new (std::nothrow) V1_0::MockIDevice()); EXPECT_NE(sp, nullptr); - std::unique_ptr hdiDevice = std::make_unique(sp); + std::unique_ptr hdiDevice = std::make_unique(sp); EXPECT_NE(hdiDevice, nullptr); V1_0::SharedBuffer buffer {1, 1, 0, 1}; @@ -124,7 +124,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_constructor_001, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); EXPECT_NE(device, nullptr); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); } @@ -136,7 +136,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_constructor_001, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_getdevicename_001, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); std::string deviceName = "MockDevice"; EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetDeviceName(::testing::_)) @@ -157,7 +157,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_getdevicename_001, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_getdevicename_002, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); std::string deviceName = "MockDevice"; EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetDeviceName(::testing::_)) @@ -174,7 +174,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_getdevicename_002, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_getvendorname_001, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr 
hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); std::string vendorName = "MockVendor"; EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetVendorName(::testing::_)) @@ -195,7 +195,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_getvendorname_001, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_getvendorname_002, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); std::string vendorName = "MockVendor"; EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetVendorName(::testing::_)) @@ -212,7 +212,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_getvendorname_002, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_getdevicetype_001, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); V1_0::DeviceType iDeviceType = V1_0::DeviceType::CPU; EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetDeviceType(::testing::_)) @@ -233,7 +233,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_getdevicetype_001, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_getdevicetype_002, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); OH_NN_DeviceType deviceType = OH_NN_CPU; @@ -252,7 +252,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_getdevicetype_002, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_getdevicestatus_001, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); V1_0::DeviceStatus 
iDeviceStatus = V1_0::DeviceStatus::AVAILABLE; @@ -274,7 +274,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_getdevicestatus_001, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_getdevicestatus_002, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); DeviceStatus deviceStatus = AVAILABLE; V1_0::DeviceStatus iDeviceStatus = V1_0::DeviceStatus::AVAILABLE; @@ -294,7 +294,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_getsupportedoperation_001, TestSize.Level0) std::vector ops {true}; std::shared_ptr model = std::make_shared(); OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); V1_0::SharedBuffer buffer {1, 1, 0, 1}; @@ -324,7 +324,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_getsupportedoperation_002, TestSize.Level0) std::vector ops; std::shared_ptr model = std::make_shared(); OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); V1_0::SharedBuffer buffer {1, 1, 0, 1}; @@ -343,7 +343,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_getsupportedoperation_002, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_getsupportedoperation_003, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); std::shared_ptr model = nullptr; @@ -362,7 +362,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_getsupportedoperation_004, TestSize.Level0) std::vector ops {true}; std::shared_ptr model = std::make_shared(); OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + 
std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); V1_0::SharedBuffer buffer {2, 1, 0, 1}; @@ -385,7 +385,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_getsupportedoperation_004, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_isfloat16precisionsupported_001, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); bool isSupported = false; @@ -403,7 +403,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_isfloat16precisionsupported_001, TestSize.Leve HWTEST_F(HDIDeviceTest, hdidevice_isfloat16precisionsupported_002, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); bool isSupported = false; @@ -421,7 +421,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_isfloat16precisionsupported_002, TestSize.Leve HWTEST_F(HDIDeviceTest, hdidevice_isperformancemodesupported_001, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); bool isSupported = false; @@ -443,7 +443,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_isperformancemodesupported_001, TestSize.Level HWTEST_F(HDIDeviceTest, hdidevice_isperformancemodesupported_002, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); bool isSupported = false; @@ -461,7 +461,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_isperformancemodesupported_002, TestSize.Level HWTEST_F(HDIDeviceTest, hdidevice_isprioritysupported_001, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr 
hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); bool isSupported = false; @@ -483,7 +483,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_isprioritysupported_001, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_isprioritysupported_002, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); bool isSupported = false; @@ -501,7 +501,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_isprioritysupported_002, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_isdynamicinputsupported_001, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); bool isSupported = false; @@ -523,7 +523,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_isdynamicinputsupported_001, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_isdynamicinputsupported_002, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); bool isSupported = false; @@ -541,7 +541,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_isdynamicinputsupported_002, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_ismodelcachesupported_001, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); bool isSupported = false; @@ -563,7 +563,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_ismodelcachesupported_001, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_ismodelcachesupported_002, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr 
hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); bool isSupported = false; @@ -594,7 +594,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_preparemodel_001, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_preparemodel_002, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); std::shared_ptr model = nullptr; @@ -647,7 +647,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_001, TestSize.Level OHOS::sptr sp = OHOS::sptr(new (std::nothrow) V1_0::MockIDevice()); EXPECT_NE(sp, nullptr); - std::unique_ptr hdiDevice = std::make_unique(sp); + std::unique_ptr hdiDevice = std::make_unique(sp); EXPECT_NE(hdiDevice, nullptr); std::shared_ptr preparedModel; @@ -677,7 +677,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_002, TestSize.Level OHOS::sptr sp = OHOS::sptr(new (std::nothrow) V1_0::MockIDevice()); EXPECT_NE(sp, nullptr); - std::unique_ptr hdiDevice = std::make_unique(sp); + std::unique_ptr hdiDevice = std::make_unique(sp); EXPECT_NE(hdiDevice, nullptr); std::vector modelCache = { { buffer, 100 } }; @@ -686,7 +686,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_002, TestSize.Level OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); EXPECT_NE(preModel, nullptr); - std::shared_ptr preparedModel = std::make_shared(preModel); + std::shared_ptr preparedModel = std::make_shared(preModel); OHOS::sptr iPreparedModel = OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel); @@ -705,7 +705,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_002, TestSize.Level HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_003, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = 
std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); std::vector modelCache = { { nullptr, 0 } }; @@ -723,7 +723,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_003, TestSize.Level HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_001, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); V1_0::SharedBuffer buffer; @@ -744,7 +744,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_001, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_002, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); size_t length = 8; @@ -761,7 +761,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_002, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_003, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); size_t length = 0; @@ -781,7 +781,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_001, TestSize.Level0) GetBuffer(buffer, length); OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), ReleaseBuffer(::testing::_)) .WillRepeatedly(::testing::Return(HDF_SUCCESS)); @@ -800,7 +800,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_001, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_002, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); 
EXPECT_NE(hdiDevice, nullptr); V1_0::SharedBuffer sharedbuffer; @@ -823,7 +823,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_002, TestSize.Level0) HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_003, TestSize.Level0) { OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); void *buffer = nullptr; @@ -840,7 +840,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_004, TestSize.Level0) const size_t length = 100; auto* buffer = new(std::nothrow) char[length]; OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); hdiDevice->ReleaseBuffer(buffer); @@ -860,7 +860,7 @@ HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_005, TestSize.Level0) GetBuffer(buffer, length); OHOS::sptr device = V1_0::INnrtDevice::Get(false); - std::unique_ptr hdiDevice = std::make_unique(device); + std::unique_ptr hdiDevice = std::make_unique(device); EXPECT_NE(hdiDevice, nullptr); EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), ReleaseBuffer(::testing::_)) diff --git a/test/unittest/components/hdi_prepared_model/hdi_prepared_model_test.cpp b/test/unittest/components/v1_0/hdi_prepared_model/hdi_prepared_model_test.cpp similarity index 91% rename from test/unittest/components/hdi_prepared_model/hdi_prepared_model_test.cpp rename to test/unittest/components/v1_0/hdi_prepared_model/hdi_prepared_model_test.cpp index d946b6312390730ad7e02d588ad78aab8df8bcea..413fb5c64ff290022f33e3e4fc3e336c5b57e05e 100644 --- a/test/unittest/components/hdi_prepared_model/hdi_prepared_model_test.cpp +++ b/test/unittest/components/v1_0/hdi_prepared_model/hdi_prepared_model_test.cpp @@ -21,10 +21,10 @@ #include #include "common/log.h" -#include "frameworks/native/hdi_prepared_model.h" +#include 
"frameworks/native/hdi_prepared_model_v1_0.h" #include "frameworks/native/memory_manager.h" #include "frameworks/native/transform.h" -#include "test/unittest/common/mock_idevice.h" +#include "test/unittest/common/v1_0/mock_idevice.h" #include "test/unittest/common/file_utils.h" using namespace testing; @@ -81,7 +81,7 @@ OH_NN_ReturnCode HDIPreparedModelTest::Run(std::vector& inputs) OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); EXPECT_NE(sp, nullptr); - std::unique_ptr preparedModel = std::make_unique(sp); + std::unique_ptr preparedModel = std::make_unique(sp); EXPECT_CALL(*sp, Run(::testing::_, ::testing::_, ::testing::_, ::testing::_)) .WillRepeatedly(::testing::DoAll( ::testing::SetArgReferee(outputsDims), @@ -104,7 +104,7 @@ HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_constructor_001, TestSize.Level0 OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); EXPECT_NE(hdiPreparedModel, nullptr); - std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); + std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); EXPECT_NE(preparedModel, nullptr); } @@ -118,7 +118,7 @@ HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_exportmodelcache_001, TestSize.L std::vector bufferVect = {{100, 100, 0, 100}}; OHOS::sptr hdiPreparedModel = OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); - std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); + std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); std::vector modelCache; EXPECT_CALL(*((V1_0::MockIPreparedModel*)hdiPreparedModel.GetRefPtr()), ExportModelCache(::testing::_)) @@ -145,7 +145,7 @@ HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_exportmodelcache_002, TestSize.L OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); EXPECT_NE(mockPreparedModel, nullptr); - std::unique_ptr preparedModel = std::make_unique(mockPreparedModel); + std::unique_ptr preparedModel = std::make_unique(mockPreparedModel); std::vector modelCache; 
EXPECT_CALL(*((V1_0::MockIPreparedModel*)mockPreparedModel.GetRefPtr()), ExportModelCache(::testing::_)) @@ -171,7 +171,7 @@ HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_exportmodelcache_003, TestSize.L OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); EXPECT_NE(hdiPreparedModel, nullptr); - std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); + std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); std::vector modelCache {{nullptr, 0}}; OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); @@ -189,7 +189,7 @@ HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_exportmodelcache_004, TestSize.L OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); EXPECT_NE(mockPreparedModel, nullptr); - std::unique_ptr preparedModel = std::make_unique(mockPreparedModel); + std::unique_ptr preparedModel = std::make_unique(mockPreparedModel); std::vector modelCache; EXPECT_CALL(*((V1_0::MockIPreparedModel*)mockPreparedModel.GetRefPtr()), ExportModelCache(::testing::_)) @@ -231,7 +231,7 @@ HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_001, TestSize.Level0) OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); EXPECT_NE(hdiPreparedModel, nullptr); - std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); + std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); OH_NN_ReturnCode result = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough); EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); } @@ -279,7 +279,7 @@ HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_003, TestSize.Level0) OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); EXPECT_NE(sp, nullptr); - std::unique_ptr preparedModel = std::make_unique(sp); + std::unique_ptr preparedModel = std::make_unique(sp); EXPECT_CALL(*sp, Run(::testing::_, ::testing::_, ::testing::_, ::testing::_)) .WillRepeatedly( @@ -332,7 +332,7 @@ HWTEST_F(HDIPreparedModelTest, 
hidpreparedmodel_run_005, TestSize.Level0) OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); EXPECT_NE(sp, nullptr); - std::unique_ptr preparedModel = std::make_unique(sp); + std::unique_ptr preparedModel = std::make_unique(sp); OH_NN_ReturnCode result = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough); EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); diff --git a/test/unittest/components/inner_model/inner_model_test.cpp b/test/unittest/components/v1_0/inner_model/inner_model_test.cpp similarity index 98% rename from test/unittest/components/inner_model/inner_model_test.cpp rename to test/unittest/components/v1_0/inner_model/inner_model_test.cpp index e40c0422576273d205f66e960a48f00d4f11c3f7..7e6dbb2a6cfa2daf3170a8f4bceae21d2312f43a 100644 --- a/test/unittest/components/inner_model/inner_model_test.cpp +++ b/test/unittest/components/v1_0/inner_model/inner_model_test.cpp @@ -36,20 +36,20 @@ public: public: InnerModel m_innerModelTest; - std::vector m_dimInput{3, 3}; - std::vector m_dimOutput{3, 3}; - std::vector m_inputIndices{0}; - std::vector m_outputIndices{1}; + std::vector m_dimInput {3, 3}; + std::vector m_dimOutput {3, 3}; + std::vector m_inputIndices {0}; + std::vector m_outputIndices {1}; - OH_NN_OperationType m_opType{OH_NN_OPS_ADD}; + OH_NN_OperationType m_opType {OH_NN_OPS_ADD}; OH_NN_UInt32Array m_inputs; OH_NN_UInt32Array m_outputs; OH_NN_UInt32Array m_params; - uint32_t m_paramIndexs[1]{3}; - uint32_t m_inputIndexs[2]{0, 1}; - uint32_t m_outputIndexs[1]{2}; + uint32_t m_paramIndexs[1] {3}; + uint32_t m_inputIndexs[2] {0, 1}; + uint32_t m_outputIndexs[1] {2}; }; void InnerModelTest::SetLiteGraph(mindspore::lite::LiteGraph* liteGraph) diff --git a/test/unittest/components/inner_model/nn_tensor_test.cpp b/test/unittest/components/v1_0/inner_model/nn_tensor_test.cpp similarity index 88% rename from test/unittest/components/inner_model/nn_tensor_test.cpp rename to test/unittest/components/v1_0/inner_model/nn_tensor_test.cpp index 
a288c26902bbd8c384f263a39f1bdfae9228e466..512194e078794072b0e11da9fc2c759534a62835 100644 --- a/test/unittest/components/inner_model/nn_tensor_test.cpp +++ b/test/unittest/components/v1_0/inner_model/nn_tensor_test.cpp @@ -37,7 +37,7 @@ class NnTensorTest : public testing::Test { HWTEST_F(NnTensorTest, nn_tensor_parse_dimensions_001, TestSize.Level1) { const int dim[2] = {2, 2}; - OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; NNTensor nnTensor; EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); @@ -69,7 +69,7 @@ HWTEST_F(NnTensorTest, nn_tensor_parse_dimensions_002, TestSize.Level1) HWTEST_F(NnTensorTest, nn_tensor_parse_dimensions_003, TestSize.Level1) { const int dim[2] = {2, -2}; - OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; NNTensor nnTensor; EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); @@ -83,7 +83,7 @@ HWTEST_F(NnTensorTest, nn_tensor_parse_dimensions_003, TestSize.Level1) HWTEST_F(NnTensorTest, nn_tensor_parse_dimensions_004, TestSize.Level1) { const int dim[2] = {2, -1}; - OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; NNTensor nnTensor; EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); @@ -97,7 +97,7 @@ HWTEST_F(NnTensorTest, nn_tensor_parse_dimensions_004, TestSize.Level1) HWTEST_F(NnTensorTest, nn_tensor_parse_dimensions_005, TestSize.Level1) { const int dim[3] = {1000000, 1000000, 10000000}; - OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; NNTensor nnTensor; EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); @@ -118,7 +118,7 @@ HWTEST_F(NnTensorTest, nn_tensor_parse_quant_params_001, TestSize.Level1) NNTensor nnTensor; 
const int dim[2] = {2, 2}; - OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, &quantParam, OH_NN_TENSOR}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, &quantParam, OH_NN_TENSOR}; EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); } @@ -137,7 +137,7 @@ HWTEST_F(NnTensorTest, nn_tensor_parse_quant_params_002, TestSize.Level1) NNTensor nnTensor; const int dim[2] = {2, 2}; - OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, &quantParam, OH_NN_TENSOR}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, &quantParam, OH_NN_TENSOR}; EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); } @@ -155,7 +155,7 @@ HWTEST_F(NnTensorTest, nn_tensor_parse_quant_params_004, TestSize.Level1) NNTensor nnTensor; const int dim[2] = {2, 2}; - OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, &quantParam, OH_NN_TENSOR}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, &quantParam, OH_NN_TENSOR}; EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); } @@ -173,7 +173,7 @@ HWTEST_F(NnTensorTest, nn_tensor_parse_quant_params_005, TestSize.Level1) NNTensor nnTensor; const int dim[2] = {2, 2}; - OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, &quantParam, OH_NN_TENSOR}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, &quantParam, OH_NN_TENSOR}; EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); } @@ -186,7 +186,7 @@ HWTEST_F(NnTensorTest, nn_tensor_parse_quant_params_005, TestSize.Level1) HWTEST_F(NnTensorTest, nn_tensor_set_dimensions_001, TestSize.Level1) { const int dim[2] = {2, -1}; - OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; const std::vector dimensions = {2, 3}; NNTensor nnTensor; @@ -202,7 +202,7 @@ HWTEST_F(NnTensorTest, nn_tensor_set_dimensions_001, TestSize.Level1) HWTEST_F(NnTensorTest, nn_tensor_set_dimensions_002, TestSize.Level1) { const int dim[2] = {2, -1}; - OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + OH_NN_Tensor 
tensor {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; NNTensor nnTensor; const std::vector dimensions = {2, 3, 5}; @@ -218,7 +218,7 @@ HWTEST_F(NnTensorTest, nn_tensor_set_dimensions_002, TestSize.Level1) HWTEST_F(NnTensorTest, nn_tensor_compare_attribute_001, TestSize.Level1) { const int dim[2] = {2, 2}; - OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; NNTensor nnTensor; NNTensor expectTensor; @@ -235,14 +235,14 @@ HWTEST_F(NnTensorTest, nn_tensor_compare_attribute_001, TestSize.Level1) HWTEST_F(NnTensorTest, nn_tensor_compare_attribute_002, TestSize.Level1) { const int dim[2] = {2, 2}; - OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; NNTensor nnTensor; NNTensor expectTensor; EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); const int dimExpect[2] = {2, 2}; - OH_NN_Tensor tensorExpect{OH_NN_INT32, 2, dimExpect, nullptr, OH_NN_TENSOR}; + OH_NN_Tensor tensorExpect {OH_NN_INT32, 2, dimExpect, nullptr, OH_NN_TENSOR}; EXPECT_EQ(OH_NN_SUCCESS, expectTensor.BuildFromOHNNTensor(tensorExpect)); EXPECT_EQ(false, nnTensor.CompareAttribute(expectTensor)); @@ -256,14 +256,14 @@ HWTEST_F(NnTensorTest, nn_tensor_compare_attribute_002, TestSize.Level1) HWTEST_F(NnTensorTest, nn_tensor_compare_attribute_003, TestSize.Level1) { const int dim[2] = {2, 2}; - OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; NNTensor nnTensor; NNTensor expectTensor; EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); const int dimExpect[3] = {2, 2, 3}; - OH_NN_Tensor tensorExpect{OH_NN_FLOAT32, 3, dimExpect, nullptr, OH_NN_TENSOR}; + OH_NN_Tensor tensorExpect {OH_NN_FLOAT32, 3, dimExpect, nullptr, OH_NN_TENSOR}; EXPECT_EQ(OH_NN_SUCCESS, expectTensor.BuildFromOHNNTensor(tensorExpect)); EXPECT_EQ(false, 
nnTensor.CompareAttribute(expectTensor)); @@ -277,14 +277,14 @@ HWTEST_F(NnTensorTest, nn_tensor_compare_attribute_003, TestSize.Level1) HWTEST_F(NnTensorTest, nn_tensor_compare_attribute_004, TestSize.Level1) { const int dim[2] = {2, 2}; - OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; NNTensor nnTensor; NNTensor expectTensor; EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); const int dimExpect[2] = {2, 3}; - OH_NN_Tensor tensorExpect{OH_NN_FLOAT32, 2, dimExpect, nullptr, OH_NN_TENSOR}; + OH_NN_Tensor tensorExpect {OH_NN_FLOAT32, 2, dimExpect, nullptr, OH_NN_TENSOR}; EXPECT_EQ(OH_NN_SUCCESS, expectTensor.BuildFromOHNNTensor(tensorExpect)); EXPECT_EQ(false, nnTensor.CompareAttribute(expectTensor)); @@ -298,7 +298,7 @@ HWTEST_F(NnTensorTest, nn_tensor_compare_attribute_004, TestSize.Level1) HWTEST_F(NnTensorTest, nn_tensor_is_scalar_001, TestSize.Level1) { const int dim[2] = {2, 2}; - OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; NNTensor nnTensor; EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); @@ -313,12 +313,12 @@ HWTEST_F(NnTensorTest, nn_tensor_is_scalar_001, TestSize.Level1) HWTEST_F(NnTensorTest, nn_tensor_convert_to_io_tensor_001, TestSize.Level1) { const int dim[2] = {2, 2}; - OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; NNTensor nnTensor; EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); - int8_t* activationValue = new (std::nothrow) int8_t[1]{0}; + int8_t* activationValue = new (std::nothrow) int8_t[1] {0}; EXPECT_NE(nullptr, activationValue); // After SetBuffer, this memory is released by NNTensor @@ -336,11 +336,11 @@ HWTEST_F(NnTensorTest, nn_tensor_convert_to_io_tensor_001, TestSize.Level1) HWTEST_F(NnTensorTest, 
nn_tensor_get_buffer_length_001, TestSize.Level1) { const int dim[2] = {2, 2}; - OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; NNTensor nnTensor; EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); - int8_t* activationValue = new (std::nothrow) int8_t[1]{0}; + int8_t* activationValue = new (std::nothrow) int8_t[1] {0}; EXPECT_NE(nullptr, activationValue); // After SetBuffer, this memory is released by NNTensor @@ -357,7 +357,7 @@ HWTEST_F(NnTensorTest, nn_tensor_get_buffer_length_001, TestSize.Level1) HWTEST_F(NnTensorTest, nn_tensor_get_format_001, TestSize.Level1) { const int dim[2] = {2, 2}; - OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; NNTensor nnTensor; EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); @@ -386,7 +386,7 @@ HWTEST_F(NnTensorTest, nn_tensor_get_name_001, TestSize.Level1) HWTEST_F(NnTensorTest, nn_tensor_get_quant_param_001, TestSize.Level1) { const int dim[2] = {2, 2}; - OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; NNTensor nnTensor; EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); @@ -407,7 +407,7 @@ HWTEST_F(NnTensorTest, nn_tensor_build_from_tensor_002, TestSize.Level1) int dataTypeTest = 13; OH_NN_DataType dataType = (OH_NN_DataType)dataTypeTest; - OH_NN_Tensor tensor{dataType, 2, dim, nullptr, OH_NN_TENSOR}; + OH_NN_Tensor tensor {dataType, 2, dim, nullptr, OH_NN_TENSOR}; NNTensor nnTensor; EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); @@ -421,7 +421,7 @@ HWTEST_F(NnTensorTest, nn_tensor_build_from_tensor_002, TestSize.Level1) HWTEST_F(NnTensorTest, nn_tensor_convert_to_lite_graph_tensor_001, TestSize.Level1) { const int dim[2] = {2, 2}; - OH_NN_Tensor tensor{OH_NN_FLOAT32, 2, dim, nullptr, 
OH_NN_TENSOR}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; NNTensor nnTensor; EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); diff --git a/test/unittest/components/inner_model/nn_validation_test.cpp b/test/unittest/components/v1_0/inner_model/nn_validation_test.cpp similarity index 100% rename from test/unittest/components/inner_model/nn_validation_test.cpp rename to test/unittest/components/v1_0/inner_model/nn_validation_test.cpp diff --git a/test/unittest/components/inner_model/ops_regitstry_test.cpp b/test/unittest/components/v1_0/inner_model/ops_regitstry_test.cpp similarity index 100% rename from test/unittest/components/inner_model/ops_regitstry_test.cpp rename to test/unittest/components/v1_0/inner_model/ops_regitstry_test.cpp diff --git a/test/unittest/components/neural_network_runtime_test/neural_network_runtime_test.cpp b/test/unittest/components/v1_0/neural_network_runtime_test/neural_network_runtime_test.cpp similarity index 97% rename from test/unittest/components/neural_network_runtime_test/neural_network_runtime_test.cpp rename to test/unittest/components/v1_0/neural_network_runtime_test/neural_network_runtime_test.cpp index 404f2e80d118d23b3478d60fe8de3fcf551fadc1..b78f169b61faa9fe6aade7ae6106cfd7c3b94988 100644 --- a/test/unittest/components/neural_network_runtime_test/neural_network_runtime_test.cpp +++ b/test/unittest/components/v1_0/neural_network_runtime_test/neural_network_runtime_test.cpp @@ -20,14 +20,13 @@ #include "common/utils.h" #include "frameworks/native/compilation.h" #include "frameworks/native/device_manager.h" -#include "frameworks/native/hdi_device.h" -#include "test/unittest/common/mock_idevice.h" +#include "frameworks/native/hdi_device_v1_0.h" +#include "test/unittest/common/v1_0/mock_idevice.h" namespace OHOS { namespace NeuralNetworkRuntime { -OH_NN_ReturnCode HDIDevice::PrepareModel(std::shared_ptr model, - const ModelConfig& config, - std::shared_ptr& preparedModel) +OH_NN_ReturnCode 
HDIDeviceV1_0::PrepareModel(std::shared_ptr model, + const ModelConfig& config, std::shared_ptr& preparedModel) { if (model == nullptr) { return OH_NN_INVALID_PARAMETER; @@ -40,11 +39,11 @@ OH_NN_ReturnCode HDIDevice::PrepareModel(std::shared_ptr iPreparedModel = sptr(new OHOS::HDI::Nnrt::V1_0::MockIPreparedModel()); if (iPreparedModel == nullptr) { - LOGE("HDIDevice mock PrepareModel failed, error happened when new sptr"); + LOGE("HDIDeviceV1_0 mock PrepareModel failed, error happened when new sptr"); return OH_NN_NULL_PTR; } - preparedModel = CreateSharedPtr(iPreparedModel); + preparedModel = CreateSharedPtr(iPreparedModel); return OH_NN_SUCCESS; } @@ -57,7 +56,7 @@ std::shared_ptr DeviceManager::GetDevice(size_t deviceId) const return nullptr; } - std::shared_ptr device = CreateSharedPtr(idevice); + std::shared_ptr device = CreateSharedPtr(idevice); if (device == nullptr) { LOGE("DeviceManager mock GetDevice failed, the device is nullptr"); return nullptr; @@ -71,7 +70,7 @@ std::shared_ptr DeviceManager::GetDevice(size_t deviceId) const } } -OH_NN_ReturnCode HDIDevice::GetDeviceType(OH_NN_DeviceType& deviceType) +OH_NN_ReturnCode HDIDeviceV1_0::GetDeviceType(OH_NN_DeviceType& deviceType) { if (deviceType == OH_NN_OTHERS) { return OH_NN_UNAVALIDABLE_DEVICE; @@ -104,35 +103,35 @@ const std::vector& DeviceManager::GetAllDeviceId() return deviceIds; } -OH_NN_ReturnCode HDIDevice::IsModelCacheSupported(bool& isSupported) +OH_NN_ReturnCode HDIDeviceV1_0::IsModelCacheSupported(bool& isSupported) { isSupported = true; return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::IsPerformanceModeSupported(bool& isSupported) +OH_NN_ReturnCode HDIDeviceV1_0::IsPerformanceModeSupported(bool& isSupported) { isSupported = true; return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::IsPrioritySupported(bool& isSupported) +OH_NN_ReturnCode HDIDeviceV1_0::IsPrioritySupported(bool& isSupported) { isSupported = true; return OH_NN_SUCCESS; } -OH_NN_ReturnCode 
HDIDevice::IsFloat16PrecisionSupported(bool& isSupported) +OH_NN_ReturnCode HDIDeviceV1_0::IsFloat16PrecisionSupported(bool& isSupported) { isSupported = true; return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::GetSupportedOperation(std::shared_ptr model, - std::vector& ops) +OH_NN_ReturnCode HDIDeviceV1_0::GetSupportedOperation(std::shared_ptr model, + std::vector& ops) { if (model == nullptr) { - LOGE("HDIDevice mock GetSupportedOperation failed, Model is nullptr, cannot query supported operation."); + LOGE("HDIDeviceV1_0 mock GetSupportedOperation failed, Model is nullptr, cannot query supported operation."); return OH_NN_NULL_PTR; } @@ -140,7 +139,7 @@ OH_NN_ReturnCode HDIDevice::GetSupportedOperation(std::shared_ptr(&activation), sizeof(int8_t))); - OH_NN_OperationType opType{OH_NN_OPS_ADD}; + OH_NN_OperationType opType {OH_NN_OPS_ADD}; EXPECT_EQ(OH_NN_SUCCESS, innerModel.AddOperation(opType, m_paramIndices, m_inputIndices, m_outputIndices)); EXPECT_EQ(OH_NN_SUCCESS, innerModel.SpecifyInputsAndOutputs(m_inputIndices, m_outputIndices)); EXPECT_EQ(OH_NN_SUCCESS, innerModel.Build()); @@ -303,7 +302,7 @@ HWTEST_F(NeuralNetworkRuntimeTest, model_add_operation_001, testing::ext::TestSi { InnerModel innerModel; OH_NNModel* model = nullptr; - OH_NN_OperationType opType{OH_NN_OPS_ADD}; + OH_NN_OperationType opType {OH_NN_OPS_ADD}; InitIndices(); AddModelTensor(innerModel); @@ -326,7 +325,7 @@ HWTEST_F(NeuralNetworkRuntimeTest, model_add_operation_002, testing::ext::TestSi { InnerModel innerModel; OH_NNModel* model = reinterpret_cast(&innerModel); - OH_NN_OperationType opType{OH_NN_OPS_ADD}; + OH_NN_OperationType opType {OH_NN_OPS_ADD}; m_inputIndices.data = m_inputIndexs; m_inputIndices.size = sizeof(m_inputIndexs) / sizeof(uint32_t); @@ -353,7 +352,7 @@ HWTEST_F(NeuralNetworkRuntimeTest, model_add_operation_003, testing::ext::TestSi { InnerModel innerModel; OH_NNModel* model = reinterpret_cast(&innerModel); - OH_NN_OperationType opType{OH_NN_OPS_ADD}; + 
OH_NN_OperationType opType {OH_NN_OPS_ADD}; m_paramIndices.data = m_paramIndexs; m_paramIndices.size = sizeof(m_paramIndexs) / sizeof(uint32_t); @@ -380,7 +379,7 @@ HWTEST_F(NeuralNetworkRuntimeTest, model_add_operation_004, testing::ext::TestSi { InnerModel innerModel; OH_NNModel* model = reinterpret_cast(&innerModel); - OH_NN_OperationType opType{OH_NN_OPS_ADD}; + OH_NN_OperationType opType {OH_NN_OPS_ADD}; m_paramIndices.data = m_paramIndexs; m_paramIndices.size = sizeof(m_paramIndexs) / sizeof(uint32_t); @@ -407,7 +406,7 @@ HWTEST_F(NeuralNetworkRuntimeTest, model_add_operation_005, testing::ext::TestSi { InnerModel innerModel; OH_NNModel* model = reinterpret_cast(&innerModel); - OH_NN_OperationType opType{OH_NN_OPS_ADD}; + OH_NN_OperationType opType {OH_NN_OPS_ADD}; InitIndices(); AddModelTensor(innerModel); @@ -572,7 +571,7 @@ HWTEST_F(NeuralNetworkRuntimeTest, model_finish_001, testing::ext::TestSize.Leve InnerModel innerModel; OH_NNModel* model = nullptr; - OH_NN_OperationType opType{OH_NN_OPS_ADD}; + OH_NN_OperationType opType {OH_NN_OPS_ADD}; InitIndices(); AddModelTensor(innerModel); @@ -599,7 +598,7 @@ HWTEST_F(NeuralNetworkRuntimeTest, model_finish_002, testing::ext::TestSize.Leve InnerModel innerModel; OH_NNModel* model = reinterpret_cast(&innerModel); - OH_NN_OperationType opType{OH_NN_OPS_ADD}; + OH_NN_OperationType opType {OH_NN_OPS_ADD}; InitIndices(); AddModelTensor(innerModel); diff --git a/test/unittest/components/neural_network_runtime_test/neural_network_runtime_test.h b/test/unittest/components/v1_0/neural_network_runtime_test/neural_network_runtime_test.h similarity index 93% rename from test/unittest/components/neural_network_runtime_test/neural_network_runtime_test.h rename to test/unittest/components/v1_0/neural_network_runtime_test/neural_network_runtime_test.h index 61f1ed2b157af6f9b546b11c490770971a984ba4..dc18e2b4452ff57e16a9ae963dd1dc8df08a717d 100644 --- 
a/test/unittest/components/neural_network_runtime_test/neural_network_runtime_test.h +++ b/test/unittest/components/v1_0/neural_network_runtime_test/neural_network_runtime_test.h @@ -41,9 +41,9 @@ public: OH_NN_UInt32Array m_paramIndices; OH_NN_Tensor m_tensor; - uint32_t m_inputIndexs[2]{0, 1}; - uint32_t m_outputIndexs[1]{2}; - uint32_t m_paramIndexs[1]{3}; + uint32_t m_inputIndexs[2] {0, 1}; + uint32_t m_outputIndexs[1] {2}; + uint32_t m_paramIndexs[1] {3}; }; } // namespace Unittest } // namespace NeuralNetworkRuntime diff --git a/test/unittest/components/transform/transform_test.cpp b/test/unittest/components/v1_0/transform/transform_test.cpp similarity index 57% rename from test/unittest/components/transform/transform_test.cpp rename to test/unittest/components/v1_0/transform/transform_test.cpp index ae9e4cada1ea40178c467d5a8d9792dd2bcc14d3..ebfcf273490cd5d9cb18d0c5d0497057cef791dd 100644 --- a/test/unittest/components/transform/transform_test.cpp +++ b/test/unittest/components/v1_0/transform/transform_test.cpp @@ -14,6 +14,7 @@ */ #include +#include #include "frameworks/native/transform.h" #include "frameworks/native/memory_manager.h" @@ -30,403 +31,6 @@ public: ~TransformTestTest() = default; }; -/** - * @tc.name: transform_transhdidevicetype_001 - * @tc.desc: Verify the TransHDIDeviceType function return OH_NN_CPU - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transhdidevicetype_001, TestSize.Level0) -{ - V1_0::DeviceType iDeviceType = V1_0::DeviceType::CPU; - OH_NN_DeviceType result = HDIToNN::TransHDIDeviceType(iDeviceType); - EXPECT_EQ(OH_NN_CPU, result); -} - -/** - * @tc.name: transform_transhdidevicetype_002 - * @tc.desc: Verify the TransHDIDeviceType function return OH_NN_GPU - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transhdidevicetype_002, TestSize.Level0) -{ - V1_0::DeviceType iDeviceType = V1_0::DeviceType::GPU; - OH_NN_DeviceType result = HDIToNN::TransHDIDeviceType(iDeviceType); - EXPECT_EQ(OH_NN_GPU, 
result); -} - -/** - * @tc.name: transform_transhdidevicetype_003 - * @tc.desc: Verify the TransHDIDeviceType function return OH_NN_ACCELERATOR - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transhdidevicetype_003, TestSize.Level0) -{ - V1_0::DeviceType iDeviceType = V1_0::DeviceType::ACCELERATOR; - OH_NN_DeviceType result = HDIToNN::TransHDIDeviceType(iDeviceType); - EXPECT_EQ(OH_NN_ACCELERATOR, result); -} - -/** - * @tc.name: transform_transhdidevicetype_004 - * @tc.desc: Verify the TransHDIDeviceType function return OH_NN_OTHERS - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transhdidevicetype_004, TestSize.Level0) -{ - V1_0::DeviceType iDeviceType = V1_0::DeviceType::OTHER; - OH_NN_DeviceType result = HDIToNN::TransHDIDeviceType(iDeviceType); - EXPECT_EQ(OH_NN_OTHERS, result); -} - -/** - * @tc.name: transform_transhdidevicestatus_001 - * @tc.desc: Verify the TransHDIDeviceStatus function return AVAILABLE - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transhdidevicestatus_001, TestSize.Level0) -{ - V1_0::DeviceStatus iDeviceStatus = V1_0::DeviceStatus::AVAILABLE; - DeviceStatus result = HDIToNN::TransHDIDeviceStatus(iDeviceStatus); - EXPECT_EQ(DeviceStatus::AVAILABLE, result); -} - -/** - * @tc.name: transform_transhdidevicestatus_002 - * @tc.desc: Verify the TransHDIDeviceStatus function return BUSY. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transhdidevicestatus_002, TestSize.Level0) -{ - V1_0::DeviceStatus iDeviceStatus = V1_0::DeviceStatus::BUSY; - DeviceStatus result = HDIToNN::TransHDIDeviceStatus(iDeviceStatus); - EXPECT_EQ(DeviceStatus::BUSY, result); -} - -/** - * @tc.name: transform_transhdidevicestatus_003 - * @tc.desc: Verify the TransHDIDeviceStatus function return OFFLINE. 
- * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transhdidevicestatus_003, TestSize.Level0) -{ - V1_0::DeviceStatus iDeviceStatus = V1_0::DeviceStatus::OFFLINE; - DeviceStatus result = HDIToNN::TransHDIDeviceStatus(iDeviceStatus); - EXPECT_EQ(DeviceStatus::OFFLINE, result); -} - -/** - * @tc.name: transform_transhdidevicestatus_004 - * @tc.desc: Verify the TransHDIDeviceStatus function return UNKNOWN. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transhdidevicestatus_004, TestSize.Level0) -{ - V1_0::DeviceStatus iDeviceStatus = V1_0::DeviceStatus::UNKNOWN; - DeviceStatus result = HDIToNN::TransHDIDeviceStatus(iDeviceStatus); - EXPECT_EQ(DeviceStatus::UNKNOWN, result); -} - -/** - * @tc.name: transform_transperformancemode_001 - * @tc.desc: Verify the TransPerformanceMode function return PERFORMANCE_LOW. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transperformancemode_001, TestSize.Level0) -{ - OH_NN_PerformanceMode mode = OH_NN_PERFORMANCE_LOW; - V1_0::PerformanceMode result = NNToHDI::TransPerformanceMode(mode); - EXPECT_EQ(V1_0::PerformanceMode::PERFORMANCE_LOW, result); -} - -/** - * @tc.name: transform_transperformancemode_002 - * @tc.desc: Verify the TransPerformanceMode function return PERFORMANCE_MEDIUM. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transperformancemode_002, TestSize.Level0) -{ - OH_NN_PerformanceMode mode = OH_NN_PERFORMANCE_MEDIUM; - V1_0::PerformanceMode result = NNToHDI::TransPerformanceMode(mode); - EXPECT_EQ(V1_0::PerformanceMode::PERFORMANCE_MEDIUM, result); -} - -/** - * @tc.name: transform_transperformancemode_003 - * @tc.desc: Verify the TransPerformanceMode function return PERFORMANCE_HIGH. 
- * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transperformancemode_003, TestSize.Level0) -{ - OH_NN_PerformanceMode mode = OH_NN_PERFORMANCE_HIGH; - V1_0::PerformanceMode result = NNToHDI::TransPerformanceMode(mode); - EXPECT_EQ(V1_0::PerformanceMode::PERFORMANCE_HIGH, result); -} - -/** - * @tc.name: transform_transperformancemode_004 - * @tc.desc: Verify the TransPerformanceMode function return PERFORMANCE_EXTREME. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transperformancemode_004, TestSize.Level0) -{ - OH_NN_PerformanceMode mode = OH_NN_PERFORMANCE_EXTREME; - V1_0::PerformanceMode result = NNToHDI::TransPerformanceMode(mode); - EXPECT_EQ(V1_0::PerformanceMode::PERFORMANCE_EXTREME, result); -} - -/** - * @tc.name: transform_transperformancemode_005 - * @tc.desc: Verify the TransPerformanceMode function return PERFORMANCE_NONE. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transperformancemode_005, TestSize.Level0) -{ - OH_NN_PerformanceMode mode = OH_NN_PERFORMANCE_NONE; - V1_0::PerformanceMode result = NNToHDI::TransPerformanceMode(mode); - EXPECT_EQ(V1_0::PerformanceMode::PERFORMANCE_NONE, result); -} - -/** - * @tc.name: transform_transpriority_001 - * @tc.desc: Verify the TransPriority function return PRIORITY_LOW. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transpriority_001, TestSize.Level0) -{ - OH_NN_Priority priority = OH_NN_PRIORITY_LOW; - V1_0::Priority result = NNToHDI::TransPriority(priority); - EXPECT_EQ(V1_0::Priority::PRIORITY_LOW, result); -} - -/** - * @tc.name: transform_transpriority_002 - * @tc.desc: Verify the TransPriority function return PRIORITY_MEDIUM. 
- * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transpriority_002, TestSize.Level0) -{ - OH_NN_Priority priority = OH_NN_PRIORITY_MEDIUM; - V1_0::Priority result = NNToHDI::TransPriority(priority); - EXPECT_EQ(V1_0::Priority::PRIORITY_MEDIUM, result); -} - -/** - * @tc.name: transform_transpriority_003 - * @tc.desc: Verify the TransPriority function return PRIORITY_HIGH. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transpriority_003, TestSize.Level0) -{ - OH_NN_Priority priority = OH_NN_PRIORITY_HIGH; - V1_0::Priority result = NNToHDI::TransPriority(priority); - EXPECT_EQ(V1_0::Priority::PRIORITY_HIGH, result); -} - -/** - * @tc.name: transform_transdatatype_001 - * @tc.desc: Verify the TransDataType function return DATA_TYPE_BOOL. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transdatatype_001, TestSize.Level0) -{ - OH_NN_DataType dataType = OH_NN_BOOL; - V1_0::DataType result = NNToHDI::TransDataType(dataType); - EXPECT_EQ(V1_0::DataType::DATA_TYPE_BOOL, result); -} - -/** - * @tc.name: transform_transdatatype_002 - * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT8. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transdatatype_002, TestSize.Level0) -{ - OH_NN_DataType dataType = OH_NN_INT8; - V1_0::DataType result = NNToHDI::TransDataType(dataType); - EXPECT_EQ(V1_0::DataType::DATA_TYPE_INT8, result); -} - -/** - * @tc.name: transform_transdatatype_003 - * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT16. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transdatatype_003, TestSize.Level0) -{ - OH_NN_DataType dataType = OH_NN_INT16; - V1_0::DataType result = NNToHDI::TransDataType(dataType); - EXPECT_EQ(V1_0::DataType::DATA_TYPE_INT16, result); -} - -/** - * @tc.name: transform_transdatatype_004 - * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT32. 
- * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transdatatype_004, TestSize.Level0) -{ - OH_NN_DataType dataType = OH_NN_INT32; - V1_0::DataType result = NNToHDI::TransDataType(dataType); - EXPECT_EQ(V1_0::DataType::DATA_TYPE_INT32, result); -} - -/** - * @tc.name: transform_transdatatype_005 - * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT64. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transdatatype_005, TestSize.Level0) -{ - OH_NN_DataType dataType = OH_NN_INT64; - V1_0::DataType result = NNToHDI::TransDataType(dataType); - EXPECT_EQ(V1_0::DataType::DATA_TYPE_INT64, result); -} - -/** - * @tc.name: transform_transdatatype_006 - * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT8. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transdatatype_006, TestSize.Level0) -{ - OH_NN_DataType dataType = OH_NN_UINT8; - V1_0::DataType result = NNToHDI::TransDataType(dataType); - EXPECT_EQ(V1_0::DataType::DATA_TYPE_UINT8, result); -} - -/** - * @tc.name: transform_transdatatype_007 - * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT16. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transdatatype_007, TestSize.Level0) -{ - OH_NN_DataType dataType = OH_NN_UINT16; - V1_0::DataType result = NNToHDI::TransDataType(dataType); - EXPECT_EQ(V1_0::DataType::DATA_TYPE_UINT16, result); -} - -/** - * @tc.name: transform_transdatatype_008 - * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT32. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transdatatype_008, TestSize.Level0) -{ - OH_NN_DataType dataType = OH_NN_UINT32; - V1_0::DataType result = NNToHDI::TransDataType(dataType); - EXPECT_EQ(V1_0::DataType::DATA_TYPE_UINT32, result); -} - -/** - * @tc.name: transform_transdatatype_009 - * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT64. 
- * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transdatatype_009, TestSize.Level0) -{ - OH_NN_DataType dataType = OH_NN_UINT64; - V1_0::DataType result = NNToHDI::TransDataType(dataType); - EXPECT_EQ(V1_0::DataType::DATA_TYPE_UINT64, result); -} - -/** - * @tc.name: transform_transdatatype_010 - * @tc.desc: Verify the TransDataType function return DATA_TYPE_FLOAT16. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transdatatype_010, TestSize.Level0) -{ - OH_NN_DataType dataType = OH_NN_FLOAT16; - V1_0::DataType result = NNToHDI::TransDataType(dataType); - EXPECT_EQ(V1_0::DataType::DATA_TYPE_FLOAT16, result); -} - -/** - * @tc.name: transform_transdatatype_011 - * @tc.desc: Verify the TransDataType function return DATA_TYPE_FLOAT32. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transdatatype_011, TestSize.Level0) -{ - OH_NN_DataType dataType = OH_NN_FLOAT32; - V1_0::DataType result = NNToHDI::TransDataType(dataType); - EXPECT_EQ(V1_0::DataType::DATA_TYPE_FLOAT32, result); -} - -/** - * @tc.name: transform_transdatatype_012 - * @tc.desc: Verify the TransDataType function return DATA_TYPE_UNKNOWN. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transdatatype_012, TestSize.Level0) -{ - OH_NN_DataType dataType = OH_NN_UNKNOWN; - V1_0::DataType result = NNToHDI::TransDataType(dataType); - EXPECT_EQ(V1_0::DataType::DATA_TYPE_UNKNOWN, result); -} - -/** - * @tc.name: transform_transdatatype_013 - * @tc.desc: Verify the TransDataType function return DATA_TYPE_FLOAT64. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transdatatype_013, TestSize.Level0) -{ - OH_NN_DataType dataType = OH_NN_FLOAT64; - V1_0::DataType result = NNToHDI::TransDataType(dataType); - EXPECT_EQ(V1_0::DataType::DATA_TYPE_FLOAT64, result); -} - -/** - * @tc.name: transform_transformat_001 - * @tc.desc: Verify the TransFormat function return FORMAT_NCHW. 
- * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transformat_001, TestSize.Level0) -{ - OH_NN_Format format = OH_NN_FORMAT_NCHW; - V1_0::Format result = NNToHDI::TransFormat(format); - EXPECT_EQ(V1_0::Format::FORMAT_NCHW, result); -} - -/** - * @tc.name: transform_transformat_002 - * @tc.desc: Verify the TransFormat function return FORMAT_NHWC. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transformat_002, TestSize.Level0) -{ - OH_NN_Format format = OH_NN_FORMAT_NHWC; - V1_0::Format result = NNToHDI::TransFormat(format); - EXPECT_EQ(V1_0::Format::FORMAT_NHWC, result); -} - -/** - * @tc.name: transform_transformat_003 - * @tc.desc: Verify the TransFormat function return FORMAT_NONE. - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transformat_003, TestSize.Level0) -{ - OH_NN_Format format = OH_NN_FORMAT_NONE; - V1_0::Format result = NNToHDI::TransFormat(format); - EXPECT_EQ(V1_0::Format::FORMAT_NONE, result); -} - -/** - * @tc.name: transform_transiotensor_001 - * @tc.desc: Verify the TransIOTensor function return int8 - * @tc.type: FUNC - */ -HWTEST_F(TransformTestTest, transform_transiotensor_001, TestSize.Level0) -{ - IOTensor tensor; - tensor.dataType = OH_NN_INT8; - V1_0::IOTensor result = NNToHDI::TransIOTensor(tensor); - EXPECT_EQ(V1_0::DataType::DATA_TYPE_INT8, result.dataType); -} - /** * @tc.name: transform_gettypesize_001 * @tc.desc: Verify the TransIOTensor function return 1. diff --git a/test/unittest/components/v2_0/compilation/compilation_test.cpp b/test/unittest/components/v2_0/compilation/compilation_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a4087b3c64a92e8d86564d1a60adaaa245bd73d6 --- /dev/null +++ b/test/unittest/components/v2_0/compilation/compilation_test.cpp @@ -0,0 +1,1143 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "compilation_test.h" + +#include + +#include "mindir.h" + +#include "test/unittest/common/v2_0/mock_idevice.h" + +using namespace OHOS::NeuralNetworkRuntime; +using namespace OHOS::HDI::Nnrt::V2_0; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +static const int DATA_VALUE = 1; +static const int DATA_NUM = 36; +static const int DIM_NUM = 3; +OH_NN_ReturnCode CompilationTest::BuildModelGraph(NeuralNetworkRuntime::InnerModel& innerModel) +{ + // liteGraph is released internally by innerModel + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph; + EXPECT_NE(nullptr, liteGraph); + + liteGraph->name_ = "testGraph"; + liteGraph->input_indices_ = {0}; + liteGraph->output_indices_ = {1}; + liteGraph->all_tensors_ = {nullptr}; + const std::vector quant_params {}; + const std::vector data(DATA_NUM, DATA_VALUE); + const std::vector dim = {DIM_NUM, DIM_NUM}; + + for (size_t indexInput = 0; indexInput < liteGraph->input_indices_.size(); ++indexInput) { + liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create(liteGraph->name_, + mindspore::lite::DATA_TYPE_FLOAT32, dim, mindspore::lite::FORMAT_NCHW, data, quant_params)); + } + + for (size_t indexOutput = 0; indexOutput < liteGraph->output_indices_.size(); ++indexOutput) { + liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create(liteGraph->name_, + mindspore::lite::DATA_TYPE_FLOAT32, dim, mindspore::lite::FORMAT_NCHW, data, quant_params)); + } + + OH_NN_ReturnCode ret = 
innerModel.BuildFromLiteGraph(liteGraph); + return ret; +} + +void CompilationTest::SetConfig(Compilation& compilationTest) +{ + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetPerformance(OH_NN_PERFORMANCE_EXTREME)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetPriority(OH_NN_PRIORITY_HIGH)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetEnableFp16(true)); +} + +void CompilationTest::WriteFile(uint64_t version, uint64_t fileNumber, std::size_t cacheDeviceId) +{ + uint64_t cacheSize = 4; + uint64_t writeSize = 7; + uint64_t cacheInfo[7] = {}; + auto cacheInfoPtr = cacheInfo; + *cacheInfoPtr++ = fileNumber; + *cacheInfoPtr++ = version; + *cacheInfoPtr++ = cacheDeviceId; + for (uint64_t i = 0; i < cacheSize; ++i) { + *cacheInfoPtr++ = i; + } + std::ofstream inFile("cache_info.nncache", std::ios::binary | std::ios::out | std::ios::trunc); + inFile.write(reinterpret_cast(cacheInfo), writeSize * sizeof(uint64_t)); + inFile.close(); +} + +void CompilationTest::BuildCompilation(InnerModel& innerModel) +{ + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.Build()); +} + +/* + * @tc.name: compilation_set_device_001 + * @tc.desc: Verify the set deviceId after compilation finish of the SetDevice function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_device_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.Build()); + + std::size_t deviceId = 1; + OH_NN_ReturnCode ret = compilationTest.SetDevice(deviceId); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_device_002 + * @tc.desc: Verify the deviceId does not exist of the SetDevice function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_device_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + size_t deviceId = 0; + OH_NN_ReturnCode ret = compilationTest.SetDevice(deviceId); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_device_003 + * @tc.desc: Verify the error happened when getting supported operation of the SetDevice function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_device_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + OH_NN_ReturnCode ret = compilationTest.SetDevice(deviceId); + EXPECT_EQ(OH_NN_NULL_PTR, ret); +} + +/* + * @tc.name: compilation_set_device_004 + * @tc.desc: Verify the current device not support the model of the SetDevice function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_device_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + MockIPreparedModel::m_ExpectRetCode = OH_NN_SUCCESS; + OH_NN_ReturnCode ret = compilationTest.SetDevice(deviceId); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: compilation_set_device_005 + * @tc.desc: Verify the error happened when checking whether device supports dynamic input of the SetDevice function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_device_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED; + OH_NN_ReturnCode ret = compilationTest.SetDevice(deviceId); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: compilation_set_device_006 + * @tc.desc: Verify the device does not support dynamic shape inputs of the SetDevice function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_device_006, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_PATH; + OH_NN_ReturnCode ret = compilationTest.SetDevice(deviceId); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: compilation_set_device_007 + * @tc.desc: Verify the set normal deviceId of the SetDevice function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_device_007, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + OH_NN_ReturnCode ret = compilationTest.SetDevice(deviceId); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_set_cachedir_001 + * @tc.desc: Verify the set cache after compilation finish of the SetCacheDir function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_cachedir_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.Build()); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, compilationTest.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = compilationTest.SetCacheDir("../", 1); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_cachedir_002 + * @tc.desc: Verify the not set device of the SetCacheDir function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_cachedir_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + OH_NN_ReturnCode ret = compilationTest.SetCacheDir("../", 1); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_cachedir_003 + * @tc.desc: Verify the Fail to query whether the device is available to save cache model of the SetCacheDir function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_cachedir_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED; + OH_NN_ReturnCode ret = compilationTest.SetCacheDir("../", 1); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: compilation_set_cachedir_004 + * @tc.desc: Verify the device is unavailable to save cache model of the SetCacheDir function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_cachedir_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_SUCCESS; + OH_NN_ReturnCode ret = compilationTest.SetCacheDir("../", 1); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_cachedir_005 + * @tc.desc: Verify the cache model path is invalid of the SetCacheDir function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_cachedir_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + OH_NN_ReturnCode ret = compilationTest.SetCacheDir("../compilation_test.cpp", 1); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_cachedir_006 + * @tc.desc: Verify the cache model path is not a directory of the SetCacheDir function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_cachedir_006, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + OH_NN_ReturnCode ret = compilationTest.SetCacheDir("./CompilationTest", 1); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_cachedir_007 + * @tc.desc: Verify the success of the SetCacheDir function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_cachedir_007, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + OH_NN_ReturnCode ret = compilationTest.SetCacheDir("../", 1); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_set_performance_001 + * @tc.desc: Verify the set performance after compilation finish of the SetPerformance function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_performance_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.Build()); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, compilationTest.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = compilationTest.SetPerformance(OH_NN_PERFORMANCE_NONE); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_performance_002 + * @tc.desc: Verify the set performance before set device of the SetPerformance function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_performance_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + OH_NN_ReturnCode ret = compilationTest.SetPerformance(OH_NN_PERFORMANCE_NONE); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_performance_003 + * @tc.desc: Verify the call device failed of the SetPerformance function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_performance_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED; + OH_NN_ReturnCode ret = compilationTest.SetPerformance(OH_NN_PERFORMANCE_NONE); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: compilation_set_performance_004 + * @tc.desc: Verify the device is not support performance setting of the SetPerformance function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_performance_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_SUCCESS; + OH_NN_ReturnCode ret = compilationTest.SetPerformance(OH_NN_PERFORMANCE_NONE); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_performance_005 + * @tc.desc: Verify the passed invalid performance of the SetPerformance function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_performance_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + OH_NN_PerformanceMode performance = static_cast(5); + OH_NN_ReturnCode ret = compilationTest.SetPerformance(performance); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_performance_006 + * @tc.desc: Verify the success of the SetPerformance function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_performance_006, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = compilationTest.SetPerformance(OH_NN_PERFORMANCE_NONE); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_set_priority_001 + * @tc.desc: Verify the set priority after compilation finish of the SetPriority function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_priority_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.Build()); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, compilationTest.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = compilationTest.SetPriority(OH_NN_PRIORITY_LOW); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_priority_002 + * @tc.desc: Verify the set priority before set device of the SetPriority function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_priority_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + + OH_NN_ReturnCode ret = compilationTest.SetPriority(OH_NN_PRIORITY_LOW); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_priority_003 + * @tc.desc: Verify the call device failed of the SetPriority function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_priority_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_PARAMETER; + OH_NN_ReturnCode ret = compilationTest.SetPriority(OH_NN_PRIORITY_LOW); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_priority_004 + * @tc.desc: Verify the device is not support priority setting of the SetPriority function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_priority_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_SUCCESS; + OH_NN_ReturnCode ret = compilationTest.SetPriority(OH_NN_PRIORITY_LOW); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_priority_005 + * @tc.desc: Verify the passed invalid priority of the SetPriority function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_priority_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + OH_NN_Priority priority = static_cast(5);; + OH_NN_ReturnCode ret = compilationTest.SetPriority(priority); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_priority_006 + * @tc.desc: Verify the success of the SetPriority function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_priority_006, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = compilationTest.SetPriority(OH_NN_PRIORITY_LOW); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_set_enable_fp16_001 + * @tc.desc: Verify the enable float16 after compilation finish of the SetEnableFp16 function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_enable_fp16_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.Build()); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, compilationTest.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = compilationTest.SetEnableFp16(true); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_enable_fp16_002 + * @tc.desc: Verify the set enable fp16 before set device of the SetEnableFp16 function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_enable_fp16_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + + OH_NN_ReturnCode ret = compilationTest.SetEnableFp16(true); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_enable_fp16_003 + * @tc.desc: Verify the call device failed of the SetEnableFp16 function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_enable_fp16_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_MEMORY_ERROR; + OH_NN_ReturnCode ret = compilationTest.SetEnableFp16(true); + EXPECT_EQ(OH_NN_MEMORY_ERROR, ret); +} + +/* + * @tc.name: compilation_set_enable_fp16_004 + * @tc.desc: Verify the device is not support float16 precision setting of the SetEnableFp16 function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_enable_fp16_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_SUCCESS; + OH_NN_ReturnCode ret = compilationTest.SetEnableFp16(true); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_set_enable_fp16_005 + * @tc.desc: Verify the success of the SetEnableFp16 function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_set_enable_fp16_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = compilationTest.SetEnableFp16(true); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_get_input_tensors_001 + * @tc.desc: Verify the normal input tensors of the GetInputTensors function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_get_input_tensors_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + EXPECT_EQ(innerModel.GetInputTensors(), compilationTest.GetInputTensors()); +} + +/* + * @tc.name: compilation_get_output_tensors_001 + * @tc.desc: Verify the normal output tensors of the GetOutputTensors function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_get_output_tensors_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + EXPECT_EQ(innerModel.GetOutputTensors(), compilationTest.GetOutputTensors()); +} + +/* + * @tc.name: compilation_get_execution_plan_001 + * @tc.desc: Verify the passed nullptr of the GetExecutionPlan function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_get_execution_plan_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + EXPECT_EQ(nullptr, compilationTest.GetExecutionPlan()); +} + +/* + * @tc.name: compilation_is_dynamic_shape_001 + * @tc.desc: Verify the input tensor is empth of the IsDynamicShape function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_is_dynamic_shape_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + EXPECT_EQ(false, compilationTest.IsDynamicShape()); +} + +/* + * @tc.name: compilation_is_dynamic_shape_002 + * @tc.desc: Verify the return true of the IsDynamicShape function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_is_dynamic_shape_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + EXPECT_EQ(true, compilationTest.IsDynamicShape()); +} + +/* + * @tc.name: compilation_is_dynamic_shape_003 + * @tc.desc: Verify the return false of the IsDynamicShape function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_is_dynamic_shape_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED; + EXPECT_EQ(false, compilationTest.IsDynamicShape()); +} + +/* + * @tc.name: compilation_is_build_001 + * @tc.desc: Verify return false of the IsBuild function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_is_build_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + EXPECT_EQ(false, compilationTest.IsBuild()); +} + +/* + * @tc.name: compilation_build_001 + * @tc.desc: Verify the build after compilation finish of the Build function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.Build()); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_build_002 + * @tc.desc: Verify the not set device of the Build function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_build_003 + * @tc.desc: Verify the preparing model failed of the Build function without set cache path. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_FILE; + SetConfig(compilationTest); + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_build_004 + * @tc.desc: Verify the success of the Build function without set cache path. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_build_005 + * @tc.desc: Verify the preparing model failed of the Build function without cache file. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_FILE; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetEnableFp16(true)); + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_build_006 + * @tc.desc: Verify the export model cache failed of the Build function without cache file. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_006, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetEnableFp16(true)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED; + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: compilation_build_007 + * @tc.desc: Verify the model cache file is invalid to generating cache mode of the Build function without cache file. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_007, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("/sys", 1)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetEnableFp16(true)); + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_INVALID_FILE, ret); +} + +/* + * @tc.name: compilation_build_008 + * @tc.desc: Verify the success to generating cache mode of the Build function without cache file. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_008, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetEnableFp16(true)); + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_build_009 + * @tc.desc: Verify the Fail to get the content of info cache file of the Build. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_009, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + std::ofstream createFile("cache_info.nncache"); + createFile.close(); + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_INVALID_FILE, ret); +} + +/* + * @tc.name: compilation_build_010 + * @tc.desc: Verify the deviceId in the cache files is different from current deviceId of the Build function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_010, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + WriteFile(1, 4, 2); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_build_011 + * @tc.desc: Verify the info cache file has been changed of the Build function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_011, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + WriteFile(1, 100, 1); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_INVALID_FILE, ret); +} + +/* + * @tc.name: compilation_build_012 + * @tc.desc: Verify the Preparing model failed of the Build function model version is greater than cached versio. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_012, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilationTest(&innerModel); + MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_FILE; + SetConfig(compilationTest); + WriteFile(0, 4, 1); + + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + std::ofstream inFile("0.nncache", std::ios::binary | std::ios::out | std::ios::trunc); + inFile.close(); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_build_013 + * @tc.desc: Verify that the build function return success message with model version is greater than cached version + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_013, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + WriteFile(0, 1, 1); + + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + std::ofstream inFile("0.nncache", std::ios::binary | std::ios::out | std::ios::trunc); + inFile.close(); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, 
remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_build_014 + * @tc.desc: Verify the model version is less than version cache of the Build function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_014, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + WriteFile(3, 4, 1); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: compilation_build_015 + * @tc.desc: Verify the checking cache model failed of the Build function with release buffer. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_015, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + EXPECT_EQ(0, remove("1.nncache")); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_INVALID_FILE, ret); +} + +/* + * @tc.name: compilation_build_016 + * @tc.desc: Verify the get cache file length of the Build function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_016, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + std::ofstream inFile("0.nncache", std::ios::binary | std::ios::out | std::ios::trunc); + inFile.close(); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_INVALID_FILE, ret); +} + +/* + * @tc.name: compilation_build_017 + * @tc.desc: Verify the fail to create file buffer of the Build function. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_017, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_NULL_PTR; + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_NULL_PTR, ret); +} + +/* + * @tc.name: compilation_build_018 + * @tc.desc: Verify the cache model file has been changed of the Build function. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_018, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + uint64_t version = 1; + uint64_t fileNumber = 1; + std::size_t cacheDeviceId = 1; + uint64_t cacheInfo[7] = {}; + auto cacheInfoPtr = cacheInfo; + *cacheInfoPtr++ = fileNumber; + *cacheInfoPtr++ = version; + *cacheInfoPtr++ = cacheDeviceId; + for (uint64_t i = 0; i < 4; ++i) { + *cacheInfoPtr++ = i; + } + + std::ofstream onFile("0.nncache", std::ios::binary | std::ios::out | std::ios::trunc); + onFile.write(reinterpret_cast(cacheInfo), 7 * sizeof(uint64_t)); + onFile.close(); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_INVALID_FILE, ret); +} + +/* + * @tc.name: compilation_build_019 + * @tc.desc: Verify the preparing model from cache failed of the Build function with load cache build. + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_019, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED; + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: compilation_build_020 + * @tc.desc: Verify the success of the Build function with load cache build. 
+ * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, compilation_build_020, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildCompilation(innerModel); + + Compilation compilationTest(&innerModel); + SetConfig(compilationTest); + EXPECT_EQ(OH_NN_SUCCESS, compilationTest.SetCacheDir("./", 1)); + + OH_NN_ReturnCode ret = compilationTest.Build(); + EXPECT_EQ(0, remove("0.nncache")); + EXPECT_EQ(0, remove("1.nncache")); + EXPECT_EQ(0, remove("cache_info.nncache")); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/components/v2_0/compilation/compilation_test.h b/test/unittest/components/v2_0/compilation/compilation_test.h new file mode 100644 index 0000000000000000000000000000000000000000..8217f4f3acec605c1dd10cb2b198b180c38ce8bd --- /dev/null +++ b/test/unittest/components/v2_0/compilation/compilation_test.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_COMPILATION_UNITTEST_H +#define NEURAL_NETWORK_RUNTIME_COMPILATION_UNITTEST_H + +#include + +#include "frameworks/native/compilation.h" +#include "frameworks/native/inner_model.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class CompilationTest : public testing::Test { +public: + OH_NN_ReturnCode BuildModelGraph(NeuralNetworkRuntime::InnerModel& innerModel); + void SetConfig(Compilation& compilationTest); + void WriteFile(uint64_t version, uint64_t fileNumber, std::size_t cacheDeviceId); + void BuildCompilation(InnerModel& innerModel); +}; +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_COMPILATION_UNITTEST_H \ No newline at end of file diff --git a/test/unittest/components/v2_0/device_manager/device_manager_test.cpp b/test/unittest/components/v2_0/device_manager/device_manager_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a351ef7bf3407e5bb48afd21445ed26188cfb35c --- /dev/null +++ b/test/unittest/components/v2_0/device_manager/device_manager_test.cpp @@ -0,0 +1,242 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +#include "common/log.h" +#include "frameworks/native/device_manager.h" +#include "frameworks/native/hdi_device_v2_0.h" +#include "test/unittest/common/v2_0/mock_idevice.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class DeviceManagerTest : public testing::Test { +protected: + void MockInit(OHOS::sptr device, const std::vector& typeVect, + const std::string& deviceName, const std::string& vendorName); +}; + +void DeviceManagerTest::MockInit(OHOS::sptr device, const std::vector& typeVect, + const std::string& deviceName, const std::string& vendorName) +{ + const size_t typeSize = 4; + int index = 0; + EXPECT_EQ(typeSize, typeVect.size()); + EXPECT_CALL(*device, GetDeviceName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceName), + ::testing::Return(typeVect[index++]))); + + EXPECT_CALL(*device, GetVendorName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(vendorName), + ::testing::Return(typeVect[index++]))); + + V2_0::DeviceStatus deviceStatus = V2_0::DeviceStatus::AVAILABLE; + EXPECT_CALL(*device, GetDeviceStatus(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceStatus), + ::testing::Return(typeVect[index++]))); + + uint32_t majorVer = 1; + uint32_t minorVer = 0; + EXPECT_CALL(*device, GetVersion(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(majorVer), ::testing::SetArgReferee<1>(minorVer), + ::testing::Return(typeVect[index++]))); +} + +/** + * @tc.name: devicemanager_getalldeviceid_001 + * @tc.desc: Verify the GetAllDeviceId function return deviceid list is not null. 
+ * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_getalldeviceid_001, TestSize.Level0) +{ + auto &deviceManager = DeviceManager::GetInstance(); + std::vector idVect = deviceManager.GetAllDeviceId(); + EXPECT_NE((size_t)0, idVect.size()); + + const size_t expectDeviceId {std::hash {} ("MockDevice_MockVendor")}; + EXPECT_EQ(expectDeviceId, idVect[0]); + + const std::string expectDeviceName = "MockDevice"; + std::string deviceName = ""; + std::shared_ptr retDevice = deviceManager.GetDevice(idVect[0]); + retDevice->GetDeviceName(deviceName); + EXPECT_EQ(deviceName, expectDeviceName); +} + +/** + * @tc.name: devicemanager_getdevice_001 + * @tc.desc: Verify the GetDevice function return nullptr in case of deviceId invalid. + * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_getdevice_001, TestSize.Level0) +{ + auto &deviceManager = DeviceManager::GetInstance(); + const size_t deviceId = 1; + std::shared_ptr result = deviceManager.GetDevice(deviceId); + EXPECT_EQ(nullptr, result); +} + +/** + * @tc.name: devicemanager_getdevice_002 + * @tc.desc: Verify the GetDevice function validate device name return specified device name. + * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_getdevice_002, TestSize.Level0) +{ + auto &deviceManager = DeviceManager::GetInstance(); + std::vector idVect = deviceManager.GetAllDeviceId(); + EXPECT_EQ((size_t)1, idVect.size()); + size_t deviceId = idVect[0]; + std::shared_ptr result = deviceManager.GetDevice(deviceId); + EXPECT_NE(nullptr, result); + + const std::string expectDeviceNameA = "MockDevice"; + std::string deviceName = ""; + result->GetDeviceName(deviceName); + EXPECT_EQ(deviceName, expectDeviceNameA); +} + +/** + * @tc.name: devicemanager_registerdevice_001 + * @tc.desc: Verify the RegisterDevice function register repeatly. 
+ * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_001, TestSize.Level0) +{ + std::vector typeVect = {HDF_SUCCESS, HDF_SUCCESS, HDF_SUCCESS, HDF_SUCCESS}; + OHOS::sptr device = OHOS::sptr(new (std::nothrow) V2_0::MockIDevice()); + EXPECT_NE(device.GetRefPtr(), nullptr); + + std::string deviceName = "MockDevice"; + std::string vendorName = "MockVendor"; + MockInit(device, typeVect, deviceName, vendorName); + + std::function()> creator = + [&device]()->std::shared_ptr {return std::make_shared(device);}; + auto& deviceManager = DeviceManager::GetInstance(); + OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator); + EXPECT_EQ(OH_NN_FAILED, result); +} + +/** + * @tc.name: devicemanager_registerdevice_002 + * @tc.desc: Verify the RegisterDevice function return invalid parameter. + * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_002, TestSize.Level0) +{ + std::function()> creator = + []()->std::shared_ptr {return nullptr;}; + auto& deviceManager = DeviceManager::GetInstance(); + OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: devicemanager_registerdevice_003 + * @tc.desc: Verify the RegisterDevice function return unavailable device in case of device name invalid param. 
+ * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_003, TestSize.Level0) +{ + std::vector typeVect = {HDF_FAILURE, HDF_SUCCESS, HDF_SUCCESS, HDF_SUCCESS}; + OHOS::sptr device = OHOS::sptr(new (std::nothrow) V2_0::MockIDevice()); + EXPECT_NE(device.GetRefPtr(), nullptr); + + std::string deviceName = "MockDevice"; + std::string vendorName = "MockVendor"; + MockInit(device, typeVect, deviceName, vendorName); + + std::function()> creator = + [&device]()->std::shared_ptr {return std::make_shared(device);}; + auto& deviceManager = DeviceManager::GetInstance(); + OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/** + * @tc.name: devicemanager_registerdevice_004 + * @tc.desc: Verify the RegisterDevice function return unavailable device in case of vendor name failure. + * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_004, TestSize.Level0) +{ + std::vector typeVect = {HDF_SUCCESS, HDF_FAILURE, HDF_SUCCESS, HDF_SUCCESS}; + OHOS::sptr device = OHOS::sptr(new (std::nothrow) V2_0::MockIDevice()); + EXPECT_NE(device.GetRefPtr(), nullptr); + + std::string deviceName = "MockDevice"; + std::string vendorName = "MockVendor"; + MockInit(device, typeVect, deviceName, vendorName); + + std::function()> creator = + [&device]()->std::shared_ptr {return std::make_shared(device);}; + auto& deviceManager = DeviceManager::GetInstance(); + OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/** + * @tc.name: devicemanager_registerdevice_005 + * @tc.desc: Verify the RegisterDevice function return success. 
+ * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_005, TestSize.Level0) +{ + std::vector typeVect = {HDF_SUCCESS, HDF_SUCCESS, HDF_SUCCESS, HDF_SUCCESS}; + OHOS::sptr device = OHOS::sptr(new (std::nothrow) V2_0::MockIDevice()); + EXPECT_NE(device.GetRefPtr(), nullptr); + + std::string deviceName = "MockDeviceA"; + std::string vendorName = "MockVendorA"; + MockInit(device, typeVect, deviceName, vendorName); + + std::function()> creator = + [&device]()->std::shared_ptr {return std::make_shared(device);}; + auto& deviceManager = DeviceManager::GetInstance(); + OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator); + EXPECT_EQ(OH_NN_SUCCESS, result); + + std::vector idVect = deviceManager.GetAllDeviceId(); + EXPECT_NE((size_t)0, idVect.size()); + + const size_t expectDeviceId {std::hash {} ("MockDeviceA_MockVendorA")}; + EXPECT_EQ(expectDeviceId, idVect[0]); + + const std::string expectDeviceName = "MockDeviceA_MockVendorA"; + const std::string retDeviceName = deviceManager.GetDeviceName(idVect[0]); + EXPECT_EQ(retDeviceName, expectDeviceName); +} + +/** + * @tc.name: devicemanager_getdevicename_001 + * @tc.desc: Verify the GetDevice function return empty string in case of deviceid invalid. 
+ * @tc.type: FUNC + */ +HWTEST_F(DeviceManagerTest, devicemanager_getdevicename_001, TestSize.Level0) +{ + auto &deviceManager = DeviceManager::GetInstance(); + const size_t deviceId = 1; + std::string result = deviceManager.GetDeviceName(deviceId); + EXPECT_EQ("", result); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/components/v2_0/device_registrar/device_registrar_test.cpp b/test/unittest/components/v2_0/device_registrar/device_registrar_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d022e142c4100cae264f9c4e18f25f0fc72e3664 --- /dev/null +++ b/test/unittest/components/v2_0/device_registrar/device_registrar_test.cpp @@ -0,0 +1,268 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include + +#include "common/log.h" +#include "frameworks/native/device_registrar.h" +#include "frameworks/native/hdi_device_v2_0.h" +#include "frameworks/native/device_manager.h" +#include "test/unittest/common/v2_0/mock_idevice.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class IRegisterDevice : public HDI::HdiBase { +public: + DECLARE_HDI_DESCRIPTOR(u"ohos.hdi.nnrt.v2_0.IRegisterDevice"); + + virtual ~IRegisterDevice() = default; + + static sptr Get(bool isStub = false); + static sptr Get(const std::string& serviceName, bool isStub = false); + + virtual int32_t GetDeviceName(std::string& name) = 0; + + virtual int32_t GetVendorName(std::string& name) = 0; + + virtual int32_t GetDeviceType(V2_0::DeviceType& deviceType) = 0; + + virtual int32_t GetDeviceStatus(V2_0::DeviceStatus& status) = 0; + + virtual int32_t GetSupportedOperation(const V2_0::Model& model, std::vector& ops) = 0; + + virtual int32_t IsFloat16PrecisionSupported(bool& isSupported) = 0; + + virtual int32_t IsPerformanceModeSupported(bool& isSupported) = 0; + + virtual int32_t IsPrioritySupported(bool& isSupported) = 0; + + virtual int32_t IsDynamicInputSupported(bool& isSupported) = 0; + + virtual int32_t PrepareModel(const V2_0::Model& model, const V2_0::ModelConfig& config, + sptr& preparedModel) = 0; + + virtual int32_t IsModelCacheSupported(bool& isSupported) = 0; + + virtual int32_t PrepareModelFromModelCache(const std::vector& modelCache, + const V2_0::ModelConfig& config, sptr& preparedModel) = 0; + + virtual int32_t AllocateBuffer(uint32_t length, V2_0::SharedBuffer& buffer) = 0; + + virtual int32_t ReleaseBuffer(const V2_0::SharedBuffer& buffer) = 0; + + virtual int32_t GetVersion(uint32_t& majorVer, uint32_t& minorVer) + { + majorVer = INNRT_DEVICE_MAJOR_VERSION; + minorVer = INNRT_DEVICE_MINOR_VERSION; + return 
HDF_SUCCESS; + } +}; + +class SimulationDevice : public Device { +public: + explicit SimulationDevice(OHOS::sptr device) {}; + + OH_NN_ReturnCode GetDeviceName(std::string& name) override + { + name = "MockIDeviceA"; + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode GetVendorName(std::string& name) override + { + name = "MockVendorA"; + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode GetVersion(std::string& version) override + { + version = "MockVersionA"; + return OH_NN_SUCCESS; + } + OH_NN_ReturnCode GetDeviceType(OH_NN_DeviceType& deviceType) override + { + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode GetDeviceStatus(DeviceStatus& status) override + { + status = DeviceStatus::AVAILABLE; + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode GetSupportedOperation(std::shared_ptr model, + std::vector& ops) override + { + return OH_NN_SUCCESS; + }; + + OH_NN_ReturnCode IsFloat16PrecisionSupported(bool& isSupported) override + { + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode IsPerformanceModeSupported(bool& isSupported) override + { + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode IsPrioritySupported(bool& isSupported) override + { + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode IsDynamicInputSupported(bool& isSupported) override + { + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode IsModelCacheSupported(bool& isSupported) override + { + return OH_NN_SUCCESS; + }; + + OH_NN_ReturnCode PrepareModel(std::shared_ptr model, const ModelConfig& config, + std::shared_ptr& preparedModel) override + { + return OH_NN_SUCCESS; + }; + OH_NN_ReturnCode PrepareModelFromModelCache(const std::vector& modelCache, const ModelConfig& config, + std::shared_ptr& preparedModel) override + { + return OH_NN_SUCCESS; + }; + + void *AllocateBuffer(size_t length) override + { + return nullptr; + }; + OH_NN_ReturnCode ReleaseBuffer(const void* buffer) override + { + return OH_NN_SUCCESS; + }; +}; + +class MockIDeviceImp : public IRegisterDevice { +public: + MOCK_METHOD1(GetDeviceName, 
int32_t(std::string&)); + MOCK_METHOD1(GetVendorName, int32_t(std::string&)); + MOCK_METHOD1(GetDeviceType, int32_t(V2_0::DeviceType&)); + MOCK_METHOD1(GetDeviceStatus, int32_t(V2_0::DeviceStatus&)); + MOCK_METHOD2(GetSupportedOperation, int32_t(const V2_0::Model&, std::vector&)); + MOCK_METHOD1(IsFloat16PrecisionSupported, int32_t(bool&)); + MOCK_METHOD1(IsPerformanceModeSupported, int32_t(bool&)); + MOCK_METHOD1(IsPrioritySupported, int32_t(bool&)); + MOCK_METHOD1(IsDynamicInputSupported, int32_t(bool&)); + MOCK_METHOD3(PrepareModel, + int32_t(const V2_0::Model&, const V2_0::ModelConfig&, OHOS::sptr&)); + MOCK_METHOD1(IsModelCacheSupported, int32_t(bool&)); + MOCK_METHOD3(PrepareModelFromModelCache, int32_t(const std::vector&, const V2_0::ModelConfig&, + OHOS::sptr&)); + MOCK_METHOD2(AllocateBuffer, int32_t(uint32_t, V2_0::SharedBuffer&)); + MOCK_METHOD1(ReleaseBuffer, int32_t(const V2_0::SharedBuffer&)); + MOCK_METHOD2(GetVersion, int32_t(uint32_t&, uint32_t&)); +}; + +sptr IRegisterDevice::Get(bool isStub) +{ + return IRegisterDevice::Get("device_service", isStub); +} + +sptr IRegisterDevice::Get(const std::string& serviceName, bool isStub) +{ + if (isStub) { + return nullptr; + } + + sptr mockIDevice = sptr(new (std::nothrow) MockIDeviceImp()); + if (mockIDevice.GetRefPtr() == nullptr) { + LOGE("Failed to new MockIDeviceImp object."); + return nullptr; + } + + std::string deviceName = "MockIDeviceA"; + EXPECT_CALL(*((MockIDeviceImp *)mockIDevice.GetRefPtr()), GetDeviceName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceName), ::testing::Return(HDF_SUCCESS))); + + std::string vendorName = "MockVendorA"; + EXPECT_CALL(*((MockIDeviceImp *)mockIDevice.GetRefPtr()), GetVendorName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(vendorName), ::testing::Return(HDF_SUCCESS))); + + V2_0::DeviceStatus deviceStatus = V2_0::DeviceStatus::AVAILABLE; + EXPECT_CALL(*((MockIDeviceImp *)mockIDevice.GetRefPtr()), 
GetDeviceStatus(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceStatus), ::testing::Return(HDF_SUCCESS))); + return mockIDevice; +} + +class DeviceRegistrarTest : public testing::Test { +public: + DeviceRegistrarTest() = default; + ~DeviceRegistrarTest() = default; +}; + +std::shared_ptr CreateDeviceObjectCallback() +{ + OHOS::sptr device = IRegisterDevice::Get(false); + EXPECT_NE(device, nullptr); + std::shared_ptr m_mockDevice = std::make_shared(device); + return m_mockDevice; +} + +std::shared_ptr CreateNullObjectCallback() +{ + return nullptr; +} + +/* * + * @tc.name: deviceregistrar_constructor_001 + * @tc.desc: Verify that the DeviceRegistrar constructor registers the device object successfully. + * @tc.type: FUNC + */ +HWTEST_F(DeviceRegistrarTest, deviceregistrar_constructor_001, TestSize.Level0) +{ + CreateDevice creator = CreateDeviceObjectCallback; + std::unique_ptr deviceRegister = std::make_unique(creator); + EXPECT_NE(deviceRegister, nullptr); + auto &deviceManager = DeviceManager::GetInstance(); + std::vector idVect = deviceManager.GetAllDeviceId(); + EXPECT_EQ((size_t)2, idVect.size()); + + const size_t expectDeviceId {std::hash {} ("MockDevice_MockVendor")}; + EXPECT_EQ(expectDeviceId, idVect[1]); + + const std::string expectDeviceNameA = "MockDevice"; + std::string deviceName = ""; + std::shared_ptr retDevice = deviceManager.GetDevice(idVect[1]); + retDevice->GetDeviceName(deviceName); + EXPECT_EQ(deviceName, expectDeviceNameA); + + const std::string expectDeviceNameB = "MockDevice_MockVendor"; + std::string queryDeviceName = deviceManager.GetDeviceName(idVect[1]); + EXPECT_EQ(queryDeviceName, expectDeviceNameB); +} + +/* * + * @tc.name: deviceregistrar_constructor_002 + * @tc.desc: Verify the DeviceRegistrar constructor when the registered creator returns nullptr, used for branch coverage. 
+ * @tc.type: FUNC + */ +HWTEST_F(DeviceRegistrarTest, deviceregistrar_constructor_002, TestSize.Level0) +{ + CreateDevice creator = CreateNullObjectCallback; + std::unique_ptr deviceRegister = std::make_unique(creator); + EXPECT_NE(deviceRegister, nullptr); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/components/v2_0/executor/executor_test.cpp b/test/unittest/components/v2_0/executor/executor_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9cc0294fe0d35128c92fd70c96944c769031acbe --- /dev/null +++ b/test/unittest/components/v2_0/executor/executor_test.cpp @@ -0,0 +1,1206 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "executor_test.h" + +#include "common/scoped_trace.h" +#include "frameworks/native/compilation.h" +#include "frameworks/native/inner_model.h" +#include "test/unittest/common/v2_0/mock_idevice.h" + +using namespace OHOS::NeuralNetworkRuntime; +using namespace OHOS::NeuralNetworkRuntime::Ops; +using namespace OHOS::HDI::Nnrt::V2_0; +using namespace OHOS::HiviewDFX; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +using NNTensorPtr = std::shared_ptr; + +MSLITE::LiteGraph* ExecutorTest::BuildLiteGraph(const std::vector dim, const std::vector dimOut) +{ + MSLITE::LiteGraph* liteGraph = new (std::nothrow) MSLITE::LiteGraph(); + if (liteGraph == nullptr) { + LOGE("liteGraph build failed"); + return nullptr; + } + liteGraph->name_ = "testGraph"; + liteGraph->input_indices_.emplace_back(0); + liteGraph->output_indices_.emplace_back(1); + const std::vector quant_params; + + for (size_t indexInput = 0; indexInput < liteGraph->input_indices_.size(); ++indexInput) { + const std::vector data(36, 1); + void* liteGraphTensor1 = MSLITE::MindIR_Tensor_Create(liteGraph->name_, + MSLITE::DATA_TYPE_FLOAT32, dim, MSLITE::FORMAT_NCHW, data, quant_params); + liteGraph->all_tensors_.emplace_back(liteGraphTensor1); + } + + for (size_t indexOutput = 0; indexOutput < liteGraph->output_indices_.size(); ++indexOutput) { + const std::vector dataOut(36, 1); + void* liteGraphTensor2 = MSLITE::MindIR_Tensor_Create(liteGraph->name_, + MSLITE::DATA_TYPE_FLOAT32, dimOut, MSLITE::FORMAT_NCHW, dataOut, quant_params); + liteGraph->all_tensors_.emplace_back(liteGraphTensor2); + } + + return liteGraph; +} + +OH_NN_Tensor ExecutorTest::SetTensor(OH_NN_DataType dataType, uint32_t dimensionCount, const int32_t *dimensions, + const OH_NN_QuantParam *quantParam, OH_NN_TensorType type) +{ + OH_NN_Tensor tensor; + tensor.dataType = dataType; + tensor.dimensionCount = dimensionCount; + tensor.dimensions = dimensions; + tensor.quantParam = quantParam; + tensor.type = 
type; + + return tensor; +} + +void ExecutorTest::SetMermory(OH_NN_Memory** &memory) +{ + float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void * const data = dataArry; + OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)}; + OH_NN_Memory* ptr = &memoryPtr; + memory = &ptr; +} + +/* + * @tc.name: executor_set_input_001 + * @tc.desc: Verify that the SetInput function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_001, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* buffer = m_dataArry; + size_t length = 9 * sizeof(float); + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_set_input_002 + * @tc.desc: Verify that the SetInput function returns a failed message with out-of-range index. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_002, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + m_index = 6; + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_003 + * @tc.desc: Verify that the SetInput function returns a failed message with dynamic shape. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_003, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + const int dim = -1; + m_dimensionCount = 1; + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, &dim, nullptr, OH_NN_TENSOR); + size_t length = 1 * sizeof(float); + float data = 0; + void* buffer = &data; + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_004 + * @tc.desc: Verify that the SetInput function returns a failed message with invalid tensor's dataType. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_004, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_INT64, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* buffer = m_dataArry; + size_t length = 9 * sizeof(float); + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_005 + * @tc.desc: Verify that the SetInput function returns a failed message with invalid length. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_005, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + size_t length = 1 * sizeof(float); + void* buffer = m_dataArry; + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_006 + * @tc.desc: Verify that the SetInput function returns a failed message with allocating buffer is unsuccessfully. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_006, testing::ext::TestSize.Level0) +{ + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_PARAMETER; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* buffer = m_dataArry; + size_t length = 9 * sizeof(float); + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_MEMORY_ERROR, ret); +} + +/* + * @tc.name: executor_set_input_007 + * @tc.desc: Verify that the SetInput function returns a failed message with empty buffer. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_007, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* buffer = nullptr; + size_t length = 9 * sizeof(float); + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_MEMORY_ERROR, ret); +} + +/* + * @tc.name: executor_set_input_008 + * @tc.desc: Verify that the SetInput function returns a successful message with dataLength <= curBufferLength. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_008, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + float dataArry[15] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}; + void* buffer = dataArry; + size_t length = 9 * sizeof(float); + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length)); + + float expectArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* expectBuffer = expectArry; + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, expectBuffer, length); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_set_input_009 + * @tc.desc: Verify that the SetInput function returns a failed message with length less than dataLength. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_009, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInputFromMemory(m_index, tensor, memory)); + + float expectData = 0; + void* buffer = &expectData; + size_t length = 1 * sizeof(float); + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_010 + * @tc.desc: Verify that the SetInput function returns a failed message with BuildFromOHNNTensor unsuccessfully. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_010, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + m_dimensionCount = 0; + OH_NN_Tensor tensor = SetTensor(OH_NN_UNKNOWN, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* buffer = m_dataArry; + size_t length = 9 * sizeof(float); + + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, tensor, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_011 + * @tc.desc: Verify that the SetInput function returns a successful message with dataLength <= curBufferLength. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_011, testing::ext::TestSize.Level0) +{ + const std::vector expectDim = {3, -1}; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(expectDim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* buffer = m_dataArry; + size_t length = 9 * sizeof(float); + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length)); + + const int32_t testDim[2] = {3, 5}; + OH_NN_Tensor expectTensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, testDim, nullptr, OH_NN_TENSOR); + size_t expectLength = 15 * sizeof(float); + float expectArry[15] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}; + void* expectBuffer = expectArry; + OH_NN_ReturnCode ret = executorTest.SetInput(m_index, expectTensor, expectBuffer, expectLength); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_set_input_from_memory_001 + * @tc.desc: Verify that the SetInputFromMemory function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_from_memory_001, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetInputFromMemory(m_index, tensor, memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_set_input_from_memory_002 + * @tc.desc: Verify that the SetInputFromMemory function returns a failed message with out-of-range index. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_from_memory_002, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + m_index = 6; + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetInputFromMemory(m_index, tensor, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_from_memory_003 + * @tc.desc: Verify that the SetInputFromMemory function returns a failed message with dynamic shape. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_from_memory_003, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + const int dim = -1; + OH_NN_Tensor tensor; + tensor.dataType = OH_NN_FLOAT32; + tensor.dimensionCount = 1; + tensor.dimensions = &dim; + tensor.quantParam = nullptr; + tensor.type = OH_NN_TENSOR; + float value = 0; + void* const data = &value; + OH_NN_Memory memory = {data, 1 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetInputFromMemory(m_index, tensor, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_from_memory_004 + * @tc.desc: Verify that the SetInputFromMemory function returns a failed message with invalid tensor's dataType. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_from_memory_004, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_INT64, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetInputFromMemory(m_index, tensor, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_from_memory_005 + * @tc.desc: Verify that the SetInput function returns a failed message with invalid memory.length. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_input_from_memory_005, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 1 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetInputFromMemory(m_index, tensor, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_001 + * @tc.desc: Verify that the SetOutput function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_001, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + + OH_NN_ReturnCode ret = executorTest.SetOutput(m_index, buffer, length); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_set_output_002 + * @tc.desc: Verify that the SetOutput function returns a failed message with out-of-range index. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + m_index = 6; + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + + OH_NN_ReturnCode ret = executorTest.SetOutput(m_index, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_003 + * @tc.desc: Verify that the SetOutput function returns a failed message with invalid length. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 2 * sizeof(float); + void* buffer = m_dataArry; + + OH_NN_ReturnCode ret = executorTest.SetOutput(m_index, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_004 + * @tc.desc: Verify that the SetOutput function returns a failed message when allocating buffer fails. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_004, testing::ext::TestSize.Level0) +{ + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_PARAMETER; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + + OH_NN_ReturnCode ret = executorTest.SetOutput(m_index, buffer, length); + EXPECT_EQ(OH_NN_MEMORY_ERROR, ret); +} + +/* + * @tc.name: executor_set_output_005 + * @tc.desc: Verify that the SetOutput function returns a failed message with length less than the data length of the memory set before. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_005, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutputFromMemory(m_index, memory)); + + size_t length = 1 * sizeof(float); + float expectData = 0; + void* buffer = &expectData; + OH_NN_ReturnCode ret = executorTest.SetOutput(m_index, buffer, length); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_006 + * @tc.desc: Verify that the SetOutput function returns a successful message with length <= curBufferLength. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_006, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length)); + + float expectDataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* expectBuffer = expectDataArry; + OH_NN_ReturnCode ret = executorTest.SetOutput(m_index, expectBuffer, length); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_set_output_007 + * @tc.desc: Verify that the SetOutput function returns a successful message with length > curBufferLength. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_007, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length)); + + size_t expectLength = 15 * sizeof(float); + float expectDataArry[15] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}; + void* expectBuffer = expectDataArry; + OH_NN_ReturnCode ret = executorTest.SetOutput(m_index, expectBuffer, expectLength); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_set_output_from_memory_001 + * @tc.desc: Verify that the SetOutputFromMemory function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_from_memory_001, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetOutputFromMemory(m_index, memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_set_output_from_memory_002 + * @tc.desc: Verify that the SetOutputFromMemory function returns a failed message with out-of-range index. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_from_memory_002, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + m_index = 6; + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetOutputFromMemory(m_index, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_from_memory_003 + * @tc.desc: Verify that the SetOutputFromMemory function returns a failed message with invalid memory.length. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_from_memory_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 0}; + + OH_NN_ReturnCode ret = executorTest.SetOutputFromMemory(m_index, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_from_memory_004 + * @tc.desc: Verify that the SetOutputFromMemory function returns a failed message with memory.length < dataLength. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_from_memory_004, testing::ext::TestSize.Level0) +{ + const std::vector expectDim = {4, 4}; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, expectDim); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetOutputFromMemory(m_index, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_from_memory_005 + * @tc.desc: Verify that the SetOutputFromMemory function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_set_output_from_memory_005, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length)); + void* const data = m_dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = executorTest.SetOutputFromMemory(m_index, memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_get_output_dimensions_001 + * @tc.desc: Verify that the GetOutputShape function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_get_output_dimensions_001, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executorTest.Run()); + + int32_t expectDim[2] = {3, 3}; + int32_t* ptr = expectDim; + int32_t** dimensions = &ptr; + uint32_t dimensionCount = 2; + + OH_NN_ReturnCode ret = executorTest.GetOutputShape(m_index, dimensions, dimensionCount); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_get_output_dimensions_002 + * @tc.desc: Verify that the GetOutputShape function returns a failed message without run. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_get_output_dimensions_002, testing::ext::TestSize.Level0) +{ + size_t length = 9 * sizeof(float); + void* buffer = m_dataArry; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length)); + + int32_t testDim[2] = {3, 3}; + int32_t* ptr = testDim; + int32_t** dimensions = &ptr; + uint32_t dimensionCount = 2; + + OH_NN_ReturnCode ret = executorTest.GetOutputShape(m_index, dimensions, dimensionCount); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/* + * @tc.name: executor_get_output_dimensions_003 + * @tc.desc: Verify that the GetOutputShape function returns a failed message with out-of-range index. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_get_output_dimensions_003, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + void* buffer = m_dataArry; + size_t length = 9 * sizeof(float); + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executorTest.Run()); + + uint32_t testIndex = 6; + int32_t testDim[2] = {3, 3}; + int32_t* ptr = testDim; + int32_t** dimensions = &ptr; + uint32_t dimensionCount = 2; + + OH_NN_ReturnCode ret = executorTest.GetOutputShape(testIndex, dimensions, dimensionCount); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_create_input_memory_001 + * @tc.desc: Verify that the CreateInputMemory function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_create_input_memory_001, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + size_t length = 9 * sizeof(float); + + OH_NN_ReturnCode ret = executorTest.CreateInputMemory(m_index, length, memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_create_input_memory_002 + * @tc.desc: Verify that the CreateInputMemory function returns a failed message with out-of-range index. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_create_input_memory_002, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + m_index = 6; + OH_NN_ReturnCode ret = executorTest.CreateInputMemory(m_index, length, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_create_input_memory_003 + * @tc.desc: Verify that the CreateInputMemory function returns a failed message with allocating buffer unsuccessfully. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_create_input_memory_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_PARAMETER; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + OH_NN_ReturnCode ret = executorTest.CreateInputMemory(m_index, length, memory); + EXPECT_EQ(OH_NN_MEMORY_ERROR, ret); +} + +/* + * @tc.name: executor_destroy_input_memory_001 + * @tc.desc: Verify that the DestroyInputMemory function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_destroy_input_memory_001, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)}; + OH_NN_Memory* ptr = &memoryPtr; + OH_NN_Memory** memory = &ptr; + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.CreateInputMemory(m_index, length, memory)); + OH_NN_ReturnCode ret = executorTest.DestroyInputMemory(m_index, memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_destroy_input_memory_002 + * @tc.desc: Verify that the DestroyInputMemory function returns a failed message with out-of-range index. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_destroy_input_memory_002, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + uint32_t testIndex = 6; + EXPECT_EQ(OH_NN_SUCCESS, executorTest.CreateInputMemory(m_index, length, memory)); + OH_NN_ReturnCode ret = executorTest.DestroyInputMemory(testIndex, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_destroy_input_memory_003 + * @tc.desc: Verify that the DestroyInputMemory function returns a failed message without creating memory. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_destroy_input_memory_003, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + OH_NN_ReturnCode ret = executorTest.DestroyInputMemory(m_index, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_destroy_input_memory_004 + * @tc.desc: Verify that the DestroyInputMemory function returns a failed message with invalid memory.data. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_destroy_input_memory_004, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.CreateInputMemory(m_index, length, memory)); + + float arry[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; + void* const expectData = arry; + OH_NN_Memory mptr = {expectData, 9 * sizeof(float)}; + OH_NN_Memory* expectPtr = &mptr; + OH_NN_Memory** expectMemory = &expectPtr; + + OH_NN_ReturnCode ret = executorTest.DestroyInputMemory(m_index, expectMemory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_create_output_memory_001 + * @tc.desc: Verify that the CreateOutputMemory function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_create_output_memory_001, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + OH_NN_ReturnCode ret = executorTest.CreateOutputMemory(m_index, length, memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_create_output_memory_002 + * @tc.desc: Verify that the CreateOutputMemory function returns a failed message with out-of-range index. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_create_output_memory_002, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + m_index = 6; + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + OH_NN_ReturnCode ret = executorTest.CreateOutputMemory(m_index, length, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_create_output_memory_003 + * @tc.desc: Verify that the CreateOutputMemory function returns a failed message with allocating buffer unsuccessfully. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_create_output_memory_003, testing::ext::TestSize.Level0) +{ + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_INVALID_PARAMETER; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + OH_NN_ReturnCode ret = executorTest.CreateOutputMemory(m_index, length, memory); + EXPECT_EQ(OH_NN_MEMORY_ERROR, ret); +} + +/* + * @tc.name: executor_destroy_output_memory_001 + * @tc.desc: Verify that the DestroyOutputMemory function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_destroy_output_memory_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.CreateOutputMemory(m_index, length, memory)); + OH_NN_ReturnCode ret = executorTest.DestroyOutputMemory(m_index, memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_destroy_output_memory_002 + * @tc.desc: Verify that the DestroyOutputMemory function returns a failed message with out-of-range index. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_destroy_output_memory_002, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + uint32_t testIndex = 6; + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.CreateOutputMemory(m_index, length, memory)); + OH_NN_ReturnCode ret = executorTest.DestroyOutputMemory(testIndex, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_destroy_output_memory_003 + * @tc.desc: Verify that the DestroyOutputMemory function returns a failed message without creating memory. + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_destroy_output_memory_003, testing::ext::TestSize.Level0) +{ + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Memory** memory = nullptr; + SetMermory(memory); + + OH_NN_ReturnCode ret = executorTest.DestroyOutputMemory(m_index, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_destroy_output_memory_004 + * @tc.desc: Verify that the DestroyOutputMemory function returns a failed message with invalid memory.data. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(ExecutorTest, executor_destroy_output_memory_004, testing::ext::TestSize.Level0)
+{
+    const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut);
+    InnerModel innerModel;
+    innerModel.BuildFromLiteGraph(liteGraphTest);
+    Compilation compilation(&innerModel);
+    Executor executorTest(&compilation);
+
+    size_t length = 9 * sizeof(float);
+    OH_NN_Memory** memory = nullptr;
+    SetMermory(memory);
+
+    EXPECT_EQ(OH_NN_SUCCESS, executorTest.CreateOutputMemory(m_index, length, memory));
+
+    float arry[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+    void* const expectData = arry;
+    OH_NN_Memory mptr = {expectData, 9 * sizeof(float)};
+    OH_NN_Memory* expectPtr = &mptr;
+    OH_NN_Memory** expectMemory = &expectPtr;
+
+    OH_NN_ReturnCode ret = executorTest.DestroyOutputMemory(m_index, expectMemory);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: executor_run_test_001
+ * @tc.desc: Verify that the Run function returns a successful message.
+ * @tc.type: FUNC
+ */
+HWTEST_F(ExecutorTest, executor_run_test_001, testing::ext::TestSize.Level0)
+{
+    HiviewDFX::HiTraceId traceId = HiTraceChain::Begin("executor_run_test_001", HITRACE_FLAG_TP_INFO);
+    const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut);
+    InnerModel innerModel;
+    innerModel.BuildFromLiteGraph(liteGraphTest);
+    Compilation compilation(&innerModel);
+    Executor executorTest(&compilation);
+
+    size_t length = 9 * sizeof(float);
+    OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
+    void* buffer = m_dataArry;
+
+    EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length));
+    EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length));
+    OH_NN_ReturnCode ret = executorTest.Run();
+    HiTraceChain::End(traceId);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/*
+ * @tc.name: executor_run_test_002
+ * @tc.desc: Verify that the Run function returns a failed message
without SetInput.
+ * @tc.type: FUNC
+ */
+HWTEST_F(ExecutorTest, executor_run_test_002, testing::ext::TestSize.Level0)
+{
+    const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut);
+    InnerModel innerModel;
+    innerModel.BuildFromLiteGraph(liteGraphTest);
+    Compilation compilation(&innerModel);
+    Executor executorTest(&compilation);
+
+    OH_NN_ReturnCode ret = executorTest.Run();
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: executor_run_test_003
+ * @tc.desc: Verify that the Run function returns a failed message without SetOutput.
+ * @tc.type: FUNC
+ */
+HWTEST_F(ExecutorTest, executor_run_test_003, testing::ext::TestSize.Level0)
+{
+    const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut);
+    InnerModel innerModel;
+    innerModel.BuildFromLiteGraph(liteGraphTest);
+    Compilation compilation(&innerModel);
+    Executor executorTest(&compilation);
+
+    OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
+    void* buffer = m_dataArry;
+    size_t length = 9 * sizeof(float);
+
+    EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length));
+    OH_NN_ReturnCode ret = executorTest.Run();
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: executor_run_test_004
+ * @tc.desc: Verify that the Run function returns a failed message with failed executionPlan.Run.
+ * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, executor_run_test_004, testing::ext::TestSize.Level0) +{ + HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED; + const MSLITE::LiteGraph* liteGraphTest = BuildLiteGraph(m_dim, m_dimOut); + InnerModel innerModel; + innerModel.BuildFromLiteGraph(liteGraphTest); + Compilation compilation(&innerModel); + Executor executorTest(&compilation); + + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* buffer = m_dataArry; + size_t length = 9 * sizeof(float); + + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetInput(m_index, tensor, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executorTest.SetOutput(m_index, buffer, length)); + OH_NN_ReturnCode ret = executorTest.Run(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/unittest/components/v2_0/executor/executor_test.h b/test/unittest/components/v2_0/executor/executor_test.h new file mode 100644 index 0000000000000000000000000000000000000000..05837b5bfe3895c89b5651432fceffbb812192a1 --- /dev/null +++ b/test/unittest/components/v2_0/executor/executor_test.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef NEURAL_NETWORK_RUNTIME_EXECUTOR_UNITTEST_H
+#define NEURAL_NETWORK_RUNTIME_EXECUTOR_UNITTEST_H
+
+#include <gtest/gtest.h>
+
+#include "mindir.h"
+
+#include "frameworks/native/executor.h"
+
+namespace MSLITE = mindspore::lite;
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace UnitTest {
+class ExecutorTest : public testing::Test {
+public:
+    MSLITE::LiteGraph* BuildLiteGraph(const std::vector<int32_t> dim, const std::vector<int32_t> dimOut);
+    OH_NN_Tensor SetTensor(OH_NN_DataType dataType, uint32_t dimensionCount, const int32_t *dimensions,
+        const OH_NN_QuantParam *quantParam, OH_NN_TensorType type);
+    void SetMermory(OH_NN_Memory** &memory);
+
+public:
+    uint32_t m_index {0};
+    const std::vector<int32_t> m_dim {3, 3};
+    const std::vector<int32_t> m_dimOut {3, 3};
+    const int32_t m_dimArry[2] {3, 3};
+    uint32_t m_dimensionCount {2};
+    float m_dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
+};
+} // namespace UnitTest
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
+
+#endif // NEURAL_NETWORK_RUNTIME_EXECUTOR_UNITTEST_H
\ No newline at end of file
diff --git a/test/unittest/components/v2_0/hdi_device/hdi_device_test.cpp b/test/unittest/components/v2_0/hdi_device/hdi_device_test.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2cfd496c6bf46989dac6e845a5e1b309b1ccd0c2
--- /dev/null
+++ b/test/unittest/components/v2_0/hdi_device/hdi_device_test.cpp
@@ -0,0 +1,875 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "frameworks/native/hdi_device_v2_0.h" +#include "test/unittest/common/v2_0/mock_idevice.h" +#include "test/unittest/common/file_utils.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +namespace mindspore { +namespace lite { +OHOS::HDI::Nnrt::V2_0::Model* MindIR_LiteGraph_To_Model(const LiteGraph* lite_graph, + const OHOS::HDI::Nnrt::V2_0::SharedBuffer& buffer) +{ + return new (std::nothrow) OHOS::HDI::Nnrt::V2_0::Model(); +} + +void MindIR_Model_Destroy(OHOS::HDI::Nnrt::V2_0::Model** model) +{ + if ((model != nullptr) && (*model != nullptr)) { + delete *model; + *model = nullptr; + } +} + +size_t MindIR_LiteGraph_GetConstTensorSize(const mindspore::lite::LiteGraph* lite_graph) +{ + return 1; +} +} +} + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class HDIDeviceTest : public testing::Test { +protected: + void GetBuffer(void*& buffer, size_t length); + OH_NN_ReturnCode PrepareModel(int32_t allocBufferType, int32_t prepareType); +}; + +void HDIDeviceTest::GetBuffer(void*& buffer, size_t length) +{ + std::string data = "ABCD"; + const size_t dataLength = 100; + data.resize(dataLength, '+'); + + std::string filename = "/data/log/memory-001.dat"; + FileUtils fileUtils(filename); + fileUtils.WriteFile(data); + + int fd = open(filename.c_str(), O_RDWR); + EXPECT_NE(fd, -1); + + const auto &memoryManager = MemoryManager::GetInstance(); + buffer = memoryManager->MapMemory(fd, length); + EXPECT_NE(buffer, nullptr); + + const char* result = static_cast(buffer); + int index = 0; + EXPECT_EQ('A', result[index++]); + EXPECT_EQ('B', result[index++]); + EXPECT_EQ('C', result[index++]); + EXPECT_EQ('D', result[index++]); + close(fd); +} + +OH_NN_ReturnCode 
HDIDeviceTest::PrepareModel(int32_t allocBufferType, int32_t prepareType) +{ + std::shared_ptr model = std::make_shared(); + OHOS::sptr sp = OHOS::sptr(new (std::nothrow) V2_0::MockIDevice()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr hdiDevice = std::make_unique(sp); + EXPECT_NE(hdiDevice, nullptr); + + V2_0::SharedBuffer buffer {1, 1, 0, 1}; + EXPECT_CALL(*sp, AllocateBuffer(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(buffer), ::testing::Return(allocBufferType))); + + std::shared_ptr preparedModel; + const int position = 2; + OHOS::sptr iPreparedModel = + OHOS::sptr(new (std::nothrow) V2_0::MockIPreparedModel()); + EXPECT_CALL(*sp, PrepareModel(::testing::_, ::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee(iPreparedModel), + ::testing::Return(prepareType))); + + ModelConfig config; + OH_NN_ReturnCode result = hdiDevice->PrepareModel(model, config, preparedModel); + return result; +} + +/* * + * @tc.name: hdidevice_constructor_001 + * @tc.desc: Verify the Constructor function return object success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_constructor_001, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + EXPECT_NE(device, nullptr); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); +} + +/* * + * @tc.name: hdidevice_getdevicename_001 + * @tc.desc: Verify the GetDeviceName function validate device name success. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getdevicename_001, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + std::string deviceName = "MockDevice"; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), GetDeviceName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceName), ::testing::Return(HDF_SUCCESS))); + + const std::string expectDeviceName = "MockDevice"; + std::string newDeviceName = ""; + OH_NN_ReturnCode result = hdiDevice->GetDeviceName(newDeviceName); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(expectDeviceName, newDeviceName); +} + +/* * + * @tc.name: hdidevice_getdevicename_002 + * @tc.desc: Verify the GetDeviceName function return unavailable device. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getdevicename_002, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + std::string deviceName = "MockDevice"; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), GetDeviceName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceName), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->GetDeviceName(deviceName); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_getvendorname_001 + * @tc.desc: Verify the GetVendorName function validate vendor name success. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getvendorname_001, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + std::string vendorName = "MockVendor"; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), GetVendorName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(vendorName), ::testing::Return(HDF_SUCCESS))); + + const std::string expectDeviceName = "MockVendor"; + std::string newVendorName = ""; + OH_NN_ReturnCode result = hdiDevice->GetVendorName(newVendorName); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(expectDeviceName, newVendorName); +} + +/* * + * @tc.name: hdidevice_getvendorname_002 + * @tc.desc: Verify the GetVendorName function return unavailable device. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getvendorname_002, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + std::string vendorName = "MockVendor"; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), GetVendorName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(vendorName), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->GetVendorName(vendorName); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_getdevicetype_001 + * @tc.desc: Verify the GetDeviceType function validate device type success. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getdevicetype_001, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + V2_0::DeviceType iDeviceType = V2_0::DeviceType::CPU; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), GetDeviceType(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(iDeviceType), ::testing::Return(HDF_SUCCESS))); + + OH_NN_DeviceType expectDeviceType = OH_NN_CPU; + OH_NN_DeviceType newDeviceType = OH_NN_CPU; + OH_NN_ReturnCode result = hdiDevice->GetDeviceType(newDeviceType); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(expectDeviceType, newDeviceType); +} + +/* * + * @tc.name: hdidevice_getdevicetype_002 + * @tc.desc: Verify the GetDeviceType function return unavailable device. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getdevicetype_002, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + OH_NN_DeviceType deviceType = OH_NN_CPU; + V2_0::DeviceType iDeviceType = V2_0::DeviceType::CPU; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), GetDeviceType(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(iDeviceType), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->GetDeviceType(deviceType); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_getdevicestatus_001 + * @tc.desc: Verify the GetDeviceStatus function validate device status success. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getdevicestatus_001, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + V2_0::DeviceStatus iDeviceStatus = V2_0::DeviceStatus::AVAILABLE; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), GetDeviceStatus(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(iDeviceStatus), ::testing::Return(HDF_SUCCESS))); + + const DeviceStatus expectDeviceStatus = AVAILABLE; + DeviceStatus newDeviceStatus = AVAILABLE; + OH_NN_ReturnCode result = hdiDevice->GetDeviceStatus(newDeviceStatus); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(expectDeviceStatus, newDeviceStatus); +} + +/* * + * @tc.name: hdidevice_getdevicestatus_002 + * @tc.desc: Verify the GetDeviceStatus function return unavailable device. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getdevicestatus_002, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + DeviceStatus deviceStatus = AVAILABLE; + V2_0::DeviceStatus iDeviceStatus = V2_0::DeviceStatus::AVAILABLE; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), GetDeviceStatus(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(iDeviceStatus), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->GetDeviceStatus(deviceStatus); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_getsupportedoperation_001 + * @tc.desc: Verify the GetSupportedOperation function return success. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getsupportedoperation_001, TestSize.Level0) +{ + std::vector ops {true}; + std::shared_ptr model = std::make_shared(); + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + V2_0::SharedBuffer buffer {1, 1, 0, 1}; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(buffer), ::testing::Return(HDF_SUCCESS))); + + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), GetSupportedOperation(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(ops), ::testing::Return(HDF_SUCCESS))); + + std::vector newOps {true}; + const std::vector expectOps {true}; + OH_NN_ReturnCode result = hdiDevice->GetSupportedOperation(model, newOps); + EXPECT_EQ(OH_NN_SUCCESS, result); + auto expectOpsSize = expectOps.size(); + for (size_t i = 0; i < expectOpsSize; ++i) { + EXPECT_EQ(expectOps[i], newOps[i]); + } +} + +/* * + * @tc.name: hdidevice_getsupportedoperation_002 + * @tc.desc: Verify the GetSupportedOperation function return failed in case of allocate buffer failure. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getsupportedoperation_002, TestSize.Level0) +{ + std::vector ops; + std::shared_ptr model = std::make_shared(); + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + V2_0::SharedBuffer buffer {1, 1, 0, 1}; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(buffer), ::testing::Return(HDF_FAILURE))); + + OH_NN_ReturnCode result = hdiDevice->GetSupportedOperation(model, ops); + EXPECT_EQ(OH_NN_FAILED, result); +} + +/* * + * @tc.name: hdidevice_getsupportedoperation_003 + * @tc.desc: Verify the GetSupportedOperation function return nullptr. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getsupportedoperation_003, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + std::shared_ptr model = nullptr; + std::vector ops; + OH_NN_ReturnCode result = hdiDevice->GetSupportedOperation(model, ops); + EXPECT_EQ(OH_NN_NULL_PTR, result); +} + +/* * + * @tc.name: hdidevice_getsupportedoperation_004 + * @tc.desc: Verify the GetSupportedOperation function return unavalidable device. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_getsupportedoperation_004, TestSize.Level0) +{ + std::vector ops {true}; + std::shared_ptr model = std::make_shared(); + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + V2_0::SharedBuffer buffer {2, 1, 0, 1}; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(buffer), ::testing::Return(HDF_SUCCESS))); + + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), GetSupportedOperation(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(ops), ::testing::Return(HDF_FAILURE))); + + std::vector newOps {true}; + OH_NN_ReturnCode result = hdiDevice->GetSupportedOperation(model, newOps); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_isfloat16precisionsupported_001 + * @tc.desc: Verify the IsFloat16PrecisionSupported function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_isfloat16precisionsupported_001, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), IsFloat16PrecisionSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_SUCCESS))); + OH_NN_ReturnCode result = hdiDevice->IsFloat16PrecisionSupported(isSupported); + EXPECT_EQ(OH_NN_SUCCESS, result); +} + +/* * + * @tc.name: hdidevice_isfloat16precisionsupported_002 + * @tc.desc: Verify the IsFloat16PrecisionSupported function return unavailable device. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_isfloat16precisionsupported_002, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), IsFloat16PrecisionSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->IsFloat16PrecisionSupported(isSupported); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_isperformancemodesupported_001 + * @tc.desc: Verify the IsPerformanceModeSupported function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_isperformancemodesupported_001, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), IsPerformanceModeSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_SUCCESS))); + + bool newIsSupported = false; + const bool expectIsSupported = false; + OH_NN_ReturnCode result = hdiDevice->IsPerformanceModeSupported(newIsSupported); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(expectIsSupported, newIsSupported); +} + +/* * + * @tc.name: hdidevice_isperformancemodesupported_002 + * @tc.desc: Verify the IsPerformanceModeSupported function return unavailable device. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_isperformancemodesupported_002, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), IsPerformanceModeSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->IsPerformanceModeSupported(isSupported); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_isprioritysupported_001 + * @tc.desc: Verify the IsPrioritySupported function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_isprioritysupported_001, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), IsPrioritySupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_SUCCESS))); + + bool newIsSupported = false; + bool expectIsSupported = false; + OH_NN_ReturnCode result = hdiDevice->IsPrioritySupported(newIsSupported); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(newIsSupported, expectIsSupported); +} + +/* * + * @tc.name: hdidevice_isprioritysupported_002 + * @tc.desc: Verify the IsPrioritySupported function return unavailable device. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_isprioritysupported_002, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), IsPrioritySupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->IsPrioritySupported(isSupported); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_isdynamicinputsupported_001 + * @tc.desc: Verify the IsDynamicInputSupported function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_isdynamicinputsupported_001, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), IsDynamicInputSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_SUCCESS))); + + bool newIsSupported = false; + bool expectIsSupported = false; + OH_NN_ReturnCode result = hdiDevice->IsDynamicInputSupported(newIsSupported); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(newIsSupported, expectIsSupported); +} + +/* * + * @tc.name: hdidevice_isdynamicinputsupported_002 + * @tc.desc: Verify the IsDynamicInputSupported function return unavailable device. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_isdynamicinputsupported_002, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), IsDynamicInputSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->IsDynamicInputSupported(isSupported); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_ismodelcachesupported_001 + * @tc.desc: Verify the IsModelCacheSupported function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_ismodelcachesupported_001, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), IsModelCacheSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_SUCCESS))); + + bool newIsSupported = false; + bool expectIsSupported = false; + OH_NN_ReturnCode result = hdiDevice->IsModelCacheSupported(newIsSupported); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(expectIsSupported, newIsSupported); +} + +/* * + * @tc.name: hdidevice_ismodelcachesupported_002 + * @tc.desc: Verify the IsModelCacheSupported function return unavailable device. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_ismodelcachesupported_002, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), IsModelCacheSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->IsModelCacheSupported(isSupported); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_preparemodel_001 + * @tc.desc: Verify the PrepareModel function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_preparemodel_001, TestSize.Level0) +{ + int32_t allocBufferType = HDF_SUCCESS; + int32_t prepareType = HDF_SUCCESS; + OH_NN_ReturnCode result = PrepareModel(allocBufferType, prepareType); + EXPECT_EQ(OH_NN_SUCCESS, result); +} + +/* * + * @tc.name: hdidevice_preparemodel_002 + * @tc.desc: Verify the PrepareModel function return invalid parameter. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_preparemodel_002, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + std::shared_ptr model = nullptr; + ModelConfig config; + std::shared_ptr preparedModel; + OH_NN_ReturnCode result = hdiDevice->PrepareModel(model, config, preparedModel); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/* * + * @tc.name: hdidevice_preparemodel_003 + * @tc.desc: Verify the PrepareModel function return failed. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_preparemodel_003, TestSize.Level0) +{ + int32_t allocBufferType = HDF_SUCCESS; + int32_t prepareType = HDF_FAILURE; + OH_NN_ReturnCode result = PrepareModel(allocBufferType, prepareType); + EXPECT_EQ(OH_NN_FAILED, result); +} + +/* * + * @tc.name: hdidevice_preparemodel_004 + * @tc.desc: Verify the PrepareModel function return failed. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_preparemodel_004, TestSize.Level0) +{ + int32_t allocBufferType = HDF_FAILURE; + int32_t prepareType = HDF_FAILURE; + OH_NN_ReturnCode result = PrepareModel(allocBufferType, prepareType); + EXPECT_EQ(OH_NN_FAILED, result); +} + +/* * + * @tc.name: hdidevice_preparemodelfrommodelcache_001 + * @tc.desc: Verify the PrepareModelFromModelCache function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_001, TestSize.Level0) +{ + size_t length = 100; + void *buffer = nullptr; + GetBuffer(buffer, length); + + std::vector modelCache = { { buffer, 100 } }; + ModelConfig config; + + OHOS::sptr sp = OHOS::sptr(new (std::nothrow) V2_0::MockIDevice()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr hdiDevice = std::make_unique(sp); + EXPECT_NE(hdiDevice, nullptr); + + std::shared_ptr preparedModel; + + OHOS::sptr iPreparedModel = + OHOS::sptr(new (std::nothrow) V2_0::MockIPreparedModel()); + EXPECT_CALL(*sp, PrepareModelFromModelCache(::testing::_, ::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<2>(iPreparedModel), ::testing::Return(HDF_SUCCESS))); + + OH_NN_ReturnCode result = hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel); + const auto &memoryManager = MemoryManager::GetInstance(); + memoryManager->UnMapMemory(buffer); + EXPECT_EQ(OH_NN_SUCCESS, result); +} + +/* * + * @tc.name: hdidevice_preparemodelfrommodelcache_002 + * @tc.desc: Verify the PrepareModelFromModelCache function return unavailable device. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_002, TestSize.Level0) +{ + size_t length = 100; + void *buffer = nullptr; + GetBuffer(buffer, length); + + OHOS::sptr sp = OHOS::sptr(new (std::nothrow) V2_0::MockIDevice()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr hdiDevice = std::make_unique(sp); + EXPECT_NE(hdiDevice, nullptr); + + std::vector modelCache = { { buffer, 100 } }; + ModelConfig config; + OHOS::sptr preModel = + OHOS::sptr(new (std::nothrow) V2_0::MockIPreparedModel()); + EXPECT_NE(preModel, nullptr); + + std::shared_ptr preparedModel = std::make_shared(preModel); + + OHOS::sptr iPreparedModel = + OHOS::sptr(new (std::nothrow) V2_0::MockIPreparedModel); + EXPECT_CALL(*sp, PrepareModelFromModelCache(::testing::_, ::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<2>(iPreparedModel), ::testing::Return(HDF_FAILURE))); + + OH_NN_ReturnCode result = hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_preparemodelfrommodelcache_003 + * @tc.desc: Verify the PrepareModelFromModelCache function return nullptr. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_003, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + std::vector modelCache = { { nullptr, 0 } }; + ModelConfig config; + std::shared_ptr preparedModel; + OH_NN_ReturnCode result = hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel); + EXPECT_EQ(OH_NN_NULL_PTR, result); +} + +/* * + * @tc.name: hdidevice_allocatebuffer_001 + * @tc.desc: Verify the AllocateBuffer function return nullptr. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_001, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + V2_0::SharedBuffer buffer; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(buffer), ::testing::Return(HDF_FAILURE))); + + size_t length = 8; + void *result = hdiDevice->AllocateBuffer(length); + EXPECT_EQ(nullptr, result); + hdiDevice->ReleaseBuffer(result); +} + +/* * + * @tc.name: hdidevice_allocatebuffer_002 + * @tc.desc: Verify the AllocateBuffer function return nullptr and HDF_FAILURE. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_002, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + size_t length = 8; + void *result = hdiDevice->AllocateBuffer(length); + EXPECT_EQ(nullptr, result); + hdiDevice->ReleaseBuffer(result); +} + +/* * + * @tc.name: hdidevice_allocatebuffer_003 + * @tc.desc: Verify the AllocateBuffer function return nullptr in case of 0 size. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_003, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + size_t length = 0; + void *result = hdiDevice->AllocateBuffer(length); + EXPECT_EQ(nullptr, result); +} + +/* * + * @tc.name: hdidevice_releasebuffer_001 + * @tc.desc: Verify the ReleaseBuffer function validate buffer success. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_001, TestSize.Level0) +{ + size_t length = 100; + void *buffer = nullptr; + GetBuffer(buffer, length); + + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), ReleaseBuffer(::testing::_)) + .WillRepeatedly(::testing::Return(HDF_SUCCESS)); + + EXPECT_NE(hdiDevice, nullptr); + hdiDevice->ReleaseBuffer(buffer); + const auto &memoryManager = MemoryManager::GetInstance(); + memoryManager->UnMapMemory(buffer); +} + +/* * + * @tc.name: hdidevice_releasebuffer_002 + * @tc.desc: Verify the ReleaseBuffer function validate AllocateBuffer return nullptr. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_002, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + V2_0::SharedBuffer sharedbuffer; + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(sharedbuffer), ::testing::Return(HDF_FAILURE))); + + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), ReleaseBuffer(::testing::_)) + .WillRepeatedly(::testing::Return(HDF_FAILURE)); + + size_t length = 8; + void *buffer = hdiDevice->AllocateBuffer(length); + hdiDevice->ReleaseBuffer(buffer); +} + +/* * + * @tc.name: hdidevice_releasebuffer_003 + * @tc.desc: Verify the ReleaseBuffer function validate param buffer is nullptr. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_003, TestSize.Level0) +{ + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + void *buffer = nullptr; + hdiDevice->ReleaseBuffer(buffer); +} + +/* * + * @tc.name: hdidevice_releasebuffer_004 + * @tc.desc: Verify the ReleaseBuffer function validate invalid buffer. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_004, TestSize.Level0) +{ + const size_t length = 100; + auto* buffer = new(std::nothrow) char[length]; + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + hdiDevice->ReleaseBuffer(buffer); + delete[] buffer; + buffer = nullptr; +} + +/* * + * @tc.name: hdidevice_releasebuffer_005 + * @tc.desc: Verify the ReleaseBuffer function validate moc object's ReleaseBuffer return failure. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_005, TestSize.Level0) +{ + size_t length = 100; + void *buffer = nullptr; + GetBuffer(buffer, length); + + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), ReleaseBuffer(::testing::_)) + .WillRepeatedly(::testing::Return(HDF_FAILURE)); + + hdiDevice->ReleaseBuffer(buffer); + const auto &memoryManager = MemoryManager::GetInstance(); + memoryManager->UnMapMemory(buffer); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/components/v2_0/hdi_prepared_model/hdi_prepared_model_test.cpp b/test/unittest/components/v2_0/hdi_prepared_model/hdi_prepared_model_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5e4d2d8fcbc38252f0de46bc4c76fa08639439c7 --- /dev/null +++ 
b/test/unittest/components/v2_0/hdi_prepared_model/hdi_prepared_model_test.cpp @@ -0,0 +1,344 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include + +#include +#include + +#include "common/log.h" +#include "frameworks/native/hdi_prepared_model_v2_0.h" +#include "frameworks/native/memory_manager.h" +#include "frameworks/native/transform.h" +#include "test/unittest/common/v2_0/mock_idevice.h" +#include "test/unittest/common/file_utils.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class HDIPreparedModelTest : public testing::Test { +protected: + void GetBuffer(void*& buffer, size_t length); + void InitTensor(std::vector& inputs, void* buffer, size_t length); + OH_NN_ReturnCode Run(std::vector& inputs); +}; + +void HDIPreparedModelTest::GetBuffer(void*& buffer, size_t length) +{ + std::string data = "ABCD"; + const size_t dataLength = 100; + data.resize(dataLength, '-'); + + std::string filename = "/data/log/memory-001.dat"; + FileUtils fileUtils(filename); + fileUtils.WriteFile(data); + + int fd = open(filename.c_str(), O_RDWR); + EXPECT_NE(-1, fd); + + const auto& memoryManager = MemoryManager::GetInstance(); + buffer = memoryManager->MapMemory(fd, length); + close(fd); +} + +void HDIPreparedModelTest::InitTensor(std::vector& inputs, void* 
buffer, size_t length) +{ + IOTensor inputTensor; + inputTensor.dataType = OH_NN_INT8; + inputTensor.format = OH_NN_FORMAT_NCHW; + inputTensor.data = buffer; + inputTensor.length = length; + inputs.emplace_back(std::move(inputTensor)); +} + +OH_NN_ReturnCode HDIPreparedModelTest::Run(std::vector& inputs) +{ + const int vvPosition = 2; + const int vPosition = 3; + std::vector outputs; + std::vector> outputsDims {{0}}; + std::vector isOutputBufferEnough {}; + + OHOS::sptr sp = + OHOS::sptr(new (std::nothrow) V2_0::MockIPreparedModel()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr preparedModel = std::make_unique(sp); + EXPECT_CALL(*sp, Run(::testing::_, ::testing::_, ::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll( + ::testing::SetArgReferee(outputsDims), + ::testing::SetArgReferee(isOutputBufferEnough), + ::testing::Return(HDF_SUCCESS)) + ); + + OH_NN_ReturnCode result = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough); + return result; +} + +/** + * @tc.name: hidpreparedmodel_constructor_001 + * @tc.desc: Verify the Constructor function validate constructor success. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_constructor_001, TestSize.Level0) +{ + OHOS::sptr hdiPreparedModel = + OHOS::sptr(new (std::nothrow) V2_0::MockIPreparedModel()); + EXPECT_NE(hdiPreparedModel, nullptr); + + std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); + EXPECT_NE(preparedModel, nullptr); +} + +/** + * @tc.name: hidpreparedmodel_exportmodelcache_001 + * @tc.desc: Verify the ExportModelCache function return memory error. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_exportmodelcache_001, TestSize.Level0) +{ + std::vector bufferVect = {{100, 100, 0, 100}}; + OHOS::sptr hdiPreparedModel = + OHOS::sptr(new (std::nothrow) V2_0::MockIPreparedModel()); + std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); + std::vector modelCache; + EXPECT_CALL(*((V2_0::MockIPreparedModel*)hdiPreparedModel.GetRefPtr()), + ExportModelCache(::testing::_)) + .WillRepeatedly( + ::testing::DoAll( + ::testing::SetArgReferee<0>(bufferVect), + ::testing::Return(HDF_SUCCESS) + ) + ); + + OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); + EXPECT_EQ(OH_NN_MEMORY_ERROR, result); +} + +/** + * @tc.name: hidpreparedmodel_exportmodelcache_002 + * @tc.desc: Verify the ExportModelCache function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_exportmodelcache_002, TestSize.Level0) +{ + std::vector bufferVect; + OHOS::sptr mockPreparedModel = + OHOS::sptr(new (std::nothrow) V2_0::MockIPreparedModel()); + EXPECT_NE(mockPreparedModel, nullptr); + + std::unique_ptr preparedModel = std::make_unique(mockPreparedModel); + std::vector modelCache; + EXPECT_CALL(*((V2_0::MockIPreparedModel*)mockPreparedModel.GetRefPtr()), + ExportModelCache(::testing::_)) + .WillRepeatedly( + ::testing::DoAll( + ::testing::SetArgReferee<0>(bufferVect), + ::testing::Return(HDF_SUCCESS) + ) + ); + + OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); + EXPECT_EQ(OH_NN_SUCCESS, result); +} + +/** + * @tc.name: hidpreparedmodel_exportmodelcache_003 + * @tc.desc: Verify the ExportModelCache function return invalid parameter. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_exportmodelcache_003, TestSize.Level0) +{ + OHOS::sptr hdiPreparedModel = + OHOS::sptr(new (std::nothrow) V2_0::MockIPreparedModel()); + EXPECT_NE(hdiPreparedModel, nullptr); + + std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); + std::vector modelCache {{nullptr, 0}}; + OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_exportmodelcache_004 + * @tc.desc: Verify the ExportModelCache function return unavailable device. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_exportmodelcache_004, TestSize.Level0) +{ + std::vector bufferVect = {{100, 100, 0, 100}}; + OHOS::sptr mockPreparedModel = + OHOS::sptr(new (std::nothrow) V2_0::MockIPreparedModel()); + EXPECT_NE(mockPreparedModel, nullptr); + + std::unique_ptr preparedModel = std::make_unique(mockPreparedModel); + std::vector modelCache; + EXPECT_CALL(*((V2_0::MockIPreparedModel*)mockPreparedModel.GetRefPtr()), + ExportModelCache(::testing::_)) + .WillRepeatedly( + ::testing::DoAll( + ::testing::SetArgReferee<0>(bufferVect), + ::testing::Return(HDF_FAILURE) + ) + ); + + OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); +} + +/** + * @tc.name: hidpreparedmodel_run_001 + * @tc.desc: Verify the Run function return invalid parameter. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_001, TestSize.Level0) +{ + IOTensor inputTensor; + inputTensor.dataType = OH_NN_INT8; + + IOTensor outputTensor; + outputTensor.dataType = OH_NN_INT8; + std::vector inputs; + inputs.emplace_back(std::move(inputTensor)); + std::vector outputs; + + std::vector iOutputTensors; + V2_0::IOTensor iTensor; + iOutputTensors.emplace_back(iTensor); + std::vector> outputsDims {{0}}; + std::vector isOutputBufferEnough {}; + + std::shared_ptr sp = std::make_shared(); + OHOS::sptr hdiPreparedModel = + OHOS::sptr(new (std::nothrow) V2_0::MockIPreparedModel()); + EXPECT_NE(hdiPreparedModel, nullptr); + + std::unique_ptr preparedModel = std::make_unique(hdiPreparedModel); + OH_NN_ReturnCode result = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_002 + * @tc.desc: Verify the Run function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_002, TestSize.Level0) +{ + const size_t length = 100; + void* buffer = nullptr; + GetBuffer(buffer, length); + + std::vector inputs; + std::vector outputs; + InitTensor(inputs, buffer, length); + + OH_NN_ReturnCode result = Run(inputs); + EXPECT_EQ(OH_NN_SUCCESS, result); + const auto& memoryManager = MemoryManager::GetInstance(); + memoryManager->UnMapMemory(buffer); +} + +/** + * @tc.name: hidpreparedmodel_run_003 + * @tc.desc: Verify the Run function return unavailable device in case of run failure. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_003, TestSize.Level0) +{ + const size_t length = 100; + void* buffer = nullptr; + GetBuffer(buffer, length); + + std::vector inputs; + std::vector outputs; + InitTensor(inputs, buffer, length); + + std::vector> outputsDims {}; + std::vector isOutputBufferEnough {}; + + OHOS::sptr sp = + OHOS::sptr(new (std::nothrow) V2_0::MockIPreparedModel()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr preparedModel = std::make_unique(sp); + + EXPECT_CALL(*sp, Run(::testing::_, ::testing::_, ::testing::_, ::testing::_)) + .WillRepeatedly( + ::testing::DoAll( + ::testing::SetArgReferee<2>(outputsDims), + ::testing::SetArgReferee<3>(isOutputBufferEnough), + ::testing::Return(HDF_FAILURE) + ) + ); + + OH_NN_ReturnCode result = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, result); + const auto& memoryManager = MemoryManager::GetInstance(); + memoryManager->UnMapMemory(buffer); +} + +/** + * @tc.name: hidpreparedmodel_run_004 + * @tc.desc: Verify the Run function return invalid parameter. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_004, TestSize.Level0) +{ + std::vector inputs; + InitTensor(inputs, nullptr, 0); + OH_NN_ReturnCode result = Run(inputs); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_005 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_005, TestSize.Level0) +{ + const size_t length = 100; + void* buffer = nullptr; + GetBuffer(buffer, length); + + std::vector inputs; + std::vector outputs; + InitTensor(inputs, buffer, length); + InitTensor(outputs, nullptr, 0); + + std::vector> outputsDims {}; + std::vector isOutputBufferEnough {}; + + OHOS::sptr sp = + OHOS::sptr(new (std::nothrow) V2_0::MockIPreparedModel()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr preparedModel = std::make_unique(sp); + + OH_NN_ReturnCode result = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); + const auto& memoryManager = MemoryManager::GetInstance(); + memoryManager->UnMapMemory(buffer); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/components/v2_0/inner_model/inner_model_test.cpp b/test/unittest/components/v2_0/inner_model/inner_model_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7e6dbb2a6cfa2daf3170a8f4bceae21d2312f43a --- /dev/null +++ b/test/unittest/components/v2_0/inner_model/inner_model_test.cpp @@ -0,0 +1,825 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +#include "common/utils.h" +#include "common/log.h" +#include "frameworks/native/nn_tensor.h" +#include "frameworks/native/inner_model.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; + +namespace NNRT { +namespace UnitTest { +class InnerModelTest : public testing::Test { +public: + void SetLiteGraph(mindspore::lite::LiteGraph* liteGraph); + void SetTensors(); + void SetIndices(); + +public: + InnerModel m_innerModelTest; + + std::vector m_dimInput {3, 3}; + std::vector m_dimOutput {3, 3}; + std::vector m_inputIndices {0}; + std::vector m_outputIndices {1}; + + OH_NN_OperationType m_opType {OH_NN_OPS_ADD}; + + OH_NN_UInt32Array m_inputs; + OH_NN_UInt32Array m_outputs; + OH_NN_UInt32Array m_params; + + uint32_t m_paramIndexs[1] {3}; + uint32_t m_inputIndexs[2] {0, 1}; + uint32_t m_outputIndexs[1] {2}; +}; + +void InnerModelTest::SetLiteGraph(mindspore::lite::LiteGraph* liteGraph) +{ + liteGraph->name_ = "testGraph"; + liteGraph->input_indices_ = m_inputIndices; + liteGraph->output_indices_ = m_outputIndices; + + const std::vector quant_params {}; + + for (size_t indexInput = 0; indexInput < liteGraph->input_indices_.size(); ++indexInput) { + const std::vector data(36, 1); + liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create(liteGraph->name_, + mindspore::lite::DATA_TYPE_FLOAT32, m_dimInput, mindspore::lite::FORMAT_NCHW, data, quant_params)); + } + + for (size_t indexOutput = 0; indexOutput < liteGraph->output_indices_.size(); ++indexOutput) { + const std::vector dataOut(36, 1); + liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create(liteGraph->name_, + mindspore::lite::DATA_TYPE_FLOAT32, m_dimOutput, mindspore::lite::FORMAT_NCHW, dataOut, quant_params)); + } +} + +void InnerModelTest::SetTensors() +{ + const int dim[2] = {2, 2}; + const OH_NN_Tensor& tensor = {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + EXPECT_EQ(OH_NN_SUCCESS, 
m_innerModelTest.AddTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + + const OH_NN_Tensor& tensorParam = {OH_NN_INT8, 0, nullptr, nullptr, OH_NN_ADD_ACTIVATIONTYPE}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensorParam)); +} + +void InnerModelTest::SetIndices() +{ + m_params.data = m_paramIndexs; + m_params.size = sizeof(m_paramIndexs) / sizeof(uint32_t); + + m_inputs.data = m_inputIndexs; + m_inputs.size = sizeof(m_inputIndexs) / sizeof(uint32_t); + + m_outputs.data = m_outputIndexs; + m_outputs.size = sizeof(m_outputIndexs) / sizeof(uint32_t); +} + +/** + * @tc.name: inner_model_construct_nntensor_from_litegraph_001 + * @tc.desc: Verify the input_indices is empty of the construct_nntensor_from_litegraph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_construct_nntensor_from_litegraph_001, TestSize.Level1) +{ + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + m_inputIndices = {}; + + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + mindspore::lite::MindIR_LiteGraph_Destroy(&liteGraph); +} + +/** + * @tc.name: inner_model_construct_nntensor_from_litegraph_002 + * @tc.desc: Verify the input_indices is out of bounds of the construct_nntensor_from_litegraph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_construct_nntensor_from_litegraph_002, TestSize.Level1) +{ + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + m_inputIndices = {6}; + + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + mindspore::lite::MindIR_LiteGraph_Destroy(&liteGraph); +} + +/** + * @tc.name: inner_model_construct_nntensor_from_litegraph_003 + * @tc.desc: Verify the 
success of the construct_nntensor_from_litegraph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_construct_nntensor_from_litegraph_003, TestSize.Level1) +{ + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.BuildFromLiteGraph(liteGraph)); +} + +/** + * @tc.name: inner_model_construct_nntensor_from_litegraph_004 + * @tc.desc: Verify the nntensor build failed nullptr return of the construct_nntensor_from_litegraph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_construct_nntensor_from_litegraph_004, TestSize.Level1) +{ + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + m_dimInput = {3, -3}; + + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_NULL_PTR, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + mindspore::lite::MindIR_LiteGraph_Destroy(&liteGraph); +} + +/** + * @tc.name: inner_model_construct_nntensor_from_litegraph_005 + * @tc.desc: Verify the output indices out of bounds of the construct_nntensor_from_litegraph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_construct_nntensor_from_litegraph_005, TestSize.Level1) +{ + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + m_outputIndices = {6}; + + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + mindspore::lite::MindIR_LiteGraph_Destroy(&liteGraph); +} + +/** + * @tc.name: inner_model_build_from_lite_graph_001 + * @tc.desc: Verify the litegraph is nullptr of the build_from_lite_graph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_build_from_lite_graph_001, TestSize.Level1) +{ + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.BuildFromLiteGraph(nullptr)); +} + +/** + * 
@tc.name: inner_model_build_from_lite_graph_002 + * @tc.desc: Verify the buildfromlitegraph twice forbidden of the build_from_lite_graph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_build_from_lite_graph_002, TestSize.Level1) +{ + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.BuildFromLiteGraph(liteGraph)); +} + +/** + * @tc.name: inner_model_build_from_lite_graph_003 + * @tc.desc: Verify the litegraph->alltensors is empty of the build_from_lite_graph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_build_from_lite_graph_003, TestSize.Level1) +{ + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + liteGraph->name_ = "testGraph"; + liteGraph->input_indices_ = {0}; + liteGraph->output_indices_ = {1}; + + const int32_t dimInput[2] = {2, 2}; + const OH_NN_Tensor& tensor = {OH_NN_INT8, 2, dimInput, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + mindspore::lite::MindIR_LiteGraph_Destroy(&liteGraph); +} + + +/** + * @tc.name: inner_model_add_tensor_001 + * @tc.desc: Verify the success of the addtensor function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_tensor_001, TestSize.Level1) +{ + const int32_t dimInput[2] = {2, 2}; + const OH_NN_Tensor& tensor = {OH_NN_INT8, 2, dimInput, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); +} + +/** + * @tc.name: inner_model_add_tensor_002 + * @tc.desc: Verify the addtensor after buildfromlitegraph of the addtensor function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, 
inner_model_add_tensor_002, TestSize.Level1) +{ + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + + const int32_t dimInput[2] = {2, 2}; + const OH_NN_Tensor& tensor = {OH_NN_INT8, 2, dimInput, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.AddTensor(tensor)); +} + +/** + * @tc.name: inner_model_add_tensor_003 + * @tc.desc: Verify the buildfromnntensor failed of the addtensor function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_tensor_003, TestSize.Level1) +{ + const int32_t dimInput[2] = {2, -2}; + const OH_NN_Tensor& tensor = {OH_NN_INT8, 2, dimInput, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddTensor(tensor)); +} + + +/** + * @tc.name: inner_model_set_tensor_value_001 + * @tc.desc: Verify the success of the set_tensor_value function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_set_tensor_value_001, TestSize.Level1) +{ + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); +} + +/** + * @tc.name: inner_model_set_tensor_value_002 + * @tc.desc: Verify the index out of bounds of the set_tensor_value function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_set_tensor_value_002, TestSize.Level1) +{ + SetTensors(); + + uint32_t index = 6; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); +} + +/** + * @tc.name: inner_model_set_tensor_value_003 + * @tc.desc: Verify the buffer value is nullptr of the set_tensor_value function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_set_tensor_value_003, TestSize.Level1) +{ + SetTensors(); + + 
uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + nullptr, sizeof(activation))); +} + +/** + * @tc.name: inner_model_set_tensor_value_004 + * @tc.desc: Verify the length invalid of the set_tensor_value function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_set_tensor_value_004, TestSize.Level1) +{ + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), 0)); +} + +/** + * @tc.name: inner_model_set_tensor_value_005 + * @tc.desc: Verify the after buildgraph of the set_tensor_value function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_set_tensor_value_005, TestSize.Level1) +{ + uint32_t index = 3; + const int8_t activation = 0; + + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); +} + +/** + * @tc.name: inner_model_set_tensor_value_006 + * @tc.desc: Verify the set value twice of the set_tensor_value function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_set_tensor_value_006, TestSize.Level1) +{ + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); +} + +/** + * @tc.name: inner_model_set_tensor_value_007 + * @tc.desc: Verify the tensor dynamicShape of the set_tensor_value function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_set_tensor_value_007, TestSize.Level1) +{ + const 
int32_t dimInput[2] = {2, -1};
+    const OH_NN_Tensor& tensor = {OH_NN_FLOAT32, 2, dimInput, nullptr, OH_NN_TENSOR};
+
+    EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor));
+    EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor));
+    EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor));
+    const OH_NN_Tensor& tensorParam = {OH_NN_INT8, 0, nullptr, nullptr, OH_NN_ADD_ACTIVATIONTYPE};
+    EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensorParam));
+
+    uint32_t index = 0;
+    float x[4] = {0, 1, 2, 3};
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.SetTensorValue(index,
+        x, sizeof(x)- 1));
+}
+
+/**
+ * @tc.name: inner_model_add_operation_001
+ * @tc.desc: Verify the success of the addoperation function
+ * @tc.type: FUNC
+ */
+HWTEST_F(InnerModelTest, inner_model_add_operation_001, TestSize.Level1)
+{
+    SetIndices();
+
+    SetTensors();
+
+    uint32_t index = 3;
+    const int8_t activation = 0;
+    EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index,
+        static_cast(&activation), sizeof(int8_t)));
+
+    EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs));
+}
+
+/**
+ * @tc.name: inner_model_add_operation_002
+ * @tc.desc: Verify the after buildgraph of the addoperation function
+ * @tc.type: FUNC
+ */
+HWTEST_F(InnerModelTest, inner_model_add_operation_002, TestSize.Level1)
+{
+    OH_NN_OperationType m_opType = OH_NN_OPS_ADD;
+    OH_NN_UInt32Array m_inputs;
+    OH_NN_UInt32Array m_outputs;
+    OH_NN_UInt32Array m_params;
+
+    mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph();
+    EXPECT_NE(nullptr, liteGraph);
+    SetLiteGraph(liteGraph);
+    EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.BuildFromLiteGraph(liteGraph));
+
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs,
+        m_outputs));
+}
+
+/**
+ * @tc.name: inner_model_add_operation_003
+ * @tc.desc: Verify the without set buffer of the addoperation function
+ * @tc.type: FUNC
+
*/
+HWTEST_F(InnerModelTest, inner_model_add_operation_003, TestSize.Level1)
+{
+    SetIndices();
+    SetTensors();
+
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs));
+}
+
+/**
+ * @tc.name: inner_model_add_operation_004
+ * @tc.desc: Verify the output indices equal to input indices of the addoperation function
+ * @tc.type: FUNC
+ */
+HWTEST_F(InnerModelTest, inner_model_add_operation_004, TestSize.Level1)
+{
+    m_outputIndexs[0] = 0;
+
+    SetIndices();
+    SetTensors();
+
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs));
+}
+
+/**
+ * @tc.name: inner_model_add_operation_005
+ * @tc.desc: Verify the optype invalid of the addoperation function
+ * @tc.type: FUNC
+ */
+HWTEST_F(InnerModelTest, inner_model_add_operation_005, TestSize.Level1)
+{
+    m_opType = OH_NN_OperationType(99);
+
+    SetIndices();
+    SetTensors();
+
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs));
+}
+
+/**
+ * @tc.name: inner_model_add_operation_006
+ * @tc.desc: Verify the input indices out of bounds of the addoperation function
+ * @tc.type: FUNC
+ */
+HWTEST_F(InnerModelTest, inner_model_add_operation_006, TestSize.Level1)
+{
+    m_inputIndexs[1] = 6;
+
+    SetIndices();
+    SetTensors();
+
+    uint32_t index = 3;
+    const int8_t activation = 0;
+    EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index,
+        static_cast(&activation), sizeof(int8_t)));
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs));
+}
+
+/**
+ * @tc.name: inner_model_add_operation_007
+ * @tc.desc: Verify the param indices out of bounds of the addoperation function
+ * @tc.type: FUNC
+ */
+HWTEST_F(InnerModelTest, inner_model_add_operation_007, TestSize.Level1)
+{
+    m_paramIndexs[0] = 6;
+
+    SetIndices();
+    SetTensors();
+
+    uint32_t index = 3;
+    const int8_t activation = 0;
+    EXPECT_EQ(OH_NN_SUCCESS,
m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_add_operation_008 + * @tc.desc: Verify the input indices size is 0 of the addoperation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_008, TestSize.Level1) +{ + SetIndices(); + + m_inputs.size = 0; + m_inputs.data = nullptr; + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_add_operation_009 + * @tc.desc: Verify the output indices size is 0 of the addoperation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_009, TestSize.Level1) +{ + SetIndices(); + + m_outputs.size = 0; + m_outputs.data = nullptr; + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_add_operation_010 + * @tc.desc: Verify the ops build failed of the addoperation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_add_operation_010, TestSize.Level1) +{ + SetIndices(); + + const int32_t dimInput1[2] = {2, 2}; + const OH_NN_Tensor& tensor = {OH_NN_FLOAT32, 2, dimInput1, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + const int32_t dimInput2[2] = {2, 2}; + const OH_NN_Tensor& tensor1 = {OH_NN_FLOAT32, 2, dimInput2, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, 
m_innerModelTest.AddTensor(tensor1)); + const int32_t dimOutput[2] = {2, 2}; + const OH_NN_Tensor& tensor2 = {OH_NN_FLOAT32, 2, dimOutput, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor2)); + const OH_NN_Tensor& tensor3 = {OH_NN_INT8, 0, nullptr, nullptr, OH_NN_DIV_ACTIVATIONTYPE}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor3)); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); +} + +/** + * @tc.name: inner_model_specify_inputs_and_outputs_001 + * @tc.desc: Verify the success of the specify_inputs_and_outputs function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_specify_inputs_and_outputs_001, TestSize.Level1) +{ + SetIndices(); + SetTensors(); + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); + + std::vector> inTensors = m_innerModelTest.GetInputTensors(); + EXPECT_EQ(inTensors.size(), m_inputs.size); + std::vector> outTensors = m_innerModelTest.GetOutputTensors(); + EXPECT_EQ(outTensors.size(), m_outputs.size); +} + +/** + * @tc.name: inner_model_specify_inputs_and_outputs_002 + * @tc.desc: Verify the after buildgraph of the specify_inputs_and_outputs function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_specify_inputs_and_outputs_002, TestSize.Level1) +{ + OH_NN_UInt32Array inputs; + OH_NN_UInt32Array outputs; + inputs.data = m_inputIndexs; + inputs.size = sizeof(m_inputIndexs) / sizeof(uint32_t); + outputs.data = nullptr; + outputs.size = sizeof(m_outputIndexs) / sizeof(uint32_t); + + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + 
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.SpecifyInputsAndOutputs(inputs, outputs));
+}
+
+/**
+ * @tc.name: inner_model_specify_inputs_and_outputs_003
+ * @tc.desc: Verify the output indices is nullptr but length not 0 of the specify_inputs_and_outputs function
+ * @tc.type: FUNC
+ */
+HWTEST_F(InnerModelTest, inner_model_specify_inputs_and_outputs_003, TestSize.Level1)
+{
+    SetIndices();
+
+    m_outputs.data = nullptr;
+    SetTensors();
+
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs));
+}
+
+/**
+ * @tc.name: inner_model_specify_inputs_and_outputs_004
+ * @tc.desc: Verify the specify twice of the specify_inputs_and_outputs function
+ * @tc.type: FUNC
+ */
+HWTEST_F(InnerModelTest, inner_model_specify_inputs_and_outputs_004, TestSize.Level1)
+{
+    SetIndices();
+    SetTensors();
+
+    EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs));
+
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs));
+}
+
+/**
+ * @tc.name: inner_model_build_001
+ * @tc.desc: Verify the success of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(InnerModelTest, inner_model_build_001, TestSize.Level1)
+{
+    SetIndices();
+    SetTensors();
+
+    uint32_t index = 3;
+    const int8_t activation = 0;
+    EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index,
+        static_cast(&activation), sizeof(int8_t)));
+
+    EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs));
+    EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs));
+    EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.Build());
+    EXPECT_EQ(true, m_innerModelTest.IsBuild());
+}
+
+/**
+ * @tc.name: inner_model_build_002
+ * @tc.desc: Verify the build twice forbidden of the build function
+ * @tc.type: FUNC
+ */
+HWTEST_F(InnerModelTest, inner_model_build_002, TestSize.Level1)
+{
+    SetIndices();
+    SetTensors();
+
+    uint32_t index = 3;
+ const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.Build()); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.Build()); +} + +/** + * @tc.name: inner_model_build_003 + * @tc.desc: Verify the params not match optype of the build function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_build_003, TestSize.Level1) +{ + OH_NN_OperationType m_opType = OH_NN_OPS_DIV; + + SetIndices(); + + const int dim[2] = {2, 2}; + const OH_NN_Tensor& tensor = {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensor)); + const OH_NN_Tensor& tensorParam = {OH_NN_INT8, 0, nullptr, nullptr, OH_NN_DIV_ACTIVATIONTYPE}; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddTensor(tensorParam)); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_FAILED, m_innerModelTest.Build()); +} + +/** + * @tc.name: inner_model_build_004 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_build_004, TestSize.Level1) +{ + SetIndices(); + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), 
sizeof(int8_t))); + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.Build()); +} + +/** + * @tc.name: inner_model_get_supported_operation_001 + * @tc.desc: Verify the success of the get_supported_operation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_get_supported_operation_001, TestSize.Level1) +{ + const bool *isSupported = nullptr; + uint32_t opCount = 1; + + SetIndices(); + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + size_t deviceID = 10; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.Build()); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.GetSupportedOperations(deviceID, &isSupported, opCount)); +} + +/** + * @tc.name: inner_model_get_supported_operation_002 + * @tc.desc: Verify the mock hdi device result of the get_supported_operation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_get_supported_operation_002, TestSize.Level1) +{ + size_t deviceID = 10; + const bool *isSupported = nullptr; + uint32_t opCount = 1; + + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + SetLiteGraph(liteGraph); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.BuildFromLiteGraph(liteGraph)); + + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, m_innerModelTest.GetSupportedOperations(deviceID, &isSupported, opCount)); +} + +/** + * @tc.name: inner_model_get_supported_operation_003 + * @tc.desc: Verify the mock device manager of the get_supported_operation function + * 
@tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_get_supported_operation_003, TestSize.Level1) +{ + const bool *isSupported = nullptr; + uint32_t opCount = 1; + + SetIndices(); + SetTensors(); + + uint32_t index = 3; + const int8_t activation = 0; + size_t deviceID = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.AddOperation(m_opType, m_params, m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.SpecifyInputsAndOutputs(m_inputs, m_outputs)); + EXPECT_EQ(OH_NN_SUCCESS, m_innerModelTest.Build()); + EXPECT_EQ(OH_NN_FAILED, m_innerModelTest.GetSupportedOperations(deviceID, &isSupported, opCount)); + + std::shared_ptr liteGraph = m_innerModelTest.GetLiteGraphs(); + EXPECT_EQ(liteGraph->name_, "NNR_Model"); +} + +/** + * @tc.name: inner_model_get_supported_operation_004 + * @tc.desc: Verify the before build of the get_supported_operation function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_get_supported_operation_004, TestSize.Level1) +{ + size_t deviceID = 10; + const bool *isSupported = nullptr; + uint32_t opCount = 1; + + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.GetSupportedOperations(deviceID, &isSupported, opCount)); +} +} // namespace UnitTest +} // namespace NNRT diff --git a/test/unittest/components/v2_0/inner_model/nn_tensor_test.cpp b/test/unittest/components/v2_0/inner_model/nn_tensor_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..512194e078794072b0e11da9fc2c759534a62835 --- /dev/null +++ b/test/unittest/components/v2_0/inner_model/nn_tensor_test.cpp @@ -0,0 +1,525 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +#include "frameworks/native/validation.h" +#include "frameworks/native/nn_tensor.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +using namespace OHOS::NeuralNetworkRuntime::Validation; + +namespace NNRT { +namespace UnitTest { +class NnTensorTest : public testing::Test { +}; + +/** + * @tc.name: nn_tensor_parse_dimensions_001 + * @tc.desc: Verify the success of the parse_dimensions function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_dimensions_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_parse_dimensions_002 + * @tc.desc: Verify the invalid dimensions of the parse_dimensions function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_dimensions_002, TestSize.Level1) +{ + OH_NN_Tensor tensor; + tensor.dataType = OH_NN_FLOAT32; + tensor.dimensionCount = 2; + tensor.dimensions = nullptr; + tensor.quantParam = nullptr; + tensor.type = OH_NN_TENSOR; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_parse_dimensions_003 + * @tc.desc: Verify the invalid shape tensor of the parse_dimensions function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_dimensions_003, TestSize.Level1) +{ + const int dim[2] = {2, -2}; + OH_NN_Tensor tensor 
{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_parse_dimensions_004 + * @tc.desc: Verify the dynamic shape of the parse_dimensions function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_dimensions_004, TestSize.Level1) +{ + const int dim[2] = {2, -1}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_parse_dimensions_005 + * @tc.desc: Verify the dims out of bounds of the parse_dimensions function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_dimensions_005, TestSize.Level1) +{ + const int dim[3] = {1000000, 1000000, 10000000}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); +} + + +/** + * @tc.name: nn_tensor_parse_quant_params_001 + * @tc.desc: Verify the success of the parse_quant_params function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_quant_params_001, TestSize.Level1) +{ + const double scale = 1.0; + const int32_t zeroPoint = 0; + const uint32_t numBits = 8; + const OH_NN_QuantParam quantParam = {1, &numBits, &scale, &zeroPoint}; + + NNTensor nnTensor; + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, &quantParam, OH_NN_TENSOR}; + + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_parse_quant_params_002 + * @tc.desc: Verify the invalid numbits of the parse_quant_params function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_quant_params_002, TestSize.Level1) +{ + const double scale = 1.0; + const int32_t zeroPoint = 0; + const uint32_t numBits = 16; + const OH_NN_QuantParam quantParam = {1, &numBits, &scale, &zeroPoint}; + 
+ NNTensor nnTensor; + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, &quantParam, OH_NN_TENSOR}; + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_parse_quant_params_004 + * @tc.desc: Verify the invalid scale of the parse_quant_params function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_quant_params_004, TestSize.Level1) +{ + const int32_t zeroPoint = 0; + const uint32_t numBits = 8; + const OH_NN_QuantParam quantParam = {1, &numBits, nullptr, &zeroPoint}; + + NNTensor nnTensor; + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, &quantParam, OH_NN_TENSOR}; + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_parse_quant_params_005 + * @tc.desc: Verify the invalid zeropoint of the parse_quant_params function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_parse_quant_params_005, TestSize.Level1) +{ + const double scale = 1.0; + const uint32_t numBits = 8; + const OH_NN_QuantParam quantParam = {1, &numBits, &scale, nullptr}; + + NNTensor nnTensor; + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, &quantParam, OH_NN_TENSOR}; + + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_set_dimensions_001 + * @tc.desc: Verify the success of the set_dimensions function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_set_dimensions_001, TestSize.Level1) +{ + const int dim[2] = {2, -1}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + const std::vector dimensions = {2, 3}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.SetDimensions(dimensions)); +} + +/** + * @tc.name: nn_tensor_set_dimensions_002 + * @tc.desc: Verify the dim out of bounds of the set_dimensions function + * @tc.type: FUNC + 
*/ +HWTEST_F(NnTensorTest, nn_tensor_set_dimensions_002, TestSize.Level1) +{ + const int dim[2] = {2, -1}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + const std::vector dimensions = {2, 3, 5}; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.SetDimensions(dimensions)); +} + +/** + * @tc.name: nn_tensor_compare_attribute_001 + * @tc.desc: Verify the success of the CompareAttribute function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_compare_attribute_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + NNTensor expectTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + expectTensor = std::move(nnTensor); + EXPECT_EQ(true, nnTensor.CompareAttribute(nnTensor)); +} + +/** + * @tc.name: nn_tensor_compare_attribute_002 + * @tc.desc: Verify the datatype not equal of the CompareAttribute function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_compare_attribute_002, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + NNTensor expectTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + + const int dimExpect[2] = {2, 2}; + OH_NN_Tensor tensorExpect {OH_NN_INT32, 2, dimExpect, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, expectTensor.BuildFromOHNNTensor(tensorExpect)); + + EXPECT_EQ(false, nnTensor.CompareAttribute(expectTensor)); +} + +/** + * @tc.name: nn_tensor_compare_attribute_003 + * @tc.desc: Verify the dim size not equal of the CompareAttribute function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_compare_attribute_003, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + NNTensor expectTensor; + 
EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + + const int dimExpect[3] = {2, 2, 3}; + OH_NN_Tensor tensorExpect {OH_NN_FLOAT32, 3, dimExpect, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, expectTensor.BuildFromOHNNTensor(tensorExpect)); + + EXPECT_EQ(false, nnTensor.CompareAttribute(expectTensor)); +} + +/** + * @tc.name: nn_tensor_compare_attribute_004 + * @tc.desc: Verify the dim value not equal of the CompareAttribute function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_compare_attribute_004, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + NNTensor expectTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + + const int dimExpect[2] = {2, 3}; + OH_NN_Tensor tensorExpect {OH_NN_FLOAT32, 2, dimExpect, nullptr, OH_NN_TENSOR}; + EXPECT_EQ(OH_NN_SUCCESS, expectTensor.BuildFromOHNNTensor(tensorExpect)); + + EXPECT_EQ(false, nnTensor.CompareAttribute(expectTensor)); +} + +/** + * @tc.name: nn_tensor_is_scalar_001 + * @tc.desc: Verify the success of the is_scalar function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_is_scalar_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + EXPECT_EQ(false, nnTensor.IsScalar()); +} + +/** + * @tc.name: nn_tensor_build_from_tensor_001 + * @tc.desc: Verify the success of the build_from_tensor function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_convert_to_io_tensor_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + + int8_t* activationValue = new (std::nothrow) int8_t[1] {0}; + EXPECT_NE(nullptr, activationValue); + + // After 
SetBuffer, this memory is released by NNTensor + nnTensor.SetBuffer(activationValue, sizeof(int8_t)); + IOTensor ioTensor; + nnTensor.ConvertToIOTensor(ioTensor); + EXPECT_EQ(sizeof(int8_t), ioTensor.length); +} + +/** + * @tc.name: nn_tensor_get_buffer_length_001 + * @tc.desc: Verify the success of the get_buffer_length function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_get_buffer_length_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + int8_t* activationValue = new (std::nothrow) int8_t[1] {0}; + EXPECT_NE(nullptr, activationValue); + + // After SetBuffer, this memory is released by NNTensor + nnTensor.SetBuffer(activationValue, sizeof(int8_t)); + size_t length = sizeof(int8_t); + EXPECT_EQ(length, nnTensor.GetBufferLength()); +} + +/** + * @tc.name: nn_tensor_get_format_001 + * @tc.desc: Verify the success of the get_format function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_get_format_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + OH_NN_Format format = OH_NN_FORMAT_NHWC; + EXPECT_EQ(format, nnTensor.GetFormat()); +} + +/** + * @tc.name: nn_tensor_get_name_001 + * @tc.desc: Verify the success of the get name function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_get_name_001, TestSize.Level1) +{ + NNTensor nnTensor; + const std::string& name = "test"; + nnTensor.SetName(name); + EXPECT_EQ(name, nnTensor.GetName()); +} + +/** + * @tc.name: nn_tensor_get_quant_param_001 + * @tc.desc: Verify the success of the get_quant_param function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_get_quant_param_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor 
{OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + + std::vector quantParam = nnTensor.GetQuantParam(); + size_t quantSize = 0; + EXPECT_EQ(quantSize, quantParam.size()); +} + +/** + * @tc.name: nn_tensor_build_from_tensor_002 + * @tc.desc: Verify the invalid datatype value of the build_from_tensor function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_build_from_tensor_002, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + + int dataTypeTest = 13; + OH_NN_DataType dataType = (OH_NN_DataType)dataTypeTest; + OH_NN_Tensor tensor {dataType, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.BuildFromOHNNTensor(tensor)); +} + +/** + * @tc.name: nn_tensor_convert_to_lite_graph_tensor_001 + * @tc.desc: Verify the success of the convert_to_lite_graph function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_convert_to_lite_graph_tensor_001, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + OH_NN_Tensor tensor {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + + LiteGraphTensorPtr tensorPtr = {nullptr, DestroyLiteGraphTensor}; + EXPECT_NE(tensorPtr, nnTensor.ConvertToLiteGraphTensor()); +} + +/** + * @tc.name: nn_tensor_convert_to_lite_graph_tensor_002 + * @tc.desc: Verify the success with quant of the convert_to_lite_graph function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_convert_to_lite_graph_tensor_002, TestSize.Level1) +{ + const int dim[2] = {2, 2}; + + OH_NN_Tensor tensor; + tensor.dataType = OH_NN_FLOAT32; + tensor.dimensionCount = 2; + tensor.dimensions = dim; + const double scale = 1.0; + const int32_t zeroPoint = 0; + const uint32_t numBits = 8; + const OH_NN_QuantParam quantParam = {1, &numBits, &scale, &zeroPoint}; + tensor.quantParam = &quantParam; + tensor.type = OH_NN_TENSOR; + + NNTensor 
nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.BuildFromOHNNTensor(tensor)); + + LiteGraphTensorPtr tensorPtr = {nullptr, DestroyLiteGraphTensor}; + EXPECT_NE(tensorPtr, nnTensor.ConvertToLiteGraphTensor()); +} + +/** + * @tc.name: nn_tensor_build_001 + * @tc.desc: Verify the success of the build function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_build_001, TestSize.Level1) +{ + OH_NN_DataType dataType = OH_NN_FLOAT32; + const std::vector dimensions = {2, 2}; + const std::vector quantParam = {{8, 1.0, 0}, {8, 1.0, 0}, {8, 1.0, 0}}; + OH_NN_TensorType type = OH_NN_ADD_ACTIVATIONTYPE; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_SUCCESS, nnTensor.Build(dataType, dimensions, quantParam, type)); +} + +/** + * @tc.name: nn_tensor_build_002 + * @tc.desc: Verify the invalid datatype value of the build function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_build_002, TestSize.Level1) +{ + int dataTypeTest = 13; + OH_NN_DataType dataType = (OH_NN_DataType)dataTypeTest; + const std::vector dimensions = {2, 2}; + const std::vector quantParam = {{8, 1.0, 0}, {8, 1.0, 0}, {8, 1.0, 0}}; + OH_NN_TensorType type = OH_NN_ADD_ACTIVATIONTYPE; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.Build(dataType, dimensions, quantParam, type)); +} + +/** + * @tc.name: nn_tensor_build_003 + * @tc.desc: Verify the dynamic shape of the build function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_build_003, TestSize.Level1) +{ + OH_NN_DataType dataType = OH_NN_FLOAT32; + const std::vector dimensions = {2, -2}; + const std::vector quantParam = {{8, 1.0, 0}, {8, 1.0, 0}, {8, 1.0, 0}}; + OH_NN_TensorType type = OH_NN_ADD_ACTIVATIONTYPE; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.Build(dataType, dimensions, quantParam, type)); +} + +/** + * @tc.name: nn_tensor_build_004 + * @tc.desc: Verify the invalid numbits of the build function + * @tc.type: FUNC + */ +HWTEST_F(NnTensorTest, nn_tensor_build_004, TestSize.Level1) +{ 
+ OH_NN_DataType dataType = OH_NN_FLOAT32; + const std::vector dimensions = {2, 2}; + const std::vector quantParam = {{2, 1.0, 0}, {2, 1.0, 0}, {2, 1.0, 0}}; + OH_NN_TensorType type = OH_NN_ADD_ACTIVATIONTYPE; + + NNTensor nnTensor; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, nnTensor.Build(dataType, dimensions, quantParam, type)); +} +} // namespace UnitTest +} // namespace NNRT diff --git a/test/unittest/components/v2_0/inner_model/nn_validation_test.cpp b/test/unittest/components/v2_0/inner_model/nn_validation_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..49a2e81e465aa0582e6df75634466b02522bfbd2 --- /dev/null +++ b/test/unittest/components/v2_0/inner_model/nn_validation_test.cpp @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +#include "frameworks/native/validation.h" +#include "frameworks/native/nn_tensor.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +using namespace OHOS::NeuralNetworkRuntime::Validation; + +namespace NNRT { +namespace UnitTest { +class NnValidationTest : public testing::Test { +}; + +/** + * @tc.name: nn_validation_validate_tensor_datatype_001 + * @tc.desc: Verify the success of the validate_tensor_datatype function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_tensor_datatype_001, TestSize.Level1) +{ + int dataTypeTest = 12; + OH_NN_DataType dataType = (OH_NN_DataType)dataTypeTest; + EXPECT_EQ(true, ValidateTensorDataType(dataType)); +} + +/** + * @tc.name: nn_validation_validate_tensor_datatype_002 + * @tc.desc: Verify the gt bounds of the validate_tensor_datatype function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_tensor_datatype_002, TestSize.Level1) +{ + int dataTypeTest = 13; + OH_NN_DataType dataType = (OH_NN_DataType)dataTypeTest; + EXPECT_EQ(false, ValidateTensorDataType(dataType)); +} + +/** + * @tc.name: nn_validation_validate_tensor_datatype_003 + * @tc.desc: Verify the lt bounds of the validate_tensor_datatype function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_tensor_datatype_003, TestSize.Level1) +{ + int dataTypeTest = -1; + OH_NN_DataType dataType = (OH_NN_DataType)dataTypeTest; + EXPECT_EQ(false, ValidateTensorDataType(dataType)); +} + +/** + * @tc.name: nn_validation_validate_preformance_mode_001 + * @tc.desc: Verify the success of the validate_preformance_mode function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_preformance_mode_001, TestSize.Level1) +{ + int performanceModeTest = 4; + OH_NN_PerformanceMode performanceMode = (OH_NN_PerformanceMode)performanceModeTest; + EXPECT_EQ(true, ValidatePerformanceMode(performanceMode)); +} + +/** + * 
@tc.name: nn_validation_validate_preformance_mode_002 + * @tc.desc: Verify the gt bounds of the validate_preformance_mode function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_preformance_mode_002, TestSize.Level1) +{ + int performanceModeTest = 5; + OH_NN_PerformanceMode performanceMode = (OH_NN_PerformanceMode)performanceModeTest; + EXPECT_EQ(false, ValidatePerformanceMode(performanceMode)); +} + +/** + * @tc.name: nn_validation_validate_preformance_mode_003 + * @tc.desc: Verify the lt bounds of the validate_preformance_mode function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_preformance_mode_003, TestSize.Level1) +{ + int performanceModeTest = -1; + OH_NN_PerformanceMode performanceMode = (OH_NN_PerformanceMode)performanceModeTest; + EXPECT_EQ(false, ValidatePerformanceMode(performanceMode)); +} + +/** + * @tc.name: nn_validation_validate_priority_001 + * @tc.desc: Verify the success of the validate_priority function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_priority_001, TestSize.Level1) +{ + int priorityTest = 2; + OH_NN_Priority priority = (OH_NN_Priority)priorityTest; + EXPECT_EQ(true, ValidatePriority(priority)); +} + +/** + * @tc.name: nn_validation_validate_priority_002 + * @tc.desc: Verify the gt bounds of the validate_priority function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_priority_002, TestSize.Level1) +{ + int priorityTest = 4; + OH_NN_Priority priority = (OH_NN_Priority)priorityTest; + EXPECT_EQ(false, ValidatePriority(priority)); +} + +/** + * @tc.name: nn_validation_validate_priority_003 + * @tc.desc: Verify the lt bounds of the validate_priority function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_validate_priority_003, TestSize.Level1) +{ + int priorityTest = -1; + OH_NN_Priority priority = (OH_NN_Priority)priorityTest; + EXPECT_EQ(false, ValidatePriority(priority)); +} + +/** + * @tc.name: 
nn_validation_fusetype_001 + * @tc.desc: Verify the success of the validate_fusetype function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_fusetype_001, TestSize.Level1) +{ + int fuseTypeTest = 2; + OH_NN_FuseType fuseType = (OH_NN_FuseType)fuseTypeTest; + EXPECT_EQ(true, ValidateFuseType(fuseType)); +} + +/** + * @tc.name: nn_validation_fusetype_002 + * @tc.desc: Verify the gt bounds of the validate_fusetype function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_fusetype_002, TestSize.Level1) +{ + int fuseTypeTest = 3; + OH_NN_FuseType fuseType = (OH_NN_FuseType)fuseTypeTest; + EXPECT_EQ(false, ValidateFuseType(fuseType)); +} + +/** + * @tc.name: nn_validation_fusetype_003 + * @tc.desc: Verify the lt bounds of the validate_fusetype function + * @tc.type: FUNC + */ +HWTEST_F(NnValidationTest, nn_validation_fusetype_003, TestSize.Level1) +{ + int fuseTypeTest = -1; + OH_NN_FuseType fuseType = (OH_NN_FuseType)fuseTypeTest; + EXPECT_EQ(false, ValidateFuseType(fuseType)); +} +} // namespace UnitTest +} // namespace NNRT diff --git a/test/unittest/components/v2_0/inner_model/ops_regitstry_test.cpp b/test/unittest/components/v2_0/inner_model/ops_regitstry_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..de3cc84846f07e6cdc33067e5b09754de8a5e998 --- /dev/null +++ b/test/unittest/components/v2_0/inner_model/ops_regitstry_test.cpp @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include "frameworks/native/validation.h" +#include "frameworks/native/ops_registry.h" +#include "frameworks/native/ops/add_builder.h" +#include "frameworks/native/ops/div_builder.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +using namespace OHOS::NeuralNetworkRuntime::Validation; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace NNRT { +namespace UnitTest { +class OpsRegistryTest : public testing::Test { +}; + +/** + * @tc.name: registry_001 + * @tc.desc: Verify the registry success the registar function + * @tc.type: FUNC + */ +HWTEST_F(OpsRegistryTest, registry_001, TestSize.Level1) +{ + const int newRegistryOperationType = 100; + REGISTER_OPS(AddBuilder, OH_NN_OperationType(newRegistryOperationType)); + + OpsRegistry& opsregistry = OpsRegistry::GetSingleton(); + EXPECT_NE(nullptr, opsregistry.GetOpsBuilder(OH_NN_OperationType(newRegistryOperationType))); +} + +/** + * @tc.name: registry_002 + * @tc.desc: Verify the registry twice the registar function + * @tc.type: FUNC + */ +HWTEST_F(OpsRegistryTest, registry_002, TestSize.Level1) +{ + const int newRegistryOperationType = 1000; + REGISTER_OPS(AddBuilder, OH_NN_OperationType(newRegistryOperationType)); + + OpsRegistry& opsregistry = OpsRegistry::GetSingleton(); + EXPECT_NE(nullptr, opsregistry.GetOpsBuilder(OH_NN_OperationType(newRegistryOperationType))); + + REGISTER_OPS(DivBuilder, OH_NN_OperationType(newRegistryOperationType)); +} +} // namespace UnitTest +} // namespace NNRT diff --git a/test/unittest/components/v2_0/neural_network_runtime_test/neural_network_runtime_test.cpp b/test/unittest/components/v2_0/neural_network_runtime_test/neural_network_runtime_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e8c88ef233a3279b6c4835007be7b51ae4e19140 --- /dev/null +++ 
b/test/unittest/components/v2_0/neural_network_runtime_test/neural_network_runtime_test.cpp @@ -0,0 +1,2220 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "neural_network_runtime_test.h" + +#include "mindir.h" + +#include "common/utils.h" +#include "frameworks/native/compilation.h" +#include "frameworks/native/device_manager.h" +#include "frameworks/native/hdi_device_v2_0.h" +#include "test/unittest/common/v2_0/mock_idevice.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +OH_NN_ReturnCode HDIDeviceV2_0::PrepareModel(std::shared_ptr model, + const ModelConfig& config, std::shared_ptr& preparedModel) +{ + if (model == nullptr) { + return OH_NN_INVALID_PARAMETER; + } + + if (config.enableFloat16 == false) { + return OH_NN_FAILED; + } + + sptr iPreparedModel = sptr(new OHOS::HDI::Nnrt::V2_0::MockIPreparedModel()); + if (iPreparedModel == nullptr) { + LOGE("HDIDeviceV2_0 mock PrepareModel failed, error happened when new sptr"); + return OH_NN_NULL_PTR; + } + + preparedModel = CreateSharedPtr(iPreparedModel); + return OH_NN_SUCCESS; +} + +std::shared_ptr DeviceManager::GetDevice(size_t deviceId) const +{ + sptr idevice + = sptr(new (std::nothrow) OHOS::HDI::Nnrt::V2_0::MockIDevice()); + if (idevice == nullptr) { + LOGE("DeviceManager mock GetDevice failed, error happened when new sptr"); + return nullptr; + } + + std::shared_ptr device = CreateSharedPtr(idevice); + if (device == nullptr) 
{ + LOGE("DeviceManager mock GetDevice failed, the device is nullptr"); + return nullptr; + } + + if (deviceId == 0) { + LOGE("DeviceManager mock GetDevice failed, the passed parameter deviceId is 0"); + return nullptr; + } else { + return device; + } +} + +OH_NN_ReturnCode HDIDeviceV2_0::GetDeviceType(OH_NN_DeviceType& deviceType) +{ + if (deviceType == OH_NN_OTHERS) { + return OH_NN_UNAVALIDABLE_DEVICE; + } + + return OH_NN_SUCCESS; +} + +const std::string& DeviceManager::GetDeviceName(size_t deviceId) +{ + static std::string deviceName = ""; + if (deviceId == 0) { + return deviceName; + } + + deviceName = "deviceId"; + return deviceName; +} + +const std::vector& DeviceManager::GetAllDeviceId() +{ + static std::vector deviceIds; + if (OHOS::HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode == OH_NN_FAILED) { + // In order not to affect other use cases, set to the OH_NN_OPERATION_FORBIDDEN + OHOS::HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_OPERATION_FORBIDDEN; + return deviceIds; + } + std::size_t device = 1; + deviceIds.emplace_back(device); + return deviceIds; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsModelCacheSupported(bool& isSupported) +{ + isSupported = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsPerformanceModeSupported(bool& isSupported) +{ + isSupported = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsPrioritySupported(bool& isSupported) +{ + isSupported = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsFloat16PrecisionSupported(bool& isSupported) +{ + isSupported = true; + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::GetSupportedOperation(std::shared_ptr model, + std::vector& ops) +{ + if (model == nullptr) { + LOGE("HDIDeviceV2_0 mock GetSupportedOperation failed, Model is nullptr, cannot query supported operation."); + return OH_NN_NULL_PTR; + } + + ops.emplace_back(true); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode 
HDIDeviceV2_0::IsDynamicInputSupported(bool& isSupported) +{ + isSupported = true; + return OH_NN_SUCCESS; +} +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Unittest { +OH_NN_ReturnCode NeuralNetworkRuntimeTest::BuildModelGraph(InnerModel& innerModel) +{ + // liteGraph is released internally by innerModel + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph; + EXPECT_NE(nullptr, liteGraph); + + liteGraph->name_ = "testGraph"; + liteGraph->input_indices_ = {0}; + liteGraph->output_indices_ = {1}; + liteGraph->all_tensors_ = {nullptr}; + const std::vector data(36, 1); + const std::vector dim = {3, 3}; + const std::vector quant_params {}; + + for (size_t indexInput = 0; indexInput < liteGraph->input_indices_.size(); ++indexInput) { + liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create(liteGraph->name_, + mindspore::lite::DATA_TYPE_FLOAT32, dim, mindspore::lite::FORMAT_NCHW, data, quant_params)); + } + + for (size_t indexOutput = 0; indexOutput < liteGraph->output_indices_.size(); ++indexOutput) { + liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create(liteGraph->name_, + mindspore::lite::DATA_TYPE_FLOAT32, dim, mindspore::lite::FORMAT_NCHW, data, quant_params)); + } + + return innerModel.BuildFromLiteGraph(liteGraph); +} + +void NeuralNetworkRuntimeTest::InitIndices() +{ + m_inputIndices.data = m_inputIndexs; + m_inputIndices.size = sizeof(m_inputIndexs) / sizeof(uint32_t); + + m_outputIndices.data = m_outputIndexs; + m_outputIndices.size = sizeof(m_outputIndexs) / sizeof(uint32_t); + + m_paramIndices.data = m_paramIndexs; + m_paramIndices.size = sizeof(m_paramIndexs) / sizeof(uint32_t); +} + +void NeuralNetworkRuntimeTest::AddModelTensor(InnerModel& innerModel) +{ + const int dim[2] = {2, 2}; + const OH_NN_Tensor& tensor = {OH_NN_FLOAT32, 2, dim, nullptr, OH_NN_TENSOR}; + + EXPECT_EQ(OH_NN_SUCCESS, 
innerModel.AddTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, innerModel.AddTensor(tensor)); + EXPECT_EQ(OH_NN_SUCCESS, innerModel.AddTensor(tensor)); + + const OH_NN_Tensor& tensorParam = {OH_NN_INT8, 0, nullptr, nullptr, OH_NN_ADD_ACTIVATIONTYPE}; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.AddTensor(tensorParam)); +} + +void NeuralNetworkRuntimeTest::SetTensor() +{ + m_tensor.dataType = OH_NN_INT32; + m_tensor.dimensionCount = 0; + m_tensor.dimensions = nullptr; + m_tensor.quantParam = nullptr; + m_tensor.type = OH_NN_TENSOR; +} + +void NeuralNetworkRuntimeTest::SetInnerBuild(InnerModel& innerModel) +{ + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + OH_NN_OperationType opType {OH_NN_OPS_ADD}; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.AddOperation(opType, m_paramIndices, m_inputIndices, m_outputIndices)); + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SpecifyInputsAndOutputs(m_inputIndices, m_outputIndices)); + EXPECT_EQ(OH_NN_SUCCESS, innerModel.Build()); +} + +void NeuralNetworkRuntimeTest::SetInputAndOutput(Executor& executor) +{ + size_t length = 9 * sizeof(int32_t); + float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void *buffer = input; + uint32_t index = 0; + + SetTensor(); + EXPECT_EQ(OH_NN_SUCCESS, executor.SetInput(index, m_tensor, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executor.SetOutput(index, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executor.Run()); +} + +/* + * @tc.name: model_construct_001 + * @tc.desc: Verify the return model of the OH_NNModel_Construct function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_construct_001, testing::ext::TestSize.Level0) +{ + OH_NNModel* ret = OH_NNModel_Construct(); + EXPECT_NE(nullptr, ret); +} + +/* + * @tc.name: model_add_tensor_001 + * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNModel_Tensor function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_tensor_001, testing::ext::TestSize.Level0) +{ + OH_NNModel* model = nullptr; + const int32_t dimInput[2] = {2, 2}; + const OH_NN_Tensor tensor = {OH_NN_INT8, 2, dimInput, nullptr, OH_NN_TENSOR}; + OH_NN_ReturnCode ret = OH_NNModel_AddTensor(model, &tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_add_tensor_002 + * @tc.desc: Verify the OH_NN_Tensor is nullptr of the OH_NNModel_AddTensor function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_tensor_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + + OH_NNModel* model = reinterpret_cast(&innerModel); + OH_NN_Tensor* tensor = nullptr; + OH_NN_ReturnCode ret = OH_NNModel_AddTensor(model, tensor); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_add_tensor_003 + * @tc.desc: Verify the success of the OH_NNModel_AddTensor function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_tensor_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + + const int32_t dimInput[2] = {2, 2}; + const OH_NN_Tensor tensor = {OH_NN_INT8, 2, dimInput, nullptr, OH_NN_TENSOR}; + OH_NN_ReturnCode ret = OH_NNModel_AddTensor(model, &tensor); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: model_add_operation_001 + * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNModel_AddOperation function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_operation_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = nullptr; + OH_NN_OperationType opType {OH_NN_OPS_ADD}; + + InitIndices(); + AddModelTensor(innerModel); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + OH_NN_ReturnCode ret = OH_NNModel_AddOperation(model, opType, &m_paramIndices, &m_inputIndices, &m_outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_add_operation_002 + * @tc.desc: Verify the paramIndices is nullptr of the OH_NNModel_AddOperation function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_operation_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + OH_NN_OperationType opType {OH_NN_OPS_ADD}; + + m_inputIndices.data = m_inputIndexs; + m_inputIndices.size = sizeof(m_inputIndexs) / sizeof(uint32_t); + + m_outputIndices.data = m_outputIndexs; + m_outputIndices.size = sizeof(m_outputIndexs) / sizeof(uint32_t); + + AddModelTensor(innerModel); + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + OH_NN_ReturnCode ret = OH_NNModel_AddOperation(model, opType, nullptr, &m_inputIndices, &m_outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_add_operation_003 + * @tc.desc: Verify the inputIndices is nullptr of the OH_NNModel_AddOperation function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_operation_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + OH_NN_OperationType opType {OH_NN_OPS_ADD}; + + m_paramIndices.data = m_paramIndexs; + m_paramIndices.size = sizeof(m_paramIndexs) / sizeof(uint32_t); + + m_outputIndices.data = m_outputIndexs; + m_outputIndices.size = sizeof(m_outputIndexs) / sizeof(uint32_t); + + AddModelTensor(innerModel); + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + OH_NN_ReturnCode ret = OH_NNModel_AddOperation(model, opType, &m_paramIndices, nullptr, &m_outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_add_operation_004 + * @tc.desc: Verify the outputIndices is nullptr of the OH_NNModel_AddOperation function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_operation_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + OH_NN_OperationType opType {OH_NN_OPS_ADD}; + + m_paramIndices.data = m_paramIndexs; + m_paramIndices.size = sizeof(m_paramIndexs) / sizeof(uint32_t); + + m_inputIndices.data = m_inputIndexs; + m_inputIndices.size = sizeof(m_inputIndexs) / sizeof(uint32_t); + + AddModelTensor(innerModel); + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + OH_NN_ReturnCode ret = OH_NNModel_AddOperation(model, opType, &m_paramIndices, &m_inputIndices, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_add_operation_005 + * @tc.desc: Verify the success of the OH_NNModel_AddOperation function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_add_operation_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + OH_NN_OperationType opType {OH_NN_OPS_ADD}; + + InitIndices(); + AddModelTensor(innerModel); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + OH_NN_ReturnCode ret = OH_NNModel_AddOperation(model, opType, &m_paramIndices, &m_inputIndices, &m_outputIndices); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: model_set_tensor_data_001 + * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNModel_SetTensorData function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_set_tensor_data_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = nullptr; + AddModelTensor(innerModel); + + uint32_t index = 3; + const int8_t activation = 0; + + OH_NN_ReturnCode ret = OH_NNModel_SetTensorData(model, index, static_cast(&activation), + sizeof(int8_t)); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_set_tensor_data_002 + * @tc.desc: Verify the data is nullptr of the OH_NNModel_SetTensorData function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_set_tensor_data_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + AddModelTensor(innerModel); + + uint32_t index = 3; + + OH_NN_ReturnCode ret = OH_NNModel_SetTensorData(model, index, nullptr, sizeof(int8_t)); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_set_tensor_data_003 + * @tc.desc: Verify the length is 0 of the OH_NNModel_SetTensorData function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_set_tensor_data_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + AddModelTensor(innerModel); + + uint32_t index = 3; + const int8_t activation = 0; + + OH_NN_ReturnCode ret = OH_NNModel_SetTensorData(model, index, static_cast(&activation), 0); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_set_tensor_data_004 + * @tc.desc: Verify the successs of the OH_NNModel_SetTensorData function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_set_tensor_data_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + AddModelTensor(innerModel); + + uint32_t index = 3; + const int8_t activation = 0; + + OH_NN_ReturnCode ret = OH_NNModel_SetTensorData(model, index, static_cast(&activation), + sizeof(int8_t)); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: model_specify_inputs_and_outputs_001 + * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNModel_SpecifyInputsAndOutputs function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_specify_inputs_and_outputs_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = nullptr; + + InitIndices(); + AddModelTensor(innerModel); + + OH_NN_ReturnCode ret = OH_NNModel_SpecifyInputsAndOutputs(model, &m_inputIndices, &m_outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_specify_inputs_and_outputs_002 + * @tc.desc: Verify the inputIndices is nullptr of the OH_NNModel_SpecifyInputsAndOutputs function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_specify_inputs_and_outputs_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + + InitIndices(); + AddModelTensor(innerModel); + + OH_NN_ReturnCode ret = OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &m_outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_specify_inputs_and_outputs_003 + * @tc.desc: Verify the outputIndices is nullptr of the OH_NNModel_SpecifyInputsAndOutputs function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_specify_inputs_and_outputs_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + + InitIndices(); + AddModelTensor(innerModel); + + OH_NN_ReturnCode ret = OH_NNModel_SpecifyInputsAndOutputs(model, &m_inputIndices, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_specify_inputs_and_outputs_004 + * @tc.desc: Verify the success of the OH_NNModel_SpecifyInputsAndOutputs function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_specify_inputs_and_outputs_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + + InitIndices(); + AddModelTensor(innerModel); + + OH_NN_ReturnCode ret = OH_NNModel_SpecifyInputsAndOutputs(model, &m_inputIndices, &m_outputIndices); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: model_finish_001 + * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNModel_Finish function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_finish_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = nullptr; + + OH_NN_OperationType opType {OH_NN_OPS_ADD}; + + InitIndices(); + AddModelTensor(innerModel); + + uint32_t index = 3; + const int8_t activation = 0; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index, static_cast(&activation), + sizeof(int8_t))); + + EXPECT_EQ(OH_NN_SUCCESS, innerModel.AddOperation(opType, m_paramIndices, m_inputIndices, m_outputIndices)); + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SpecifyInputsAndOutputs(m_inputIndices, m_outputIndices)); + + OH_NN_ReturnCode ret = OH_NNModel_Finish(model); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_finish_002 + * @tc.desc: Verify the success of the OH_NNModel_Finish function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_finish_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + + OH_NN_OperationType opType {OH_NN_OPS_ADD}; + + InitIndices(); + AddModelTensor(innerModel); + + const int8_t activation = 0; + uint32_t index = 3; + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SetTensorValue(index, + static_cast(&activation), sizeof(int8_t))); + + EXPECT_EQ(OH_NN_SUCCESS, innerModel.AddOperation(opType, m_paramIndices, m_inputIndices, m_outputIndices)); + EXPECT_EQ(OH_NN_SUCCESS, innerModel.SpecifyInputsAndOutputs(m_inputIndices, m_outputIndices)); + + OH_NN_ReturnCode ret = OH_NNModel_Finish(model); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: model_destroy_001 + * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNModel_Destroy function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_destroy_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel** pModel = nullptr; + OH_NNModel_Destroy(pModel); + EXPECT_EQ(nullptr, pModel); +} + +/* + * @tc.name: model_destroy_002 + * @tc.desc: Verify the *OH_NNModel is nullptr of the OH_NNModel_Destroy function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_destroy_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = nullptr; + OH_NNModel** pModel = &model; + OH_NNModel_Destroy(pModel); + EXPECT_EQ(nullptr, model); +} + +/* + * @tc.name: model_destroy_003 + * @tc.desc: Verify the normal model of the OH_NNModel_Destroy function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_destroy_003, testing::ext::TestSize.Level0) +{ + InnerModel* innerModel = new InnerModel(); + EXPECT_NE(nullptr, innerModel); + OH_NNModel* model = reinterpret_cast(innerModel); + OH_NNModel_Destroy(&model); + EXPECT_EQ(nullptr, model); +} + +/* + * @tc.name: model_get_available_operation_001 + * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNModel_GetAvailableOperations function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_get_available_operation_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = nullptr; + + uint32_t opCount = 1; + const bool *pIsAvailable = nullptr; + + InitIndices(); + AddModelTensor(innerModel); + SetInnerBuild(innerModel); + + size_t deviceID = 10; + OH_NN_ReturnCode ret = OH_NNModel_GetAvailableOperations(model, deviceID, &pIsAvailable, &opCount); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_get_available_operation_002 + * @tc.desc: Verify the isAvailable is nullptr of the OH_NNModel_GetAvailableOperations function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_get_available_operation_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + + uint32_t opCount = 1; + InitIndices(); + AddModelTensor(innerModel); + SetInnerBuild(innerModel); + + size_t deviceID = 10; + OH_NN_ReturnCode ret = OH_NNModel_GetAvailableOperations(model, deviceID, nullptr, &opCount); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_get_available_operation_003 + * @tc.desc: Verify the *isAvailable is no nullptr of the OH_NNModel_GetAvailableOperations function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_get_available_operation_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + + const bool isAvailable = true; + const bool *pIsAvailable = &isAvailable; + uint32_t opCount = 1; + + InitIndices(); + AddModelTensor(innerModel); + SetInnerBuild(innerModel); + + size_t deviceID = 10; + OH_NN_ReturnCode ret = OH_NNModel_GetAvailableOperations(model, deviceID, &pIsAvailable, &opCount); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_get_available_operation_004 + * @tc.desc: Verify the opCount is nullptr of the OH_NNModel_GetAvailableOperations function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_get_available_operation_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + + const bool *pIsAvailable = nullptr; + uint32_t* opCount = nullptr; + + InitIndices(); + AddModelTensor(innerModel); + SetInnerBuild(innerModel); + + size_t deviceID = 10; + OH_NN_ReturnCode ret = OH_NNModel_GetAvailableOperations(model, deviceID, &pIsAvailable, opCount); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: model_get_available_operation_005 + * @tc.desc: Verify the success of the OH_NNModel_GetAvailableOperations function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, model_get_available_operation_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + + const bool *pIsAvailable = nullptr; + uint32_t opCount = 1; + + InitIndices(); + AddModelTensor(innerModel); + SetInnerBuild(innerModel); + + size_t deviceID = 10; + OH_NN_ReturnCode ret = OH_NNModel_GetAvailableOperations(model, deviceID, &pIsAvailable, &opCount); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_construct_001 + * @tc.desc: Verify the OH_NNModel is nullptr of the OH_NNCompilation_Construct function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_construct_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + const OH_NNModel* model = nullptr; + OH_NNCompilation* ret = OH_NNCompilation_Construct(model); + EXPECT_EQ(nullptr, ret); +} + +/* + * @tc.name: compilation_construct_002 + * @tc.desc: Verify the not OH_NNModel_Build before creating compilation of the OH_NNCompilation_Construct function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_construct_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + OH_NNModel* model = reinterpret_cast(&innerModel); + OH_NNCompilation* ret = OH_NNCompilation_Construct(model); + EXPECT_EQ(nullptr, ret); +} + +/* + * @tc.name: compilation_construct_003 + * @tc.desc: Verify the normal model of the OH_NNCompilation_Construct function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_construct_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + OH_NNModel* model = reinterpret_cast(&innerModel); + OH_NNCompilation* ret = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, ret); +} + +/* + * @tc.name: compilation_set_device_001 + * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNCompilation_SetDevice function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_device_001, testing::ext::TestSize.Level0) +{ + OH_NNCompilation* compilation = nullptr; + size_t deviceId = 1; + OH_NN_ReturnCode ret = OH_NNCompilation_SetDevice(compilation, deviceId); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_device_002 + * @tc.desc: Verify the success of the OH_NNCompilation_SetDevice function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_device_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + size_t deviceId = 1; + OH_NN_ReturnCode ret = OH_NNCompilation_SetDevice(nnCompilation, deviceId); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_set_cache_001 + * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNCompilation_SetCache function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_cache_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = nullptr; + const char* cacheDir = "../"; + uint32_t version = 1; + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + OH_NN_ReturnCode ret = OH_NNCompilation_SetCache(nnCompilation, cacheDir, version); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_cache_002 + * @tc.desc: Verify the cachePath is nullptr of the OH_NNCompilation_SetCache function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_cache_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + const char* cacheDir = nullptr; + uint32_t version = 1; + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + OH_NN_ReturnCode ret = OH_NNCompilation_SetCache(nnCompilation, cacheDir, version); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_cache_003 + * @tc.desc: Verify the success of the OH_NNCompilation_SetCache function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_cache_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + const char* cacheDir = "../"; + uint32_t version = 1; + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + OH_NN_ReturnCode ret = OH_NNCompilation_SetCache(nnCompilation, cacheDir, version); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_set_performance_mode_001 + * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNCompilation_SetPerformanceMode function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_performance_mode_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = nullptr; + OH_NN_PerformanceMode performanceMode = OH_NN_PERFORMANCE_NONE; + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + OH_NN_ReturnCode ret = OH_NNCompilation_SetPerformanceMode(nnCompilation, performanceMode); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_performance_mode_002 + * @tc.desc: Verify the success of the OH_NNCompilation_SetPerformanceMode function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_performance_mode_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + OH_NN_PerformanceMode performanceMode = OH_NN_PERFORMANCE_NONE; + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = OH_NNCompilation_SetPerformanceMode(nnCompilation, performanceMode); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_set_priority_001 + * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNCompilation_SetPriority function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_priority_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = nullptr; + OH_NN_Priority priority = OH_NN_PRIORITY_LOW; + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = OH_NNCompilation_SetPriority(nnCompilation, priority); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_priority_002 + * @tc.desc: Verify the success of the OH_NNCompilation_SetPriority function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_priority_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + OH_NN_Priority priority = OH_NN_PRIORITY_LOW; + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = OH_NNCompilation_SetPriority(nnCompilation, priority); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_set_enable_float16_001 + * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNCompilation_EnableFloat16 function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_enable_float16_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = nullptr; + bool enableFloat16 = true; + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = OH_NNCompilation_EnableFloat16(nnCompilation, enableFloat16); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_set_enable_float16_002 + * @tc.desc: Verify the success of the OH_NNCompilation_EnableFloat16 function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_set_enable_float16_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + bool enableFloat16 = true; + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + + OH_NN_ReturnCode ret = OH_NNCompilation_EnableFloat16(nnCompilation, enableFloat16); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_build_001 + * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNCompilation_Build function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_build_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = nullptr; + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPerformance(OH_NN_PERFORMANCE_EXTREME)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPriority(OH_NN_PRIORITY_HIGH)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetEnableFp16(true)); + + OH_NN_ReturnCode ret = OH_NNCompilation_Build(nnCompilation); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: compilation_build_002 + * @tc.desc: Verify the success of the OH_NNCompilation_Build function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_build_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPerformance(OH_NN_PERFORMANCE_EXTREME)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPriority(OH_NN_PRIORITY_HIGH)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetEnableFp16(true)); + + OH_NN_ReturnCode ret = OH_NNCompilation_Build(nnCompilation); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: compilation_destroy_001 + * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNCompilation_Destroy function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_destroy_001, testing::ext::TestSize.Level0) +{ + OH_NNCompilation** pCompilation = nullptr; + OH_NNCompilation_Destroy(pCompilation); + EXPECT_EQ(nullptr, pCompilation); +} + +/* + * @tc.name: compilation_destroy_002 + * @tc.desc: Verify the *OH_NNCompilation is nullptr of the OH_NNCompilation_Destroy function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_destroy_002, testing::ext::TestSize.Level0) +{ + OH_NNCompilation* compilation = nullptr; + OH_NNCompilation** pCompilation = &compilation; + OH_NNCompilation_Destroy(pCompilation); + EXPECT_EQ(nullptr, compilation); +} + +/* + * @tc.name: compilation_destroy_003 + * @tc.desc: Verify the normal model of the OH_NNCompilation_Destroy function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, compilation_destroy_003, testing::ext::TestSize.Level0) +{ + InnerModel* innerModel = new InnerModel(); + EXPECT_NE(nullptr, innerModel); + Compilation* compilation = new(std::nothrow) Compilation(innerModel); + EXPECT_NE(nullptr, compilation); + OH_NNCompilation* nnCompilation = reinterpret_cast(compilation); + OH_NNCompilation_Destroy(&nnCompilation); + EXPECT_EQ(nullptr, nnCompilation); +} + +/** + * @tc.name: excutor_construct_001 + * @tc.desc: Verify the OH_NNCompilation is nullptr of the OH_NNExecutor_Construct function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_construct_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetEnableFp16(true)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPerformance(OH_NN_PERFORMANCE_EXTREME)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPriority(OH_NN_PRIORITY_HIGH)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.Build()); + + OH_NNCompilation* nnCompilation = nullptr; + OH_NNExecutor* executor = OH_NNExecutor_Construct(nnCompilation); + EXPECT_EQ(nullptr, executor); +} + +/** + * @tc.name: excutor_construct_002 + * @tc.desc: Verify the not OH_NNCompilation_Build before creating executor of the OH_NNExecutor_Construct function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_construct_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + OH_NNExecutor * executor = OH_NNExecutor_Construct(nnCompilation); + EXPECT_EQ(nullptr, executor); +} + +/** + * @tc.name: excutor_construct_003 + * @tc.desc: Verify the success of the 
OH_NNExecutor_Construct function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_construct_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation compilation(&innerModel); + + std::size_t deviceId = 1; + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetDevice(deviceId)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPerformance(OH_NN_PERFORMANCE_EXTREME)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetPriority(OH_NN_PRIORITY_HIGH)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.SetEnableFp16(true)); + EXPECT_EQ(OH_NN_SUCCESS, compilation.Build()); + + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + OH_NNExecutor * executor = OH_NNExecutor_Construct(nnCompilation); + EXPECT_NE(nullptr, executor); +} + +/** + * @tc.name: excutor_setinput_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_SetInput function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_setinput_001, testing::ext::TestSize.Level0) +{ + SetTensor(); + + float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + const void *buffer = input; + size_t length = 2 * sizeof(float); + uint32_t inputIndex = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetInput(nullptr, inputIndex, &m_tensor, buffer, length)); +} + +/** + * @tc.name: excutor_setinput_002 + * @tc.desc: Verify the OH_NN_Tensor is nullptr of the OH_NNExecutor_SetInput function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_setinput_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t inputIndex = 0; + float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + const void *buffer = input; + size_t length = 2 * sizeof(float); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, 
OH_NNExecutor_SetInput(nnExecutor, inputIndex, nullptr, buffer, length)); +} + +/** + * @tc.name: excutor_setinput_003 + * @tc.desc: Verify the data is nullptr of the OH_NNExecutor_SetInput function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_setinput_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + SetTensor(); + + uint32_t inputIndex = 0; + const void *buffer = nullptr; + size_t length = 2 * sizeof(float); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetInput(nnExecutor, inputIndex, &m_tensor, buffer, length)); +} + +/** + * @tc.name: excutor_setinput_004 + * @tc.desc: Verify the length is 0 of the OH_NNExecutor_SetInput function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_setinput_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t inputIndex = 0; + SetTensor(); + + size_t length = 0; + float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + const void *buffer = input; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetInput(nnExecutor, inputIndex, &m_tensor, buffer, length)); +} + +/** + * @tc.name: excutor_setinput_005 + * @tc.desc: Verify the success of the OH_NNExecutor_SetInput function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_setinput_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t inputIndex = 0; + SetTensor(); + + 
float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + const void *buffer = input; + size_t length = 9 * sizeof(int32_t); + OH_NN_ReturnCode ret = OH_NNExecutor_SetInput(nnExecutor, inputIndex, &m_tensor, buffer, length); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: excutor_setoutput_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_SetOutput function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_setoutput_001, testing::ext::TestSize.Level0) +{ + uint32_t outputIndex = 0; + float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void *buffer = input; + size_t length = 9 * sizeof(int32_t); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetOutput(nullptr, outputIndex, buffer, length)); +} + +/** + * @tc.name: excutor_setoutput_002 + * @tc.desc: Verify the data is nullptr of the OH_NNExecutor_SetOutput function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_setoutput_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + void *buffer = nullptr; + size_t length = 9 * sizeof(int32_t); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetOutput(nnExecutor, outputIndex, buffer, length)); +} + +/** + * @tc.name: excutor_setoutput_003 + * @tc.desc: Verify the length is 0 of the OH_NNExecutor_SetOutput function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_setoutput_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void *buffer = input; + size_t length = 0; 
+ EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetOutput(nnExecutor, outputIndex, buffer, length)); +} + +/** + * @tc.name: excutor_setoutput_004 + * @tc.desc: Verify the success of the OH_NNExecutor_SetOutput function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_setoutput_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void *buffer = input; + size_t length = 9 * sizeof(int32_t); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNExecutor_SetOutput(nnExecutor, outputIndex, buffer, length)); +} + +/** + * @tc.name: excutor_getoutputshape_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_GetOutputShape function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_getoutputshape_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = nullptr; + + SetInputAndOutput(executor); + + int32_t* ptr = nullptr; + int32_t** shape = &ptr; + uint32_t length = 2; + uint32_t outputIndex = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetOutputShape(nnExecutor, outputIndex, + shape, &length)); +} + +/** + * @tc.name: excutor_getoutputshape_002 + * @tc.desc: Verify the shape is nullptr of the OH_NNExecutor_GetOutputShape function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_getoutputshape_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = 
reinterpret_cast(&executor); + + SetInputAndOutput(executor); + + uint32_t outputIndex = 0; + int32_t** shape = nullptr; + uint32_t length = 2; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetOutputShape(nnExecutor, outputIndex, + shape, &length)); +} + +/** + * @tc.name: excutor_getoutputshape_003 + * @tc.desc: Verify the *shape is not nullptr of the OH_NNExecutor_GetOutputShape function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_getoutputshape_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + SetInputAndOutput(executor); + + int32_t expectDim[2] = {3, 3}; + int32_t* ptr = expectDim; + int32_t** shape = &ptr; + uint32_t length = 2; + uint32_t outputIndex = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetOutputShape(nnExecutor, outputIndex, + shape, &length)); +} + +/** + * @tc.name: excutor_getoutputshape_004 + * @tc.desc: Verify the length is nullptr of the OH_NNExecutor_GetOutputShape function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_getoutputshape_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + SetInputAndOutput(executor); + + int32_t* ptr = nullptr; + int32_t** shape = &ptr; + uint32_t outputIndex = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetOutputShape(nnExecutor, outputIndex, shape, nullptr)); +} + +/** + * @tc.name: excutor_getoutputshape_005 + * @tc.desc: Verify the success of the OH_NNExecutor_GetOutputShape function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_getoutputshape_005, testing::ext::TestSize.Level0) +{ + 
InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + SetInputAndOutput(executor); + + int32_t* ptr = nullptr; + int32_t** shape = &ptr; + uint32_t length = 2; + uint32_t outputIndex = 0; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetOutputShape(nnExecutor, outputIndex, shape, &length)); +} + +/** + * @tc.name: excutor_run_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_Run function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_run_001, testing::ext::TestSize.Level0) +{ + OH_NNExecutor* nnExecutor = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_Run(nnExecutor)); +} + +/** + * @tc.name: excutor_run_002 + * @tc.desc: Verify the success of the OH_NNExecutor_Run function + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, excutor_run_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t index = 0; + size_t length = 9 * sizeof(int32_t); + float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void *buffer = input; + + SetTensor(); + EXPECT_EQ(OH_NN_SUCCESS, executor.SetInput(index, m_tensor, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, executor.SetOutput(index, buffer, length)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(nnExecutor)); +} + +/* + * @tc.name: executor_allocate_input_memory_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_AllocateInputMemory function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_input_memory_001, testing::ext::TestSize.Level0) +{ + OH_NNExecutor* nnExecutor = nullptr; + uint32_t outputIndex = 0; + size_t length = 9 * sizeof(float); + + OH_NN_Memory* ret = OH_NNExecutor_AllocateInputMemory(nnExecutor, outputIndex, length); + EXPECT_EQ(nullptr, ret); +} + +/* + * @tc.name: executor_allocate_input_memory_002 + * @tc.desc: Verify the passed length equals 0 of the OH_NNExecutor_AllocateInputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_input_memory_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + size_t length = 0; + + OH_NN_Memory* ret = OH_NNExecutor_AllocateInputMemory(nnExecutor, outputIndex, length); + EXPECT_EQ(nullptr, ret); +} + +/* + * @tc.name: executor_allocate_input_memory_003 + * @tc.desc: Verify the error when creating input memory in executor of the OH_NNExecutor_AllocateInputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_input_memory_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 6; + size_t length = 9 * sizeof(float); + + OH_NN_Memory* ret = OH_NNExecutor_AllocateInputMemory(nnExecutor, outputIndex, length); + EXPECT_EQ(nullptr, ret); +} + +/* + * @tc.name: executor_allocate_input_memory_004 + * @tc.desc: Verify the success of the OH_NNExecutor_AllocateInputMemory function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_input_memory_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + size_t length = 9 * sizeof(float); + + OH_NN_Memory* ret = OH_NNExecutor_AllocateInputMemory(nnExecutor, outputIndex, length); + EXPECT_NE(nullptr, ret); +} + +/* + * @tc.name: executor_allocate_output_memory_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_AllocateOutputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_output_memory_001, testing::ext::TestSize.Level0) +{ + OH_NNExecutor* nnExecutor = nullptr; + uint32_t outputIndex = 0; + size_t length = 9 * sizeof(float); + + OH_NN_Memory* ret = OH_NNExecutor_AllocateOutputMemory(nnExecutor, outputIndex, length); + EXPECT_EQ(nullptr, ret); +} + +/* + * @tc.name: executor_allocate_output_memory_002 + * @tc.desc: Verify the passed length equals 0 of the OH_NNExecutor_AllocateOutputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_output_memory_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + size_t length = 0; + + OH_NN_Memory* ret = OH_NNExecutor_AllocateOutputMemory(nnExecutor, outputIndex, length); + EXPECT_EQ(nullptr, ret); +} + +/* + * @tc.name: executor_allocate_output_memory_003 + * @tc.desc: Verify the error when create output memory in executor of the OH_NNExecutor_AllocateOutputMemory function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_output_memory_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 6; + size_t length = 9 * sizeof(float); + + OH_NN_Memory* ret = OH_NNExecutor_AllocateOutputMemory(nnExecutor, outputIndex, length); + EXPECT_EQ(nullptr, ret); +} + +/* + * @tc.name: executor_allocate_output_memory_004 + * @tc.desc: Verify the success of the OH_NNExecutor_AllocateOutputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_output_memory_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + size_t length = 9 * sizeof(float); + + OH_NN_Memory* ret = OH_NNExecutor_AllocateOutputMemory(nnExecutor, outputIndex, length); + EXPECT_NE(nullptr, ret); +} + + +/* + * @tc.name: executor_destroy_input_memory_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_DestroyInputMemory function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_input_memory_001, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildModelGraph(innerModel); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = nullptr; + + uint32_t inputIndex = 0; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + OH_NN_Memory* pMemory = &memory; + size_t length = 9 * sizeof(float); + EXPECT_EQ(OH_NN_SUCCESS, executor.CreateInputMemory(inputIndex, length, &pMemory)); + OH_NNExecutor_DestroyInputMemory(nnExecutor, inputIndex, &pMemory); + EXPECT_EQ(nullptr, nnExecutor); +} + +/* + * @tc.name: executor_destroy_input_memory_002 + * @tc.desc: Verify the memory is nullptr of the OH_NNExecutor_DestroyInputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_input_memory_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildModelGraph(innerModel); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t inputIndex = 0; + OH_NN_Memory** memory = nullptr; + OH_NNExecutor_DestroyInputMemory(nnExecutor, inputIndex, memory); + EXPECT_EQ(nullptr, memory); +} + +/* + * @tc.name: executor_destroy_input_memory_003 + * @tc.desc: Verify the *memory is nullptr of the OH_NNExecutor_DestroyInputMemory function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_input_memory_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildModelGraph(innerModel); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t inputIndex = 0; + OH_NN_Memory* memory = nullptr; + OH_NN_Memory** pMemory = &memory; + OH_NNExecutor_DestroyInputMemory(nnExecutor, inputIndex, pMemory); + EXPECT_EQ(nullptr, memory); +} + +/* + * @tc.name: executor_destroy_input_memory_004 + * @tc.desc: Verify the error happened when destroying input memory of the OH_NNExecutor_DestroyInputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_input_memory_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildModelGraph(innerModel); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t inputIndex = 6; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + OH_NN_Memory* pMemory = &memory; + OH_NNExecutor_DestroyInputMemory(nnExecutor, inputIndex, &pMemory); + EXPECT_NE(nullptr, pMemory); +} + +/* + * @tc.name: executor_destroy_input_memory_005 + * @tc.desc: Verify the success of the OH_NNExecutor_DestroyInputMemory function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_input_memory_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + BuildModelGraph(innerModel); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t inputIndex = 0; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + OH_NN_Memory* pMemory = &memory; + size_t length = 9 * sizeof(float); + EXPECT_EQ(OH_NN_SUCCESS, executor.CreateInputMemory(inputIndex, length, &pMemory)); + OH_NNExecutor_DestroyInputMemory(nnExecutor, inputIndex, &pMemory); + EXPECT_EQ(nullptr, pMemory); +} + +/* + * @tc.name: executor_destroy_output_memory_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_DestroyOutputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_001, testing::ext::TestSize.Level0) +{ + OH_NNExecutor* nnExecutor = nullptr; + uint32_t outputIndex = 0; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + OH_NN_Memory* pMemory = &memory; + OH_NNExecutor_DestroyOutputMemory(nnExecutor, outputIndex, &pMemory); + EXPECT_EQ(nullptr, nnExecutor); +} + +/* + * @tc.name: executor_destroy_output_memory_002 + * @tc.desc: Verify the memory is nullptr of the OH_NNExecutor_DestroyOutputMemory function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + OH_NN_Memory** memory = nullptr; + OH_NNExecutor_DestroyOutputMemory(nnExecutor, outputIndex, memory); + EXPECT_EQ(nullptr, memory); +} + +/* + * @tc.name: executor_destroy_output_memory_003 + * @tc.desc: Verify the *memory is nullptr of the OH_NNExecutor_DestroyOutputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + OH_NN_Memory* memory = nullptr; + OH_NN_Memory** pMemory = &memory; + OH_NNExecutor_DestroyOutputMemory(nnExecutor, outputIndex, pMemory); + EXPECT_EQ(nullptr, memory); +} + +/* + * @tc.name: executor_destroy_output_memory_004 + * @tc.desc: Verify the error happened when destroying output memory of the OH_NNExecutor_DestroyOutputMemory function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 6; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + OH_NN_Memory* pMemory = &memory; + OH_NNExecutor_DestroyOutputMemory(nnExecutor, outputIndex, &pMemory); + EXPECT_NE(nullptr, pMemory); +} + +/* + * @tc.name: executor_destroy_output_memory_005 + * @tc.desc: Verify the success of the OH_NNExecutor_DestroyOutputMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_005, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + OH_NN_Memory* pMemory = &memory; + size_t length = 9 * sizeof(float); + uint32_t outputIndex = 0; + EXPECT_EQ(OH_NN_SUCCESS, executor.CreateOutputMemory(outputIndex, length, &pMemory)); + OH_NNExecutor_DestroyOutputMemory(nnExecutor, outputIndex, &pMemory); + EXPECT_EQ(nullptr, pMemory); +} + +/* + * @tc.name: executor_set_input_with_memory_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_SetInputWithMemory function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_set_input_with_memory_001, testing::ext::TestSize.Level0) +{ + OH_NNExecutor* nnExecutor = nullptr; + + SetTensor(); + + uint32_t inputIndex = 0; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = OH_NNExecutor_SetInputWithMemory(nnExecutor, inputIndex, &m_tensor, &memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_with_memory_002 + * @tc.desc: Verify the operand is nullptr of the OH_NNExecutor_SetInputWithMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_set_input_with_memory_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + OH_NN_Tensor* operand = nullptr; + + uint32_t inputIndex = 0; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = OH_NNExecutor_SetInputWithMemory(nnExecutor, inputIndex, operand, &memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_with_memory_003 + * @tc.desc: Verify the memory is nullptr of the OH_NNExecutor_SetInputWithMemory function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_set_input_with_memory_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + SetTensor(); + + uint32_t inputIndex = 0; + OH_NN_Memory* memory = nullptr; + OH_NN_ReturnCode ret = OH_NNExecutor_SetInputWithMemory(nnExecutor, inputIndex, &m_tensor, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_input_with_memory_004 + * @tc.desc: Verify the success of the OH_NNExecutor_SetInputWithMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_set_input_with_memory_004, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + SetTensor(); + + uint32_t inputIndex = 0; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + + OH_NN_ReturnCode ret = OH_NNExecutor_SetInputWithMemory(nnExecutor, inputIndex, &m_tensor, &memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + + +/* + * @tc.name: executor_set_output_with_memory_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_SetOutputWithMemory function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_set_output_with_memory_001, testing::ext::TestSize.Level0) +{ + OH_NNExecutor* nnExecutor = nullptr; + uint32_t outputIndex = 0; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + OH_NN_ReturnCode ret = OH_NNExecutor_SetOutputWithMemory(nnExecutor, outputIndex, &memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_with_memory_002 + * @tc.desc: Verify the memory is nullptr of the OH_NNExecutor_SetOutputWithMemory function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_set_output_with_memory_002, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + OH_NN_Memory* memory = nullptr; + OH_NN_ReturnCode ret = OH_NNExecutor_SetOutputWithMemory(nnExecutor, outputIndex, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: executor_set_output_with_memory_003 + * @tc.desc: Verify the success of the OH_NNExecutor_SetOutputWithMemory function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_set_output_with_memory_003, testing::ext::TestSize.Level0) +{ + InnerModel innerModel; + EXPECT_EQ(OH_NN_SUCCESS, BuildModelGraph(innerModel)); + Compilation innerCompilation(&innerModel); + Executor executor(&innerCompilation); + OH_NNExecutor* nnExecutor = reinterpret_cast(&executor); + + uint32_t outputIndex = 0; + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memory = {data, 9 * sizeof(float)}; + OH_NN_ReturnCode ret = OH_NNExecutor_SetOutputWithMemory(nnExecutor, outputIndex, &memory); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: executor_destroy_001 + * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_Destroy function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_001, testing::ext::TestSize.Level0) +{ + OH_NNExecutor** pExecutor = nullptr; + OH_NNExecutor_Destroy(pExecutor); + EXPECT_EQ(nullptr, pExecutor); +} + +/* + * @tc.name: executor_destroy_002 + * @tc.desc: Verify the *OH_NNExecutor is nullptr of the OH_NNExecutor_Destroy function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_002, testing::ext::TestSize.Level0) +{ + OH_NNExecutor* nnExecutor = nullptr; + OH_NNExecutor** pExecutor = &nnExecutor; + OH_NNExecutor_Destroy(pExecutor); + EXPECT_EQ(nullptr, nnExecutor); +} + +/* + * @tc.name: executor_destroy_003 + * @tc.desc: Verify the normal model of the OH_NNExecutor_Destroy function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_003, testing::ext::TestSize.Level0) +{ + InnerModel* innerModel = new InnerModel(); + EXPECT_NE(nullptr, innerModel); + Compilation* innerCompilation = new(std::nothrow) Compilation(innerModel); + EXPECT_NE(nullptr, innerCompilation); + Executor* executor = new(std::nothrow) Executor(innerCompilation); + EXPECT_NE(nullptr, executor); + + OH_NNExecutor* nnExecutor = reinterpret_cast(executor); + OH_NNExecutor_Destroy(&nnExecutor); + EXPECT_EQ(nullptr, nnExecutor); +} + +/* + * @tc.name: device_get_all_devices_id_001 + * @tc.desc: Verify the allDevicesID is nullptr of the OH_NNDevice_GetAllDevicesID function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_all_devices_id_001, testing::ext::TestSize.Level0) +{ + const size_t** allDevicesId = nullptr; + uint32_t deviceCount = 1; + uint32_t* pDeviceCount = &deviceCount; + OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(allDevicesId, pDeviceCount); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_get_all_devices_id_002 + * @tc.desc: Verify the *allDevicesID is not nullptr of the OH_NNDevice_GetAllDevicesID function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_all_devices_id_002, testing::ext::TestSize.Level0) +{ + const size_t devicesId = 1; + const size_t* allDevicesId = &devicesId; + const size_t** pAllDevicesId = &allDevicesId; + uint32_t deviceCount = 1; + uint32_t* pDeviceCount = &deviceCount; + OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(pAllDevicesId, pDeviceCount); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_get_all_devices_id_003 + * @tc.desc: Verify the deviceCount is nullptr of the OH_NNDevice_GetAllDevicesID function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_all_devices_id_003, testing::ext::TestSize.Level0) +{ + const size_t* allDevicesId = nullptr; + const size_t** pAllDevicesId = &allDevicesId; + uint32_t* pDeviceCount = nullptr; + OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(pAllDevicesId, pDeviceCount); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_get_all_devices_id_004 + * @tc.desc: Verify the get no device of the OH_NNDevice_GetAllDevicesID function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_all_devices_id_004, testing::ext::TestSize.Level0) +{ + const size_t* allDevicesId = nullptr; + const size_t** pAllDevicesId = &allDevicesId; + uint32_t deviceCount = 1; + uint32_t* pDeviceCount = &deviceCount; + OHOS::HDI::Nnrt::V2_0::MockIPreparedModel::m_ExpectRetCode = OH_NN_FAILED; + OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(pAllDevicesId, pDeviceCount); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: device_get_all_devices_id_005 + * @tc.desc: Verify the success of the OH_NNDevice_GetAllDevicesID function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_all_devices_id_005, testing::ext::TestSize.Level0) +{ + const size_t* allDevicesId = nullptr; + const size_t** pAllDevicesId = &allDevicesId; + uint32_t deviceCount = 1; + uint32_t* pDeviceCount = &deviceCount; + OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(pAllDevicesId, pDeviceCount); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: device_get_name_001 + * @tc.desc: Verify the name is nullptr of the OH_NNDevice_GetName function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_name_001, testing::ext::TestSize.Level0) +{ + size_t deviceID = 1; + const char **name = nullptr; + OH_NN_ReturnCode ret = OH_NNDevice_GetName(deviceID, name); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_get_name_002 + * @tc.desc: Verify the *name is not nullptr of the OH_NNDevice_GetName function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_name_002, testing::ext::TestSize.Level0) +{ + size_t deviceID = 1; + const char* name = "diviceId"; + const char** pName = &name; + OH_NN_ReturnCode ret = OH_NNDevice_GetName(deviceID, pName); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_get_name_003 + * @tc.desc: Verify the error happened when getting name of deviceID of the OH_NNDevice_GetName function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_name_003, testing::ext::TestSize.Level0) +{ + size_t deviceID = 0; + const char* name = nullptr; + const char** pName = &name; + OH_NN_ReturnCode ret = OH_NNDevice_GetName(deviceID, pName); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/* + * @tc.name: device_get_name_004 + * @tc.desc: Verify the success of the OH_NNDevice_GetName function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_name_004, testing::ext::TestSize.Level0) +{ + size_t deviceID = 1; + const char* name = nullptr; + const char** pName = &name; + OH_NN_ReturnCode ret = OH_NNDevice_GetName(deviceID, pName); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/* + * @tc.name: device_get_type_001 + * @tc.desc: Verify the device is nullptr of the OH_NNDevice_GetType function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_type_001, testing::ext::TestSize.Level0) +{ + size_t deviceID = 0; + OH_NN_DeviceType deviceType = OH_NN_CPU; + OH_NN_DeviceType* pDeviceType = &deviceType; + OH_NN_ReturnCode ret = OH_NNDevice_GetType(deviceID, pDeviceType); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_get_type_002 + * @tc.desc: Verify the OH_NN_DeviceType is nullptr of the OH_NNDevice_GetType function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_type_002, testing::ext::TestSize.Level0) +{ + size_t deviceID = 1; + OH_NN_DeviceType* pDeviceType = nullptr; + OH_NN_ReturnCode ret = OH_NNDevice_GetType(deviceID, pDeviceType); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/* + * @tc.name: device_get_type_003 + * @tc.desc: Verify the error happened when getting name of deviceID of the OH_NNDevice_GetType function. + * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_type_003, testing::ext::TestSize.Level0) +{ + size_t deviceID = 1; + OH_NN_DeviceType deviceType = OH_NN_OTHERS; + OH_NN_DeviceType* pDeviceType = &deviceType; + OH_NN_ReturnCode ret = OH_NNDevice_GetType(deviceID, pDeviceType); + EXPECT_EQ(OH_NN_UNAVALIDABLE_DEVICE, ret); +} + +/* + * @tc.name: device_get_type_004 + * @tc.desc: Verify the success of the OH_NNDevice_GetType function. 
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, device_get_type_004, testing::ext::TestSize.Level0) +{ + size_t deviceID = 1; + OH_NN_DeviceType deviceType = OH_NN_CPU; + OH_NN_DeviceType* pDeviceType = &deviceType; + OH_NN_ReturnCode ret = OH_NNDevice_GetType(deviceID, pDeviceType); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} +} // namespace Unittest +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/unittest/components/v2_0/neural_network_runtime_test/neural_network_runtime_test.h b/test/unittest/components/v2_0/neural_network_runtime_test/neural_network_runtime_test.h new file mode 100644 index 0000000000000000000000000000000000000000..dc18e2b4452ff57e16a9ae963dd1dc8df08a717d --- /dev/null +++ b/test/unittest/components/v2_0/neural_network_runtime_test/neural_network_runtime_test.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_UNITTEST_H +#define NEURAL_NETWORK_RUNTIME_UNITTEST_H + +#include + +#include "interfaces/kits/c/neural_network_runtime.h" +#include "frameworks/native/inner_model.h" +#include "frameworks/native/executor.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Unittest { +class NeuralNetworkRuntimeTest : public testing::Test { +public: + OH_NN_ReturnCode BuildModelGraph(InnerModel& innerModel); + void InitIndices(); + void AddModelTensor(InnerModel& innerModel); + void SetInnerBuild(InnerModel& innerModel); + void SetExecutor(Executor& executor); + void SetInputAndOutput(Executor& executor); + void SetTensor(); + +public: + OH_NN_UInt32Array m_inputIndices; + OH_NN_UInt32Array m_outputIndices; + OH_NN_UInt32Array m_paramIndices; + OH_NN_Tensor m_tensor; + + uint32_t m_inputIndexs[2] {0, 1}; + uint32_t m_outputIndexs[1] {2}; + uint32_t m_paramIndexs[1] {3}; +}; +} // namespace Unittest +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_UNITTEST_H diff --git a/test/unittest/components/v2_0/transform/transform_test.cpp b/test/unittest/components/v2_0/transform/transform_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..94c6bf984f89e4e1a2c10c436acb3037057d169f --- /dev/null +++ b/test/unittest/components/v2_0/transform/transform_test.cpp @@ -0,0 +1,516 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +#include "frameworks/native/transform.h" +#include "frameworks/native/memory_manager.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class TransformTestTest : public testing::Test { +public: + TransformTestTest() = default; + ~TransformTestTest() = default; +}; + +/** + * @tc.name: transform_gettypesize_001 + * @tc.desc: Verify the GetTypeSize function return 1. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_gettypesize_001, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_BOOL; + uint32_t result = GetTypeSize(dataType); + EXPECT_EQ(static_cast(1), result); +} + +/** + * @tc.name: transform_gettypesize_002 + * @tc.desc: Verify the GetTypeSize function return 2. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_gettypesize_002, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT16; + uint32_t result = GetTypeSize(dataType); + EXPECT_EQ(static_cast(2), result); +} + +/** + * @tc.name: transform_gettypesize_003 + * @tc.desc: Verify the GetTypeSize function return 4. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_gettypesize_003, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT32; + uint32_t result = GetTypeSize(dataType); + EXPECT_EQ(static_cast(4), result); +} + +/** + * @tc.name: transform_gettypesize_004 + * @tc.desc: Verify the GetTypeSize function return 8. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_gettypesize_004, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT64; + uint32_t result = GetTypeSize(dataType); + EXPECT_EQ(static_cast(8), result); +} + +/** + * @tc.name: transform_gettypesize_005 + * @tc.desc: Verify the GetTypeSize function return 0. 
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_gettypesize_005, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UNKNOWN; + uint32_t result = GetTypeSize(dataType); + EXPECT_EQ(static_cast(0), result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_001 + * @tc.desc: Verify the TransformDataType function return DATA_TYPE_BOOL. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_001, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_BOOL; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_BOOL, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_002 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT8. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_002, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT8; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_INT8, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_003 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT16. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_003, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT16; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_INT16, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_004 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT32. 
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_004, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT32; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_INT32, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_005 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_INT64. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_005, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_INT64; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_INT64, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_006 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT8. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_006, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UINT8; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_UINT8, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_007 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT16. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_007, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UINT16; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_UINT16, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_008 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT32. 
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_008, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UINT32; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_UINT32, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_009 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_UINT64. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_009, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UINT64; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_UINT64, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_010 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_FLOAT16. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_010, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_FLOAT16; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_FLOAT16, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_011 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_FLOAT32. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_011, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_FLOAT32; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_FLOAT32, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_012 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_UNKNOWN. 
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_012, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_UNKNOWN; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_UNKNOWN, result); +} + +/** + * @tc.name: transform_nntoms_transformdatatype_013 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_FLOAT64 + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformdatatype_013, TestSize.Level0) +{ + OH_NN_DataType dataType = OH_NN_FLOAT64; + mindspore::lite::DataType result = NNToMS::TransformDataType(dataType); + EXPECT_EQ(mindspore::lite::DATA_TYPE_FLOAT64, result); +} + +/** + * @tc.name: transform_nntoms_transformformat_001 + * @tc.desc: Verify the TransFormat function return FORMAT_NCHW. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformformat_001, TestSize.Level0) +{ + OH_NN_Format format = OH_NN_FORMAT_NCHW; + mindspore::lite::Format result = NNToMS::TransformFormat(format); + EXPECT_EQ(mindspore::lite::FORMAT_NCHW, result); +} + +/** + * @tc.name: transform_nntoms_transformformat_002 + * @tc.desc: Verify the TransFormat function return FORMAT_NHWC. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformformat_002, TestSize.Level0) +{ + OH_NN_Format format = OH_NN_FORMAT_NHWC; + mindspore::lite::Format result = NNToMS::TransformFormat(format); + EXPECT_EQ(mindspore::lite::FORMAT_NHWC, result); +} + +/** + * @tc.name: transform_nntoms_transformformat_003 + * @tc.desc: Verify the TransFormat function return FORMAT_NHWC. 
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformformat_003, TestSize.Level0) +{ + OH_NN_Format format = OH_NN_FORMAT_NONE; + mindspore::lite::Format result = NNToMS::TransformFormat(format); + EXPECT_EQ(mindspore::lite::FORMAT_NHWC, result); +} + +/** + * @tc.name: transform_nntoms_transformfusiontype_001 + * @tc.desc: Verify the TransfromFusionType function return ACTIVATION_TYPE_NO_ACTIVATION. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformfusiontype_001, TestSize.Level0) +{ + OH_NN_FuseType type = OH_NN_FUSED_NONE; + mindspore::lite::ActivationType result = NNToMS::TransfromFusionType(type); + EXPECT_EQ(mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION, result); +} + +/** + * @tc.name: transform_nntoms_transformfusiontype_002 + * @tc.desc: Verify the TransfromFusionType function return ACTIVATION_TYPE_RELU. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformfusiontype_002, TestSize.Level0) +{ + OH_NN_FuseType type = OH_NN_FUSED_RELU; + mindspore::lite::ActivationType result = NNToMS::TransfromFusionType(type); + EXPECT_EQ(mindspore::lite::ACTIVATION_TYPE_RELU, result); +} + +/** + * @tc.name: transform_nntoms_transformfusiontype_003 + * @tc.desc: Verify the TransfromFusionType function return ACTIVATION_TYPE_RELU6. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformfusiontype_003, TestSize.Level0) +{ + OH_NN_FuseType type = OH_NN_FUSED_RELU6; + mindspore::lite::ActivationType result = NNToMS::TransfromFusionType(type); + EXPECT_EQ(mindspore::lite::ACTIVATION_TYPE_RELU6, result); +} + +/** + * @tc.name: transform_nntoms_transformquanttype_001 + * @tc.desc: Verify the TransformQuantType function return QUANT_TYPE_NONE. 
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformquanttype_001, TestSize.Level0) +{ + OHOS::NeuralNetworkRuntime::Ops::OpsQuantType type = OHOS::NeuralNetworkRuntime::Ops::OpsQuantType::QUANT_NONE; + mindspore::lite::QuantType result = NNToMS::TransformQuantType(type); + EXPECT_EQ(mindspore::lite::QUANT_TYPE_NONE, result); +} + +/** + * @tc.name: transform_nntoms_transformquanttype_002 + * @tc.desc: Verify the TransformQuantType function return QUANT_TYPE_ALL. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_nntoms_transformquanttype_002, TestSize.Level0) +{ + OHOS::NeuralNetworkRuntime::Ops::OpsQuantType type = OHOS::NeuralNetworkRuntime::Ops::OpsQuantType::QUANT_ALL; + mindspore::lite::QuantType result = NNToMS::TransformQuantType(type); + EXPECT_EQ(mindspore::lite::QUANT_TYPE_ALL, result); +} + + +/** + * @tc.name: transform_mstonn_transformdatatype_001 + * @tc.desc: Verify the TransformDataType function return OH_NN_BOOL. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_001, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_BOOL; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_BOOL, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_002 + * @tc.desc: Verify the TransDataType function return OH_NN_INT8. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_002, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_INT8; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_INT8, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_003 + * @tc.desc: Verify the TransDataType function return OH_NN_INT16. 
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_003, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_INT16; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_INT16, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_004 + * @tc.desc: Verify the TransDataType function return OH_NN_INT32. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_004, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_INT32; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_INT32, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_005 + * @tc.desc: Verify the TransDataType function return OH_NN_INT64. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_005, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_INT64; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_INT64, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_006 + * @tc.desc: Verify the TransDataType function return OH_NN_UINT8. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_006, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_UINT8; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_UINT8, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_007 + * @tc.desc: Verify the TransDataType function return OH_NN_UINT16. 
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_007, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_UINT16; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_UINT16, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_008 + * @tc.desc: Verify the TransDataType function return OH_NN_UINT32. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_008, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_UINT32; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_UINT32, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_009 + * @tc.desc: Verify the TransDataType function return OH_NN_UINT64. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_009, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_UINT64; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_UINT64, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_010 + * @tc.desc: Verify the TransDataType function return OH_NN_FLOAT16 + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_010, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_FLOAT16; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_FLOAT16, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_011 + * @tc.desc: Verify the TransDataType function return OH_NN_FLOAT32. 
+ * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_011, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_FLOAT32; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_FLOAT32, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_012 + * @tc.desc: Verify the TransDataType function return OH_NN_UNKNOWN. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_012, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_UNKNOWN; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_UNKNOWN, result); +} + +/** + * @tc.name: transform_mstonn_transformdatatype_013 + * @tc.desc: Verify the TransDataType function return DATA_TYPE_FLOAT64 + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformdatatype_013, TestSize.Level0) +{ + mindspore::lite::DataType dataType = mindspore::lite::DATA_TYPE_FLOAT64; + OH_NN_DataType result = MSToNN::TransformDataType(dataType); + EXPECT_EQ(OH_NN_FLOAT64, result); +} + +/** + * @tc.name: transform_mstonn_transformquantparams_001 + * @tc.desc: Verify the TransformQuantParams function. + * @tc.type: FUNC + */ +HWTEST_F(TransformTestTest, transform_mstonn_transformquantparams_001, TestSize.Level0) +{ + std::vector msQuantParams = {{1, 1.0, 8}}; + std::vector result = MSToNN::TransformQuantParams(msQuantParams); + EXPECT_EQ(msQuantParams.size(), result.size()); +} +} // namespace UnitTest +} // namespace NeuralNetworkRuntime +} // namespace OHOS