diff --git a/frameworks/native/neural_network_runtime/nncompiler.cpp b/frameworks/native/neural_network_runtime/nncompiler.cpp
index 725bafbd80caaea1dd89a4511ff78ddd3ea91f6b..c05334689db101ad715d99feb4eb8a823d51cfcf 100644
--- a/frameworks/native/neural_network_runtime/nncompiler.cpp
+++ b/frameworks/native/neural_network_runtime/nncompiler.cpp
@@ -693,7 +693,7 @@ NNExecutor* NNCompiler::CreateExecutor()
     }
 
     if (m_preparedModel == nullptr) {
-        LOGE("[NNCompiler] CreateExecutor failed, m_device is nullptr");
+        LOGE("[NNCompiler] CreateExecutor failed, m_preparedModel is nullptr");
         return nullptr;
     }
 
diff --git a/frameworks/native/neural_network_runtime/nntensor.cpp b/frameworks/native/neural_network_runtime/nntensor.cpp
index 0bfdc71f015143916eec2c69af6f3b517eb7f302..5c11945cdcfcf41e2190a26afb9328747f1d3bff 100644
--- a/frameworks/native/neural_network_runtime/nntensor.cpp
+++ b/frameworks/native/neural_network_runtime/nntensor.cpp
@@ -181,21 +181,41 @@ void* NNTensor2_0::GetData() const
     return m_data;
 }
 
+void NNTensor2_0::SetData(void* data)
+{
+    m_data = data;
+}
+
 int NNTensor2_0::GetFd() const
 {
     return m_fd;
 }
 
+void NNTensor2_0::SetFd(int fd)
+{
+    m_fd = fd;
+}
+
 size_t NNTensor2_0::GetSize() const
 {
     return m_size;
 }
 
+void NNTensor2_0::SetSize(size_t size)
+{
+    m_size = size;
+}
+
 size_t NNTensor2_0::GetOffset() const
 {
     return m_offset;
 }
 
+void NNTensor2_0::SetOffset(size_t offset)
+{
+    m_offset = offset;
+}
+
 OH_NN_ReturnCode NNTensor2_0::AllocateMemory(size_t length)
 {
     BackendManager& backendManager = BackendManager::GetInstance();
diff --git a/frameworks/native/neural_network_runtime/nntensor.h b/frameworks/native/neural_network_runtime/nntensor.h
index c15e3cd3fddee9c9b0faf296ca8b3deda19bec7e..0e4fa317811cac4a06a9accec9a824f13bd345c8 100644
--- a/frameworks/native/neural_network_runtime/nntensor.h
+++ b/frameworks/native/neural_network_runtime/nntensor.h
@@ -36,6 +36,10 @@ public:
     int GetFd() const override;
     size_t GetSize() const override;
     size_t GetOffset() const override;
+    void SetData(void* data);
+    void SetFd(int fd);
+    void SetSize(size_t size);
+    void SetOffset(size_t offset);
     size_t GetBackendID() const override;
     bool CheckTensorData() const;
 
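For context outside the patch: a minimal sketch of how the new NNTensor2_0 setters might be used to wrap an externally allocated shared-memory region. The helper name and the enclosing namespace are illustrative assumptions, not part of this change.

// Hypothetical helper (not in the patch): point an NNTensor2_0 at memory
// that was mapped elsewhere, e.g. received over IPC.
void WrapExternalBuffer(OHOS::NeuralNetworkRuntime::NNTensor2_0& tensor,
                        void* mappedAddr, int fd, size_t size)
{
    tensor.SetData(mappedAddr); // tensor now aliases caller-owned memory
    tensor.SetFd(fd);           // shared-memory file descriptor
    tensor.SetSize(size);       // byte length of the mapping
    tensor.SetOffset(0);        // payload starts at the mapping base
}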
":NeuralNetworkRuntimeV1_0Test", ":NeuralNetworkRuntimeV2_0Test", diff --git a/test/unittest/components/nn_compiled_cache/nn_compiled_cache_test.cpp b/test/unittest/components/nn_compiled_cache/nn_compiled_cache_test.cpp index 8450eb64f1ef57296e59ce2702dcefcf761d050a..8cdc1c7301b96a0c4e6951b792564d47b4479727 100644 --- a/test/unittest/components/nn_compiled_cache/nn_compiled_cache_test.cpp +++ b/test/unittest/components/nn_compiled_cache/nn_compiled_cache_test.cpp @@ -18,6 +18,8 @@ #include "nncompiled_cache.h" #include "device.h" +#include "nnbackend.h" +#include "backend_manager.h" #include "interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h" #include "common/utils.h" @@ -34,6 +36,41 @@ public: ~NNCompiledCacheTest() = default; }; +class MockIDevice : public Device { +public: + MOCK_METHOD1(GetDeviceName, OH_NN_ReturnCode(std::string&)); + MOCK_METHOD1(GetVendorName, OH_NN_ReturnCode(std::string&)); + MOCK_METHOD1(GetVersion, OH_NN_ReturnCode(std::string&)); + MOCK_METHOD1(GetDeviceType, OH_NN_ReturnCode(OH_NN_DeviceType&)); + MOCK_METHOD1(GetDeviceStatus, OH_NN_ReturnCode(DeviceStatus&)); + MOCK_METHOD2(GetSupportedOperation, OH_NN_ReturnCode(std::shared_ptr, + std::vector&)); + MOCK_METHOD1(IsFloat16PrecisionSupported, OH_NN_ReturnCode(bool&)); + MOCK_METHOD1(IsPerformanceModeSupported, OH_NN_ReturnCode(bool&)); + MOCK_METHOD1(IsPrioritySupported, OH_NN_ReturnCode(bool&)); + MOCK_METHOD1(IsDynamicInputSupported, OH_NN_ReturnCode(bool&)); + MOCK_METHOD1(IsModelCacheSupported, OH_NN_ReturnCode(bool&)); + MOCK_METHOD3(PrepareModel, OH_NN_ReturnCode(std::shared_ptr, + const ModelConfig&, + std::shared_ptr&)); + MOCK_METHOD3(PrepareModel, OH_NN_ReturnCode(const void*, + const ModelConfig&, + std::shared_ptr&)); + MOCK_METHOD4(PrepareModelFromModelCache, OH_NN_ReturnCode(const std::vector&, + const ModelConfig&, + std::shared_ptr&, + bool&)); + MOCK_METHOD3(PrepareOfflineModel, OH_NN_ReturnCode(std::shared_ptr, + const ModelConfig&, + std::shared_ptr&)); + MOCK_METHOD1(AllocateBuffer, void*(size_t)); + MOCK_METHOD2(AllocateTensorBuffer, void*(size_t, std::shared_ptr)); + MOCK_METHOD2(AllocateTensorBuffer, void*(size_t, std::shared_ptr)); + MOCK_METHOD1(ReleaseBuffer, OH_NN_ReturnCode(const void*)); + MOCK_METHOD2(AllocateBuffer, OH_NN_ReturnCode(size_t, int&)); + MOCK_METHOD2(ReleaseBuffer, OH_NN_ReturnCode(int, size_t)); +}; + /** * @tc.name: nncompiledcachetest_save_001 * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. @@ -41,12 +78,181 @@ public: */ HWTEST_F(NNCompiledCacheTest, nncompiledcachetest_save_001, TestSize.Level0) { + LOGE("Save nncompiledcachetest_save_001"); NNCompiledCache nncompiledCache; + std::vector caches; std::string m_cachePath = "a"; uint32_t m_cacheVersion = 1; - EXPECT_EQ(OH_NN_INVALID_PARAMETER, nncompiledCache.Save(caches, m_cachePath, m_cacheVersion)); + OH_NN_ReturnCode ret = nncompiledCache.Save(caches, m_cachePath, m_cacheVersion); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: nncompiledcachetest_save_002 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+
+/**
+ * @tc.name: nncompiledcachetest_save_002
+ * @tc.desc: Verify that Save returns OH_NN_INVALID_PARAMETER when no backend has been set.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompiledCacheTest, nncompiledcachetest_save_002, TestSize.Level0)
+{
+    LOGE("Save nncompiledcachetest_save_002");
+    NNCompiledCache nncompiledCache;
+
+    Buffer buffer;
+    float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    void* data = dataArry;
+    buffer.data = data;
+    buffer.length = 1;
+    std::vector<Buffer> caches;
+    caches.emplace_back(buffer);
+    std::string m_cachePath = "a";
+    uint32_t m_cacheVersion = 1;
+
+    OH_NN_ReturnCode ret = nncompiledCache.Save(caches, m_cachePath, m_cacheVersion);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+std::shared_ptr<Backend> Creator()
+{
+    size_t backendID = 1;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+
+    EXPECT_CALL(*((MockIDevice *) device.get()), GetDeviceStatus(::testing::_))
+        .WillOnce(Invoke([](DeviceStatus& status) {
+            // Directly modify the reference parameter passed in
+            status = AVAILABLE;
+            return OH_NN_SUCCESS; // assume a success return code
+        }));
+
+    std::string backendName = "mock";
+    EXPECT_CALL(*((MockIDevice *) device.get()), GetDeviceName(::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(backendName), ::testing::Return(OH_NN_SUCCESS)));
+
+    EXPECT_CALL(*((MockIDevice *) device.get()), GetVendorName(::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(backendName), ::testing::Return(OH_NN_SUCCESS)));
+
+    EXPECT_CALL(*((MockIDevice *) device.get()), GetVersion(::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(backendName), ::testing::Return(OH_NN_SUCCESS)));
+
+    EXPECT_CALL(*((MockIDevice *) device.get()), AllocateBuffer(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_SUCCESS));
+
+    std::shared_ptr<Backend> backend = std::make_unique<NNBackend>(device, backendID);
+    return backend;
+}
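Creator() is consumed through BackendManager. As a rough sketch of that flow (GetBackend(backendID) is an assumption about the manager's lookup API; only RegisterBackend appears in this patch):

BackendManager& manager = BackendManager::GetInstance();
manager.RegisterBackend("mock", Creator);                 // factory registered once
std::shared_ptr<Backend> backend = manager.GetBackend(1); // resolved by backend ID (assumed API)
// NNCompiledCache::SetBackend(1) can now succeed, as the tests below rely on.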
+
+/**
+ * @tc.name: nncompiledcachetest_save_003
+ * @tc.desc: Verify that Save returns OH_NN_INVALID_PARAMETER for an invalid cache path after a mock backend is registered.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompiledCacheTest, nncompiledcachetest_save_003, TestSize.Level0)
+{
+    LOGE("Save nncompiledcachetest_save_003");
+    NNCompiledCache nncompiledCache;
+
+    size_t backendID = 1;
+    BackendManager& backendManager = BackendManager::GetInstance();
+
+    std::string backendName = "mock";
+    std::function<std::shared_ptr<Backend>()> creator = Creator;
+
+    backendManager.RegisterBackend(backendName, creator);
+
+    OH_NN_ReturnCode ret = nncompiledCache.SetBackend(backendID);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+
+    Buffer buffer;
+    float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    void* data = dataArry;
+    buffer.data = data;
+    buffer.length = 1;
+    std::vector<Buffer> caches;
+    caches.emplace_back(buffer);
+    std::string m_cachePath = "a";
+    uint32_t m_cacheVersion = 1;
+
+    OH_NN_ReturnCode retSave = nncompiledCache.Save(caches, m_cachePath, m_cacheVersion);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, retSave);
+}
+
+std::shared_ptr<Backend> Creator2()
+{
+    size_t backendID = 2;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+
+    EXPECT_CALL(*((MockIDevice *) device.get()), GetDeviceStatus(::testing::_))
+        .WillOnce(Invoke([](DeviceStatus& status) {
+            // Directly modify the reference parameter passed in
+            status = AVAILABLE;
+            return OH_NN_SUCCESS; // assume a success return code
+        }));
+
+    std::string backendName = "mock";
+    EXPECT_CALL(*((MockIDevice *) device.get()), GetDeviceName(::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(backendName), ::testing::Return(OH_NN_SUCCESS)));
+
+    EXPECT_CALL(*((MockIDevice *) device.get()), GetVendorName(::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(backendName), ::testing::Return(OH_NN_SUCCESS)));
+
+    EXPECT_CALL(*((MockIDevice *) device.get()), GetVersion(::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(backendName), ::testing::Return(OH_NN_SUCCESS)));
+
+    EXPECT_CALL(*((MockIDevice *) device.get()), AllocateBuffer(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_SUCCESS));
+
+    static char ptr = 'a'; // static so the returned pointer stays valid after Creator2 returns
+    EXPECT_CALL(*((MockIDevice *) device.get()), AllocateBuffer(::testing::_))
+        .WillRepeatedly(::testing::Return(&ptr));
+
+    std::shared_ptr<Backend> backend = std::make_unique<NNBackend>(device, backendID);
+    return backend;
+}
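A note on Creator2's AllocateBuffer stub: the mock must hand back memory that stays valid after the factory returns, which is why the line above uses static storage. An arena-style variant (names illustrative, not from the patch) could look like:

// Hedged alternative: serve mock allocations from a fixed static arena.
static char g_mockArena[64];
EXPECT_CALL(*((MockIDevice *) device.get()), AllocateBuffer(::testing::_))
    .WillRepeatedly(Invoke([](size_t length) -> void* {
        return length <= sizeof(g_mockArena) ? g_mockArena : nullptr;
    }));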
+
+/**
+ * @tc.name: nncompiledcachetest_save_004
+ * @tc.desc: Verify that Save succeeds on a writable directory, and again after switching to a second backend with a model name set.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompiledCacheTest, nncompiledcachetest_save_004, TestSize.Level0)
+{
+    LOGE("Save nncompiledcachetest_save_004");
+    NNCompiledCache nncompiledCache;
+
+    size_t backendID = 1;
+    OH_NN_ReturnCode ret = nncompiledCache.SetBackend(backendID);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+
+    Buffer buffer;
+    float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    void* data = dataArry;
+    buffer.data = data;
+    buffer.length = 1;
+    std::vector<Buffer> caches;
+    caches.emplace_back(buffer);
+    std::string m_cachePath = "/data/data";
+    uint32_t m_cacheVersion = 1;
+
+    OH_NN_ReturnCode retSave = nncompiledCache.Save(caches, m_cachePath, m_cacheVersion);
+    EXPECT_EQ(OH_NN_SUCCESS, retSave);
+
+    size_t backendID2 = 2;
+    BackendManager& backendManager = BackendManager::GetInstance();
+
+    std::string backendName = "mock";
+    std::function<std::shared_ptr<Backend>()> creator = Creator2;
+
+    backendManager.RegisterBackend(backendName, creator);
+
+    OH_NN_ReturnCode retSetBackend = nncompiledCache.SetBackend(backendID2);
+    EXPECT_EQ(OH_NN_SUCCESS, retSetBackend);
+
+    std::string m_modelName = "test";
+    nncompiledCache.SetModelName(m_modelName);
+
+    OH_NN_ReturnCode retSave2 = nncompiledCache.Save(caches, m_cachePath, m_cacheVersion);
+    EXPECT_EQ(OH_NN_SUCCESS, retSave2);
+}
 
@@ -56,12 +262,170 @@ HWTEST_F(NNCompiledCacheTest, nncompiledcachetest_save_001, TestSize.Level0)
 /**
  * @tc.name: nncompiledcachetest_restore_001
  * @tc.desc: Verify that Restore returns OH_NN_INVALID_PARAMETER for an invalid cache path when no backend is set.
  * @tc.type: FUNC
  */
 HWTEST_F(NNCompiledCacheTest, nncompiledcachetest_restore_001, TestSize.Level0)
 {
+    LOGE("Restore nncompiledcachetest_restore_001");
+    NNCompiledCache nncompiledCache;
+
+    std::string m_cachePath = "a";
+    uint32_t m_cacheVersion = 1;
+    std::vector<Buffer> caches;
+
+    OH_NN_ReturnCode ret = nncompiledCache.Restore(m_cachePath, m_cacheVersion, caches);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: nncompiledcachetest_restore_002
+ * @tc.desc: Verify that Restore returns OH_NN_INVALID_PARAMETER when the cache path is empty.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompiledCacheTest, nncompiledcachetest_restore_002, TestSize.Level0)
+{
+    LOGE("Restore nncompiledcachetest_restore_002");
+    NNCompiledCache nncompiledCache;
+
+    std::string m_cachePath;
+    uint32_t m_cacheVersion = 1;
+    std::vector<Buffer> caches;
+
+    OH_NN_ReturnCode ret = nncompiledCache.Restore(m_cachePath, m_cacheVersion, caches);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: nncompiledcachetest_restore_003
+ * @tc.desc: Verify that Restore returns OH_NN_INVALID_PARAMETER when the output cache vector is not empty.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompiledCacheTest, nncompiledcachetest_restore_003, TestSize.Level0)
+{
+    LOGE("Restore nncompiledcachetest_restore_003");
+    NNCompiledCache nncompiledCache;
+
+    std::string m_cachePath = "a";
+    uint32_t m_cacheVersion = 1;
+    Buffer buffer;
+    float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    void* data = dataArry;
+    buffer.data = data;
+    buffer.length = 1;
+    std::vector<Buffer> caches;
+    caches.emplace_back(buffer);
+
+    OH_NN_ReturnCode ret = nncompiledCache.Restore(m_cachePath, m_cacheVersion, caches);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: nncompiledcachetest_restore_004
+ * @tc.desc: Verify that Restore returns OH_NN_INVALID_PARAMETER for an invalid cache path even after a backend is set.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompiledCacheTest, nncompiledcachetest_restore_004, TestSize.Level0)
+{
+    LOGE("Restore nncompiledcachetest_restore_004");
     NNCompiledCache nncompiledCache;
+
+    size_t backendID = 1;
+    OH_NN_ReturnCode ret = nncompiledCache.SetBackend(backendID);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+
     std::string m_cachePath = "a";
     uint32_t m_cacheVersion = 1;
     std::vector<Buffer> caches;
 
-    EXPECT_EQ(OH_NN_INVALID_PARAMETER, nncompiledCache.Restore(m_cachePath, m_cacheVersion, caches));
+    OH_NN_ReturnCode retRestore = nncompiledCache.Restore(m_cachePath, m_cacheVersion, caches);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, retRestore);
+}
+
+/**
+ * @tc.name: nncompiledcachetest_restore_005
+ * @tc.desc: Verify that Restore returns OH_NN_INVALID_PARAMETER when the cache directory holds no matching cache info.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompiledCacheTest, nncompiledcachetest_restore_005, TestSize.Level0)
+{
+    LOGE("Restore nncompiledcachetest_restore_005");
+    NNCompiledCache nncompiledCache;
+
+    size_t backendID = 1;
+    OH_NN_ReturnCode ret = nncompiledCache.SetBackend(backendID);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+
+    std::string m_modelName = "test";
+    nncompiledCache.SetModelName(m_modelName);
+
+    std::string m_cachePath = "/data";
+    uint32_t m_cacheVersion = 1;
+    std::vector<Buffer> caches;
+
+    OH_NN_ReturnCode retRestore = nncompiledCache.Restore(m_cachePath, m_cacheVersion, caches);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, retRestore);
+}
+
+/**
+ * @tc.name: nncompiledcachetest_restore_006
+ * @tc.desc: Verify that Restore returns OH_NN_MEMORY_ERROR when the caches under /data/data cannot be restored into buffers.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompiledCacheTest, nncompiledcachetest_restore_006, TestSize.Level0)
+{
+    LOGE("Restore nncompiledcachetest_restore_006");
+    NNCompiledCache nncompiledCache;
+
+    size_t backendID = 1;
+    OH_NN_ReturnCode ret = nncompiledCache.SetBackend(backendID);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+
+    std::string m_cachePath = "/data/data";
+    uint32_t m_cacheVersion = 1;
+    std::vector<Buffer> caches;
+
+    OH_NN_ReturnCode retRestore = nncompiledCache.Restore(m_cachePath, m_cacheVersion, caches);
+    EXPECT_EQ(OH_NN_MEMORY_ERROR, retRestore);
+}
+
+/**
+ * @tc.name: nncompiledcachetest_restore_007
+ * @tc.desc: Verify that Restore returns OH_NN_MEMORY_ERROR when the caches under /data/data cannot be restored into buffers.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompiledCacheTest, nncompiledcachetest_restore_007, TestSize.Level0)
+{
+    LOGE("Restore nncompiledcachetest_restore_007");
+    NNCompiledCache nncompiledCache;
+
+    size_t backendID = 1;
+    OH_NN_ReturnCode ret = nncompiledCache.SetBackend(backendID);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+
+    std::string m_cachePath = "/data/data";
+    uint32_t m_cacheVersion = 1;
+    std::vector<Buffer> caches;
+
+    OH_NN_ReturnCode retRestore = nncompiledCache.Restore(m_cachePath, m_cacheVersion, caches);
+    EXPECT_EQ(OH_NN_MEMORY_ERROR, retRestore);
+}
+
+/**
+ * @tc.name: nncompiledcachetest_restore_008
+ * @tc.desc: Verify that Restore returns OH_NN_INVALID_PARAMETER when restoring through the second mock backend.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompiledCacheTest, nncompiledcachetest_restore_008, TestSize.Level0)
+{
+    LOGE("Restore nncompiledcachetest_restore_008");
+    NNCompiledCache nncompiledCache;
+
+    size_t backendID = 2;
+    OH_NN_ReturnCode ret = nncompiledCache.SetBackend(backendID);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+
+    std::string m_cachePath = "/data/data";
+    uint32_t m_cacheVersion = 1;
+    std::vector<Buffer> caches;
+
+    OH_NN_ReturnCode retRestore = nncompiledCache.Restore(m_cachePath, m_cacheVersion, caches);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, retRestore);
+}
 
@@ -71,10 +435,35 @@ HWTEST_F(NNCompiledCacheTest, nncompiledcachetest_restore_001, TestSize.Level0)
 /**
  * @tc.name: nncompiledcachetest_setbackend_001
  * @tc.desc: Verify that SetBackend returns OH_NN_INVALID_PARAMETER for an unregistered backend ID.
  * @tc.type: FUNC
  */
 HWTEST_F(NNCompiledCacheTest, nncompiledcachetest_setbackend_001, TestSize.Level0)
 {
+    LOGE("SetBackend nncompiledcachetest_setbackend_001");
     NNCompiledCache nncompiledCache;
+
+    size_t backendID = 3;
+
+    OH_NN_ReturnCode ret = nncompiledCache.SetBackend(backendID);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: nncompiledcachetest_setbackend_002
+ * @tc.desc: Verify that SetBackend succeeds once the mock backend is registered.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompiledCacheTest, nncompiledcachetest_setbackend_002, TestSize.Level0)
+{
+    LOGE("SetBackend nncompiledcachetest_setbackend_002");
+    NNCompiledCache nncompiledCache;
+
     size_t backendID = 1;
+    BackendManager& backendManager = BackendManager::GetInstance();
+
+    std::string backendName = "mock";
+    std::function<std::shared_ptr<Backend>()> creator = Creator;
+
+    backendManager.RegisterBackend(backendName, creator);
 
-    EXPECT_EQ(OH_NN_INVALID_PARAMETER, nncompiledCache.SetBackend(backendID));
+    OH_NN_ReturnCode ret = nncompiledCache.SetBackend(backendID);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
 }
 
@@ -84,8 +473,121 @@ HWTEST_F(NNCompiledCacheTest, nncompiledcachetest_setbackend_001, TestSize.Level0)
 /**
  * @tc.name: nncompiledcachetest_setmodelname_001
  * @tc.desc: Verify that SetModelName accepts an empty model name without crashing.
  * @tc.type: FUNC
  */
 HWTEST_F(NNCompiledCacheTest, nncompiledcachetest_setmodelname_001, TestSize.Level0)
 {
+    LOGE("SetModelName nncompiledcachetest_setmodelname_001");
     NNCompiledCache nncompiledCache;
     std::string m_modelName;
+    nncompiledCache.SetModelName(m_modelName);
+}
+
+/**
+ * @tc.name: nncompiledcachetest_writecacheinfo_001
+ * @tc.desc: Verify that WriteCacheInfo returns OH_NN_INVALID_PARAMETER for an invalid cache directory.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompiledCacheTest, nncompiledcachetest_writecacheinfo_001, TestSize.Level0)
+{
+    LOGE("WriteCacheInfo nncompiledcachetest_writecacheinfo_001");
+    NNCompiledCache nncompiledCache;
+
+    uint32_t cacheSize = 1;
+    std::unique_ptr<int64_t[]> cacheInfo = std::make_unique<int64_t[]>(cacheSize);
+    std::string cacheDir = "mock";
+
+    OH_NN_ReturnCode ret = nncompiledCache.WriteCacheInfo(cacheSize, cacheInfo, cacheDir);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: nncompiledcachetest_writecacheinfo_002
+ * @tc.desc: Verify that WriteCacheInfo succeeds for a writable cache directory.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompiledCacheTest, nncompiledcachetest_writecacheinfo_002, TestSize.Level0)
+{
+    LOGE("WriteCacheInfo nncompiledcachetest_writecacheinfo_002");
+    NNCompiledCache nncompiledCache;
+
+    uint32_t cacheSize = 1;
+    std::unique_ptr<int64_t[]> cacheInfo = std::make_unique<int64_t[]>(cacheSize);
+    std::string cacheDir = "/data/data";
+
+    OH_NN_ReturnCode ret = nncompiledCache.WriteCacheInfo(cacheSize, cacheInfo, cacheDir);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/**
+ * @tc.name: nncompiledcachetest_checkcacheinfo_001
+ * @tc.desc: Verify that CheckCacheInfo returns OH_NN_INVALID_FILE when the cache info file does not exist.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompiledCacheTest, nncompiledcachetest_checkcacheinfo_001, TestSize.Level0)
+{
+    LOGE("CheckCacheInfo nncompiledcachetest_checkcacheinfo_001");
+    NNCompiledCache nncompiledCache;
+
+    NNCompiledCacheInfo modelCacheInfo;
+    std::string cacheInfoPath = "MOCK";
+
+    OH_NN_ReturnCode ret = nncompiledCache.CheckCacheInfo(modelCacheInfo, cacheInfoPath);
+    EXPECT_EQ(OH_NN_INVALID_FILE, ret);
+}
+
+/**
+ * @tc.name: nncompiledcachetest_checkcacheinfo_002
+ * @tc.desc: Verify that CheckCacheInfo returns OH_NN_INVALID_FILE for a missing cache info file under /data/data.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompiledCacheTest, nncompiledcachetest_checkcacheinfo_002, TestSize.Level0)
+{
+    LOGE("CheckCacheInfo nncompiledcachetest_checkcacheinfo_002");
+    NNCompiledCache nncompiledCache;
+
+    NNCompiledCacheInfo modelCacheInfo;
+    std::string cacheInfoPath = "/data/data/0.nncache";
+
+    OH_NN_ReturnCode ret = nncompiledCache.CheckCacheInfo(modelCacheInfo, cacheInfoPath);
+    EXPECT_EQ(OH_NN_INVALID_FILE, ret);
+}
+
+/**
+ * @tc.name: nncompiledcachetest_checkcacheinfo_003
+ * @tc.desc: Verify that CheckCacheInfo returns OH_NN_INVALID_PARAMETER when the cache info exists but no backend is set.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompiledCacheTest, nncompiledcachetest_checkcacheinfo_003, TestSize.Level0)
+{
+    LOGE("CheckCacheInfo nncompiledcachetest_checkcacheinfo_003");
+    NNCompiledCache nncompiledCache;
+
+    NNCompiledCacheInfo modelCacheInfo;
+    std::string cacheInfoPath = "/data/data/testcache_info.nncache";
+
+    OH_NN_ReturnCode ret = nncompiledCache.CheckCacheInfo(modelCacheInfo, cacheInfoPath);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: nncompiledcachetest_checkcacheinfo_004
+ * @tc.desc: Verify that CheckCacheInfo succeeds once a backend and a model name matching the cache info are set.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompiledCacheTest, nncompiledcachetest_checkcacheinfo_004, TestSize.Level0)
+{
+    LOGE("CheckCacheInfo nncompiledcachetest_checkcacheinfo_004");
+    NNCompiledCache nncompiledCache;
+
+    size_t backendID = 2;
+    OH_NN_ReturnCode retSetBackend = nncompiledCache.SetBackend(backendID);
+    EXPECT_EQ(OH_NN_SUCCESS, retSetBackend);
+
+    std::string m_modelName = "test";
+    nncompiledCache.SetModelName(m_modelName);
+
+    NNCompiledCacheInfo modelCacheInfo;
+    std::string cacheInfoPath = "/data/data/testcache_info.nncache";
+
+    OH_NN_ReturnCode ret = nncompiledCache.CheckCacheInfo(modelCacheInfo, cacheInfoPath);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
 } // namespace UnitTest
 } // namespace NeuralNetworkRuntime
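Read together, the cache tests above drive one happy path. A condensed sketch of that round trip (backend ID, paths, and payload are illustrative; it assumes a backend registered as above and a writable /data/data):

// Hedged round-trip sketch; error handling elided.
NNCompiledCache cache;
cache.SetBackend(1);           // must resolve to a registered backend
cache.SetModelName("test");    // names the cache info file

float payload[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
Buffer buffer;
buffer.data = payload;
buffer.length = sizeof(payload);
std::vector<Buffer> caches {buffer};

cache.Save(caches, "/data/data", 1);      // writes model caches plus the info file

std::vector<Buffer> restored;
cache.Restore("/data/data", 1, restored); // reads them back on a later run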
diff --git a/test/unittest/components/nn_compiler/nn_compiler_test.cpp b/test/unittest/components/nn_compiler/nn_compiler_test.cpp
index 33953621bf2bf9af5d283e9867121f49d6365a9a..99447714efa4f99fb2d663cb85c79f93039a99a3 100644
--- a/test/unittest/components/nn_compiler/nn_compiler_test.cpp
+++ b/test/unittest/components/nn_compiler/nn_compiler_test.cpp
@@ -21,6 +21,7 @@
 #include "device.h"
 #include "interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h"
 #include "common/utils.h"
+#include "inner_model.h"
 
 using namespace testing;
 using namespace testing::ext;
@@ -33,6 +34,7 @@ class NNCompilerTest : public testing::Test {
 public:
     NNCompilerTest() = default;
     ~NNCompilerTest() = default;
+    OH_NN_ReturnCode BuildModel(InnerModel& innerModel);
 };
 
 class MockIDevice : public Device {
@@ -70,6 +72,120 @@ public:
     MOCK_METHOD2(ReleaseBuffer, OH_NN_ReturnCode(int, size_t));
 };
 
+class MockIPreparedModel : public PreparedModel {
+public:
+    MOCK_METHOD1(ExportModelCache, OH_NN_ReturnCode(std::vector<Buffer>&));
+    MOCK_METHOD4(Run, OH_NN_ReturnCode(const std::vector<IOTensor>&,
+        const std::vector<IOTensor>&,
+        std::vector<std::vector<int32_t>>&,
+        std::vector<bool>&));
+    MOCK_METHOD4(Run, OH_NN_ReturnCode(const std::vector<NN_Tensor*>&,
+        const std::vector<NN_Tensor*>&,
+        std::vector<std::vector<int32_t>>&,
+        std::vector<bool>&));
+    MOCK_CONST_METHOD1(GetModelID, OH_NN_ReturnCode(uint32_t&));
+    MOCK_METHOD2(GetInputDimRanges, OH_NN_ReturnCode(std::vector<std::vector<uint32_t>>&,
+        std::vector<std::vector<uint32_t>>&));
+};
+
+class MockInnerModel : public InnerModel {
+public:
+    MOCK_CONST_METHOD0(IsBuild, bool());
+    MOCK_METHOD2(BuildFromLiteGraph, OH_NN_ReturnCode(const mindspore::lite::LiteGraph*,
+        const ExtensionConfig&));
+    MOCK_METHOD2(BuildFromMetaGraph, OH_NN_ReturnCode(const void*, const ExtensionConfig&));
+    MOCK_METHOD1(AddTensor, OH_NN_ReturnCode(const OH_NN_Tensor&));
+    MOCK_METHOD1(AddTensorDesc, OH_NN_ReturnCode(const NN_TensorDesc*));
+    MOCK_METHOD2(SetTensorQuantParam, OH_NN_ReturnCode(uint32_t, const NN_QuantParam*));
+    MOCK_METHOD2(SetTensorType, OH_NN_ReturnCode(uint32_t, OH_NN_TensorType));
+    MOCK_METHOD3(SetTensorValue, OH_NN_ReturnCode(uint32_t, const void*, size_t));
+    MOCK_METHOD4(AddOperation, OH_NN_ReturnCode(OH_NN_OperationType,
+        const OH_NN_UInt32Array&,
+        const OH_NN_UInt32Array&,
+        const OH_NN_UInt32Array&));
+    MOCK_METHOD3(GetSupportedOperations, OH_NN_ReturnCode(size_t, const bool**, uint32_t&));
+    MOCK_METHOD2(SpecifyInputsAndOutputs, OH_NN_ReturnCode(const OH_NN_UInt32Array&, const OH_NN_UInt32Array&));
+    MOCK_METHOD4(SetInputsAndOutputsInfo, OH_NN_ReturnCode(const OH_NN_TensorInfo*, size_t,
+        const OH_NN_TensorInfo*, size_t));
+    MOCK_METHOD0(Build, OH_NN_ReturnCode());
+    MOCK_CONST_METHOD0(GetInputTensors, std::vector<std::shared_ptr<NNTensor>>());
+    MOCK_CONST_METHOD0(GetOutputTensors, std::vector<std::shared_ptr<NNTensor>>());
+    MOCK_CONST_METHOD0(GetInputTensorDescs, std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>>());
+    MOCK_CONST_METHOD0(GetOutputTensorDescs, std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>>());
+    MOCK_CONST_METHOD0(GetLiteGraphs, std::shared_ptr<mindspore::lite::LiteGraph>());
+    MOCK_CONST_METHOD0(GetMetaGraph, void*());
+    MOCK_CONST_METHOD0(GetExtensionConfig, ExtensionConfig());
+};
+
+OH_NN_ReturnCode NNCompilerTest::BuildModel(InnerModel& innerModel)
+{
+    // Add the first input tensor of the Add operator: float32, shape [1, 2, 2, 3]
+    int32_t inputDims[4] = {1, 2, 2, 3};
+    OH_NN_Tensor input1 = {OH_NN_FLOAT32, 4, inputDims, nullptr, OH_NN_TENSOR};
+    OH_NN_ReturnCode ret = innerModel.AddTensor(input1);
+    if (ret != OH_NN_SUCCESS) {
+        return ret;
+    }
+
+    // Add the second input tensor of the Add operator: float32, shape [1, 2, 2, 3]
+    OH_NN_Tensor input2 = {OH_NN_FLOAT32, 4, inputDims, nullptr, OH_NN_TENSOR};
+    ret = innerModel.AddTensor(input2);
+    if (ret != OH_NN_SUCCESS) {
+        return ret;
+    }
+
+    // Add the parameter tensor of the Add operator; it selects the activation type and holds int8 data.
+    int32_t activationDims = 1;
+    int8_t activationValue = OH_NN_FUSED_NONE;
+    OH_NN_Tensor activation = {OH_NN_INT8, 1, &activationDims, nullptr, OH_NN_ADD_ACTIVATIONTYPE};
+    ret = innerModel.AddTensor(activation);
+    if (ret != OH_NN_SUCCESS) {
+        return ret;
+    }
+
+    // Set the activation type to OH_NN_FUSED_NONE, i.e. the operator applies no activation function.
+    uint32_t index = 2;
+    ret = innerModel.SetTensorValue(index, &activationValue, sizeof(int8_t));
+    if (ret != OH_NN_SUCCESS) {
+        return ret;
+    }
+
+    // Set the output of the Add operator: float32, shape [1, 2, 2, 3]
+    OH_NN_Tensor output = {OH_NN_FLOAT32, 4, inputDims, nullptr, OH_NN_TENSOR};
+    ret = innerModel.AddTensor(output);
+    if (ret != OH_NN_SUCCESS) {
+        return ret;
+    }
+
+    // Specify the input, parameter, and output indices of the Add operator
+    uint32_t inputIndicesValues[2] = {0, 1};
+    uint32_t paramIndicesValues = 2;
+    uint32_t outputIndicesValues = 3;
+    OH_NN_UInt32Array paramIndices = {&paramIndicesValues, 1};
+    OH_NN_UInt32Array inputIndices = {inputIndicesValues, 2};
+    OH_NN_UInt32Array outputIndices = {&outputIndicesValues, 1};
+
+    // Add the Add operation to the model instance
+    ret = innerModel.AddOperation(OH_NN_OPS_ADD, paramIndices, inputIndices, outputIndices);
+    if (ret != OH_NN_SUCCESS) {
+        return ret;
+    }
+
+    // Specify the model's input and output indices
+    ret = innerModel.SpecifyInputsAndOutputs(inputIndices, outputIndices);
+    if (ret != OH_NN_SUCCESS) {
+        return ret;
+    }
+
+    // Finish building the model instance
+    ret = innerModel.Build();
+    if (ret != OH_NN_SUCCESS) {
+        return ret;
+    }
+
+    return ret;
+}
+
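The tests below walk NNCompiler through construction, option setting, Build, caching, and executor creation. Condensed, the core lifecycle they exercise looks like this sketch (mock device in place of real hardware; error handling elided):

// Hedged lifecycle sketch mirroring the tests that follow.
InnerModel innerModel;
BuildModel(innerModel);   // the single-Add model defined above

std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
NNCompiler* compiler = new (std::nothrow) NNCompiler(&innerModel, device, 1);

if (compiler != nullptr && compiler->Build() == OH_NN_SUCCESS) {
    NNExecutor* executor = compiler->CreateExecutor(); // non-null after a successful Build
    delete executor;
}
delete compiler;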
@@ -77,13 +193,1004 @@ public:
 /**
  * @tc.name: nncompilertest_construct_001
  * @tc.desc: Verify that NNCompiler can be constructed from a built model, a device, and a backend ID.
  * @tc.type: FUNC
  */
 HWTEST_F(NNCompilerTest, nncompilertest_construct_001, TestSize.Level0)
 {
+    LOGE("NNCompiler nncompilertest_construct_001");
+    size_t backendID = 1;
+    InnerModel innerModel;
+    BuildModel(innerModel);
+    void* model = &innerModel;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(model, device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_construct_002
+ * @tc.desc: Verify that NNCompiler can be constructed from a device and a backend ID without a model.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_construct_002, TestSize.Level0)
+{
+    LOGE("NNCompiler nncompilertest_construct_002");
+    size_t backendID = 1;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_getbackendid_001
+ * @tc.desc: Verify that GetBackendID returns the nonzero backend ID passed at construction.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_getbackendid_001, TestSize.Level0)
+{
+    LOGE("GetBackendID nncompilertest_getbackendid_001");
+    size_t backendID = 1;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    size_t ret = nncompiler->GetBackendID();
+    EXPECT_NE(0, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_setcachedir_001
+ * @tc.desc: Verify that SetCacheDir returns OH_NN_OPERATION_FORBIDDEN when the compiler holds no device.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_setcachedir_001, TestSize.Level0)
+{
+    LOGE("SetCacheDir nncompilertest_setcachedir_001");
+    size_t backendID = 1;
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(nullptr, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    std::string cacheModelPath = "mock";
+    uint32_t version = 0;
+    OH_NN_ReturnCode ret = nncompiler->SetCacheDir(cacheModelPath, version);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+}
+
+/**
+ * @tc.name: nncompilertest_setcachedir_002
+ * @tc.desc: Verify that SetCacheDir returns OH_NN_OPERATION_FORBIDDEN when querying model-cache support fails.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_setcachedir_002, TestSize.Level0)
+{
+    LOGE("SetCacheDir nncompilertest_setcachedir_002");
+    size_t backendID = 1;
+
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    EXPECT_CALL(*((MockIDevice *) device.get()), IsModelCacheSupported(::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_OPERATION_FORBIDDEN));
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    std::string cacheModelPath = "mock";
+    uint32_t version = 0;
+    OH_NN_ReturnCode ret = nncompiler->SetCacheDir(cacheModelPath, version);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_setcachedir_003
+ * @tc.desc: Verify that SetCacheDir returns OH_NN_OPERATION_FORBIDDEN when the device reports model caching unsupported.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_setcachedir_003, TestSize.Level0)
+{
+    LOGE("SetCacheDir nncompilertest_setcachedir_003");
+    size_t backendID = 1;
+
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    EXPECT_CALL(*((MockIDevice *) device.get()), IsModelCacheSupported(::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_SUCCESS));
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    std::string cacheModelPath = "mock";
+    uint32_t version = 0;
+    OH_NN_ReturnCode ret = nncompiler->SetCacheDir(cacheModelPath, version);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_setcachedir_004
+ * @tc.desc: Verify that SetCacheDir succeeds when the device reports model caching supported.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_setcachedir_004, TestSize.Level0)
+{
+    LOGE("SetCacheDir nncompilertest_setcachedir_004");
+    size_t backendID = 1;
+
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    EXPECT_CALL(*((MockIDevice *) device.get()), IsModelCacheSupported(::testing::_))
+        .WillOnce(Invoke([](bool& isSupportedCache) {
+            // Directly modify the reference parameter passed in
+            isSupportedCache = true;
+            return OH_NN_SUCCESS; // assume a success return code
+        }));
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    std::string cacheModelPath = "mock";
+    uint32_t version = 0;
+    OH_NN_ReturnCode ret = nncompiler->SetCacheDir(cacheModelPath, version);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_setperformance_001
+ * @tc.desc: Verify that SetPerformance accepts OH_NN_PERFORMANCE_NONE on a device-backed compiler.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_setperformance_001, TestSize.Level0)
+{
+    LOGE("SetPerformance nncompilertest_setperformance_001");
+    size_t backendID = 1;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    OH_NN_PerformanceMode performance = OH_NN_PERFORMANCE_NONE;
+    OH_NN_ReturnCode ret = nncompiler->SetPerformance(performance);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_setperformance_002
+ * @tc.desc: Verify that SetPerformance returns OH_NN_OPERATION_FORBIDDEN when the compiler holds no device.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_setperformance_002, TestSize.Level0)
+{
+    LOGE("SetPerformance nncompilertest_setperformance_002");
+    size_t backendID = 1;
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(nullptr, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    OH_NN_PerformanceMode performance = OH_NN_PERFORMANCE_NONE;
+    OH_NN_ReturnCode ret = nncompiler->SetPerformance(performance);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+}
+
+/**
+ * @tc.name: nncompilertest_setperformance_003
+ * @tc.desc: Verify that SetPerformance returns OH_NN_FAILED when querying performance-mode support fails.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_setperformance_003, TestSize.Level0)
+{
+    LOGE("SetPerformance nncompilertest_setperformance_003");
+    size_t backendID = 1;
+
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    EXPECT_CALL(*((MockIDevice *) device.get()), IsPerformanceModeSupported(::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    OH_NN_PerformanceMode performance = OH_NN_PERFORMANCE_NONE;
+    OH_NN_ReturnCode ret = nncompiler->SetPerformance(performance);
+    EXPECT_EQ(OH_NN_FAILED, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_setperformance_004
+ * @tc.desc: Verify that SetPerformance returns OH_NN_OPERATION_FORBIDDEN when the device reports performance modes unsupported.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_setperformance_004, TestSize.Level0)
+{
+    LOGE("SetPerformance nncompilertest_setperformance_004");
+    size_t backendID = 1;
+
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    EXPECT_CALL(*((MockIDevice *) device.get()), IsPerformanceModeSupported(::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_SUCCESS));
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    OH_NN_PerformanceMode performance = OH_NN_PERFORMANCE_LOW;
+    OH_NN_ReturnCode ret = nncompiler->SetPerformance(performance);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_setpriority_001
+ * @tc.desc: Verify that SetPriority accepts OH_NN_PRIORITY_NONE on a device-backed compiler.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_setpriority_001, TestSize.Level0)
+{
+    LOGE("SetPriority nncompilertest_setpriority_001");
+    size_t backendID = 1;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    OH_NN_Priority priority = OH_NN_PRIORITY_NONE;
+    OH_NN_ReturnCode ret = nncompiler->SetPriority(priority);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_setpriority_002
+ * @tc.desc: Verify that SetPriority returns OH_NN_OPERATION_FORBIDDEN when the compiler holds no device.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_setpriority_002, TestSize.Level0)
+{
+    LOGE("SetPriority nncompilertest_setpriority_002");
+    size_t backendID = 1;
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(nullptr, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    OH_NN_Priority priority = OH_NN_PRIORITY_NONE;
+    OH_NN_ReturnCode ret = nncompiler->SetPriority(priority);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+}
+
+/**
+ * @tc.name: nncompilertest_setpriority_003
+ * @tc.desc: Verify that SetPriority returns OH_NN_FAILED when querying priority support fails.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_setpriority_003, TestSize.Level0)
+{
+    LOGE("SetPriority nncompilertest_setpriority_003");
+    size_t backendID = 1;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    EXPECT_CALL(*((MockIDevice *) device.get()), IsPrioritySupported(::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    OH_NN_Priority priority = OH_NN_PRIORITY_NONE;
+    OH_NN_ReturnCode ret = nncompiler->SetPriority(priority);
+    EXPECT_EQ(OH_NN_FAILED, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_setpriority_004
+ * @tc.desc: Verify that SetPriority returns OH_NN_OPERATION_FORBIDDEN when the device reports priorities unsupported.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_setpriority_004, TestSize.Level0)
+{
+    LOGE("SetPriority nncompilertest_setpriority_004");
+    size_t backendID = 1;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    EXPECT_CALL(*((MockIDevice *) device.get()), IsPrioritySupported(::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_SUCCESS));
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    OH_NN_Priority priority = OH_NN_PRIORITY_LOW;
+    OH_NN_ReturnCode ret = nncompiler->SetPriority(priority);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_setenablefp16_001
+ * @tc.desc: Verify that SetEnableFp16 returns OH_NN_OPERATION_FORBIDDEN when the compiler holds no device.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_setenablefp16_001, TestSize.Level0)
+{
+    LOGE("SetEnableFp16 nncompilertest_setenablefp16_001");
+    size_t backendID = 1;
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(nullptr, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    bool isFp16 = true;
+    OH_NN_ReturnCode ret = nncompiler->SetEnableFp16(isFp16);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+}
+
+/**
+ * @tc.name: nncompilertest_setenablefp16_002
+ * @tc.desc: Verify that SetEnableFp16 returns OH_NN_FAILED when querying float16 support fails.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_setenablefp16_002, TestSize.Level0)
+{
+    LOGE("SetEnableFp16 nncompilertest_setenablefp16_002");
     size_t backendID = 1;
     std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    EXPECT_CALL(*((MockIDevice *) device.get()), IsFloat16PrecisionSupported(::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
 
     NNCompiler* nncompiler = new (std::nothrow) NNCompiler(device, backendID);
     EXPECT_NE(nullptr, nncompiler);
 
+    bool isFp16 = true;
+    OH_NN_ReturnCode ret = nncompiler->SetEnableFp16(isFp16);
+    EXPECT_EQ(OH_NN_FAILED, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_setenablefp16_003
+ * @tc.desc: Verify that SetEnableFp16 returns OH_NN_OPERATION_FORBIDDEN when the device reports float16 unsupported.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_setenablefp16_003, TestSize.Level0)
+{
+    LOGE("SetEnableFp16 nncompilertest_setenablefp16_003");
+    size_t backendID = 1;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    EXPECT_CALL(*((MockIDevice *) device.get()), IsFloat16PrecisionSupported(::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_SUCCESS));
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    bool isFp16 = true;
+    OH_NN_ReturnCode ret = nncompiler->SetEnableFp16(isFp16);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_setenablefp16_004
+ * @tc.desc: Verify that SetEnableFp16 succeeds when float16 is disabled.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_setenablefp16_004, TestSize.Level0)
+{
+    LOGE("SetEnableFp16 nncompilertest_setenablefp16_004");
+    size_t backendID = 1;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    EXPECT_CALL(*((MockIDevice *) device.get()), IsPrioritySupported(::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_SUCCESS));
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    bool isFp16 = false;
+    OH_NN_ReturnCode ret = nncompiler->SetEnableFp16(isFp16);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_isbuild_001
+ * @tc.desc: Verify that IsBuild returns false before Build is called.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_isbuild_001, TestSize.Level0)
+{
+    LOGE("IsBuild nncompilertest_isbuild_001");
+    size_t backendID = 1;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    bool ret = nncompiler->IsBuild();
+    EXPECT_EQ(false, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_build_001
+ * @tc.desc: Verify that Build returns OH_NN_OPERATION_FORBIDDEN when the compiler holds neither model nor device.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_build_001, TestSize.Level0)
+{
+    LOGE("Build nncompilertest_build_001");
+    size_t backendID = 1;
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(nullptr, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    OH_NN_ReturnCode ret = nncompiler->Build();
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+}
+
+/**
+ * @tc.name: nncompilertest_build_002
+ * @tc.desc: Verify that Build returns OH_NN_INVALID_PARAMETER when a device is set but no model was provided.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_build_002, TestSize.Level0)
+{
+    LOGE("Build nncompilertest_build_002");
+    size_t backendID = 1;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    OH_NN_ReturnCode ret = nncompiler->Build();
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_build_003
+ * @tc.desc: Verify that Build succeeds on a built model and that a second Build is forbidden.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_build_003, TestSize.Level0)
+{
+    LOGE("Build nncompilertest_build_003");
+    size_t backendID = 1;
+    InnerModel innerModel;
+    BuildModel(innerModel);
+    void* model = &innerModel;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(model, device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    OH_NN_ReturnCode ret = nncompiler->Build();
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+
+    OH_NN_ReturnCode retBuild = nncompiler->Build();
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, retBuild);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_build_004
+ * @tc.desc: Verify that Build returns OH_NN_INVALID_PARAMETER when the inner model was never built.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_build_004, TestSize.Level0)
+{
+    LOGE("Build nncompilertest_build_004");
+    size_t backendID = 1;
+    InnerModel innerModel;
+    void* model = &innerModel;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(model, device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    OH_NN_ReturnCode ret = nncompiler->Build();
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_build_005
+ * @tc.desc: Verify that Build returns OH_NN_INVALID_PARAMETER when the cache version is UINT32_MAX.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_build_005, TestSize.Level0)
+{
+    LOGE("Build nncompilertest_build_005");
+    size_t backendID = 1;
+    InnerModel innerModel;
+    BuildModel(innerModel);
+    void* model = &innerModel;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    EXPECT_CALL(*((MockIDevice *) device.get()), IsModelCacheSupported(::testing::_))
+        .WillOnce(Invoke([](bool& isSupportedCache) {
+            // Directly modify the reference parameter passed in
+            isSupportedCache = true;
+            return OH_NN_SUCCESS; // assume a success return code
+        }));
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(model, device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    std::string cacheModelPath = "mock";
+    uint32_t version = UINT32_MAX;
+    OH_NN_ReturnCode retSetCacheDir = nncompiler->SetCacheDir(cacheModelPath, version);
+    EXPECT_EQ(OH_NN_SUCCESS, retSetCacheDir);
+
+    OH_NN_ReturnCode ret = nncompiler->Build();
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_build_006
+ * @tc.desc: Verify that Build returns OH_NN_FAILED when the configured cache directory is invalid.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_build_006, TestSize.Level0)
+{
+    LOGE("Build nncompilertest_build_006");
+    size_t backendID = 1;
+    InnerModel innerModel;
+    BuildModel(innerModel);
+    void* model = &innerModel;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    EXPECT_CALL(*((MockIDevice *) device.get()), IsModelCacheSupported(::testing::_))
+        .WillOnce(Invoke([](bool& isSupportedCache) {
+            // Directly modify the reference parameter passed in
+            isSupportedCache = true;
+            return OH_NN_SUCCESS; // assume a success return code
+        }));
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(model, device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    std::string cacheModelPath = "mock";
+    uint32_t version = 0;
+    OH_NN_ReturnCode retSetCacheDir = nncompiler->SetCacheDir(cacheModelPath, version);
+    EXPECT_EQ(OH_NN_SUCCESS, retSetCacheDir);
+
+    OH_NN_ReturnCode ret = nncompiler->Build();
+    EXPECT_EQ(OH_NN_FAILED, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_build_007
+ * @tc.desc: Verify that a plain Build without caching succeeds on a built model.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_build_007, TestSize.Level0)
+{
+    LOGE("Build nncompilertest_build_007");
+    size_t backendID = 1;
+    InnerModel innerModel;
+    BuildModel(innerModel);
+    void* model = &innerModel;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(model, device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    OH_NN_ReturnCode ret = nncompiler->Build();
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_savetocachefile_001
+ * @tc.desc: Verify that SaveToCacheFile returns OH_NN_INVALID_PARAMETER when no cache directory was set.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_savetocachefile_001, TestSize.Level0)
+{
+    LOGE("SaveToCacheFile nncompilertest_savetocachefile_001");
+    size_t backendID = 1;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    OH_NN_ReturnCode ret = nncompiler->SaveToCacheFile();
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_savetocachefile_002
+ * @tc.desc: Verify that SaveToCacheFile returns OH_NN_INVALID_PARAMETER when the cache version is UINT32_MAX.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_savetocachefile_002, TestSize.Level0)
+{
+    LOGE("SaveToCacheFile nncompilertest_savetocachefile_002");
+    size_t backendID = 1;
+
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    EXPECT_CALL(*((MockIDevice *) device.get()), IsModelCacheSupported(::testing::_))
+        .WillOnce(Invoke([](bool& isSupportedCache) {
+            // Directly modify the reference parameter passed in
+            isSupportedCache = true;
+            return OH_NN_SUCCESS; // assume a success return code
+        }));
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    std::string cacheModelPath = "mock";
+    uint32_t version = UINT32_MAX;
+    OH_NN_ReturnCode retSetCacheDir = nncompiler->SetCacheDir(cacheModelPath, version);
+    EXPECT_EQ(OH_NN_SUCCESS, retSetCacheDir);
+
+    OH_NN_ReturnCode retSave = nncompiler->SaveToCacheFile();
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, retSave);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_savetocachefile_003
+ * @tc.desc: Verify that SaveToCacheFile returns OH_NN_FAILED when no model has been built yet.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_savetocachefile_003, TestSize.Level0)
+{
+    LOGE("SaveToCacheFile nncompilertest_savetocachefile_003");
+    size_t backendID = 1;
+
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    EXPECT_CALL(*((MockIDevice *) device.get()), IsModelCacheSupported(::testing::_))
+        .WillOnce(Invoke([](bool& isSupportedCache) {
+            // Directly modify the reference parameter passed in
+            isSupportedCache = true;
+            return OH_NN_SUCCESS; // assume a success return code
+        }));
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    std::string cacheModelPath = "mock";
+    uint32_t version = 0;
+    OH_NN_ReturnCode retSetCacheDir = nncompiler->SetCacheDir(cacheModelPath, version);
+    EXPECT_EQ(OH_NN_SUCCESS, retSetCacheDir);
+
+    OH_NN_ReturnCode retSave = nncompiler->SaveToCacheFile();
+    EXPECT_EQ(OH_NN_FAILED, retSave);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_savetocachefile_004
+ * @tc.desc: Verify that SaveToCacheFile returns OH_NN_FAILED after Build when the cache directory is invalid.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_savetocachefile_004, TestSize.Level0)
+{
+    LOGE("SaveToCacheFile nncompilertest_savetocachefile_004");
+    size_t backendID = 1;
+
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    EXPECT_CALL(*((MockIDevice *) device.get()), IsModelCacheSupported(::testing::_))
+        .WillOnce(Invoke([](bool& isSupportedCache) {
+            // Directly modify the reference parameter passed in
+            isSupportedCache = true;
+            return OH_NN_SUCCESS; // assume a success return code
+        }));
+
+    InnerModel innerModel;
+    BuildModel(innerModel);
+    void* model = &innerModel;
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(model, device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    OH_NN_ReturnCode retBuild = nncompiler->Build();
+    EXPECT_EQ(OH_NN_SUCCESS, retBuild);
+
+    std::string cacheModelPath = "mock";
+    uint32_t version = 0;
+    OH_NN_ReturnCode retSetCacheDir = nncompiler->SetCacheDir(cacheModelPath, version);
+    EXPECT_EQ(OH_NN_SUCCESS, retSetCacheDir);
+
+    OH_NN_ReturnCode retSave = nncompiler->SaveToCacheFile();
+    EXPECT_EQ(OH_NN_FAILED, retSave);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_restorefromcachefile_001
+ * @tc.desc: Verify that RestoreFromCacheFile returns OH_NN_INVALID_PARAMETER when no cache directory was set.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_restorefromcachefile_001, TestSize.Level0)
+{
+    LOGE("RestoreFromCacheFile nncompilertest_restorefromcachefile_001");
+    size_t backendID = 1;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    OH_NN_ReturnCode ret = nncompiler->RestoreFromCacheFile();
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_restorefromcachefile_002
+ * @tc.desc: Verify that RestoreFromCacheFile returns OH_NN_INVALID_PARAMETER when the cache version is UINT32_MAX.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_restorefromcachefile_002, TestSize.Level0)
+{
+    LOGE("RestoreFromCacheFile nncompilertest_restorefromcachefile_002");
+    size_t backendID = 1;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    EXPECT_CALL(*((MockIDevice *) device.get()), IsModelCacheSupported(::testing::_))
+        .WillOnce(Invoke([](bool& isSupportedCache) {
+            // Directly modify the reference parameter passed in
+            isSupportedCache = true;
+            return OH_NN_SUCCESS; // assume a success return code
+        }));
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    std::string cacheModelPath = "mock";
+    uint32_t version = UINT32_MAX;
+    OH_NN_ReturnCode retSetCacheDir = nncompiler->SetCacheDir(cacheModelPath, version);
+    EXPECT_EQ(OH_NN_SUCCESS, retSetCacheDir);
+
+    OH_NN_ReturnCode ret = nncompiler->RestoreFromCacheFile();
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_restorefromcachefile_003
+ * @tc.desc: Verify that RestoreFromCacheFile returns OH_NN_INVALID_PARAMETER when the cache directory holds no cache files.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_restorefromcachefile_003, TestSize.Level0)
+{
+    LOGE("RestoreFromCacheFile nncompilertest_restorefromcachefile_003");
+    size_t backendID = 1;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    std::shared_ptr<PreparedModel> prepared = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIDevice *) device.get()), IsModelCacheSupported(::testing::_))
+        .WillOnce(Invoke([](bool& isSupportedCache) {
+            // Directly modify the reference parameter passed in
+            isSupportedCache = true;
+            return OH_NN_SUCCESS;
+        }));
+
+    InnerModel innerModel;
+    BuildModel(innerModel);
+    void* model = &innerModel;
+    EXPECT_CALL(*((MockIDevice *) device.get()),
+        PrepareModel(testing::A<std::shared_ptr<const mindspore::lite::LiteGraph>>(), ::testing::_, ::testing::_))
+        .WillOnce(Invoke([&prepared](std::shared_ptr<const mindspore::lite::LiteGraph> model,
+                                     const ModelConfig& config,
+                                     std::shared_ptr<PreparedModel>& preparedModel) {
+            preparedModel = prepared;
+            return OH_NN_SUCCESS;
+        }));
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(model, device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    OH_NN_ReturnCode retBuild = nncompiler->Build();
+    EXPECT_EQ(OH_NN_SUCCESS, retBuild);
+
+    std::string cacheModelPath = "/data/data";
+    uint32_t version = UINT32_MAX;
+    OH_NN_ReturnCode retSetCacheDir = nncompiler->SetCacheDir(cacheModelPath, version);
+    EXPECT_EQ(OH_NN_SUCCESS, retSetCacheDir);
+
+    OH_NN_ReturnCode ret = nncompiler->RestoreFromCacheFile();
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+
+    testing::Mock::AllowLeak(device.get());
+    testing::Mock::AllowLeak(prepared.get());
+}
+
+/**
+ * @tc.name: nncompilertest_savetocachebuffer_001
+ * @tc.desc: Verify that SaveToCacheBuffer returns OH_NN_UNSUPPORTED.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_savetocachebuffer_001, TestSize.Level0)
+{
+    LOGE("SaveToCacheBuffer nncompilertest_savetocachebuffer_001");
+    size_t backendID = 1;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    size_t length = 10;
+    size_t* modelSize = &length;
+    InnerModel innerModel;
+    BuildModel(innerModel);
+    void* model = &innerModel;
+    OH_NN_ReturnCode ret = nncompiler->SaveToCacheBuffer(model, length, modelSize);
+    EXPECT_EQ(OH_NN_UNSUPPORTED, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_restorefromcachebuffer_001
+ * @tc.desc: Verify that RestoreFromCacheBuffer returns OH_NN_UNSUPPORTED.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_restorefromcachebuffer_001, TestSize.Level0)
+{
+    LOGE("RestoreFromCacheBuffer nncompilertest_restorefromcachebuffer_001");
+    size_t backendID = 1;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    size_t length = 10;
+    InnerModel innerModel;
+    BuildModel(innerModel);
+    void* model = &innerModel;
+    OH_NN_ReturnCode ret = nncompiler->RestoreFromCacheBuffer(model, length);
+    EXPECT_EQ(OH_NN_UNSUPPORTED, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_setextensionconfig_001
+ * @tc.desc: Verify that SetExtensionConfig accepts an empty configuration map.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_setextensionconfig_001, TestSize.Level0)
+{
+    LOGE("SetExtensionConfig nncompilertest_setextensionconfig_001");
+    size_t backendID = 1;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    std::unordered_map<std::string, std::vector<char>> configs;
+    OH_NN_ReturnCode ret = nncompiler->SetExtensionConfig(configs);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_setoptions_001
+ * @tc.desc: Verify that SetOptions returns OH_NN_UNSUPPORTED.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_setoptions_001, TestSize.Level0)
+{
+    LOGE("SetOptions nncompilertest_setoptions_001");
+    size_t backendID = 1;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    std::vector<std::shared_ptr<void>> options;
+    OH_NN_ReturnCode ret = nncompiler->SetOptions(options);
+    EXPECT_EQ(OH_NN_UNSUPPORTED, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_createexecutor_001
+ * @tc.desc: Verify that CreateExecutor returns nullptr when no model was built.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_createexecutor_001, TestSize.Level0)
+{
+    LOGE("CreateExecutor nncompilertest_createexecutor_001");
+    size_t backendID = 1;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    NNExecutor* ret = nncompiler->CreateExecutor();
+    EXPECT_EQ(nullptr, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nncompilertest_createexecutor_002
+ * @tc.desc: Verify that CreateExecutor returns nullptr when the compiler holds no device.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_createexecutor_002, TestSize.Level0)
+{
+    LOGE("CreateExecutor nncompilertest_createexecutor_002");
+    size_t backendID = 1;
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(nullptr, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    NNExecutor* ret = nncompiler->CreateExecutor();
+    EXPECT_EQ(nullptr, ret);
+}
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNCompilerTest, nncompilertest_createexecutor_003, TestSize.Level0)
+{
+    LOGE("CreateExecutor nncompilertest_createexecutor_003");
+    size_t backendID = 1;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    std::shared_ptr<MockIPreparedModel> prepared = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIDevice *) device.get()), IsModelCacheSupported(::testing::_))
+        .WillOnce(Invoke([](bool& isSupportedCache) {
+            // Modify the referenced output parameter directly.
+            isSupportedCache = true;
+            return OH_NN_SUCCESS; // Simulate a successful return code.
+        }));
+
+    InnerModel innerModel;
+    BuildModel(innerModel);
+    void* model = &innerModel;
+    EXPECT_CALL(*((MockIDevice *) device.get()),
+        PrepareModel(testing::A<std::shared_ptr<const mindspore::lite::LiteGraph>>(), ::testing::_, ::testing::_))
+        .WillOnce(Invoke([&prepared](std::shared_ptr<const mindspore::lite::LiteGraph> model,
+                                     const ModelConfig& config,
+                                     std::shared_ptr<PreparedModel>& preparedModel) {
+            // Hand back the mock prepared model through the output parameter.
+            preparedModel = prepared;
+            return OH_NN_SUCCESS; // Simulate a successful return code.
+        }));
+
+    NNCompiler* nncompiler = new (std::nothrow) NNCompiler(model, device, backendID);
+    EXPECT_NE(nullptr, nncompiler);
+
+    OH_NN_ReturnCode retBuild = nncompiler->Build();
+    EXPECT_EQ(OH_NN_SUCCESS, retBuild);
+
+    NNExecutor* ret = nncompiler->CreateExecutor();
+    EXPECT_NE(nullptr, ret);
+
+    delete nncompiler;
+    nncompiler = nullptr;
+    testing::Mock::AllowLeak(device.get());
+    testing::Mock::AllowLeak(prepared.get());
+}
 } // namespace UnitTest
 } // namespace NeuralNetworkRuntime
diff --git a/test/unittest/components/nn_executor/nn_executor_test.cpp b/test/unittest/components/nn_executor/nn_executor_test.cpp
index 40e2d3b3d20a8ba25396f902480003063f78d725..aae188545795b2f7ca9cb504500bcb3d4cab55e2 100644
--- a/test/unittest/components/nn_executor/nn_executor_test.cpp
+++ b/test/unittest/components/nn_executor/nn_executor_test.cpp
@@ -193,7 +193,7 @@ HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_001, TestSize.Level0)
     std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
     EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()),
         GetInputDimRanges(::testing::_, ::testing::_))
-        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
@@ -224,7 +224,7 @@ HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_002, TestSize.Level0)
     std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
     EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()),
         GetInputDimRanges(::testing::_, ::testing::_))
-        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
@@ -253,7 +253,7 @@ HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_003, TestSize.Level0)
     std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
     EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()),
         GetInputDimRanges(::testing::_, ::testing::_))
-        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
@@ -282,7 +282,7 @@ HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_004, TestSize.Level0)
     std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
     EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()),
GetInputDimRanges(::testing::_, ::testing::_)) - .WillRepeatedly(::testing::Return(OH_NN_FAILED)); + .WillRepeatedly(::testing::Return(OH_NN_FAILED)); std::vector, OH_NN_TensorType>> m_inputTensorDescs; std::vector, OH_NN_TensorType>> m_outputTensorDescs; NNExecutor* nnExecutor = new (std::nothrow) NNExecutor( @@ -312,7 +312,7 @@ HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_005, TestSize.Level0) std::shared_ptr mockIPreparedMode = std::make_shared(); EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_)) - .WillRepeatedly(::testing::Return(OH_NN_SUCCESS)); + .WillRepeatedly(::testing::Return(OH_NN_SUCCESS)); std::vector, OH_NN_TensorType>> m_inputTensorDescs; std::vector, OH_NN_TensorType>> m_outputTensorDescs; NNExecutor* nnExecutor = new (std::nothrow) NNExecutor( @@ -1202,7 +1202,7 @@ HWTEST_F(NNExecutorTest, nnexecutortest_setinput_001, TestSize.Level0) std::shared_ptr m_device {nullptr}; std::shared_ptr mockIPreparedMode = std::make_shared(); EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_)) - .WillRepeatedly(::testing::Return(OH_NN_FAILED)); + .WillRepeatedly(::testing::Return(OH_NN_FAILED)); std::vector, OH_NN_TensorType>> m_inputTensorDescs; std::vector, OH_NN_TensorType>> m_outputTensorDescs; NNExecutor* nnExecutor = new (std::nothrow) NNExecutor( @@ -1230,7 +1230,7 @@ HWTEST_F(NNExecutorTest, nnexecutortest_setinput_002, TestSize.Level0) std::shared_ptr m_device {nullptr}; std::shared_ptr mockIPreparedMode = std::make_shared(); EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_)) - .WillRepeatedly(::testing::Return(OH_NN_OPERATION_FORBIDDEN)); + .WillRepeatedly(::testing::Return(OH_NN_OPERATION_FORBIDDEN)); std::vector, OH_NN_TensorType>> m_inputTensorDescs; std::vector, OH_NN_TensorType>> m_outputTensorDescs; @@ -1264,7 +1264,7 @@ HWTEST_F(NNExecutorTest, nnexecutortest_setinput_003, TestSize.Level0) std::shared_ptr m_device {nullptr}; std::shared_ptr mockIPreparedMode = std::make_shared(); EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_)) - .WillRepeatedly(::testing::Return(OH_NN_OPERATION_FORBIDDEN)); + .WillRepeatedly(::testing::Return(OH_NN_OPERATION_FORBIDDEN)); std::vector, OH_NN_TensorType>> m_inputTensorDescs; std::vector, OH_NN_TensorType>> m_outputTensorDescs; @@ -1305,7 +1305,7 @@ HWTEST_F(NNExecutorTest, nnexecutortest_setinputfrommemory_001, TestSize.Level0) std::shared_ptr m_device {nullptr}; std::shared_ptr mockIPreparedMode = std::make_shared(); EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_)) - .WillRepeatedly(::testing::Return(OH_NN_FAILED)); + .WillRepeatedly(::testing::Return(OH_NN_FAILED)); std::vector, OH_NN_TensorType>> m_inputTensorDescs; std::vector, OH_NN_TensorType>> m_outputTensorDescs; NNExecutor* nnExecutor = new (std::nothrow) NNExecutor( @@ -1333,7 +1333,7 @@ HWTEST_F(NNExecutorTest, nnexecutortest_setinputfrommemory_002, TestSize.Level0) std::shared_ptr m_device {nullptr}; std::shared_ptr mockIPreparedMode = std::make_shared(); EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_)) - .WillRepeatedly(::testing::Return(OH_NN_FAILED)); + .WillRepeatedly(::testing::Return(OH_NN_FAILED)); std::vector, OH_NN_TensorType>> m_inputTensorDescs; std::vector, OH_NN_TensorType>> m_outputTensorDescs; @@ 
-1366,7 +1366,7 @@ HWTEST_F(NNExecutorTest, nnexecutortest_setinputfrommemory_003, TestSize.Level0) std::shared_ptr m_device {nullptr}; std::shared_ptr mockIPreparedMode = std::make_shared(); EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_)) - .WillRepeatedly(::testing::Return(OH_NN_FAILED)); + .WillRepeatedly(::testing::Return(OH_NN_FAILED)); std::vector, OH_NN_TensorType>> m_inputTensorDescs; std::vector, OH_NN_TensorType>> m_outputTensorDescs; @@ -1829,6 +1829,418 @@ HWTEST_F(NNExecutorTest, nnexecutortest_destroyinputmemory_003, TestSize.Level0) testing::Mock::AllowLeak(device.get()); } + +/** + * @tc.name: nnexecutortest_createoutputmemory_001 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(NNExecutorTest, nnexecutortest_createoutputmemory_001, TestSize.Level0) +{ + LOGE("CreateOutputMemory nnexecutortest_createoutputmemory_001"); + size_t m_backendID {0}; + std::shared_ptr m_device {nullptr}; + std::shared_ptr m_preparedModel {nullptr}; + std::vector, OH_NN_TensorType>> m_inputTensorDescs; + std::vector, OH_NN_TensorType>> m_outputTensorDescs; + NNExecutor* nnExecutor = new (std::nothrow) NNExecutor( + m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)}; + OH_NN_Memory* ptr = &memoryPtr; + memory = &ptr; + + OH_NN_ReturnCode ret = nnExecutor->CreateOutputMemory(m_index, length, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: nnexecutortest_createoutputmemory_002 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(NNExecutorTest, nnexecutortest_createoutputmemory_002, TestSize.Level0) +{ + LOGE("CreateInputMemory nnexecutortest_createoutputmemory_002"); + size_t m_backendID {0}; + std::shared_ptr m_device {nullptr}; + std::shared_ptr m_preparedModel {nullptr}; + std::vector, OH_NN_TensorType>> m_inputTensorDescs; + std::vector, OH_NN_TensorType>> m_outputTensorDescs; + + std::pair, OH_NN_TensorType> pair1; + std::pair, OH_NN_TensorType> pair2; + m_outputTensorDescs.emplace_back(pair1); + m_outputTensorDescs.emplace_back(pair2); + NNExecutor* nnExecutor = new (std::nothrow) NNExecutor( + m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs); + + OH_NN_Memory** memory = nullptr; + float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)}; + OH_NN_Memory* ptr = &memoryPtr; + memory = &ptr; + size_t length = 9 * sizeof(float); + + OH_NN_ReturnCode ret = nnExecutor->CreateOutputMemory(m_index, length, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: nnexecutortest_createoutputmemory_003 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_createoutputmemory_003, TestSize.Level0)
+{
+    LOGE("CreateOutputMemory nnexecutortest_createoutputmemory_003");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
+    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
+    int32_t expectDim[2] = {3, 3};
+    int32_t* ptr = expectDim;
+    uint32_t dimensionCount = 2;
+    tensorDesr->SetShape(ptr, dimensionCount);
+    pair1.first = tensorDesr;
+    pair2.first = tensorDesr;
+    m_outputTensorDescs.emplace_back(pair1);
+    m_outputTensorDescs.emplace_back(pair2);
+
+    float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    size_t length = 9 * sizeof(float);
+    EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_outputTensorDescs[m_index].first))
+        .WillRepeatedly(::testing::Return(nullptr));
+
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    OH_NN_Memory** memory = nullptr;
+    void* const data = dataArry;
+    OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
+    OH_NN_Memory* mPtr = &memoryPtr;
+    memory = &mPtr;
+
+    OH_NN_ReturnCode ret = nnExecutor->CreateOutputMemory(m_index, length, memory);
+    EXPECT_EQ(OH_NN_MEMORY_ERROR, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nnexecutortest_createoutputmemory_004
+ * @tc.desc: Verify that CreateOutputMemory succeeds when the device allocates a tensor buffer.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_createoutputmemory_004, TestSize.Level0)
+{
+    LOGE("CreateOutputMemory nnexecutortest_createoutputmemory_004");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
+    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
+    int32_t expectDim[2] = {3, 3};
+    int32_t* ptr = expectDim;
+    uint32_t dimensionCount = 2;
+    tensorDesr->SetShape(ptr, dimensionCount);
+    pair1.first = tensorDesr;
+    pair2.first = tensorDesr;
+    m_outputTensorDescs.emplace_back(pair1);
+    m_outputTensorDescs.emplace_back(pair2);
+
+    float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    size_t length = 9 * sizeof(float);
+    EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_outputTensorDescs[m_index].first))
+        .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));
+
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    OH_NN_Memory** memory = nullptr;
+    void* const data = dataArry;
+    OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
+    OH_NN_Memory* mPtr = &memoryPtr;
+    memory = &mPtr;
+
+    OH_NN_ReturnCode ret = nnExecutor->CreateOutputMemory(m_index, length, memory);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nnexecutortest_destroyoutputmemory_001
+ * @tc.desc: Verify that DestroyOutputMemory returns OH_NN_INVALID_PARAMETER when the executor holds no output tensor descs.
+ * @tc.type: FUNC + */ +HWTEST_F(NNExecutorTest, nnexecutortest_destroyoutputmemory_001, TestSize.Level0) +{ + LOGE("DestroyOutputMemory nnexecutortest_destroyoutputmemory_001"); + size_t m_backendID {0}; + std::shared_ptr m_device {nullptr}; + std::shared_ptr m_preparedModel {nullptr}; + std::vector, OH_NN_TensorType>> m_inputTensorDescs; + std::vector, OH_NN_TensorType>> m_outputTensorDescs; + NNExecutor* nnExecutor = new (std::nothrow) NNExecutor( + m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)}; + OH_NN_Memory* ptr = &memoryPtr; + memory = &ptr; + + nnExecutor->CreateOutputMemory(m_index, length, memory); + OH_NN_ReturnCode ret = nnExecutor->DestroyOutputMemory(m_index, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: nnexecutortest_destroyoutputmemory_002 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(NNExecutorTest, nnexecutortest_destroyoutputmemory_002, TestSize.Level0) +{ + LOGE("DestroyInputMemory nnexecutortest_destroyoutputmemory_002"); + size_t m_backendID {0}; + std::shared_ptr m_device {nullptr}; + std::shared_ptr m_preparedModel {nullptr}; + std::vector, OH_NN_TensorType>> m_inputTensorDescs; + std::vector, OH_NN_TensorType>> m_outputTensorDescs; + + std::pair, OH_NN_TensorType> pair1; + std::pair, OH_NN_TensorType> pair2; + m_outputTensorDescs.emplace_back(pair1); + m_outputTensorDescs.emplace_back(pair2); + NNExecutor* nnExecutor = new (std::nothrow) NNExecutor( + m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs); + + size_t length = 9 * sizeof(float); + OH_NN_Memory** memory = nullptr; + float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* const data = dataArry; + OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)}; + OH_NN_Memory* ptr = &memoryPtr; + memory = &ptr; + + nnExecutor->CreateOutputMemory(m_index, length, memory); + OH_NN_ReturnCode ret = nnExecutor->DestroyOutputMemory(m_index, memory); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: nnexecutortest_destroyoutputmemory_003 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_destroyoutputmemory_003, TestSize.Level0)
+{
+    LOGE("DestroyOutputMemory nnexecutortest_destroyoutputmemory_003");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
+    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
+    int32_t expectDim[2] = {3, 3};
+    int32_t* ptr = expectDim;
+    uint32_t dimensionCount = 2;
+    tensorDesr->SetShape(ptr, dimensionCount);
+    pair1.first = tensorDesr;
+    pair2.first = tensorDesr;
+    m_outputTensorDescs.emplace_back(pair1);
+    m_outputTensorDescs.emplace_back(pair2);
+
+    float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    size_t length = 9 * sizeof(float);
+    EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_outputTensorDescs[m_index].first))
+        .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+
+    OH_NN_Memory** memory = nullptr;
+    void* const data = dataArry;
+    OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
+    OH_NN_Memory* mPtr = &memoryPtr;
+    memory = &mPtr;
+
+    nnExecutor->CreateOutputMemory(m_index, length, memory);
+    OH_NN_ReturnCode ret = nnExecutor->DestroyOutputMemory(m_index, memory);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/**
+ * @tc.name: nnexecutortest_run_001
+ * @tc.desc: Verify that Run returns OH_NN_SUCCESS when the executor holds no registered inputs or outputs.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNExecutorTest, nnexecutortest_run_001, TestSize.Level0)
+{
+    LOGE("Run nnexecutortest_run_001");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+
+    size_t length = 9 * sizeof(float);
+    OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
+    void* buffer = m_dataArry;
+
+    nnExecutor->SetInput(m_index, tensor, buffer, length);
+    nnExecutor->SetOutput(m_index, buffer, length);
+    OH_NN_ReturnCode ret = nnExecutor->Run();
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
+}
+
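+// With empty tensor desc lists the SetInput/SetOutput calls above are rejected up front,
+// which is presumably why an executor with nothing registered still reports OH_NN_SUCCESS
+// from Run(); the following cases add placeholder descs so Run() itself fails validation.
+/**
+ * @tc.name: nnexecutortest_run_002
+ * @tc.desc: Verify that Run returns OH_NN_INVALID_PARAMETER when the input tensor descs contain empty entries.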
+ * @tc.type: FUNC + */ +HWTEST_F(NNExecutorTest, nnexecutortest_run_002, TestSize.Level0) +{ + LOGE("Run nnexecutortest_run_002"); + size_t m_backendID {0}; + std::shared_ptr m_device {nullptr}; + std::shared_ptr mockIPreparedMode = std::make_shared(); + EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::Return(OH_NN_FAILED)); + std::vector, OH_NN_TensorType>> m_inputTensorDescs; + std::vector, OH_NN_TensorType>> m_outputTensorDescs; + + std::pair, OH_NN_TensorType> pair1; + std::pair, OH_NN_TensorType> pair2; + m_inputTensorDescs.emplace_back(pair1); + m_inputTensorDescs.emplace_back(pair2); + NNExecutor* nnExecutor = new (std::nothrow) NNExecutor( + m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs); + + size_t length = 9 * sizeof(float); + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* buffer = m_dataArry; + + nnExecutor->SetInput(m_index, tensor, buffer, length); + nnExecutor->SetOutput(m_index, buffer, length); + OH_NN_ReturnCode ret = nnExecutor->Run(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + + testing::Mock::AllowLeak(mockIPreparedMode.get()); +} + +/** + * @tc.name: nnexecutortest_run_003 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(NNExecutorTest, nnexecutortest_run_003, TestSize.Level0) +{ + LOGE("Run nnexecutortest_run_003"); + size_t m_backendID {0}; + std::shared_ptr m_device {nullptr}; + std::shared_ptr mockIPreparedMode = std::make_shared(); + EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::Return(OH_NN_FAILED)); + std::vector, OH_NN_TensorType>> m_inputTensorDescs; + std::vector, OH_NN_TensorType>> m_outputTensorDescs; + + std::pair, OH_NN_TensorType> pair1; + std::pair, OH_NN_TensorType> pair2; + m_inputTensorDescs.emplace_back(pair1); + m_inputTensorDescs.emplace_back(pair2); + m_outputTensorDescs.emplace_back(pair1); + m_outputTensorDescs.emplace_back(pair2); + NNExecutor* nnExecutor = new (std::nothrow) NNExecutor( + m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs); + + size_t length = 9 * sizeof(float); + OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR); + void* buffer = m_dataArry; + + nnExecutor->SetInput(m_index, tensor, buffer, length); + nnExecutor->SetOutput(m_index, buffer, length); + OH_NN_ReturnCode ret = nnExecutor->Run(); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + + testing::Mock::AllowLeak(mockIPreparedMode.get()); +} + +/** + * @tc.name: nnexecutortest_setextensionconfig_001 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC + */ +HWTEST_F(NNExecutorTest, nnexecutortest_setextensionconfig_001, TestSize.Level0) +{ + LOGE("SetExtensionConfig nnexecutortest_setextensionconfig_001"); + size_t m_backendID {0}; + std::shared_ptr m_device {nullptr}; + std::shared_ptr mockIPreparedMode = std::make_shared(); + EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::Return(OH_NN_FAILED)); + std::vector, OH_NN_TensorType>> m_inputTensorDescs; + std::vector, OH_NN_TensorType>> m_outputTensorDescs; + + std::pair, OH_NN_TensorType> pair1; + std::pair, OH_NN_TensorType> pair2; + m_inputTensorDescs.emplace_back(pair1); + m_inputTensorDescs.emplace_back(pair2); + m_outputTensorDescs.emplace_back(pair1); + m_outputTensorDescs.emplace_back(pair2); + NNExecutor* nnExecutor = new (std::nothrow) NNExecutor( + m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs); + + std::unordered_map> configMap; + std::string callingPidStr = "callingPid"; + std::vector vecCallingPid(callingPidStr.begin(), callingPidStr.end()); + configMap["callingPid"] = vecCallingPid; + + std::string hiaiModelIdStr = "hiaiModelId"; + std::vector vechiaiModelId(hiaiModelIdStr.begin(), hiaiModelIdStr.end()); + configMap["hiaiModelId"] = vechiaiModelId; + + std::string vecNeedLatencyStr = "isNeedModelLatency"; + std::vector vecNeedLatency(vecNeedLatencyStr.begin(), vecNeedLatencyStr.end()); + configMap["isNeedModelLatency"] = vecNeedLatency; + OH_NN_ReturnCode retSetExtensionConfig = nnExecutor->SetExtensionConfig(configMap); + EXPECT_EQ(OH_NN_SUCCESS, retSetExtensionConfig); + + ExecutorConfig* retGetExecutorConfig = nnExecutor->GetExecutorConfig(); + EXPECT_NE(nullptr, retGetExecutorConfig); + + testing::Mock::AllowLeak(mockIPreparedMode.get()); +} } // namespace UnitTest } // namespace NeuralNetworkRuntime } // namespace OHOS \ No newline at end of file diff --git a/test/unittest/components/nn_tensor/nn_tensor_test.cpp b/test/unittest/components/nn_tensor/nn_tensor_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b651ac2f804ad8e0995738689a18cfa28f753c5f --- /dev/null +++ b/test/unittest/components/nn_tensor/nn_tensor_test.cpp @@ -0,0 +1,1298 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include <gtest/gtest.h>
+#include <gmock/gmock.h>
+
+#include "nntensor.h"
+#include "nnexecutor.h"
+#include "nncompiler.h"
+#include "nnbackend.h"
+#include "backend_manager.h"
+#include "device.h"
+#include "prepared_model.h"
+#include "interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h"
+#include "common/utils.h"
+#include "common/log.h"
+#include "hdi_device_v1_0.h"
+
+using namespace testing;
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime;
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace V1_0 = OHOS::HDI::Nnrt::V1_0;
+namespace UnitTest {
+class NNTensor2Test : public testing::Test {
+public:
+    NNTensor2Test() = default;
+    ~NNTensor2Test() = default;
+};
+
+class MockIDevice : public Device {
+public:
+    MOCK_METHOD1(GetDeviceName, OH_NN_ReturnCode(std::string&));
+    MOCK_METHOD1(GetVendorName, OH_NN_ReturnCode(std::string&));
+    MOCK_METHOD1(GetVersion, OH_NN_ReturnCode(std::string&));
+    MOCK_METHOD1(GetDeviceType, OH_NN_ReturnCode(OH_NN_DeviceType&));
+    MOCK_METHOD1(GetDeviceStatus, OH_NN_ReturnCode(DeviceStatus&));
+    MOCK_METHOD2(GetSupportedOperation, OH_NN_ReturnCode(std::shared_ptr<const mindspore::lite::LiteGraph>,
+                                                         std::vector<bool>&));
+    MOCK_METHOD1(IsFloat16PrecisionSupported, OH_NN_ReturnCode(bool&));
+    MOCK_METHOD1(IsPerformanceModeSupported, OH_NN_ReturnCode(bool&));
+    MOCK_METHOD1(IsPrioritySupported, OH_NN_ReturnCode(bool&));
+    MOCK_METHOD1(IsDynamicInputSupported, OH_NN_ReturnCode(bool&));
+    MOCK_METHOD1(IsModelCacheSupported, OH_NN_ReturnCode(bool&));
+    MOCK_METHOD3(PrepareModel, OH_NN_ReturnCode(std::shared_ptr<const mindspore::lite::LiteGraph>,
+                                                const ModelConfig&,
+                                                std::shared_ptr<PreparedModel>&));
+    MOCK_METHOD3(PrepareModel, OH_NN_ReturnCode(const void*,
+                                                const ModelConfig&,
+                                                std::shared_ptr<PreparedModel>&));
+    MOCK_METHOD4(PrepareModelFromModelCache, OH_NN_ReturnCode(const std::vector<Buffer>&,
+                                                              const ModelConfig&,
+                                                              std::shared_ptr<PreparedModel>&,
+                                                              bool&));
+    MOCK_METHOD3(PrepareOfflineModel, OH_NN_ReturnCode(std::shared_ptr<const mindspore::lite::LiteGraph>,
+                                                       const ModelConfig&,
+                                                       std::shared_ptr<PreparedModel>&));
+    MOCK_METHOD1(AllocateBuffer, void*(size_t));
+    MOCK_METHOD2(AllocateTensorBuffer, void*(size_t, std::shared_ptr<TensorDesc>));
+    MOCK_METHOD2(AllocateTensorBuffer, void*(size_t, std::shared_ptr<NNTensor>));
+    MOCK_METHOD1(ReleaseBuffer, OH_NN_ReturnCode(const void*));
+    MOCK_METHOD2(AllocateBuffer, OH_NN_ReturnCode(size_t, int&));
+    MOCK_METHOD2(ReleaseBuffer, OH_NN_ReturnCode(int, size_t));
+};
+
+class MockTensorDesc : public TensorDesc {
+public:
+    MOCK_METHOD1(GetDataType, OH_NN_ReturnCode(OH_NN_DataType*));
+    MOCK_METHOD1(SetDataType, OH_NN_ReturnCode(OH_NN_DataType));
+    MOCK_METHOD1(GetFormat, OH_NN_ReturnCode(OH_NN_Format*));
+    MOCK_METHOD1(SetFormat, OH_NN_ReturnCode(OH_NN_Format));
+    MOCK_METHOD2(GetShape, OH_NN_ReturnCode(int32_t**, size_t*));
+    MOCK_METHOD2(SetShape, OH_NN_ReturnCode(const int32_t*, size_t));
+    MOCK_METHOD1(GetElementNum, OH_NN_ReturnCode(size_t*));
+    MOCK_METHOD1(GetByteSize, OH_NN_ReturnCode(size_t*));
+    MOCK_METHOD1(SetName, OH_NN_ReturnCode(const char*));
+    MOCK_METHOD1(GetName, OH_NN_ReturnCode(const char**));
+};
+
+// Minimal Backend double standing in for NNBackend; GetDevice() is implemented (not mocked)
+// below so that it can hand back a MockIDevice whose AllocateBuffer succeeds.
+class MockBackend : public Backend {
+public:
+    MOCK_CONST_METHOD0(GetBackendID, size_t());
+    MOCK_CONST_METHOD1(GetBackendName, OH_NN_ReturnCode(std::string&));
+    MOCK_CONST_METHOD1(GetBackendType, OH_NN_ReturnCode(OH_NN_DeviceType&));
+    MOCK_CONST_METHOD1(GetBackendStatus, OH_NN_ReturnCode(DeviceStatus&));
+    MOCK_METHOD1(CreateCompiler, Compiler*(Compilation*));
+    MOCK_METHOD1(DestroyCompiler, OH_NN_ReturnCode(Compiler*));
+    MOCK_METHOD1(CreateExecutor, Executor*(Compilation*));
+    MOCK_METHOD1(DestroyExecutor, OH_NN_ReturnCode(Executor*));
+
MOCK_METHOD1(CreateTensor, Tensor*(TensorDesc*)); + MOCK_METHOD1(DestroyTensor, OH_NN_ReturnCode(Tensor*)); + + std::shared_ptr GetDevice() + { + std::shared_ptr device = std::make_shared(); + EXPECT_CALL(*((MockIDevice *) device.get()), AllocateBuffer(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::Return(OH_NN_SUCCESS)); + return device; + } + MOCK_METHOD2(GetSupportedOperation, OH_NN_ReturnCode(std::shared_ptr, + std::vector&)); +}; + +/** + * @tc.name: nntensor2_0test_construct_001 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(NNTensor2Test, nntensor2_0test_construct_001, TestSize.Level0) +{ + LOGE("NNTensor2_0 nntensor2_0test_construct_001"); + size_t backendId = 1; + + NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId); + EXPECT_NE(nullptr, nnTensor); + + delete nnTensor; +} + +/** + * @tc.name: nntensor2_0test_construct_002 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(NNTensor2Test, nntensor2_0test_construct_002, TestSize.Level0) +{ + LOGE("NNTensor2_0 nntensor2_0test_construct_002"); + size_t backendId = 1; + + NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId); + EXPECT_NE(nullptr, nnTensor); + + nnTensor->SetSize(1); + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* buffer = dataArry; + nnTensor->SetData(buffer); + nnTensor->SetFd(-1); + delete nnTensor; +} + +/** + * @tc.name: nntensor2_0test_construct_003 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(NNTensor2Test, nntensor2_0test_construct_003, TestSize.Level0) +{ + LOGE("NNTensor2_0 nntensor2_0test_construct_003"); + size_t backendId = 1; + + NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId); + EXPECT_NE(nullptr, nnTensor); + + nnTensor->SetSize(1); + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* buffer = dataArry; + nnTensor->SetData(buffer); + nnTensor->SetFd(0); + delete nnTensor; +} + +/** + * @tc.name: nntensor2_0test_settensordesc_001 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(NNTensor2Test, nntensor2_0test_settensordesc_001, TestSize.Level0) +{ + LOGE("SetTensorDesc nntensor2_0test_settensordesc_001"); + size_t backendId = 1; + + NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId); + EXPECT_NE(nullptr, nnTensor); + + TensorDesc desc; + TensorDesc* tensorDesc = &desc; + OH_NN_ReturnCode setTensorDescRet = nnTensor->SetTensorDesc(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, setTensorDescRet); + + OH_NN_ReturnCode ret = nnTensor->SetTensorDesc(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: nntensor2_0test_createdata_001 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(NNTensor2Test, nntensor2_0test_createdata_001, TestSize.Level0) +{ + LOGE("CreateData nntensor2_0test_createdata_001"); + size_t backendId = 1; + + NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId); + EXPECT_NE(nullptr, nnTensor); + + OH_NN_ReturnCode ret = nnTensor->CreateData(); + EXPECT_EQ(OH_NN_NULL_PTR, ret); +} + +/** + * @tc.name: nntensor2_0test_createdata_002 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNTensor2Test, nntensor2_0test_createdata_002, TestSize.Level0)
+{
+    LOGE("CreateData nntensor2_0test_createdata_002");
+    size_t backendId = 1;
+
+    NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId);
+    EXPECT_NE(nullptr, nnTensor);
+
+    std::shared_ptr<TensorDesc> tensorDesc = std::make_shared<MockTensorDesc>();
+    EXPECT_CALL(*((MockTensorDesc *) tensorDesc.get()), GetByteSize(::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_INVALID_PARAMETER));
+
+    OH_NN_ReturnCode retSetTensorDesc = nnTensor->SetTensorDesc(tensorDesc.get());
+    EXPECT_EQ(OH_NN_SUCCESS, retSetTensorDesc);
+
+    OH_NN_ReturnCode retCreateData = nnTensor->CreateData();
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, retCreateData);
+
+    testing::Mock::AllowLeak(tensorDesc.get());
+}
+
+std::shared_ptr<Backend> Creator()
+{
+    size_t backendID = 1;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+
+    EXPECT_CALL(*((MockIDevice *) device.get()), GetDeviceStatus(::testing::_))
+        .WillOnce(Invoke([](DeviceStatus& status) {
+            // Modify the referenced output parameter directly.
+            status = AVAILABLE;
+            return OH_NN_SUCCESS; // Simulate a successful return code.
+        }));
+
+    std::string backendName = "mock";
+    EXPECT_CALL(*((MockIDevice *) device.get()), GetDeviceName(::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(backendName), ::testing::Return(OH_NN_SUCCESS)));
+
+    EXPECT_CALL(*((MockIDevice *) device.get()), GetVendorName(::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(backendName), ::testing::Return(OH_NN_SUCCESS)));
+
+    EXPECT_CALL(*((MockIDevice *) device.get()), GetVersion(::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(backendName), ::testing::Return(OH_NN_SUCCESS)));
+
+    EXPECT_CALL(*((MockIDevice *) device.get()), AllocateBuffer(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_SUCCESS));
+
+    std::shared_ptr<Backend> backend = std::make_unique<NNBackend>(device, backendID);
+    return backend;
+}
+
+/**
+ * @tc.name: nntensor2_0test_createdata_003
+ * @tc.desc: Verify that CreateData returns OH_NN_MEMORY_ERROR when the backend device allocates no usable buffer fd.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNTensor2Test, nntensor2_0test_createdata_003, TestSize.Level0)
+{
+    LOGE("CreateData nntensor2_0test_createdata_003");
+    size_t backendId = 1;
+
+    NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId);
+    EXPECT_NE(nullptr, nnTensor);
+
+    BackendManager& backendManager = BackendManager::GetInstance();
+
+    std::string backendName = "mock";
+    std::function<std::shared_ptr<Backend>()> creator = Creator;
+
+    backendManager.RegisterBackend(backendName, creator);
+
+    TensorDesc desc;
+    desc.SetDataType(OH_NN_INT64);
+    size_t shapeNum = 1;
+    int32_t index = 10;
+    int32_t* shape = &index;
+    desc.SetShape(shape, shapeNum);
+    TensorDesc* tensorDesc = &desc;
+
+    OH_NN_ReturnCode retSetTensorDesc = nnTensor->SetTensorDesc(tensorDesc);
+    EXPECT_EQ(OH_NN_SUCCESS, retSetTensorDesc);
+
+    OH_NN_ReturnCode ret = nnTensor->CreateData();
+    EXPECT_EQ(OH_NN_MEMORY_ERROR, ret);
+}
+
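+// Creator() above registers a "mock" NNBackend for backend id 1, so CreateData can resolve
+// this tensor's backend; its AllocateBuffer expectation reports success without producing a
+// mappable fd, which is presumably why the allocation above ends in OH_NN_MEMORY_ERROR.
+/**
+ * @tc.name: nntensor2_0test_createdata_004
+ * @tc.desc: Verify that CreateData rejects a byte size above ALLOCATE_BUFFER_LIMIT with OH_NN_INVALID_PARAMETER.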
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNTensor2Test, nntensor2_0test_createdata_004, TestSize.Level0)
+{
+    LOGE("CreateData nntensor2_0test_createdata_004");
+    size_t backendId = 1;
+
+    NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId);
+    EXPECT_NE(nullptr, nnTensor);
+
+    std::shared_ptr<TensorDesc> tensorDesc = std::make_shared<MockTensorDesc>();
+    EXPECT_CALL(*((MockTensorDesc *) tensorDesc.get()), GetByteSize(::testing::_))
+        .WillRepeatedly(Invoke([](size_t* byteSize) {
+            // Modify the referenced output parameter directly.
+            *byteSize = ALLOCATE_BUFFER_LIMIT + 1;
+            return OH_NN_SUCCESS; // Simulate a successful return code.
+        }));
+
+    OH_NN_ReturnCode retSetTensorDesc = nnTensor->SetTensorDesc(tensorDesc.get());
+    EXPECT_EQ(OH_NN_SUCCESS, retSetTensorDesc);
+
+    OH_NN_ReturnCode ret = nnTensor->CreateData();
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+
+    testing::Mock::AllowLeak(tensorDesc.get());
+}
+
+/**
+ * @tc.name: nntensor2_0test_createdata_005
+ * @tc.desc: Verify that CreateData returns OH_NN_INVALID_PARAMETER when the mocked descriptor reports a 1-byte size.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNTensor2Test, nntensor2_0test_createdata_005, TestSize.Level0)
+{
+    LOGE("CreateData nntensor2_0test_createdata_005");
+    size_t backendId = 1;
+
+    NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId);
+    EXPECT_NE(nullptr, nnTensor);
+
+    std::shared_ptr<TensorDesc> tensorDesc = std::make_shared<MockTensorDesc>();
+    EXPECT_CALL(*((MockTensorDesc *) tensorDesc.get()), GetByteSize(::testing::_))
+        .WillRepeatedly(Invoke([](size_t* byteSize) {
+            // Modify the referenced output parameter directly.
+            *byteSize = 1;
+            return OH_NN_SUCCESS; // Simulate a successful return code.
+        }));
+
+    OH_NN_ReturnCode retSetTensorDesc = nnTensor->SetTensorDesc(tensorDesc.get());
+    EXPECT_EQ(OH_NN_SUCCESS, retSetTensorDesc);
+
+    OH_NN_ReturnCode ret = nnTensor->CreateData();
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+
+    testing::Mock::AllowLeak(tensorDesc.get());
+}
+
+std::shared_ptr<Backend> Creator2()
+{
+    size_t backendID = 2;
+
+    // A backend without a device, used to drive CreateData into its null-device branch.
+    std::shared_ptr<Backend> backend = std::make_unique<NNBackend>(nullptr, backendID);
+    return backend;
+}
+
+/**
+ * @tc.name: nntensor2_0test_createdata_006
+ * @tc.desc: Verify that CreateData returns OH_NN_NULL_PTR when the resolved backend holds no device.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNTensor2Test, nntensor2_0test_createdata_006, TestSize.Level0)
+{
+    LOGE("CreateData nntensor2_0test_createdata_006");
+    size_t backendId = 2;
+
+    NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId);
+    EXPECT_NE(nullptr, nnTensor);
+
+    BackendManager& backendManager = BackendManager::GetInstance();
+
+    std::string backendName = "mock";
+    std::function<std::shared_ptr<Backend>()> creator = Creator2;
+
+    backendManager.RegisterBackend(backendName, creator);
+
+    TensorDesc desc;
+    desc.SetDataType(OH_NN_INT64);
+    size_t shapeNum = 1;
+    int32_t index = 10;
+    int32_t* shape = &index;
+    desc.SetShape(shape, shapeNum);
+    TensorDesc* tensorDesc = &desc;
+
+    OH_NN_ReturnCode retSetTensorDesc = nnTensor->SetTensorDesc(tensorDesc);
+    EXPECT_EQ(OH_NN_SUCCESS, retSetTensorDesc);
+
+    OH_NN_ReturnCode ret = nnTensor->CreateData();
+    EXPECT_EQ(OH_NN_NULL_PTR, ret);
+}
+
+std::shared_ptr<Backend> Creator3()
+{
+    size_t backendID = 3;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+
+    EXPECT_CALL(*((MockIDevice *) device.get()), GetDeviceStatus(::testing::_))
+        .WillRepeatedly(Invoke([](DeviceStatus& status) {
+            // Modify the referenced output parameter directly.
+            status = AVAILABLE;
+            return OH_NN_SUCCESS; // Simulate a successful return code.
+        }));
+
+    std::string backendName = "mock";
+    EXPECT_CALL(*((MockIDevice *) device.get()), GetDeviceName(::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(backendName), ::testing::Return(OH_NN_SUCCESS)));
+
+    EXPECT_CALL(*((MockIDevice *) device.get()), GetVendorName(::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(backendName), ::testing::Return(OH_NN_SUCCESS)));
+
+    EXPECT_CALL(*((MockIDevice *) device.get()), GetVersion(::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(backendName), ::testing::Return(OH_NN_SUCCESS)));
+
+    EXPECT_CALL(*((MockIDevice *) device.get()), AllocateBuffer(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_MEMORY_ERROR));
+
+    std::shared_ptr<Backend> backend = std::make_unique<NNBackend>(device, backendID);
+
+    return backend;
+}
+
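+// Creator3 registers a backend whose device reports AVAILABLE but whose AllocateBuffer
+// fails, so CreateData is expected to surface the device's OH_NN_MEMORY_ERROR unchanged.
+/**
+ * @tc.name: nntensor2_0test_createdata_007
+ * @tc.desc: Verify that CreateData propagates OH_NN_MEMORY_ERROR when the device fails to allocate a buffer.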
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNTensor2Test, nntensor2_0test_createdata_007, TestSize.Level0)
+{
+    LOGE("CreateData nntensor2_0test_createdata_007");
+    size_t backendId = 3;
+
+    NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId);
+    EXPECT_NE(nullptr, nnTensor);
+
+    BackendManager& backendManager = BackendManager::GetInstance();
+
+    std::string backendName = "mock";
+    std::function<std::shared_ptr<Backend>()> creator = Creator3;
+
+    backendManager.RegisterBackend(backendName, creator);
+
+    TensorDesc desc;
+    desc.SetDataType(OH_NN_INT64);
+    size_t shapeNum = 1;
+    int32_t index = 10;
+    int32_t* shape = &index;
+    desc.SetShape(shape, shapeNum);
+    TensorDesc* tensorDesc = &desc;
+
+    OH_NN_ReturnCode retSetTensorDesc = nnTensor->SetTensorDesc(tensorDesc);
+    EXPECT_EQ(OH_NN_SUCCESS, retSetTensorDesc);
+
+    OH_NN_ReturnCode ret = nnTensor->CreateData();
+    EXPECT_EQ(OH_NN_MEMORY_ERROR, ret);
+}
+
+std::shared_ptr<Backend> Creator4()
+{
+    size_t backendID = 4;
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+
+    EXPECT_CALL(*((MockIDevice *) device.get()), GetDeviceStatus(::testing::_))
+        .WillRepeatedly(Invoke([](DeviceStatus& status) {
+            // Modify the referenced output parameter directly.
+            status = AVAILABLE;
+            return OH_NN_SUCCESS; // Simulate a successful return code.
+        }));
+
+    std::string backendName = "mock";
+    EXPECT_CALL(*((MockIDevice *) device.get()), GetDeviceName(::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(backendName), ::testing::Return(OH_NN_SUCCESS)));
+
+    EXPECT_CALL(*((MockIDevice *) device.get()), GetVendorName(::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(backendName), ::testing::Return(OH_NN_SUCCESS)));
+
+    EXPECT_CALL(*((MockIDevice *) device.get()), GetVersion(::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(backendName), ::testing::Return(OH_NN_SUCCESS)));
+
+    EXPECT_CALL(*((MockIDevice *) device.get()), AllocateBuffer(::testing::_, ::testing::_))
+        .WillRepeatedly(Invoke([](size_t length, int& fd) {
+            // Hand back an invalid file descriptor through the output parameter.
+            fd = -1;
+            return OH_NN_SUCCESS; // Simulate a successful return code.
+        }));
+
+    std::shared_ptr<Backend> backend = std::make_unique<NNBackend>(device, backendID);
+
+    return backend;
+}
+
+/**
+ * @tc.name: nntensor2_0test_createdata_008
+ * @tc.desc: Verify that CreateData returns OH_NN_INVALID_PARAMETER when the device reports success but hands back fd -1.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNTensor2Test, nntensor2_0test_createdata_008, TestSize.Level0)
+{
+    LOGE("CreateData nntensor2_0test_createdata_008");
+    size_t backendId = 4;
+
+    NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId);
+    EXPECT_NE(nullptr, nnTensor);
+
+    BackendManager& backendManager = BackendManager::GetInstance();
+
+    std::string backendName = "mock";
+    std::function<std::shared_ptr<Backend>()> creator = Creator4;
+
+    backendManager.RegisterBackend(backendName, creator);
+
+    TensorDesc desc;
+    desc.SetDataType(OH_NN_INT64);
+    size_t shapeNum = 1;
+    int32_t index = 10;
+    int32_t* shape = &index;
+    desc.SetShape(shape, shapeNum);
+    TensorDesc* tensorDesc = &desc;
+
+    OH_NN_ReturnCode retSetTensorDesc = nnTensor->SetTensorDesc(tensorDesc);
+    EXPECT_EQ(OH_NN_SUCCESS, retSetTensorDesc);
+
+    OH_NN_ReturnCode ret = nnTensor->CreateData();
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
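+// Once SetData has attached an externally owned buffer, CreateData is expected to refuse to
+// allocate over it; the remaining cases exercise that guard and the fd/size/offset checks.
+/**
+ * @tc.name: nntensor2_0test_createdata_009
+ * @tc.desc: Verify that CreateData returns OH_NN_FAILED when the tensor already holds externally set data.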
+ * @tc.type: FUNC + */ +HWTEST_F(NNTensor2Test, nntensor2_0test_createdata_009, TestSize.Level0) +{ + LOGE("CreateData nntensor2_0test_createdata_009"); + size_t backendId = 4; + + NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId); + EXPECT_NE(nullptr, nnTensor); + + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* buffer = dataArry; + nnTensor->SetData(buffer); + + OH_NN_ReturnCode ret = nnTensor->CreateData(); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/** + * @tc.name: nntensor2_0test_createdata_020 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(NNTensor2Test, nntensor2_0test_createdata_020, TestSize.Level0) +{ + LOGE("CreateData nntensor2_0test_createdata_020"); + size_t backendId = 1; + + NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId); + EXPECT_NE(nullptr, nnTensor); + + size_t size = 1; + OH_NN_ReturnCode ret = nnTensor->CreateData(size); + EXPECT_EQ(OH_NN_NULL_PTR, ret); +} + +/** + * @tc.name: nntensor2_0test_createdata_021 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(NNTensor2Test, nntensor2_0test_createdata_021, TestSize.Level0) +{ + LOGE("CreateData nntensor2_0test_createdata_021"); + size_t backendId = 1; + + NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId); + EXPECT_NE(nullptr, nnTensor); + + TensorDesc desc; + desc.SetDataType(OH_NN_INT64); + size_t shapeNum = 1; + int32_t index = 10; + int32_t* shape = &index; + desc.SetShape(shape, shapeNum); + TensorDesc* tensorDesc = &desc; + + OH_NN_ReturnCode retSetTensorDesc = nnTensor->SetTensorDesc(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, retSetTensorDesc); + + size_t size = ALLOCATE_BUFFER_LIMIT + 1; + OH_NN_ReturnCode ret = nnTensor->CreateData(size); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: nntensor2_0test_createdata_022 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(NNTensor2Test, nntensor2_0test_createdata_022, TestSize.Level0) +{ + LOGE("CreateData nntensor2_0test_createdata_022"); + size_t backendId = 1; + + NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId); + EXPECT_NE(nullptr, nnTensor); + + TensorDesc desc; + desc.SetDataType(OH_NN_INT64); + size_t shapeNum = 1; + int32_t index = 10; + int32_t* shape = &index; + desc.SetShape(shape, shapeNum); + TensorDesc* tensorDesc = &desc; + + OH_NN_ReturnCode retSetTensorDesc = nnTensor->SetTensorDesc(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, retSetTensorDesc); + + size_t size = 1; + OH_NN_ReturnCode ret = nnTensor->CreateData(size); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: nntensor2_0test_createdata_023 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC + */ +HWTEST_F(NNTensor2Test, nntensor2_0test_createdata_023, TestSize.Level0) +{ + LOGE("CreateData nntensor2_0test_createdata_023"); + size_t backendId = 1; + + NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId); + EXPECT_NE(nullptr, nnTensor); + + std::shared_ptr tensorDesc = std::make_shared(); + EXPECT_CALL(*((MockTensorDesc *) tensorDesc.get()), GetByteSize(::testing::_)) + .WillRepeatedly(::testing::Return(OH_NN_INVALID_PARAMETER)); + + OH_NN_ReturnCode retSetTensorDesc = nnTensor->SetTensorDesc(tensorDesc.get()); + EXPECT_EQ(OH_NN_SUCCESS, retSetTensorDesc); + + size_t size = 1; + OH_NN_ReturnCode ret = nnTensor->CreateData(size); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + + testing::Mock::AllowLeak(tensorDesc.get()); +} + +/** + * @tc.name: nntensor2_0test_createdata_024 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(NNTensor2Test, nntensor2_0test_createdata_024, TestSize.Level0) +{ + LOGE("CreateData nntensor2_0test_createdata_024"); + size_t backendId = 1; + + NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId); + EXPECT_NE(nullptr, nnTensor); + + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* buffer = dataArry; + nnTensor->SetData(buffer); + + size_t size = 1; + OH_NN_ReturnCode ret = nnTensor->CreateData(size); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/** + * @tc.name: nntensor2_0test_createdata_029 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(NNTensor2Test, nntensor2_0test_createdata_029, TestSize.Level0) +{ + LOGE("CreateData nntensor2_0test_createdata_029"); + size_t backendId = 1; + + NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId); + EXPECT_NE(nullptr, nnTensor); + + float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* buffer = dataArry; + nnTensor->SetData(buffer); + + int fd = 1; + size_t size = 2; + size_t offset = 3; + OH_NN_ReturnCode ret = nnTensor->CreateData(fd, size, offset); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/** + * @tc.name: nntensor2_0test_createdata_030 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(NNTensor2Test, nntensor2_0test_createdata_030, TestSize.Level0) +{ + LOGE("CreateData nntensor2_0test_createdata_030"); + size_t backendId = 1; + + NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId); + EXPECT_NE(nullptr, nnTensor); + + int fd = 1; + size_t size = 2; + size_t offset = 3; + OH_NN_ReturnCode ret = nnTensor->CreateData(fd, size, offset); + EXPECT_EQ(OH_NN_NULL_PTR, ret); +} + +/** + * @tc.name: nntensor2_0test_createdata_031 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC + */ +HWTEST_F(NNTensor2Test, nntensor2_0test_createdata_031, TestSize.Level0) +{ + LOGE("CreateData nntensor2_0test_createdata_031"); + size_t backendId = 1; + + NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId); + EXPECT_NE(nullptr, nnTensor); + + std::shared_ptr tensorDesc = std::make_shared(); + EXPECT_CALL(*((MockTensorDesc *) tensorDesc.get()), GetByteSize(::testing::_)) + .WillRepeatedly(::testing::Return(OH_NN_INVALID_PARAMETER)); + + OH_NN_ReturnCode retSetTensorDesc = nnTensor->SetTensorDesc(tensorDesc.get()); + EXPECT_EQ(OH_NN_SUCCESS, retSetTensorDesc); + + int fd = 1; + size_t size = 2; + size_t offset = 3; + OH_NN_ReturnCode ret = nnTensor->CreateData(fd, size, offset); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + + testing::Mock::AllowLeak(tensorDesc.get()); +} + +/** + * @tc.name: nntensor2_0test_createdata_032 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(NNTensor2Test, nntensor2_0test_createdata_032, TestSize.Level0) +{ + LOGE("CreateData nntensor2_0test_createdata_032"); + size_t backendId = 1; + + NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId); + EXPECT_NE(nullptr, nnTensor); + + TensorDesc desc; + desc.SetDataType(OH_NN_INT64); + size_t shapeNum = 1; + int32_t index = 10; + int32_t* shape = &index; + desc.SetShape(shape, shapeNum); + TensorDesc* tensorDesc = &desc; + + OH_NN_ReturnCode retSetTensorDesc = nnTensor->SetTensorDesc(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, retSetTensorDesc); + + int fd = -1; + size_t size = 2; + size_t offset = 3; + OH_NN_ReturnCode ret = nnTensor->CreateData(fd, size, offset); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: nntensor2_0test_createdata_033 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(NNTensor2Test, nntensor2_0test_createdata_033, TestSize.Level0) +{ + LOGE("CreateData nntensor2_0test_createdata_033"); + size_t backendId = 1; + + NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId); + EXPECT_NE(nullptr, nnTensor); + + TensorDesc desc; + desc.SetDataType(OH_NN_INT64); + size_t shapeNum = 1; + int32_t index = 10; + int32_t* shape = &index; + desc.SetShape(shape, shapeNum); + TensorDesc* tensorDesc = &desc; + + OH_NN_ReturnCode retSetTensorDesc = nnTensor->SetTensorDesc(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, retSetTensorDesc); + + int fd = 0; + size_t size = 0; + size_t offset = 3; + OH_NN_ReturnCode ret = nnTensor->CreateData(fd, size, offset); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: nntensor2_0test_createdata_034 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC + */ +HWTEST_F(NNTensor2Test, nntensor2_0test_createdata_034, TestSize.Level0) +{ + LOGE("CreateData nntensor2_0test_createdata_034"); + size_t backendId = 1; + + NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId); + EXPECT_NE(nullptr, nnTensor); + + TensorDesc desc; + desc.SetDataType(OH_NN_INT64); + size_t shapeNum = 1; + int32_t index = 10; + int32_t* shape = &index; + desc.SetShape(shape, shapeNum); + TensorDesc* tensorDesc = &desc; + + OH_NN_ReturnCode retSetTensorDesc = nnTensor->SetTensorDesc(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, retSetTensorDesc); + + int fd = 0; + size_t size = 1; + size_t offset = 3; + OH_NN_ReturnCode ret = nnTensor->CreateData(fd, size, offset); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: nntensor2_0test_createdata_035 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(NNTensor2Test, nntensor2_0test_createdata_035, TestSize.Level0) +{ + LOGE("CreateData nntensor2_0test_createdata_035"); + size_t backendId = 1; + + NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId); + EXPECT_NE(nullptr, nnTensor); + + TensorDesc desc; + desc.SetDataType(OH_NN_INT64); + size_t shapeNum = 1; + int32_t index = 10; + int32_t* shape = &index; + desc.SetShape(shape, shapeNum); + TensorDesc* tensorDesc = &desc; + + OH_NN_ReturnCode retSetTensorDesc = nnTensor->SetTensorDesc(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, retSetTensorDesc); + + int fd = 0; + size_t size = 3; + size_t offset = 2; + OH_NN_ReturnCode ret = nnTensor->CreateData(fd, size, offset); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: nntensor2_0test_createdata_036 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(NNTensor2Test, nntensor2_0test_createdata_036, TestSize.Level0) +{ + LOGE("CreateData nntensor2_0test_createdata_036"); + size_t backendId = 1; + + NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId); + EXPECT_NE(nullptr, nnTensor); + + TensorDesc desc; + desc.SetDataType(OH_NN_INT64); + size_t shapeNum = 1; + int32_t index = 10; + int32_t* shape = &index; + desc.SetShape(shape, shapeNum); + TensorDesc* tensorDesc = &desc; + + OH_NN_ReturnCode retSetTensorDesc = nnTensor->SetTensorDesc(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, retSetTensorDesc); + + int fd = 0; + size_t size = 200; + size_t offset = 1; + OH_NN_ReturnCode ret = nnTensor->CreateData(fd, size, offset); + EXPECT_EQ(OH_NN_MEMORY_ERROR, ret); +} + + +/** + * @tc.name: nntensor2_0test_gettensordesc_001 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(NNTensor2Test, nntensor2_0test_gettensordesc_001, TestSize.Level0) +{ + LOGE("GetTensorDesc nntensor2_0test_gettensordesc_001"); + size_t backendId = 1; + + NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId); + EXPECT_NE(nullptr, nnTensor); + + TensorDesc* ret = nnTensor->GetTensorDesc(); + EXPECT_EQ(nullptr, ret); +} + +/** + * @tc.name: nntensor2_0test_getdata_001 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC + */ +HWTEST_F(NNTensor2Test, nntensor2_0test_getdata_001, TestSize.Level0) +{ + LOGE("GetData nntensor2_0test_getdata_001"); + size_t backendId = 1; + + NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId); + EXPECT_NE(nullptr, nnTensor); + + void* ret = nnTensor->GetData(); + EXPECT_EQ(nullptr, ret); +} + +/** + * @tc.name: nntensor2_0test_getfd_001 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(NNTensor2Test, nntensor2_0test_getfd_001, TestSize.Level0) +{ + LOGE("GetFd nntensor2_0test_getfd_001"); + size_t backendId = 1; + + NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId); + EXPECT_NE(nullptr, nnTensor); + + int ret = nnTensor->GetFd(); + EXPECT_EQ(0, ret); +} + +/** + * @tc.name: nntensor2_0test_getsize_001 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(NNTensor2Test, nntensor2_0test_getsize_001, TestSize.Level0) +{ + LOGE("GetSize nntensor2_0test_getsize_001"); + size_t backendId = 1; + + NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId); + EXPECT_NE(nullptr, nnTensor); + + size_t ret = nnTensor->GetSize(); + EXPECT_EQ(0, ret); +} + +/** + * @tc.name: nntensor2_0test_getoffset_001 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(NNTensor2Test, nntensor2_0test_getoffset_001, TestSize.Level0) +{ + LOGE("GetOffset nntensor2_0test_getoffset_001"); + size_t backendId = 1; + + NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId); + EXPECT_NE(nullptr, nnTensor); + + size_t ret = nnTensor->GetOffset(); + EXPECT_EQ(0, ret); +} + +/** + * @tc.name: nntensor2_0test_getbackendid_001 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(NNTensor2Test, nntensor2_0test_getbackendid_001, TestSize.Level0) +{ + LOGE("GetBackendID nntensor2_0test_getbackendid_001"); + size_t backendId = 1; + + NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId); + EXPECT_NE(nullptr, nnTensor); + + size_t ret = nnTensor->GetBackendID(); + EXPECT_EQ(1, ret); +} + +/** + * @tc.name: nntensor2_0test_checktensordata_001 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(NNTensor2Test, nntensor2_0test_checktensordata_001, TestSize.Level0) +{ + LOGE("CheckTensorData nntensor2_0test_checktensordata_001"); + size_t backendId = 1; + + NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId); + EXPECT_NE(nullptr, nnTensor); + + bool ret = nnTensor->CheckTensorData(); + EXPECT_EQ(false, ret); +} + +/** + * @tc.name: nntensor2_0test_checktensordata_002 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC + */ +HWTEST_F(NNTensor2Test, nntensor2_0test_checktensordata_002, TestSize.Level0) +{ + LOGE("CheckTensorData nntensor2_0test_checktensordata_002"); + size_t backendId = 1; + + NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId); + EXPECT_NE(nullptr, nnTensor); + + std::shared_ptr tensorDesc = std::make_shared(); + EXPECT_CALL(*((MockTensorDesc *) tensorDesc.get()), GetByteSize(::testing::_)) + .WillRepeatedly(::testing::Return(OH_NN_INVALID_PARAMETER)); + + OH_NN_ReturnCode retSetTensorDesc = nnTensor->SetTensorDesc(tensorDesc.get()); + EXPECT_EQ(OH_NN_SUCCESS, retSetTensorDesc); + + bool ret = nnTensor->CheckTensorData(); + EXPECT_EQ(false, ret); + + testing::Mock::AllowLeak(tensorDesc.get()); +} + +/** + * @tc.name: nntensor2_0test_checktensordata_003 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(NNTensor2Test, nntensor2_0test_checktensordata_003, TestSize.Level0) +{ + LOGE("CheckTensorData nntensor2_0test_checktensordata_003"); + size_t backendId = 1; + + NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId); + EXPECT_NE(nullptr, nnTensor); + + TensorDesc desc; + desc.SetDataType(OH_NN_INT64); + size_t shapeNum = 1; + int32_t index = 10; + int32_t* shape = &index; + desc.SetShape(shape, shapeNum); + TensorDesc* tensorDesc = &desc; + + OH_NN_ReturnCode retSetTensorDesc = nnTensor->SetTensorDesc(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, retSetTensorDesc); + + bool ret = nnTensor->CheckTensorData(); + EXPECT_EQ(false, ret); +} + +/** + * @tc.name: nntensor2_0test_checktensordata_004 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(NNTensor2Test, nntensor2_0test_checktensordata_004, TestSize.Level0) +{ + LOGE("CheckTensorData nntensor2_0test_checktensordata_004"); + size_t backendId = 1; + + NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId); + EXPECT_NE(nullptr, nnTensor); + + TensorDesc desc; + desc.SetDataType(OH_NN_INT64); + size_t shapeNum = 1; + int32_t index = 10; + int32_t* shape = &index; + desc.SetShape(shape, shapeNum); + TensorDesc* tensorDesc = &desc; + + OH_NN_ReturnCode retSetTensorDesc = nnTensor->SetTensorDesc(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, retSetTensorDesc); + + nnTensor->SetSize(200); + nnTensor->SetOffset(0); + + bool ret = nnTensor->CheckTensorData(); + EXPECT_EQ(false, ret); +} + +/** + * @tc.name: nntensor2_0test_checktensordata_005 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNTensor2Test, nntensor2_0test_checktensordata_005, TestSize.Level0)
+{
+    LOGE("CheckTensorData nntensor2_0test_checktensordata_005");
+    size_t backendId = 1;
+
+    NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId);
+    EXPECT_NE(nullptr, nnTensor);
+
+    TensorDesc desc;
+    desc.SetDataType(OH_NN_INT64);
+    size_t shapeNum = 1;
+    int32_t index = 10;
+    int32_t* shape = &index;
+    desc.SetShape(shape, shapeNum);
+    TensorDesc* tensorDesc = &desc;
+
+    OH_NN_ReturnCode retSetTensorDesc = nnTensor->SetTensorDesc(tensorDesc);
+    EXPECT_EQ(OH_NN_SUCCESS, retSetTensorDesc);
+
+    nnTensor->SetSize(200);
+    nnTensor->SetOffset(0);
+    float dataArray[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    void* buffer = dataArray;
+    nnTensor->SetData(buffer);
+    nnTensor->SetFd(-1);
+
+    bool ret = nnTensor->CheckTensorData();
+    EXPECT_EQ(false, ret);
+}
+
+/**
+ * @tc.name: nntensor2_0test_checktensordata_006
+ * @tc.desc: Verify the CheckTensorData function returns true once desc, size, offset and data are all valid.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNTensor2Test, nntensor2_0test_checktensordata_006, TestSize.Level0)
+{
+    LOGE("CheckTensorData nntensor2_0test_checktensordata_006");
+    size_t backendId = 1;
+
+    NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId);
+    EXPECT_NE(nullptr, nnTensor);
+
+    TensorDesc desc;
+    desc.SetDataType(OH_NN_INT64);
+    size_t shapeNum = 1;
+    int32_t index = 10;
+    int32_t* shape = &index;
+    desc.SetShape(shape, shapeNum);
+    TensorDesc* tensorDesc = &desc;
+
+    OH_NN_ReturnCode retSetTensorDesc = nnTensor->SetTensorDesc(tensorDesc);
+    EXPECT_EQ(OH_NN_SUCCESS, retSetTensorDesc);
+
+    nnTensor->SetSize(200);
+    nnTensor->SetOffset(0);
+    float dataArray[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    void* buffer = dataArray;
+    nnTensor->SetData(buffer);
+
+    bool ret = nnTensor->CheckTensorData();
+    EXPECT_EQ(true, ret);
+}
+
+/**
+ * @tc.name: nntensor2_0test_checkdimranges_001
+ * @tc.desc: Verify the CheckDimRanges function returns OH_NN_INVALID_PARAMETER when no tensor desc has been set.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNTensor2Test, nntensor2_0test_checkdimranges_001, TestSize.Level0)
+{
+    LOGE("CheckDimRanges nntensor2_0test_checkdimranges_001");
+    size_t backendId = 1;
+
+    NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId);
+    EXPECT_NE(nullptr, nnTensor);
+
+    std::vector<uint32_t> minDimRanges;
+    const std::vector<uint32_t> maxDimRanges;
+    OH_NN_ReturnCode ret = nnTensor->CheckDimRanges(minDimRanges, maxDimRanges);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: nntensor2_0test_checkdimranges_002
+ * @tc.desc: Verify the CheckDimRanges function returns OH_NN_SUCCESS for an empty shape and empty ranges.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNTensor2Test, nntensor2_0test_checkdimranges_002, TestSize.Level0)
+{
+    LOGE("CheckDimRanges nntensor2_0test_checkdimranges_002");
+    size_t backendId = 1;
+
+    NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId);
+    EXPECT_NE(nullptr, nnTensor);
+
+    TensorDesc desc;
+    desc.SetDataType(OH_NN_INT64);
+    TensorDesc* tensorDesc = &desc;
+
+    OH_NN_ReturnCode retSetTensorDesc = nnTensor->SetTensorDesc(tensorDesc);
+    EXPECT_EQ(OH_NN_SUCCESS, retSetTensorDesc);
+
+    std::vector<uint32_t> minDimRanges;
+    const std::vector<uint32_t> maxDimRanges;
+    OH_NN_ReturnCode ret = nnTensor->CheckDimRanges(minDimRanges, maxDimRanges);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
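+
+// Reviewer note: the next two cases cover the invalid-shape paths of
+// CheckDimRanges(): a negative dimension is rejected, and a dimension outside
+// the caller-supplied [min, max] window (here 10 against [20, 20]) is rejected
+// with OH_NN_INVALID_PARAMETER.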
+
+/**
+ * @tc.name: nntensor2_0test_checkdimranges_003
+ * @tc.desc: Verify the CheckDimRanges function returns OH_NN_INVALID_PARAMETER for a negative dimension.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNTensor2Test, nntensor2_0test_checkdimranges_003, TestSize.Level0)
+{
+    LOGE("CheckDimRanges nntensor2_0test_checkdimranges_003");
+    size_t backendId = 1;
+
+    NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId);
+    EXPECT_NE(nullptr, nnTensor);
+
+    TensorDesc desc;
+    desc.SetDataType(OH_NN_INT64);
+    size_t shapeNum = 1;
+    int32_t index = -10;
+    int32_t* shape = &index;
+    desc.SetShape(shape, shapeNum);
+    TensorDesc* tensorDesc = &desc;
+
+    OH_NN_ReturnCode retSetTensorDesc = nnTensor->SetTensorDesc(tensorDesc);
+    EXPECT_EQ(OH_NN_SUCCESS, retSetTensorDesc);
+
+    std::vector<uint32_t> minDimRanges;
+    const std::vector<uint32_t> maxDimRanges;
+    OH_NN_ReturnCode ret = nnTensor->CheckDimRanges(minDimRanges, maxDimRanges);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: nntensor2_0test_checkdimranges_004
+ * @tc.desc: Verify the CheckDimRanges function returns OH_NN_INVALID_PARAMETER when a dimension falls outside the [min, max] range.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NNTensor2Test, nntensor2_0test_checkdimranges_004, TestSize.Level0)
+{
+    LOGE("CheckDimRanges nntensor2_0test_checkdimranges_004");
+    size_t backendId = 1;
+
+    NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId);
+    EXPECT_NE(nullptr, nnTensor);
+
+    TensorDesc desc;
+    desc.SetDataType(OH_NN_INT64);
+    size_t shapeNum = 1;
+    int32_t index = 10;
+    int32_t* shape = &index;
+    desc.SetShape(shape, shapeNum);
+    TensorDesc* tensorDesc = &desc;
+
+    OH_NN_ReturnCode retSetTensorDesc = nnTensor->SetTensorDesc(tensorDesc);
+    EXPECT_EQ(OH_NN_SUCCESS, retSetTensorDesc);
+
+    std::vector<uint32_t> minDimRanges;
+    minDimRanges.emplace_back(20);
+    std::vector<uint32_t> maxDimRanges;
+    maxDimRanges.emplace_back(20);
+    OH_NN_ReturnCode ret = nnTensor->CheckDimRanges(minDimRanges, maxDimRanges);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+} // namespace UnitTest
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
\ No newline at end of file
diff --git a/test/unittest/components/v1_0/device_manager/device_manager_test.cpp b/test/unittest/components/v1_0/device_manager/device_manager_test.cpp
index a6d28fd1b2ac009a609302668cd5a1fa69b1cf41..441fb5c24d9f78ec04381092c6e46af1ac5b3f03 100644
--- a/test/unittest/components/v1_0/device_manager/device_manager_test.cpp
+++ b/test/unittest/components/v1_0/device_manager/device_manager_test.cpp
@@ -13,227 +13,2258 @@
  * limitations under the License.
  */
+#include <sys/mman.h>
 #include <gtest/gtest.h>
 #include <gmock/gmock.h>
 
 #include "common/log.h"
-#include "device_manager.h"
 #include "hdi_device_v1_0.h"
 #include "test/unittest/common/v1_0/mock_idevice.h"
+#include "lite_graph_to_hdi_model_v1_0.h"
+#include "device.h"
+#include "interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h"
+#include "nnbackend.h"
+#include "ops_registry.h"
+#include "transform.h"
+
 using namespace testing;
 using namespace testing::ext;
 using namespace OHOS::NeuralNetworkRuntime;
+using LiteGraphPrimitvePtr = std::unique_ptr<void, void (*)(void*)>;
+
+namespace MSLITE = mindspore::lite;
 
 namespace OHOS {
 namespace NeuralNetworkRuntime {
+namespace V1 {
 namespace UnitTest {
-class DeviceManagerTest : public testing::Test {
-protected:
-    void MockInit(OHOS::sptr<V1_0::MockIDevice> device, const std::vector<int32_t>& typeVect,
-        const std::string& deviceName, const std::string& vendorName);
+class LiteGraphToHDIModelTest : public testing::Test {
+public:
+    LiteGraphToHDIModelTest() = default;
+    ~LiteGraphToHDIModelTest() = default;
+public:
+    std::vector<uint32_t> m_inputs{0, 1};
+    std::vector<uint32_t> m_outputs{2};
+    std::vector<uint32_t> m_param{3};
+    std::vector<int32_t> m_input_dim{3, 3};
+    std::vector<int32_t> m_output_dim{3, 3};
+    std::vector<int32_t> m_param_dim{};
 };
 
-void DeviceManagerTest::MockInit(OHOS::sptr<V1_0::MockIDevice> device, const std::vector<int32_t>& typeVect,
-    const std::string& deviceName, const std::string& vendorName)
+MSLITE::LiteGraph::Node* getNode(void* primitive)
+{
+    MSLITE::LiteGraph::Node* node = new (std::nothrow) MSLITE::LiteGraph::Node();
+    if (node == nullptr) {
+        return nullptr;
+    }
+    node->name_ = "NNRt_SubGraph";
+    node->quant_type_ = 1;
+    node->primitive_ = primitive;
+    node->input_indices_ = {1, 1, 1, 1};
+    node->output_indices_ = {1, 1, 1, 1};
+    return node;
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_001
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function returns nullptr when the input LiteGraph is nullptr.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_001, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_001");
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(nullptr, tensorBuffer);
+    EXPECT_EQ(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_002
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function returns nullptr for an empty LiteGraph and allocates no mappable buffer.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_002, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_002");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {0, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_EQ(nullptr, model);
+
+    uint8_t* mmapPtr = static_cast<uint8_t*>(mmap(nullptr,
+        tensorBuffer.bufferSize, PROT_READ | PROT_WRITE, MAP_SHARED, tensorBuffer.fd, 0));
+    EXPECT_EQ(MAP_FAILED, mmapPtr);
+}
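+
+// Reviewer note (assumption): the mmap() probes in these cases only assert that
+// the SharedBuffer left behind by the conversion is not mappable (fd -1 or a
+// zero-sized buffer), i.e. no real shared memory is expected to back it.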
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_003
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a graph holding one subgraph and one tensor.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_003, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_003");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+    MSLITE::LiteGraph::SubGraph* subGraph = new (std::nothrow) MSLITE::LiteGraph::SubGraph();
+    subGraph->name_ = "NNRt_SubGraph";
+    subGraph->input_indices_ = {1, 1, 1, 1};
+    subGraph->output_indices_ = {1, 1, 1, 1};
+    subGraph->node_indices_ = {1, 1, 1, 1};
+
+    void* tp = MSLITE::MindIR_Tensor_Create();
+
+    liteGraph->all_tensors_.emplace_back(tp);
+    liteGraph->sub_graphs_.emplace_back(subGraph);
+
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 1, 1, 1};
+
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+
+    uint8_t* mmapPtr = static_cast<uint8_t*>(mmap(nullptr,
+        tensorBuffer.bufferSize, PROT_READ | PROT_WRITE, MAP_SHARED, tensorBuffer.fd, 0));
+    EXPECT_EQ(MAP_FAILED, mmapPtr);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_004
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function returns nullptr when the graph contains a nullptr node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_004, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_004");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+    liteGraph->all_nodes_.emplace_back(nullptr);
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_EQ(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_005
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function returns nullptr when a node carries no primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_005, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_005");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+    MSLITE::LiteGraph::Node* node = new (std::nothrow) MSLITE::LiteGraph::Node();
+    liteGraph->all_nodes_.emplace_back(node);
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_EQ(nullptr, model);
+}
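+
+// Reviewer note: every case from 006 onward follows one pattern: wrap a single
+// MindIR primitive in a one-node graph via getNode(), convert it with an
+// unmappable SharedBuffer {-1, 0, 0, 0}, and expect a non-null HDI model.
+// Only the primitive under test changes between cases.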
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_006
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding an Activation (ABS) primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_006, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_006");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    float alpha {0.0f};
+    float minVal {0.0f};
+    float maxVal {0.0f};
+    bool approximate {false};
+    mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_ABS};
+
+    void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, alpha,
+        minVal, maxVal, approximate);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_007
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding an AddFusion primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_007, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_007");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    int8_t num = 1;
+    int8_t* fuseData = &num;
+    mindspore::lite::ActivationType type = NNToMS::TransfromFusionType(static_cast<OH_NN_FuseType>(*fuseData));
+    void* primitive = mindspore::lite::MindIR_AddFusion_CreatePrimitive(type);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_008
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding an All primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_008, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_008");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    int64_t keepDims {0};
+    void* primitive = mindspore::lite::MindIR_All_CreatePrimitive(keepDims);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_009
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding an ArgMaxFusion primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_009, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_009");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    int64_t axis {-1};
+    int64_t topK {1};
+    bool keepDims {false};
+    bool outMaxValue {false};
+    void* primitive = mindspore::lite::MindIR_ArgMaxFusion_CreatePrimitive(axis, topK, keepDims, outMaxValue);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_010
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding an Assert primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_010, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_010");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    int64_t summarize {0};
+    void* primitive = mindspore::lite::MindIR_Assert_CreatePrimitive(summarize);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_011
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding an AvgPoolFusion primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_011, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_011");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    std::vector<int64_t> kernelSize;
+    std::vector<int64_t> pad;
+    std::vector<int64_t> strides;
+    mindspore::lite::PadMode padMode {mindspore::lite::PAD_MODE_PAD};
+    mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION};
+    mindspore::lite::RoundMode roundMode {mindspore::lite::ROUND_MODE_FLOOR};
+    mindspore::lite::Format format {mindspore::lite::FORMAT_NCHW};
+    bool global {false};
+    void* primitive = mindspore::lite::MindIR_AvgPoolFusion_CreatePrimitive(kernelSize, strides, pad,
+        padMode, roundMode, format, global, activationType);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_012
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding a BatchToSpaceND primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_012, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_012");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    std::vector<int64_t> blockSize;
+    std::vector<std::vector<int64_t>> crops;
+    void* primitive = mindspore::lite::MindIR_BatchToSpaceND_CreatePrimitive(blockSize, crops);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_013
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding a FusedBatchNorm primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_013, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_013");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    float epsilon {0.0001f};
+    void* primitive = mindspore::lite::MindIR_FusedBatchNorm_CreatePrimitive(epsilon);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_014
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a second graph holding a FusedBatchNorm primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_014, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_014");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    float epsilon {0.0001f};
+    void* primitive = mindspore::lite::MindIR_FusedBatchNorm_CreatePrimitive(epsilon);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_015
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding a BiasAdd primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_015, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_015");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_BiasAdd_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_016
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding a BroadcastTo primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_016, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_016");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    std::vector<int64_t> shape;
+    void* primitive = mindspore::lite::MindIR_BroadcastTo_CreatePrimitive(shape);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_017
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding a Cast primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_017, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_017");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Cast_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_018
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding a Ceil primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_018, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_018");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Ceil_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_019
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding a Clip primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_019, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_019");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    float max {0.0f};
+    float min {0.0f};
+    void* primitive = mindspore::lite::MindIR_Clip_CreatePrimitive(max, min);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_020
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding a Concat primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_020, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_020");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    int64_t axis {0};
+    void* primitive = mindspore::lite::MindIR_Concat_CreatePrimitive(axis);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_021
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding a ConstantOfShape primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_021, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_021");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    int64_t dataType {0};
+    std::vector<float> value;
+    void* primitive = mindspore::lite::MindIR_ConstantOfShape_CreatePrimitive(dataType, value);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_022
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding a Conv2dTransposeFusion primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_022, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_022");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    int64_t group {1};
+    int64_t inChannel {0};
+    int64_t outChannel {0};
+    std::vector<int64_t> kernelSize;
+    std::vector<int64_t> strides;
+    std::vector<int64_t> padList;
+    std::vector<int64_t> dilation;
+    std::vector<int64_t> outputPaddings;
+    mindspore::lite::PadMode padMode {mindspore::lite::PAD_MODE_PAD};
+    mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION};
+    void* primitive = mindspore::lite::MindIR_Conv2dTransposeFusion_CreatePrimitive(kernelSize,
+        strides, dilation, padMode, padList, group, inChannel, outChannel,
+        activationType, outputPaddings);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_023
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding a Cos primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_023, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_023");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Cos_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_024
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding a Crop primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_024, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_024");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    int64_t axis {0};
+    std::vector<int64_t> offset;
+    void* primitive = mindspore::lite::MindIR_Crop_CreatePrimitive(axis, offset);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_025
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding a DepthToSpace primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_025, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_025");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    int64_t blockSize {0};
+    std::string mode;
+    mindspore::lite::Format format {mindspore::lite::FORMAT_NCHW};
+    void* primitive = mindspore::lite::MindIR_DepthToSpace_CreatePrimitive(blockSize, format, mode);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_026
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding a DetectionPostProcess primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_026, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_026");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    int64_t inputSize {0};
+    std::vector<float> scale;
+    float nmsIoUThreshold {0.0f};
+    float nmsScoreThreshold {0.0f};
+    int64_t maxDetections {0};
+    int64_t detectionsPerClass {0};
+    int64_t maxClassesPerDetection {0};
+    int64_t numClasses {0};
+    bool useRegularNms {false};
+    bool outQuantized {false};
+    mindspore::lite::Format format {mindspore::lite::FORMAT_NCHW};
+    void* primitive = mindspore::lite::MindIR_DetectionPostProcess_CreatePrimitive(format, inputSize, scale,
+        nmsIoUThreshold, nmsScoreThreshold, maxDetections, detectionsPerClass, maxClassesPerDetection,
+        numClasses, useRegularNms, outQuantized);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_027
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding an Eltwise primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_027, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_027");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    mindspore::lite::EltwiseMode mode {mindspore::lite::ELTWISE_MODE_PROD};
+    void* primitive = mindspore::lite::MindIR_Eltwise_CreatePrimitive(mode);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_028
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding an Equal primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_028, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_028");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Equal_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_029
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding an Erf primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_029, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_029");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Erf_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_030
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding an ExpFusion primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_030, TestSize.Level0)
 {
-    const size_t typeSize = 4;
-    int index = 0;
-    EXPECT_EQ(typeSize, typeVect.size());
-    EXPECT_CALL(*device, GetDeviceName(::testing::_))
-        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceName),
-            ::testing::Return(typeVect[index++])));
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_030");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    float base {-1.0f};
+    float scale {1.0f};
+    float shift {0.0f};
+    void* primitive = mindspore::lite::MindIR_ExpFusion_CreatePrimitive(base, scale, shift);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
 
-    EXPECT_CALL(*device, GetVendorName(::testing::_))
-        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(vendorName),
-            ::testing::Return(typeVect[index++])));
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_031
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding an ExpandDims primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_031, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_031");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
 
-    V1_0::DeviceStatus deviceStatus = V1_0::DeviceStatus::AVAILABLE;
-    EXPECT_CALL(*device, GetDeviceStatus(::testing::_))
-        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceStatus),
-            ::testing::Return(typeVect[index++])));
+    void* primitive = mindspore::lite::MindIR_ExpandDims_CreatePrimitive();
 
-    uint32_t majorVer = 1;
-    uint32_t minorVer = 0;
-    EXPECT_CALL(*device, GetVersion(::testing::_, ::testing::_))
-        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(majorVer), ::testing::SetArgReferee<1>(minorVer),
-            ::testing::Return(typeVect[index++])));
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
 }
 
 /**
- * @tc.name: devicemanager_getalldeviceid_001
- * @tc.desc: Verify the GetAllDeviceId function return deviceid list is not null.
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_032
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding a Fill primitive.
 * @tc.type: FUNC
  */
-HWTEST_F(DeviceManagerTest, devicemanager_getalldeviceid_001, TestSize.Level0)
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_032, TestSize.Level0)
 {
-    auto &deviceManager = DeviceManager::GetInstance();
-    std::vector<size_t> idVect = deviceManager.GetAllDeviceId();
-    EXPECT_NE((size_t)0, idVect.size());
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_032");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
 
-    const std::string expectDeviceName = "MockDevice";
-    std::string deviceName = "";
-    std::shared_ptr<Device> retDevice = deviceManager.GetDevice(idVect[0]);
-    retDevice->GetDeviceName(deviceName);
-    EXPECT_EQ(deviceName, expectDeviceName);
+    void* primitive = mindspore::lite::MindIR_Fill_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
 }
 
 /**
- * @tc.name: devicemanager_getdevice_001
- * @tc.desc: Verify the GetDevice function return nullptr in case of deviceId invalid.
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_033
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding a Flatten primitive.
  * @tc.type: FUNC
  */
-HWTEST_F(DeviceManagerTest, devicemanager_getdevice_001, TestSize.Level0)
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_033, TestSize.Level0)
 {
-    auto &deviceManager = DeviceManager::GetInstance();
-    const size_t deviceId = 1;
-    std::shared_ptr<Device> result = deviceManager.GetDevice(deviceId);
-    EXPECT_EQ(nullptr, result);
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_033");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    int64_t axis {1};
+    void* primitive = mindspore::lite::MindIR_Flatten_CreatePrimitive(axis);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
 }
 
 /**
- * @tc.name: devicemanager_getdevice_002
- * @tc.desc: Verify the GetDevice function validate device name return specified device name.
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_034
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding a Floor primitive.
 * @tc.type: FUNC
  */
-HWTEST_F(DeviceManagerTest, devicemanager_getdevice_002, TestSize.Level0)
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_034, TestSize.Level0)
 {
-    auto &deviceManager = DeviceManager::GetInstance();
-    std::vector<size_t> idVect = deviceManager.GetAllDeviceId();
-    EXPECT_EQ((size_t)1, idVect.size());
-    size_t deviceId = idVect[0];
-    std::shared_ptr<Device> result = deviceManager.GetDevice(deviceId);
-    EXPECT_NE(nullptr, result);
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_034");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Floor_CreatePrimitive();
 
-    const std::string expectDeviceNameA = "MockDevice";
-    std::string deviceName = "";
-    result->GetDeviceName(deviceName);
-    EXPECT_EQ(deviceName, expectDeviceNameA);
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
 }
 
 /**
- * @tc.name: devicemanager_registerdevice_001
- * @tc.desc: Verify the RegisterDevice function register repeatly.
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_035
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding a FullConnection primitive.
  * @tc.type: FUNC
  */
-HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_001, TestSize.Level0)
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_035, TestSize.Level0)
 {
-    std::vector<int32_t> typeVect = {HDF_SUCCESS, HDF_SUCCESS, HDF_SUCCESS, HDF_SUCCESS};
-    OHOS::sptr<V1_0::MockIDevice> device = OHOS::sptr<V1_0::MockIDevice>(new (std::nothrow) V1_0::MockIDevice());
-    EXPECT_NE(device.GetRefPtr(), nullptr);
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_035");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
 
-    std::string deviceName = "MockDevice";
-    std::string vendorName = "MockVendor";
-    MockInit(device, typeVect, deviceName, vendorName);
+    bool hasBias {false};
+    bool useAxis {false};
+    int64_t axis {0};
+    mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION};
+    void* primitive = mindspore::lite::MindIR_FullConnection_CreatePrimitive(hasBias, useAxis,
+        axis, activationType);
 
-    std::function<std::shared_ptr<Device>()> creator =
-        [&device]()->std::shared_ptr<Device> {return std::make_shared<HDIDeviceV1_0>(device);};
-    auto& deviceManager = DeviceManager::GetInstance();
-    OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator);
-    EXPECT_EQ(OH_NN_SUCCESS, result);
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
 }
 
 /**
- * @tc.name: devicemanager_registerdevice_002
- * @tc.desc: Verify the RegisterDevice function return invalid parameter.
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_036
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding a Gather primitive.
 * @tc.type: FUNC
  */
-HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_002, TestSize.Level0)
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_036, TestSize.Level0)
 {
-    std::function<std::shared_ptr<Device>()> creator =
-        []()->std::shared_ptr<Device> {return nullptr;};
-    auto& deviceManager = DeviceManager::GetInstance();
-    OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator);
-    EXPECT_EQ(OH_NN_INVALID_PARAMETER, result);
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_036");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Gather_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
 }
 
 /**
- * @tc.name: devicemanager_registerdevice_003
- * @tc.desc: Verify the RegisterDevice function return unavailable device in case of device name invalid param.
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_037
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding a GatherNd primitive.
  * @tc.type: FUNC
  */
-HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_003, TestSize.Level0)
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_037, TestSize.Level0)
 {
-    std::vector<int32_t> typeVect = {HDF_FAILURE, HDF_SUCCESS, HDF_SUCCESS, HDF_SUCCESS};
-    OHOS::sptr<V1_0::MockIDevice> device = OHOS::sptr<V1_0::MockIDevice>(new (std::nothrow) V1_0::MockIDevice());
-    EXPECT_NE(device.GetRefPtr(), nullptr);
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_037");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
 
-    std::string deviceName = "MockDevice";
-    std::string vendorName = "MockVendor";
-    MockInit(device, typeVect, deviceName, vendorName);
+    void* primitive = mindspore::lite::MindIR_GatherNd_CreatePrimitive();
 
-    std::function<std::shared_ptr<Device>()> creator =
-        [&device]()->std::shared_ptr<Device> {return std::make_shared<HDIDeviceV1_0>(device);};
-    auto& deviceManager = DeviceManager::GetInstance();
-    OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator);
-    EXPECT_EQ(OH_NN_UNAVAILABLE_DEVICE, result);
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
 }
 
 /**
- * @tc.name: devicemanager_registerdevice_004
- * @tc.desc: Verify the RegisterDevice function return unavailable device in case of vendor name failure.
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_038
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding an Activation (GELU) primitive.
 * @tc.type: FUNC
  */
-HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_004, TestSize.Level0)
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_038, TestSize.Level0)
 {
-    std::vector<int32_t> typeVect = {HDF_SUCCESS, HDF_FAILURE, HDF_SUCCESS, HDF_SUCCESS};
-    OHOS::sptr<V1_0::MockIDevice> device = OHOS::sptr<V1_0::MockIDevice>(new (std::nothrow) V1_0::MockIDevice());
-    EXPECT_NE(device.GetRefPtr(), nullptr);
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_038");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
 
-    std::string deviceName = "MockDevice";
-    std::string vendorName = "MockVendor";
-    MockInit(device, typeVect, deviceName, vendorName);
+    mindspore::lite::ActivationType activationType = mindspore::lite::ACTIVATION_TYPE_GELU;
+    float alpha = 0.0f;
+    float minVal = 0.0f;
+    float maxVal = 0.0f;
+    bool approximate = false;
+    void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType,
+        alpha, minVal, maxVal, approximate);
 
-    std::function<std::shared_ptr<Device>()> creator =
-        [&device]()->std::shared_ptr<Device> {return std::make_shared<HDIDeviceV1_0>(device);};
-    auto& deviceManager = DeviceManager::GetInstance();
-    OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator);
-    EXPECT_EQ(OH_NN_UNAVAILABLE_DEVICE, result);
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
 }
 
 /**
- * @tc.name: devicemanager_registerdevice_005
- * @tc.desc: Verify the RegisterDevice function return success.
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_039
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding a Greater primitive.
  * @tc.type: FUNC
  */
-HWTEST_F(DeviceManagerTest, devicemanager_registerdevice_005, TestSize.Level0)
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_039, TestSize.Level0)
 {
-    std::vector<int32_t> typeVect = {HDF_SUCCESS, HDF_SUCCESS, HDF_SUCCESS, HDF_SUCCESS};
-    OHOS::sptr<V1_0::MockIDevice> device = OHOS::sptr<V1_0::MockIDevice>(new (std::nothrow) V1_0::MockIDevice());
-    EXPECT_NE(device.GetRefPtr(), nullptr);
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_039");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
 
-    std::string deviceName = "MockDeviceA";
-    std::string vendorName = "MockVendorA";
-    MockInit(device, typeVect, deviceName, vendorName);
+    void* primitive = mindspore::lite::MindIR_Greater_CreatePrimitive();
 
-    std::function<std::shared_ptr<Device>()> creator =
-        [&device]()->std::shared_ptr<Device> {return std::make_shared<HDIDeviceV1_0>(device);};
-    auto& deviceManager = DeviceManager::GetInstance();
-    OH_NN_ReturnCode result = deviceManager.RegisterDevice(creator);
-    EXPECT_EQ(OH_NN_SUCCESS, result);
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
 
-    std::vector<size_t> idVect = deviceManager.GetAllDeviceId();
-    EXPECT_NE((size_t)0, idVect.size());
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_040
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding a GreaterEqual primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_040, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_040");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
 
-    const size_t expectDeviceId {std::hash<std::string> {} ("MockDeviceA_MockVendorA_v1_0")};
-    EXPECT_EQ(expectDeviceId, idVect[0]);
+    void* primitive = mindspore::lite::MindIR_GreaterEqual_CreatePrimitive();
 
-    const std::string expectDeviceName = "MockDeviceA_MockVendorA_v1_0";
-    const std::string retDeviceName = deviceManager.GetDeviceName(idVect[0]);
-    EXPECT_EQ(retDeviceName, expectDeviceName);
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
 }
 
 /**
- * @tc.name: devicemanager_getdevicename_001
- * @tc.desc: Verify the GetDevice function return empty string in case of deviceid invalid.
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_041
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding an Activation (HSIGMOID) primitive.
  * @tc.type: FUNC
  */
-HWTEST_F(DeviceManagerTest, devicemanager_getdevicename_001, TestSize.Level0)
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_041, TestSize.Level0)
 {
-    auto &deviceManager = DeviceManager::GetInstance();
-    const size_t deviceId = 1;
-    std::string result = deviceManager.GetDeviceName(deviceId);
-    EXPECT_EQ("", result);
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_041");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    mindspore::lite::ActivationType activationType = mindspore::lite::ACTIVATION_TYPE_HSIGMOID;
+    float alpha = 0.0f;
+    float minVal = 0.0f;
+    float maxVal = 0.0f;
+    bool approximate = false;
+    void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType,
+        alpha, minVal, maxVal, approximate);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_042
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding an InstanceNorm primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_042, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_042");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    float epsilon {0.0f};
+    void* primitive = mindspore::lite::MindIR_InstanceNorm_CreatePrimitive(epsilon);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_043
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a second graph holding an InstanceNorm primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_043, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_043");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    float epsilon {0.0f};
+    void* primitive = mindspore::lite::MindIR_InstanceNorm_CreatePrimitive(epsilon);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_044
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding an L2NormalizeFusion primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_044, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_044");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    std::vector<int64_t> axis;
+    float epsilon {1e-6f};
+    mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION};
+    void* primitive = mindspore::lite::MindIR_L2NormalizeFusion_CreatePrimitive(axis, epsilon, activationType);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_045
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding a LayerNormFusion primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_045, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_045");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    int64_t beginNormAxis {1};
+    float epsilon {1e-7f};
+    bool elementwiseAffine {true};
+    int64_t beginParamsAxis {1};
+    void* primitive = mindspore::lite::MindIR_LayerNormFusion_CreatePrimitive(beginNormAxis,
+        epsilon, elementwiseAffine, beginParamsAxis);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_046
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding an Activation (LEAKY_RELU) primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_046, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_046");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    float alpha {0.0f};
+    float minVal {0.0f};
+    float maxVal {0.0f};
+    bool approximate {false};
+    mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_LEAKY_RELU};
+    void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, alpha,
+        minVal, maxVal, approximate);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_047
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding a Less primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_047, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_047");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Less_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_048
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding a LessEqual primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_048, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_048");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_LessEqual_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_049
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding a Log primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_049, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_049");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Log_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_050
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding a LogSoftmax primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_050, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_050");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    int64_t axis {0};
+    void* primitive = mindspore::lite::MindIR_LogSoftmax_CreatePrimitive(axis);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_051
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding a LogicalAnd primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_051, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_051");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_LogicalAnd_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_052
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding a LogicalNot primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_052, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_052");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_LogicalNot_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_053
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding a LogicalOr primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_053, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_053");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_LogicalOr_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_054
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function converts a one-node graph holding an LRN primitive.
+ * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_054, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_054"); + std::shared_ptr liteGraph = std::make_shared(); + + int64_t depthRadius {0}; + float bias {0.0f}; + float alpha {0.0f}; + float beta {0.0f}; + std::string normRegion {"ACROSS_CHANNELS"}; + void* primitive = mindspore::lite::MindIR_LRN_CreatePrimitive(depthRadius, bias, alpha, + beta, normRegion); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_055 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_055, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_055"); + std::shared_ptr liteGraph = std::make_shared(); + + bool bidirectional {false}; + bool hasBias {false}; + int64_t inputSize {0}; + int64_t hiddenSize {0}; + int64_t numLayers {0}; + int64_t numDirections {0}; + float dropout {0.0f}; + float zoneoutCell {0.0f}; + float zoneoutHidden {0.0f}; + int64_t projSize {0}; + void* primitive = mindspore::lite::MindIR_LSTM_CreatePrimitive(bidirectional, hasBias, inputSize, + hiddenSize, numLayers, numDirections, dropout, zoneoutCell, zoneoutHidden, projSize); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_056 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_056, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_056"); + std::shared_ptr liteGraph = std::make_shared(); + + void* primitive = mindspore::lite::MindIR_Maximum_CreatePrimitive(); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_057 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_057, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_057"); + std::shared_ptr liteGraph = std::make_shared(); + + std::vector kernelSize; + std::vector pad; + std::vector strides; + mindspore::lite::PadMode padMode {mindspore::lite::PAD_MODE_PAD}; + mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + mindspore::lite::Format format {mindspore::lite::FORMAT_NCHW}; + bool global {false}; + void* primitive = MindIR_MaxPoolFusion_CreatePrimitive(kernelSize, strides, pad, + padMode, format, global, activationType); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_058 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_058, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_058"); + std::shared_ptr liteGraph = std::make_shared(); + + void* primitive = mindspore::lite::MindIR_Minimum_CreatePrimitive(); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_059 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_059, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_059"); + std::shared_ptr liteGraph = std::make_shared(); + + void* primitive = mindspore::lite::MindIR_Mod_CreatePrimitive(); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_060 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_060, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_060"); + std::shared_ptr liteGraph = std::make_shared(); + + mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + void* primitive = mindspore::lite::MindIR_MulFusion_CreatePrimitive(activationType); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_061 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_061, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_061"); + std::shared_ptr liteGraph = std::make_shared(); + + void* primitive = mindspore::lite::MindIR_Neg_CreatePrimitive(); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_062 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_062, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_062"); + std::shared_ptr liteGraph = std::make_shared(); + + void* primitive = mindspore::lite::MindIR_NotEqual_CreatePrimitive(); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_063 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_063, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_063"); + std::shared_ptr liteGraph = std::make_shared(); + + int64_t axis {-1}; + void* primitive = mindspore::lite::MindIR_OneHot_CreatePrimitive(axis); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_064 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_064, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_064"); + std::shared_ptr liteGraph = std::make_shared(); + + std::vector> paddings; + float constantValue {0.0f}; + mindspore::lite::PaddingMode paddingMode {mindspore::lite::PADDING_MODE_CONSTANT}; + void* primitive = MindIR_PadFusion_CreatePrimitive(paddings, paddingMode, constantValue); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_065 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_065, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_065");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    float scale {1.0f};
+    float shift {0.0f};
+    void* primitive = mindspore::lite::MindIR_PowFusion_CreatePrimitive(scale, shift);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_066
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a model from a PReLUFusion primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_066, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_066");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    bool channelShared {false};
+    void* primitive = mindspore::lite::MindIR_PReLUFusion_CreatePrimitive(channelShared);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_067
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a model from a QuantDTypeCast primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_067, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_067");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    // Pass value-initialized type ids instead of dereferencing null pointers.
+    int64_t srcT {0};
+    int64_t dstT {0};
+    int64_t axis {0};
+    void* primitive = mindspore::lite::MindIR_QuantDTypeCast_CreatePrimitive(srcT, dstT, axis);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_068
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a model from a Range primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_068, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_068");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    int64_t dType {0};
+    int64_t start {0};
+    int64_t limit {0};
+    int64_t delta {1};
+    void* primitive = mindspore::lite::MindIR_Range_CreatePrimitive(dType, start, limit, delta);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_069
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a model from a Rank primitive.
+ * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_069, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_069"); + std::shared_ptr liteGraph = std::make_shared(); + + void* primitive = mindspore::lite::MindIR_Rank_CreatePrimitive(); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_070 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_070, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_070"); + std::shared_ptr liteGraph = std::make_shared(); + + void* primitive = mindspore::lite::MindIR_Reciprocal_CreatePrimitive(); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_071 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_071, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_071"); + std::shared_ptr liteGraph = std::make_shared(); + + mindspore::lite::ReduceMode mode {mindspore::lite::REDUCE_MODE_ALL}; + float coeff {0.0f}; + bool reduceToEnd {false}; + bool keepDims {false}; + void* primitive = mindspore::lite::MindIR_ReduceFusion_CreatePrimitive(keepDims, mode, reduceToEnd, coeff); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_072 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_072, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_072"); + std::shared_ptr liteGraph = std::make_shared(); + + float alpha{0.0f}; + float minVal{0.0f}; + float maxVal{0.0f}; + bool approximate{false}; + mindspore::lite::ActivationType activationType{mindspore::lite::ACTIVATION_TYPE_RELU6}; + void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, alpha, + minVal, maxVal, approximate); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_073 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_073, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_073"); + std::shared_ptr liteGraph = std::make_shared(); + + void* primitive = mindspore::lite::MindIR_Reshape_CreatePrimitive(); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_074 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_074, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_074"); + std::shared_ptr liteGraph = std::make_shared(); + + float cubicCoeff{0.0f}; + float extrapolationValue{0.0f}; + mindspore::lite::NearestMode nearestMode{mindspore::lite::NEAREST_MODE_NORMAL}; + mindspore::lite::ResizeMethod method {mindspore::lite::RESIZE_METHOD_LINEAR}; + uint64_t newHeight{0}; + uint64_t newWidth{0}; + bool preserveAspectRatio{false}; + mindspore::lite::CoordinateTransformMode coordinateTransformMode { + mindspore::lite::COORDINATE_TRANSFORM_MODE_ASYMMETRIC}; + uint64_t excludeOutside{0}; + void* primitive = mindspore::lite::MindIR_Resize_CreatePrimitive(method, newHeight, newWidth, + preserveAspectRatio, coordinateTransformMode, cubicCoeff, excludeOutside, + extrapolationValue, nearestMode); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_075 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_075, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_075"); + std::shared_ptr liteGraph = std::make_shared(); + + void* primitive = mindspore::lite::MindIR_Round_CreatePrimitive(); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_076 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_076, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_076");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Rsqrt_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_077
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a model from a ScaleFusion primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_077, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_077");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION};
+    // Pass a value-initialized axis instead of dereferencing a null pointer.
+    int64_t axis {0};
+    void* primitive = mindspore::lite::MindIR_ScaleFusion_CreatePrimitive(axis, activationType);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_078
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a model from a ScatterNd primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_078, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_078");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_ScatterNd_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_079
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a model from a Select primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_079, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_079");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Select_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_080
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a model from a sigmoid Activation primitive.
+ * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_080, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_080"); + std::shared_ptr liteGraph = std::make_shared(); + + float alpha{0.0f}; + float minVal{0.0f}; + float maxVal{0.0f}; + bool approximate{false}; + mindspore::lite::ActivationType activationType{mindspore::lite::ACTIVATION_TYPE_SIGMOID}; + void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, alpha, minVal, + maxVal, approximate); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_081 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_081, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_081"); + std::shared_ptr liteGraph = std::make_shared(); + + void* primitive = mindspore::lite::MindIR_Sin_CreatePrimitive(); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_082 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_082, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_082"); + std::shared_ptr liteGraph = std::make_shared(); + + mindspore::lite::Format format {mindspore::lite::FORMAT_NCHW}; + int64_t blockSize {0}; + void* primitive = mindspore::lite::MindIR_SpaceToDepth_CreatePrimitive(blockSize, format); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_083 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_083, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_083"); + std::shared_ptr liteGraph = std::make_shared(); + + void* primitive = mindspore::lite::MindIR_SparseToDense_CreatePrimitive(); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_084 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_084, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_084"); + std::shared_ptr liteGraph = std::make_shared(); + + void* primitive = mindspore::lite::MindIR_Square_CreatePrimitive(); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_085 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_085, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_085"); + std::shared_ptr liteGraph = std::make_shared(); + + float alpha {0.0f}; + float minVal {0.0f}; + float maxVal {0.0f}; + bool approximate {false}; + mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_SWISH}; + void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, alpha, + minVal, maxVal, approximate); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_086 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_086, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_086"); + std::shared_ptr liteGraph = std::make_shared(); + + int64_t axis {0}; + void* primitive = mindspore::lite::MindIR_Unstack_CreatePrimitive(axis); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_087 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_087, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_087"); + std::shared_ptr liteGraph = std::make_shared(); + + void* primitive = mindspore::lite::MindIR_Where_CreatePrimitive(); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_088 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_088, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_088"); + std::shared_ptr liteGraph = std::make_shared(); + + void* primitive = mindspore::lite::MindIR_Shape_CreatePrimitive(); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_089 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_089, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_089"); + std::shared_ptr liteGraph = std::make_shared(); + + std::vector axis; + void* primitive = mindspore::lite::MindIR_Unsqueeze_CreatePrimitive(axis); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_090 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_090, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_090"); + std::shared_ptr liteGraph = std::make_shared(); + + int64_t inChannel{0}; + int64_t outChannel{0}; + std::vector kernelSize; + std::vector strides; + std::vector pad; + std::vector dilation; + mindspore::lite::PadMode padMode{mindspore::lite::PAD_MODE_PAD}; + mindspore::lite::ActivationType activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + void* primitive = mindspore::lite::MindIR_Conv2DFusion_CreatePrimitive(kernelSize, strides, + dilation, padMode, pad, inChannel, inChannel, outChannel, activationType); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_091 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_091, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_091"); + std::shared_ptr liteGraph = std::make_shared(); + + mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + void* primitive = mindspore::lite::MindIR_DivFusion_CreatePrimitive(activationType); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_092 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_092, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_092"); + std::shared_ptr liteGraph = std::make_shared(); + + mindspore::lite::ActivationType activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + bool transposeA{false}; + bool transposeB{false}; + void* primitive = mindspore::lite::MindIR_MatMulFusion_CreatePrimitive(transposeA, transposeB, activationType); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_093 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_093, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_093"); + std::shared_ptr liteGraph = std::make_shared(); + + std::vector axes; + void* primitive = mindspore::lite::MindIR_SliceFusion_CreatePrimitive(axes); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_094 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_094, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_094"); + std::shared_ptr liteGraph = std::make_shared(); + + std::vector axis; + void* primitive = mindspore::lite::MindIR_Softmax_CreatePrimitive(axis); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_095 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_095, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_095"); + std::shared_ptr liteGraph = std::make_shared(); + + std::vector> paddings; + std::vector block_shape {}; + void* primitive = mindspore::lite::MindIR_SpaceToBatchND_CreatePrimitive(block_shape, paddings); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_096 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_096, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_096"); + std::shared_ptr liteGraph = std::make_shared(); + + int64_t outputNum {0}; + std::vector sizeSplits; + int64_t axis {0}; + void* primitive = mindspore::lite::MindIR_Split_CreatePrimitive(outputNum, sizeSplits, axis); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_097 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_097, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_097"); + std::shared_ptr liteGraph = std::make_shared(); + + void* primitive = mindspore::lite::MindIR_Sqrt_CreatePrimitive(); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_098 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_098, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_098"); + std::shared_ptr liteGraph = std::make_shared(); + + void* primitive = mindspore::lite::MindIR_SquaredDifference_CreatePrimitive(); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_099 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_099, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_099"); + std::shared_ptr liteGraph = std::make_shared(); + + std::vector axis; + void* primitive = mindspore::lite::MindIR_Squeeze_CreatePrimitive(axis); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_100 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_100, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_100"); + std::shared_ptr liteGraph = std::make_shared(); + + int64_t axis = {0}; + void* primitive = mindspore::lite::MindIR_Stack_CreatePrimitive(axis); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_101 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_101, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_101"); + std::shared_ptr liteGraph = std::make_shared(); + + int64_t beginMask = {0}; + int64_t endMask = {0}; + int64_t ellipsisMask = {0}; + int64_t newAxisMask = {0}; + int64_t shrinkAxisMask = {0}; + void* primitive = mindspore::lite::MindIR_StridedSlice_CreatePrimitive(beginMask, endMask, ellipsisMask, + newAxisMask, shrinkAxisMask); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_102 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_102, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_102"); + std::shared_ptr liteGraph = std::make_shared(); + + mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + void* primitive = mindspore::lite::MindIR_SubFusion_CreatePrimitive(activationType); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_103 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_103, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_103"); + std::shared_ptr liteGraph = std::make_shared(); + + std::vector dims {0}; + void* primitive = mindspore::lite::MindIR_TileFusion_CreatePrimitive(dims); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_104 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_104, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_104"); + std::shared_ptr liteGraph = std::make_shared(); + + int64_t axis {0}; + bool sorted {true}; + void* primitive = mindspore::lite::MindIR_TopKFusion_CreatePrimitive(sorted, axis); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_105 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_105, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_105"); + std::shared_ptr liteGraph = std::make_shared(); + + void* primitive = mindspore::lite::MindIR_Transpose_CreatePrimitive(); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_106 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_106, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_106"); + std::shared_ptr liteGraph = std::make_shared(); + + std::vector axis; + void* primitive = mindspore::lite::MindIR_Unsqueeze_CreatePrimitive(axis); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_107 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_107, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_107"); + std::shared_ptr liteGraph = std::make_shared(); + + std::vector axis; + void* primitive = mindspore::lite::MindIR_Unsqueeze_CreatePrimitive(axis); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V1_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_108 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_108, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_108");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+    MSLITE::LiteGraph::SubGraph* subGraph = new (std::nothrow) MSLITE::LiteGraph::SubGraph();
+    EXPECT_NE(nullptr, subGraph);
+    subGraph->name_ = "NNRt_SubGraph";
+    subGraph->input_indices_ = {1, 1, 1, 1};
+    subGraph->output_indices_ = {1, 1, 1, 1};
+    subGraph->node_indices_ = {1, 1, 1, 1};
+
+    void* tp = MSLITE::MindIR_Tensor_Create();
+
+    liteGraph.get()->all_tensors_.emplace_back(tp);
+    liteGraph.get()->all_tensors_.emplace_back(nullptr);
+    liteGraph.get()->sub_graphs_.emplace_back(subGraph);
+
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 1, 1, 1};
+
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+
+    // Mapping the invalid fd (-1) must fail.
+    uint8_t* mmapPtr = static_cast<uint8_t*>(mmap(nullptr,
+        tensorBuffer.bufferSize, PROT_READ | PROT_WRITE, MAP_SHARED, tensorBuffer.fd, 0));
+    EXPECT_EQ(MAP_FAILED, mmapPtr);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_hdimodel_destroy_001
+ * @tc.desc: Verify that HDIModel_Destroy releases a model converted from a LiteGraph.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_hdimodel_destroy_001, TestSize.Level0)
+{
+    LOGE("HDIModel_Destroy litegraphtohdimodeltest_hdimodel_destroy_001");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    float alpha {0.0f};
+    float minVal {0.0f};
+    float maxVal {0.0f};
+    bool approximate {false};
+    mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_ABS};
+
+    void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, alpha,
+        minVal, maxVal, approximate);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V1_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+    HDIModel_Destroy(&model);
 }
 } // namespace UnitTest
+} // namespace V1
 } // namespace NeuralNetworkRuntime
-} // namespace OHOS
+} // namespace OHOS
\ No newline at end of file
diff --git a/test/unittest/components/v1_0/hdi_device/hdi_device_test.cpp b/test/unittest/components/v1_0/hdi_device/hdi_device_test.cpp
index 5c98750e36fa8fa337d098de7967a6246ce344a6..d3ce6af40f5f4fbbe05a7c747b2dac83123d17fe 100644
--- a/test/unittest/components/v1_0/hdi_device/hdi_device_test.cpp
+++ b/test/unittest/components/v1_0/hdi_device/hdi_device_test.cpp
@@ -13,6 +13,7 @@
  * limitations under the License.
  */
 
+#include 
 #include 
 #include 
 #include 
@@ -31,6 +32,7 @@
 using namespace testing;
 using namespace testing::ext;
 using namespace OHOS::NeuralNetworkRuntime;
+
 namespace mindspore {
 namespace lite {
 OHOS::HDI::Nnrt::V1_0::Model* MindIR_LiteGraph_To_Model(const LiteGraph* lite_graph,
@@ -204,6 +206,45 @@ HWTEST_F(HDIDeviceTest, hdidevice_getvendorname_002, TestSize.Level0)
     EXPECT_EQ(OH_NN_UNAVAILABLE_DEVICE, result);
 }
 
+/* *
+ * @tc.name: hdidevice_getversion_001
+ * @tc.desc: Verify that the GetVersion function succeeds when the device reports its version.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_getversion_001, TestSize.Level0)
+{
+    LOGE("GetVersion hdidevice_getversion_001");
+    OHOS::sptr<V1_0::INnrtDevice> device = V1_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV1_0> hdiDevice = std::make_unique<HDIDeviceV1_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+    EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetVersion(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(HDF_SUCCESS));
+
+    std::string version;
+    OH_NN_ReturnCode result = hdiDevice->GetVersion(version);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+}
+
+/* *
+ * @tc.name: hdidevice_getversion_002
+ * @tc.desc: Verify that the GetVersion function returns unavailable device when the HDI call fails.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_getversion_002, TestSize.Level0)
+{
+    LOGE("GetVersion hdidevice_getversion_002");
+    OHOS::sptr<V1_0::INnrtDevice> device = V1_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV1_0> hdiDevice = std::make_unique<HDIDeviceV1_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+    EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetVersion(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(HDF_FAILURE));
+
+    std::string version;
+    OH_NN_ReturnCode result = hdiDevice->GetVersion(version);
+    EXPECT_EQ(OH_NN_UNAVAILABLE_DEVICE, result);
+}
+
 /* *
  * @tc.name: hdidevice_getdevicetype_001
  * @tc.desc: Verify the GetDeviceType function validate device type success.
@@ -244,6 +285,75 @@ HWTEST_F(HDIDeviceTest, hdidevice_getdevicetype_002, TestSize.Level0)
     EXPECT_EQ(OH_NN_UNAVAILABLE_DEVICE, result);
 }
 
+/* *
+ * @tc.name: hdidevice_getdevicetype_003
+ * @tc.desc: Verify that the GetDeviceType function maps the GPU device type successfully.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_getdevicetype_003, TestSize.Level0)
+{
+    LOGE("GetDeviceType hdidevice_getdevicetype_003");
+    OHOS::sptr<V1_0::INnrtDevice> device = V1_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV1_0> hdiDevice = std::make_unique<HDIDeviceV1_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    OH_NN_DeviceType deviceType = OH_NN_CPU;
+    EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetDeviceType(::testing::_))
+        .WillRepeatedly(Invoke([](V1_0::DeviceType& type) {
+            // Write the device type back through the reference parameter.
+            type = V1_0::DeviceType::GPU;
+            return OH_NN_SUCCESS; // status code the wrapper treats as success
+        }));
+    OH_NN_ReturnCode result = hdiDevice->GetDeviceType(deviceType);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+}
+
+/* *
+ * @tc.name: hdidevice_getdevicetype_004
+ * @tc.desc: Verify that the GetDeviceType function maps the ACCELERATOR device type successfully.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_getdevicetype_004, TestSize.Level0)
+{
+    LOGE("GetDeviceType hdidevice_getdevicetype_004");
+    OHOS::sptr<V1_0::INnrtDevice> device = V1_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV1_0> hdiDevice = std::make_unique<HDIDeviceV1_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    OH_NN_DeviceType deviceType = OH_NN_CPU;
+    EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetDeviceType(::testing::_))
+        .WillRepeatedly(Invoke([](V1_0::DeviceType& type) {
+            // Write the device type back through the reference parameter.
+            type = V1_0::DeviceType::ACCELERATOR;
+            return OH_NN_SUCCESS; // status code the wrapper treats as success
+        }));
+    OH_NN_ReturnCode result = hdiDevice->GetDeviceType(deviceType);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+}
+
+/* *
+ * @tc.name: hdidevice_getdevicetype_005
+ * @tc.desc: Verify that the GetDeviceType function maps the OTHER device type successfully.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_getdevicetype_005, TestSize.Level0)
+{
+    LOGE("GetDeviceType hdidevice_getdevicetype_005");
+    OHOS::sptr<V1_0::INnrtDevice> device = V1_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV1_0> hdiDevice = std::make_unique<HDIDeviceV1_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    OH_NN_DeviceType deviceType = OH_NN_CPU;
+    EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetDeviceType(::testing::_))
+        .WillRepeatedly(Invoke([](V1_0::DeviceType& type) {
+            // Write the device type back through the reference parameter.
+            type = V1_0::DeviceType::OTHER;
+            return OH_NN_SUCCESS; // status code the wrapper treats as success
+        }));
+    OH_NN_ReturnCode result = hdiDevice->GetDeviceType(deviceType);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+}
+
 /* *
  * @tc.name: hdidevice_getdevicestatus_001
  * @tc.desc: Verify the GetDeviceStatus function validate device status success.
@@ -284,6 +394,78 @@ HWTEST_F(HDIDeviceTest, hdidevice_getdevicestatus_002, TestSize.Level0)
     EXPECT_EQ(OH_NN_UNAVAILABLE_DEVICE, result);
 }
 
+/* *
+ * @tc.name: hdidevice_getdevicestatus_003
+ * @tc.desc: Verify that the GetDeviceStatus function maps the BUSY device status successfully.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_getdevicestatus_003, TestSize.Level0)
+{
+    LOGE("GetDeviceStatus hdidevice_getdevicestatus_003");
+    OHOS::sptr<V1_0::INnrtDevice> device = V1_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV1_0> hdiDevice = std::make_unique<HDIDeviceV1_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetDeviceStatus(::testing::_))
+        .WillRepeatedly(Invoke([](V1_0::DeviceStatus& status) {
+            // Write the device status back through the reference parameter.
+            status = V1_0::DeviceStatus::BUSY;
+            return OH_NN_SUCCESS; // status code the wrapper treats as success
+        }));
+
+    DeviceStatus newDeviceStatus = AVAILABLE;
+    OH_NN_ReturnCode result = hdiDevice->GetDeviceStatus(newDeviceStatus);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+}
+
+/* *
+ * @tc.name: hdidevice_getdevicestatus_004
+ * @tc.desc: Verify that the GetDeviceStatus function maps the OFFLINE device status successfully.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_getdevicestatus_004, TestSize.Level0)
+{
+    LOGE("GetDeviceStatus hdidevice_getdevicestatus_004");
+    OHOS::sptr<V1_0::INnrtDevice> device = V1_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV1_0> hdiDevice = std::make_unique<HDIDeviceV1_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetDeviceStatus(::testing::_))
+        .WillRepeatedly(Invoke([](V1_0::DeviceStatus& status) {
+            // Write the device status back through the reference parameter.
+            status = V1_0::DeviceStatus::OFFLINE;
+            return OH_NN_SUCCESS; // status code the wrapper treats as success
+        }));
+
+    DeviceStatus newDeviceStatus = AVAILABLE;
+    OH_NN_ReturnCode result = hdiDevice->GetDeviceStatus(newDeviceStatus);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+}
+
+/* *
+ * @tc.name: hdidevice_getdevicestatus_005
+ * @tc.desc: Verify that the GetDeviceStatus function maps the UNKNOWN device status successfully.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_getdevicestatus_005, TestSize.Level0)
+{
+    LOGE("GetDeviceStatus hdidevice_getdevicestatus_005");
+    OHOS::sptr<V1_0::INnrtDevice> device = V1_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV1_0> hdiDevice = std::make_unique<HDIDeviceV1_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), GetDeviceStatus(::testing::_))
+        .WillRepeatedly(Invoke([](V1_0::DeviceStatus& status) {
+            // Write the device status back through the reference parameter.
+            status = V1_0::DeviceStatus::UNKNOWN;
+            return OH_NN_SUCCESS; // status code the wrapper treats as success
+        }));
+
+    DeviceStatus newDeviceStatus = AVAILABLE;
+    OH_NN_ReturnCode result = hdiDevice->GetDeviceStatus(newDeviceStatus);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+}
+
 /* *
  * @tc.name: hdidevice_getsupportedoperation_001
 * @tc.desc: Verify the GetSupportedOperation function return success.
@@ -630,6 +812,28 @@ HWTEST_F(HDIDeviceTest, hdidevice_preparemodel_004, TestSize.Level0)
     EXPECT_EQ(OH_NN_FAILED, result);
 }
 
+/* *
+ * @tc.name: hdidevice_preparemodel_005
+ * @tc.desc: Verify that the PrepareModel function returns operation forbidden for a raw model buffer.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_preparemodel_005, TestSize.Level0)
+{
+    LOGE("PrepareModel hdidevice_preparemodel_005");
+    OHOS::sptr<V1_0::MockIDevice> sp = OHOS::sptr<V1_0::MockIDevice>(new (std::nothrow) V1_0::MockIDevice());
+    EXPECT_NE(sp, nullptr);
+
+    std::unique_ptr<HDIDeviceV1_0> hdiDevice = std::make_unique<HDIDeviceV1_0>(sp);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    float dataArray[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    void* data = dataArray;
+    ModelConfig config;
+    std::shared_ptr<PreparedModel> preparedModel;
+    OH_NN_ReturnCode result = hdiDevice->PrepareModel(data, config, preparedModel);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, result);
+}
+
 /* *
  * @tc.name: hdidevice_preparemodelfrommodelcache_001
  * @tc.desc: Verify the PrepareModelFromModelCache function return success.
@@ -718,6 +922,157 @@ HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_003, TestSize.Level0)
     EXPECT_EQ(OH_NN_NULL_PTR, result);
 }
 
+/* *
+ * @tc.name: hdidevice_preparemodelfrommodelcache_004
+ * @tc.desc: Verify that the PrepareModelFromModelCache function returns unavailable device when the HDI call fails.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_004, TestSize.Level0)
+{
+    LOGE("PrepareModelFromModelCache hdidevice_preparemodelfrommodelcache_004");
+    size_t length = 100;
+    void *buffer = nullptr;
+    GetBuffer(buffer, length);
+
+    OHOS::sptr<V1_0::MockIDevice> sp = OHOS::sptr<V1_0::MockIDevice>(new (std::nothrow) V1_0::MockIDevice());
+    EXPECT_NE(sp, nullptr);
+
+    std::unique_ptr<HDIDeviceV1_0> hdiDevice = std::make_unique<HDIDeviceV1_0>(sp);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    std::vector<Buffer> modelCache = { { buffer, 100 } };
+    ModelConfig config;
+    config.mode = OH_NN_PERFORMANCE_LOW;
+    config.priority = OH_NN_PRIORITY_LOW;
+    OHOS::sptr<V1_0::IPreparedModel> preModel =
+        OHOS::sptr<V1_0::IPreparedModel>(new (std::nothrow) V1_0::MockIPreparedModel());
+    EXPECT_NE(preModel, nullptr);
+
+    std::shared_ptr<PreparedModel> preparedModel = std::make_shared<HDIPreparedModelV1_0>(preModel);
+
+    OHOS::sptr<V1_0::IPreparedModel> iPreparedModel =
+        OHOS::sptr<V1_0::IPreparedModel>(new (std::nothrow) V1_0::MockIPreparedModel);
+    EXPECT_CALL(*sp, PrepareModelFromModelCache(::testing::_, ::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<2>(iPreparedModel), ::testing::Return(HDF_FAILURE)));
+
+    bool isUpdatable = false;
+    OH_NN_ReturnCode result = hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel, isUpdatable);
+    EXPECT_EQ(OH_NN_UNAVAILABLE_DEVICE, result);
+}
+
+/* *
+ * @tc.name: hdidevice_preparemodelfrommodelcache_005
+ * @tc.desc: Verify that the PrepareModelFromModelCache function returns unavailable device when the HDI call fails.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_005, TestSize.Level0)
+{
+    LOGE("PrepareModelFromModelCache hdidevice_preparemodelfrommodelcache_005");
+    size_t length = 100;
+    void *buffer = nullptr;
+    GetBuffer(buffer, length);
+
+    OHOS::sptr<V1_0::MockIDevice> sp = OHOS::sptr<V1_0::MockIDevice>(new (std::nothrow) V1_0::MockIDevice());
+    EXPECT_NE(sp, nullptr);
+
+    std::unique_ptr<HDIDeviceV1_0> hdiDevice = std::make_unique<HDIDeviceV1_0>(sp);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    std::vector<Buffer> modelCache = { { buffer, 100 } };
+    ModelConfig config;
+    config.mode = OH_NN_PERFORMANCE_MEDIUM;
+    config.priority = OH_NN_PRIORITY_MEDIUM;
+    OHOS::sptr<V1_0::IPreparedModel> preModel =
+        OHOS::sptr<V1_0::IPreparedModel>(new (std::nothrow) V1_0::MockIPreparedModel());
+    EXPECT_NE(preModel, nullptr);
+
+    std::shared_ptr<PreparedModel> preparedModel = std::make_shared<HDIPreparedModelV1_0>(preModel);
+
+    OHOS::sptr<V1_0::IPreparedModel> iPreparedModel =
+        OHOS::sptr<V1_0::IPreparedModel>(new (std::nothrow) V1_0::MockIPreparedModel);
+    EXPECT_CALL(*sp, PrepareModelFromModelCache(::testing::_, ::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<2>(iPreparedModel), ::testing::Return(HDF_FAILURE)));
+
+    bool isUpdatable = false;
+    OH_NN_ReturnCode result = hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel, isUpdatable);
+    EXPECT_EQ(OH_NN_UNAVAILABLE_DEVICE, result);
+}
+
+/* *
+ * @tc.name: hdidevice_preparemodelfrommodelcache_006
+ * @tc.desc: Verify the PrepareModelFromModelCache function returns unavailable device when the HDI call fails.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_006, TestSize.Level0)
+{
+    LOGE("PrepareModelFromModelCache hdidevice_preparemodelfrommodelcache_006");
+    size_t length = 100;
+    void *buffer = nullptr;
+    GetBuffer(buffer, length);
+
+    OHOS::sptr<V1_0::MockIDevice> sp = OHOS::sptr<V1_0::MockIDevice>(new (std::nothrow) V1_0::MockIDevice());
+    EXPECT_NE(sp, nullptr);
+
+    std::unique_ptr<HDIDeviceV1_0> hdiDevice = std::make_unique<HDIDeviceV1_0>(sp);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    std::vector<Buffer> modelCache = { { buffer, 100 } };
+    ModelConfig config;
+    config.mode = OH_NN_PERFORMANCE_HIGH;
+    config.priority = OH_NN_PRIORITY_HIGH;
+    OHOS::sptr<V1_0::IPreparedModel> preModel =
+        OHOS::sptr<V1_0::IPreparedModel>(new (std::nothrow) V1_0::MockIPreparedModel());
+    EXPECT_NE(preModel, nullptr);
+
+    std::shared_ptr<PreparedModel> preparedModel = std::make_shared<HDIPreparedModelV1_0>(preModel);
+
+    OHOS::sptr<V1_0::IPreparedModel> iPreparedModel =
+        OHOS::sptr<V1_0::IPreparedModel>(new (std::nothrow) V1_0::MockIPreparedModel);
+    EXPECT_CALL(*sp, PrepareModelFromModelCache(::testing::_, ::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<2>(iPreparedModel), ::testing::Return(HDF_FAILURE)));
+
+    bool isUpdatable = false;
+    OH_NN_ReturnCode result = hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel, isUpdatable);
+    EXPECT_EQ(OH_NN_UNAVAILABLE_DEVICE, result);
+}
+
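+// Tests 004 through 007 run the same failure path and differ only in the
+// ModelConfig they pass, walking the performance and priority enums. Sketch
+// of the two configuration knobs (values as used in these tests):
+//
+//     ModelConfig config;
+//     config.mode = OH_NN_PERFORMANCE_HIGH;  // LOW / MEDIUM / HIGH / EXTREME
+//     config.priority = OH_NN_PRIORITY_HIGH; // LOW / MEDIUM / HIGH
+
+/* *
+ * @tc.name: hdidevice_preparemodelfrommodelcache_007
+ * @tc.desc: Verify the PrepareModelFromModelCache function returns unavailable device when the HDI call fails.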
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_007, TestSize.Level0)
+{
+    LOGE("PrepareModelFromModelCache hdidevice_preparemodelfrommodelcache_007");
+    size_t length = 100;
+    void *buffer = nullptr;
+    GetBuffer(buffer, length);
+
+    OHOS::sptr<V1_0::MockIDevice> sp = OHOS::sptr<V1_0::MockIDevice>(new (std::nothrow) V1_0::MockIDevice());
+    EXPECT_NE(sp, nullptr);
+
+    std::unique_ptr<HDIDeviceV1_0> hdiDevice = std::make_unique<HDIDeviceV1_0>(sp);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    std::vector<Buffer> modelCache = { { buffer, 100 } };
+    ModelConfig config;
+    config.mode = OH_NN_PERFORMANCE_EXTREME;
+    OHOS::sptr<V1_0::IPreparedModel> preModel =
+        OHOS::sptr<V1_0::IPreparedModel>(new (std::nothrow) V1_0::MockIPreparedModel());
+    EXPECT_NE(preModel, nullptr);
+
+    std::shared_ptr<PreparedModel> preparedModel = std::make_shared<HDIPreparedModelV1_0>(preModel);
+
+    OHOS::sptr<V1_0::IPreparedModel> iPreparedModel =
+        OHOS::sptr<V1_0::IPreparedModel>(new (std::nothrow) V1_0::MockIPreparedModel);
+    EXPECT_CALL(*sp, PrepareModelFromModelCache(::testing::_, ::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<2>(iPreparedModel), ::testing::Return(HDF_FAILURE)));
+
+    bool isUpdatable = false;
+    OH_NN_ReturnCode result = hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel, isUpdatable);
+    EXPECT_EQ(OH_NN_UNAVAILABLE_DEVICE, result);
+}
+
 /* *
  * @tc.name: hdidevice_allocatebuffer_001
  * @tc.desc: Verify the AllocateBuffer function return nullptr.
@@ -772,6 +1127,66 @@ HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_003, TestSize.Level0)
     EXPECT_EQ(nullptr, result);
 }
 
+/* *
+ * @tc.name: hdidevice_allocatebuffer_004
+ * @tc.desc: Verify the AllocateBuffer function returns invalid parameter in case of zero length.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_004, TestSize.Level0)
+{
+    LOGE("AllocateBuffer hdidevice_allocatebuffer_004");
+    OHOS::sptr<V1_0::INnrtDevice> device = V1_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV1_0> hdiDevice = std::make_unique<HDIDeviceV1_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    size_t length = 0;
+    int fd = 0;
+    OH_NN_ReturnCode result = hdiDevice->AllocateBuffer(length, fd);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, result);
+}
+
+/* *
+ * @tc.name: hdidevice_allocatebuffer_005
+ * @tc.desc: Verify the AllocateBuffer function returns memory error when the mock AllocateBuffer fails.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_005, TestSize.Level0)
+{
+    LOGE("AllocateBuffer hdidevice_allocatebuffer_005");
+    OHOS::sptr<V1_0::INnrtDevice> device = V1_0::INnrtDevice::Get(false);
+    EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(HDF_FAILURE));
+
+    std::unique_ptr<HDIDeviceV1_0> hdiDevice = std::make_unique<HDIDeviceV1_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    size_t length = 1;
+    int fd = 0;
+    OH_NN_ReturnCode result = hdiDevice->AllocateBuffer(length, fd);
+    EXPECT_EQ(OH_NN_MEMORY_ERROR, result);
+}
+
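+// This AllocateBuffer overload reports the shared memory through an fd
+// out-parameter rather than a pointer. A minimal usage sketch of the
+// allocate/release pairing these tests exercise (names as above):
+//
+//     int fd = 0;
+//     if (hdiDevice->AllocateBuffer(length, fd) == OH_NN_SUCCESS) {
+//         hdiDevice->ReleaseBuffer(fd, length); // release by fd and length
+//     }
+
+/* *
+ * @tc.name: hdidevice_allocatebuffer_006
+ * @tc.desc: Verify the AllocateBuffer function returns success when the mock AllocateBuffer succeeds.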
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_006, TestSize.Level0)
+{
+    LOGE("AllocateBuffer hdidevice_allocatebuffer_006");
+    OHOS::sptr<V1_0::INnrtDevice> device = V1_0::INnrtDevice::Get(false);
+    EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(HDF_SUCCESS));
+
+    std::unique_ptr<HDIDeviceV1_0> hdiDevice = std::make_unique<HDIDeviceV1_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    size_t length = 1;
+    int fd = 0;
+    OH_NN_ReturnCode result = hdiDevice->AllocateBuffer(length, fd);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+}
+
 /* *
  * @tc.name: hdidevice_releasebuffer_001
  * @tc.desc: Verify the ReleaseBuffer function validate buffer success.
@@ -873,6 +1288,178 @@ HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_005, TestSize.Level0)
     const auto &memoryManager = MemoryManager::GetInstance();
     memoryManager->UnMapMemory(buffer);
 }
+
+/* *
+ * @tc.name: hdidevice_releasebuffer_007
+ * @tc.desc: Verify the ReleaseBuffer function returns memory error when the mock ReleaseBuffer fails.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_007, TestSize.Level0)
+{
+    LOGE("ReleaseBuffer hdidevice_releasebuffer_007");
+    OHOS::sptr<V1_0::INnrtDevice> device = V1_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV1_0> hdiDevice = std::make_unique<HDIDeviceV1_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), ReleaseBuffer(::testing::_))
+        .WillRepeatedly(::testing::Return(HDF_FAILURE));
+
+    int fd = 0;
+    size_t length = 1;
+    OH_NN_ReturnCode ret = hdiDevice->ReleaseBuffer(fd, length);
+    EXPECT_EQ(OH_NN_MEMORY_ERROR, ret);
+}
+
+/* *
+ * @tc.name: hdidevice_releasebuffer_008
+ * @tc.desc: Verify the ReleaseBuffer function returns success when the mock ReleaseBuffer succeeds.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_008, TestSize.Level0)
+{
+    LOGE("ReleaseBuffer hdidevice_releasebuffer_008");
+    OHOS::sptr<V1_0::INnrtDevice> device = V1_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV1_0> hdiDevice = std::make_unique<HDIDeviceV1_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    EXPECT_CALL(*((V1_0::MockIDevice *)device.GetRefPtr()), ReleaseBuffer(::testing::_))
+        .WillRepeatedly(::testing::Return(HDF_SUCCESS));
+
+    int fd = 0;
+    size_t length = 1;
+    OH_NN_ReturnCode ret = hdiDevice->ReleaseBuffer(fd, length);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/* *
+ * @tc.name: hdidevice_allocatetensorbuffer_001
+ * @tc.desc: Verify the AllocateTensorBuffer function returns nullptr in case of zero length.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_allocatetensorbuffer_001, TestSize.Level0)
+{
+    LOGE("AllocateTensorBuffer hdidevice_allocatetensorbuffer_001");
+    OHOS::sptr<V1_0::INnrtDevice> device = V1_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV1_0> hdiDevice = std::make_unique<HDIDeviceV1_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    size_t length = 0;
+    std::shared_ptr<TensorDesc> tensor;
+    void* ret = hdiDevice->AllocateTensorBuffer(length, tensor);
+    EXPECT_EQ(nullptr, ret);
+}
+
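+// AllocateTensorBuffer is overloaded on the tensor argument (see the pair of
+// MOCK_METHOD2 entries in the mock device): one overload takes
+// std::shared_ptr<TensorDesc>, the other std::shared_ptr<NNTensor>. The split
+// between tests 001 and 002 follows that assumption; both overloads are
+// expected to reject a zero length:
+//
+//     std::shared_ptr<TensorDesc> desc;
+//     EXPECT_EQ(nullptr, hdiDevice->AllocateTensorBuffer(0, desc));
+
+/* *
+ * @tc.name: hdidevice_allocatetensorbuffer_002
+ * @tc.desc: Verify the AllocateTensorBuffer function returns nullptr in case of zero length.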
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_allocatetensorbuffer_002, TestSize.Level0)
+{
+    LOGE("AllocateTensorBuffer hdidevice_allocatetensorbuffer_002");
+    OHOS::sptr<V1_0::INnrtDevice> device = V1_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV1_0> hdiDevice = std::make_unique<HDIDeviceV1_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    size_t length = 0;
+    std::shared_ptr<NNTensor> tensor;
+    void* ret = hdiDevice->AllocateTensorBuffer(length, tensor);
+    EXPECT_EQ(nullptr, ret);
+}
+
+/* *
+ * @tc.name: hdidevice_prepareofflinemodel_001
+ * @tc.desc: Verify the PrepareOfflineModel function returns operation forbidden when the model is nullptr.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_prepareofflinemodel_001, TestSize.Level0)
+{
+    LOGE("PrepareOfflineModel hdidevice_prepareofflinemodel_001");
+    OHOS::sptr<V1_0::INnrtDevice> device = V1_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV1_0> hdiDevice = std::make_unique<HDIDeviceV1_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    ModelConfig config;
+    std::shared_ptr<PreparedModel> preparedModel;
+    OH_NN_ReturnCode ret = hdiDevice->PrepareOfflineModel(nullptr, config, preparedModel);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+}
+
+/* *
+ * @tc.name: hdidevice_prepareofflinemodel_002
+ * @tc.desc: Verify the PrepareOfflineModel function returns operation forbidden for a graph that is not an offline model.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_prepareofflinemodel_002, TestSize.Level0)
+{
+    LOGE("PrepareOfflineModel hdidevice_prepareofflinemodel_002");
+    OHOS::sptr<V1_0::INnrtDevice> device = V1_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV1_0> hdiDevice = std::make_unique<HDIDeviceV1_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    std::shared_ptr<mindspore::lite::LiteGraph> model = std::make_shared<mindspore::lite::LiteGraph>();
+    mindspore::lite::LiteGraph::Node node;
+    mindspore::lite::LiteGraph::Node* testNode = &node;
+    model->all_nodes_.emplace_back(testNode);
+    ModelConfig config;
+    std::shared_ptr<PreparedModel> preparedModel;
+    OH_NN_ReturnCode ret = hdiDevice->PrepareOfflineModel(model, config, preparedModel);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+}
+
+/* *
+ * @tc.name: hdidevice_prepareofflinemodel_003
+ * @tc.desc: Verify the PrepareOfflineModel function returns operation forbidden for a graph that is not an offline model.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_prepareofflinemodel_003, TestSize.Level0)
+{
+    LOGE("PrepareOfflineModel hdidevice_prepareofflinemodel_003");
+    OHOS::sptr<V1_0::INnrtDevice> device = V1_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV1_0> hdiDevice = std::make_unique<HDIDeviceV1_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    std::shared_ptr<mindspore::lite::LiteGraph> model = std::make_shared<mindspore::lite::LiteGraph>();
+    mindspore::lite::LiteGraph::Node node;
+    uint32_t indice = 0;
+    node.input_indices_.emplace_back(indice);
+    node.input_indices_.emplace_back(indice);
+    mindspore::lite::LiteGraph::Node* testNode = &node;
+    model->all_nodes_.emplace_back(testNode);
+    model->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create());
+    model->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create());
+    ModelConfig config;
+    std::shared_ptr<PreparedModel> preparedModel;
+    OH_NN_ReturnCode ret = hdiDevice->PrepareOfflineModel(model, config, preparedModel);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+}
+
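+// The offline-model tests hand-build a minimal LiteGraph. Sketch of the
+// wiring tests 003 and 004 rely on, mirroring the test bodies (a node whose
+// two input indices point at two freshly created MindIR tensors):
+//
+//     mindspore::lite::LiteGraph::Node node;
+//     node.input_indices_ = {0, 0};
+//     model->all_nodes_.emplace_back(&node);
+//     model->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create());
+//     model->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create());
+
+/* *
+ * @tc.name: hdidevice_prepareofflinemodel_004
+ * @tc.desc: Verify the PrepareOfflineModel function returns operation forbidden for a graph that is not an offline model.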
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_prepareofflinemodel_004, TestSize.Level0)
+{
+    LOGE("PrepareOfflineModel hdidevice_prepareofflinemodel_004");
+    OHOS::sptr<V1_0::INnrtDevice> device = V1_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV1_0> hdiDevice = std::make_unique<HDIDeviceV1_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    std::shared_ptr<mindspore::lite::LiteGraph> model = std::make_shared<mindspore::lite::LiteGraph>();
+    mindspore::lite::LiteGraph::Node node;
+    uint32_t indice = 0;
+    node.input_indices_.emplace_back(indice);
+    node.input_indices_.emplace_back(indice);
+    mindspore::lite::LiteGraph::Node* testNode = &node;
+    model->all_nodes_.emplace_back(testNode);
+    model->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create());
+    model->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create());
+    ModelConfig config;
+    std::shared_ptr<PreparedModel> preparedModel;
+    OH_NN_ReturnCode ret = hdiDevice->PrepareOfflineModel(model, config, preparedModel);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+}
 } // namespace UnitTest
 } // namespace NeuralNetworkRuntime
-} // namespace OHOS
+} // namespace OHOS
\ No newline at end of file
diff --git a/test/unittest/components/v1_0/hdi_prepared_model/hdi_prepared_model_test.cpp b/test/unittest/components/v1_0/hdi_prepared_model/hdi_prepared_model_test.cpp
index ae6ef25abae2a638a9ab1f0626127ad264e36a16..b013e43642f95229ae5996a507753da7368e8982 100644
--- a/test/unittest/components/v1_0/hdi_prepared_model/hdi_prepared_model_test.cpp
+++ b/test/unittest/components/v1_0/hdi_prepared_model/hdi_prepared_model_test.cpp
@@ -26,6 +26,8 @@
 #include "transform.h"
 #include "test/unittest/common/v1_0/mock_idevice.h"
 #include "test/unittest/common/file_utils.h"
+#include "tensor.h"
+#include "nntensor.h"
 
 using namespace testing;
 using namespace testing::ext;
@@ -38,6 +40,21 @@ protected:
     void GetBuffer(void*& buffer, size_t length);
     void InitTensor(std::vector<IOTensor>& inputs, void* buffer, size_t length);
     OH_NN_ReturnCode Run(std::vector<IOTensor>& inputs);
+    OH_NN_ReturnCode RunFail(std::vector<IOTensor>& inputs);
+};
+
+class MockTensor : public Tensor {
+public:
+    MOCK_METHOD1(SetTensorDesc, OH_NN_ReturnCode(const TensorDesc*));
+    MOCK_METHOD0(CreateData, OH_NN_ReturnCode());
+    MOCK_METHOD1(CreateData, OH_NN_ReturnCode(size_t));
+    MOCK_METHOD3(CreateData, OH_NN_ReturnCode(int, size_t, size_t));
+    MOCK_CONST_METHOD0(GetTensorDesc, TensorDesc*());
+    MOCK_CONST_METHOD0(GetData, void*());
+    MOCK_CONST_METHOD0(GetFd, int());
+    MOCK_CONST_METHOD0(GetSize, size_t());
+    MOCK_CONST_METHOD0(GetOffset, size_t());
+    MOCK_CONST_METHOD0(GetBackendID, size_t());
 };
 
 void HDIPreparedModelTest::GetBuffer(void*& buffer, size_t length)
@@ -204,6 +221,28 @@ HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_exportmodelcache_004, TestSize.L
     EXPECT_EQ(OH_NN_SAVE_CACHE_EXCEPTION, result);
 }
 
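+// MockTensor above implements the abstract Tensor interface so a later Run
+// test can feed HDIPreparedModel a tensor that is not an NNTensor2_0. Sketch
+// of how such a mock crosses the C API boundary (see hidpreparedmodel_run_020
+// below for the real usage):
+//
+//     MockTensor* tensorImpl = new (std::nothrow) MockTensor();
+//     NN_Tensor* tensor = reinterpret_cast<NN_Tensor*>(tensorImpl);
+//     inputs.emplace_back(tensor);
+
+/**
+ * @tc.name: hidpreparedmodel_exportmodelcache_005
+ * @tc.desc: Verify the ExportModelCache function returns invalid parameter when the cache vector is not empty.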
+ * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_exportmodelcache_005, TestSize.Level0) +{ + LOGE("ExportModelCache hidpreparedmodel_exportmodelcache_005"); + std::vector bufferVect = {{100, 100, 0, 100}}; + OHOS::sptr mockPreparedModel = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + EXPECT_NE(mockPreparedModel, nullptr); + + std::unique_ptr preparedModel = std::make_unique(mockPreparedModel); + + std::vector modelCache; + Buffer buffer; + modelCache.emplace_back(buffer); + OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + /** * @tc.name: hidpreparedmodel_run_001 * @tc.desc: Verify the Run function return invalid parameter. @@ -339,6 +378,417 @@ HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_005, TestSize.Level0) const auto& memoryManager = MemoryManager::GetInstance(); memoryManager->UnMapMemory(buffer); } + +/** + * @tc.name: hidpreparedmodel_run_006 + * @tc.desc: Verify the Run function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_006, TestSize.Level0) +{ + LOGE("Run hidpreparedmodel_run_006"); + const size_t length = 100; + void* buffer = nullptr; + GetBuffer(buffer, length); + + std::vector inputs; + std::vector outputs; + InitTensor(inputs, buffer, length); + InitTensor(outputs, buffer, length); + + const int vvPosition = 2; + const int vPosition = 3; + std::vector> outputsDims {{0}}; + std::vector isOutputBufferEnough {}; + + OHOS::sptr sp = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr preparedModel = std::make_unique(sp); + EXPECT_CALL(*sp, Run(::testing::_, ::testing::_, ::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll( + ::testing::SetArgReferee(outputsDims), + ::testing::SetArgReferee(isOutputBufferEnough), + ::testing::Return(HDF_SUCCESS)) + ); + + OH_NN_ReturnCode result = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough); + EXPECT_EQ(OH_NN_SUCCESS, result); + + const auto& memoryManager = MemoryManager::GetInstance(); + memoryManager->UnMapMemory(buffer); +} + +OH_NN_ReturnCode HDIPreparedModelTest::RunFail(std::vector& inputs) +{ + std::vector outputs; + std::vector> outputsDims {}; + std::vector isOutputBufferEnough {}; + + OHOS::sptr sp = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr preparedModel = std::make_unique(sp); + + OH_NN_ReturnCode result = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough); + return result; +} + +/** + * @tc.name: hidpreparedmodel_run_007 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_007, TestSize.Level0) +{ + LOGE("Run hidpreparedmodel_run_007"); + std::vector inputs; + IOTensor inputTensor; + inputTensor.dataType = OH_NN_BOOL; + inputs.emplace_back(std::move(inputTensor)); + + OH_NN_ReturnCode result = RunFail(inputs); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_008 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_008, TestSize.Level0) +{ + LOGE("Run hidpreparedmodel_run_008"); + std::vector inputs; + IOTensor inputTensor; + inputTensor.dataType = OH_NN_INT16; + inputs.emplace_back(std::move(inputTensor)); + + OH_NN_ReturnCode result = RunFail(inputs); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_009 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_009, TestSize.Level0) +{ + LOGE("Run hidpreparedmodel_run_009"); + std::vector inputs; + IOTensor inputTensor; + inputTensor.dataType = OH_NN_INT64; + inputs.emplace_back(std::move(inputTensor)); + + OH_NN_ReturnCode result = RunFail(inputs); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_010 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_010, TestSize.Level0) +{ + LOGE("Run hidpreparedmodel_run_010"); + std::vector inputs; + IOTensor inputTensor; + inputTensor.dataType = OH_NN_UINT8; + inputs.emplace_back(std::move(inputTensor)); + + OH_NN_ReturnCode result = RunFail(inputs); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_011 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_011, TestSize.Level0) +{ + LOGE("Run hidpreparedmodel_run_011"); + std::vector inputs; + IOTensor inputTensor; + inputTensor.dataType = OH_NN_UINT16; + inputs.emplace_back(std::move(inputTensor)); + + OH_NN_ReturnCode result = RunFail(inputs); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_012 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_012, TestSize.Level0) +{ + LOGE("Run hidpreparedmodel_run_012"); + std::vector inputs; + IOTensor inputTensor; + inputTensor.dataType = OH_NN_UINT32; + inputs.emplace_back(std::move(inputTensor)); + + OH_NN_ReturnCode result = RunFail(inputs); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_013 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_013, TestSize.Level0) +{ + LOGE("Run hidpreparedmodel_run_013"); + std::vector inputs; + IOTensor inputTensor; + inputTensor.dataType = OH_NN_UINT64; + inputs.emplace_back(std::move(inputTensor)); + + OH_NN_ReturnCode result = RunFail(inputs); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_014 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_014, TestSize.Level0) +{ + LOGE("Run hidpreparedmodel_run_014"); + std::vector inputs; + IOTensor inputTensor; + inputTensor.dataType = OH_NN_FLOAT16; + inputs.emplace_back(std::move(inputTensor)); + + OH_NN_ReturnCode result = RunFail(inputs); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_015 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_015, TestSize.Level0) +{ + LOGE("Run hidpreparedmodel_run_015"); + std::vector inputs; + IOTensor inputTensor; + inputTensor.dataType = OH_NN_FLOAT32; + inputs.emplace_back(std::move(inputTensor)); + + OH_NN_ReturnCode result = RunFail(inputs); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_016 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_016, TestSize.Level0) +{ + LOGE("Run hidpreparedmodel_run_016"); + std::vector inputs; + IOTensor inputTensor; + inputTensor.dataType = OH_NN_FLOAT64; + inputTensor.format = OH_NN_FORMAT_NHWC; + inputs.emplace_back(std::move(inputTensor)); + + OH_NN_ReturnCode result = RunFail(inputs); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_017 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_017, TestSize.Level0) +{ + LOGE("Run hidpreparedmodel_run_017"); + std::vector inputs; + IOTensor inputTensor; + inputTensor.dataType = OH_NN_UNKNOWN; + inputTensor.format = OH_NN_FORMAT_NONE; + inputs.emplace_back(std::move(inputTensor)); + + OH_NN_ReturnCode result = RunFail(inputs); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_018 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_018, TestSize.Level0) +{ + LOGE("Run hidpreparedmodel_run_018"); + std::vector inputs; + IOTensor inputTensor; + inputTensor.dataType = OH_NN_INT32; + inputs.emplace_back(std::move(inputTensor)); + + OH_NN_ReturnCode result = RunFail(inputs); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_019 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_019, TestSize.Level0) +{ + LOGE("Run hidpreparedmodel_run_019"); + std::vector inputs; + std::vector outputs; + std::vector> outputsDims {}; + std::vector isOutputBufferEnough {}; + + inputs.emplace_back(nullptr); + + OHOS::sptr sp = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr preparedModel = std::make_unique(sp); + OH_NN_ReturnCode ret = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/** + * @tc.name: hidpreparedmodel_run_020 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_020, TestSize.Level0) +{ + LOGE("Run hidpreparedmodel_run_020"); + std::vector inputs; + std::vector outputs; + std::vector> outputsDims {}; + std::vector isOutputBufferEnough {}; + + MockTensor* tensorImpl = new (std::nothrow) MockTensor(); + NN_Tensor* tensor = reinterpret_cast(tensorImpl); + inputs.emplace_back(tensor); + + OHOS::sptr sp = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr preparedModel = std::make_unique(sp); + OH_NN_ReturnCode ret = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough); + EXPECT_EQ(OH_NN_FAILED, ret); + + testing::Mock::AllowLeak(tensorImpl); +} + +/** + * @tc.name: hidpreparedmodel_run_021 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_021, TestSize.Level0) +{ + LOGE("Run hidpreparedmodel_run_021"); + std::vector inputs; + std::vector outputs; + std::vector> outputsDims {}; + std::vector isOutputBufferEnough {}; + + size_t deviceId = 1; + NNTensor2_0* tensorImpl = new (std::nothrow) NNTensor2_0(deviceId); + TensorDesc TensorDesc; + + tensorImpl->SetTensorDesc(&TensorDesc); + NN_Tensor* tensor = reinterpret_cast(tensorImpl); + inputs.emplace_back(tensor); + + OHOS::sptr sp = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr preparedModel = std::make_unique(sp); + OH_NN_ReturnCode ret = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/** + * @tc.name: hidpreparedmodel_run_022 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_022, TestSize.Level0) +{ + LOGE("Run hidpreparedmodel_run_022"); + std::vector inputs; + std::vector outputs; + std::vector> outputsDims {}; + std::vector isOutputBufferEnough {}; + + size_t backendId = 1; + NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId); + EXPECT_NE(nullptr, nnTensor); + + TensorDesc tensorDesc; + char name = 'a'; + tensorDesc.SetName(&name); + tensorDesc.SetDataType(OH_NN_UINT32); + tensorDesc.SetFormat(OH_NN_FORMAT_NCHW); + int32_t expectDim[2] = {3, 3}; + int32_t* ptr = expectDim; + uint32_t dimensionCount = 2; + tensorDesc.SetShape(ptr, dimensionCount); + + OH_NN_ReturnCode retSetTensorDesc = nnTensor->SetTensorDesc(&tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, retSetTensorDesc); + + nnTensor->SetSize(200); + nnTensor->SetOffset(0); + float m_dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; + void* buffer = m_dataArry; + nnTensor->SetData(buffer); + + NN_Tensor* tensor = reinterpret_cast(nnTensor); + inputs.emplace_back(tensor); + + OHOS::sptr sp = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr preparedModel = std::make_unique(sp); + OH_NN_ReturnCode ret = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough); + EXPECT_EQ(OH_NN_UNAVAILABLE_DEVICE, ret); +} + +/** + * @tc.name: hidpreparedmodel_getmodelid_001 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_getmodelid_001, TestSize.Level0) +{ + LOGE("GetModelID hidpreparedmodel_getmodelid_001"); + OHOS::sptr sp = + OHOS::sptr(new (std::nothrow) V1_0::MockIPreparedModel()); + EXPECT_NE(sp, nullptr); + + uint32_t index = 0; + std::unique_ptr preparedModel = std::make_unique(sp); + OH_NN_ReturnCode ret = preparedModel->GetModelID(index); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} } // namespace UnitTest } // namespace NeuralNetworkRuntime } // namespace OHOS diff --git a/test/unittest/components/v1_0/inner_model/inner_model_test.cpp b/test/unittest/components/v1_0/inner_model/inner_model_test.cpp index 4ed1e4f2dc25c9b619a643c4bf64ac35bd52de2d..47660a058e143354254d947d7f4975908363fdc5 100644 --- a/test/unittest/components/v1_0/inner_model/inner_model_test.cpp +++ b/test/unittest/components/v1_0/inner_model/inner_model_test.cpp @@ -21,10 +21,21 @@ #include "nn_tensor.h" #include "inner_model.h" +#include + +#include "lite_graph_to_hdi_model_v2_1.h" +#include "device.h" +#include "interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h" +#include "nnbackend.h" +#include "ops_registry.h" +#include "transform.h" + using namespace testing; using namespace testing::ext; using namespace OHOS::NeuralNetworkRuntime; +namespace MSLITE = mindspore::lite; + namespace NNRT { namespace UnitTest { class InnerModelTest : public testing::Test { @@ -188,6 +199,62 @@ HWTEST_F(InnerModelTest, inner_model_construct_nntensor_from_litegraph_005, Test .BuildFromLiteGraph(liteGraph, extensionConfig)); } +/** + * @tc.name: inner_model_buildfrommetagraph_001 + * @tc.desc: Verify the nntensor build failed nullptr return of the construct_nntensor_from_litegraph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_buildfrommetagraph_001, TestSize.Level1) +{ + LOGE("BuildFromMetaGraph inner_model_buildfrommetagraph_001"); + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + m_dimInput = {3, -3}; + + SetLiteGraph(liteGraph); + + ExtensionConfig extensionConfig; + InnerModel InnerModel; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, InnerModel.BuildFromMetaGraph(nullptr, extensionConfig)); +} + +/** + * @tc.name: inner_model_buildfrommetagraph_002 + * @tc.desc: Verify the nntensor build failed nullptr return of the construct_nntensor_from_litegraph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_buildfrommetagraph_002, TestSize.Level1) +{ + LOGE("BuildFromMetaGraph inner_model_buildfrommetagraph_002"); + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + m_dimInput = {3, -3}; + + SetLiteGraph(liteGraph); + + ExtensionConfig extensionConfig; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_innerModelTest.BuildFromMetaGraph(liteGraph, extensionConfig)); +} + +/** + * @tc.name: inner_model_buildfrommetagraph_003 + * @tc.desc: Verify the nntensor build failed nullptr return of the construct_nntensor_from_litegraph function + * @tc.type: FUNC + */ +HWTEST_F(InnerModelTest, inner_model_buildfrommetagraph_003, TestSize.Level1) +{ + LOGE("BuildFromMetaGraph inner_model_buildfrommetagraph_003"); + mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph(); + EXPECT_NE(nullptr, liteGraph); + m_dimInput = {3, -3}; + + SetLiteGraph(liteGraph); + + ExtensionConfig extensionConfig; + InnerModel InnerModel; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, 
+        m_innerModelTest.BuildFromMetaGraph(liteGraph, extensionConfig));
+}
+
 /**
  * @tc.name: inner_model_build_from_lite_graph_001
  * @tc.desc: Verify the litegraph is nullptr of the build_from_lite_graph function
@@ -884,3 +951,2238 @@ HWTEST_F(InnerModelTest, inner_model_get_supported_operation_004, TestSize.Level
 }
 } // namespace UnitTest
 } // namespace NNRT
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace NNRt_V2_1 {
+namespace UnitTest {
+class LiteGraphToHDIModelTest : public testing::Test {
+public:
+    LiteGraphToHDIModelTest() = default;
+    ~LiteGraphToHDIModelTest() = default;
+public:
+    std::vector<uint32_t> m_inputs{0, 1};
+    std::vector<uint32_t> m_outputs{2};
+    std::vector<uint32_t> m_param{3};
+    std::vector<int32_t> m_input_dim{3, 3};
+    std::vector<int32_t> m_output_dim{3, 3};
+    std::vector<int32_t> m_param_dim{};
+};
+
+MSLITE::LiteGraph::Node* getNode(void* primitive)
+{
+    MSLITE::LiteGraph::Node* node = new (std::nothrow) MSLITE::LiteGraph::Node();
+    if (node == nullptr) {
+        return nullptr;
+    }
+    node->name_ = "NNRt_SubGraph";
+    node->quant_type_ = 1;
+    node->primitive_ = primitive;
+    node->input_indices_ = {1, 1, 1, 1};
+    node->output_indices_ = {1, 1, 1, 1};
+    return node;
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_001
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function returns nullptr for a nullptr LiteGraph.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_001, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_001");
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model * model = LiteGraph_To_HDIModel(nullptr, tensorBuffer);
+    EXPECT_EQ(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_002
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function returns nullptr for an empty LiteGraph.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_002, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_002");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {0, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_EQ(nullptr, model);
+
+    uint8_t *mmapPtr = static_cast<uint8_t *>(mmap(nullptr,
+        tensorBuffer.bufferSize, PROT_READ | PROT_WRITE, MAP_SHARED, tensorBuffer.fd, 0));
+    EXPECT_EQ(MAP_FAILED, mmapPtr);
+}
+
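+// Every conversion test in this fixture follows the same recipe: wrap one
+// MindIR primitive in a node via getNode(), hand the graph to
+// LiteGraph_To_HDIModel, and check the returned HDI model. A minimal sketch
+// of the recipe (the Cast primitive is just an arbitrary example):
+//
+//     std::shared_ptr<MSLITE::LiteGraph> graph = std::make_shared<MSLITE::LiteGraph>();
+//     void* primitive = mindspore::lite::MindIR_Cast_CreatePrimitive();
+//     graph->all_nodes_.emplace_back(getNode(primitive));
+//     OHOS::HDI::Nnrt::V2_1::SharedBuffer buffer {-1, 0, 0, 0};
+//     EXPECT_NE(nullptr, LiteGraph_To_HDIModel(graph.get(), buffer));
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_003
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function returns a valid model for a graph with one subgraph.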
+ * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_003, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_003"); + std::shared_ptr liteGraph = std::make_shared(); + MSLITE::LiteGraph::SubGraph* subGraph = new (std::nothrow) MSLITE::LiteGraph::SubGraph(); + subGraph->name_ = "NNRt_SubGraph"; + subGraph->input_indices_ = {1, 1, 1, 1}; + subGraph->output_indices_ = {1, 1, 1, 1}; + subGraph->node_indices_ = {1, 1, 1, 1}; + + void* tp = MSLITE::MindIR_Tensor_Create(); + + liteGraph.get()->all_tensors_.emplace_back(tp); + liteGraph.get()->sub_graphs_.emplace_back(subGraph); + + OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 1, 1, 1}; + + OHOS::HDI::Nnrt::V2_1::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); + + uint8_t *mmapPtr = static_cast(mmap(nullptr, + tensorBuffer.bufferSize, PROT_READ | PROT_WRITE, MAP_SHARED, tensorBuffer.fd, 0)); + EXPECT_EQ(MAP_FAILED, mmapPtr); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_004 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_004, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_004"); + std::shared_ptr liteGraph = std::make_shared(); + liteGraph.get()->all_nodes_.emplace_back(nullptr); + OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_1::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_EQ(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_005 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_005, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_005"); + std::shared_ptr liteGraph = std::make_shared(); + MSLITE::LiteGraph::Node* node = new(std::nothrow) MSLITE::LiteGraph::Node(); + liteGraph.get()->all_nodes_.emplace_back(node); + OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_1::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_EQ(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_006 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_006, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_006"); + std::shared_ptr liteGraph = std::make_shared(); + + float alpha {0.0f}; + float minVal {0.0f}; + float maxVal {0.0f}; + bool approximate {false}; + mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_ABS}; + + void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, alpha, + minVal, maxVal, approximate); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_1::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_007 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_007, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_007"); + std::shared_ptr liteGraph = std::make_shared(); + + int8_t num = 1; + int8_t* fuseData = # + mindspore::lite::ActivationType type = NNToMS::TransfromFusionType(static_cast(*fuseData)); + void* primitive = mindspore::lite::MindIR_AddFusion_CreatePrimitive(type); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_1::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_008 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_008, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_008"); + std::shared_ptr liteGraph = std::make_shared(); + + int64_t keepDims {0}; + void* primitive = mindspore::lite::MindIR_All_CreatePrimitive(keepDims); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_1::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_009 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_009, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_009"); + std::shared_ptr liteGraph = std::make_shared(); + + int64_t axis {-1}; + int64_t topK {1}; + bool keepDims {false}; + bool outMaxValue {false}; + void* primitive = mindspore::lite::MindIR_ArgMaxFusion_CreatePrimitive(axis, topK, keepDims, outMaxValue); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_1::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_010 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_010, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_010"); + std::shared_ptr liteGraph = std::make_shared(); + + int64_t summarize {0}; + void* primitive = mindspore::lite::MindIR_Assert_CreatePrimitive(summarize); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_1::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_011 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_011, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_011"); + std::shared_ptr liteGraph = std::make_shared(); + + std::vector kernelSize; + std::vector pad; + std::vector strides; + mindspore::lite::PadMode padMode {mindspore::lite::PAD_MODE_PAD}; + mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + mindspore::lite::RoundMode roundMode {mindspore::lite::ROUND_MODE_FLOOR}; + mindspore::lite::Format format {mindspore::lite::FORMAT_NCHW}; + bool global {false}; + void* primitive = mindspore::lite::MindIR_AvgPoolFusion_CreatePrimitive(kernelSize, strides, pad, + padMode, roundMode, format, global, activationType); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_1::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_012 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_012, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_012"); + std::shared_ptr liteGraph = std::make_shared(); + + std::vector blockSize; + std::vector> crops; + void* primitive = mindspore::lite::MindIR_BatchToSpaceND_CreatePrimitive(blockSize, crops); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_1::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_013 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_013, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_013"); + std::shared_ptr liteGraph = std::make_shared(); + + float epsilon {0.0001f}; + void* primitive = mindspore::lite::MindIR_FusedBatchNorm_CreatePrimitive(epsilon); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_1::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_014 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_014, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_014"); + std::shared_ptr liteGraph = std::make_shared(); + + float epsilon {0.0001f}; + void* primitive = mindspore::lite::MindIR_FusedBatchNorm_CreatePrimitive(epsilon); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_1::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_015 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_015, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_015"); + std::shared_ptr liteGraph = std::make_shared(); + + void* primitive = mindspore::lite::MindIR_BiasAdd_CreatePrimitive(); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_1::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_016 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_016, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_016"); + std::shared_ptr liteGraph = std::make_shared(); + + std::vector shape; + void* primitive = mindspore::lite::MindIR_BroadcastTo_CreatePrimitive(shape); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_1::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_017 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_017, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_017"); + std::shared_ptr liteGraph = std::make_shared(); + + void* primitive = mindspore::lite::MindIR_Cast_CreatePrimitive(); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_1::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_018 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_018, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_018"); + std::shared_ptr liteGraph = std::make_shared(); + + void* primitive = mindspore::lite::MindIR_Ceil_CreatePrimitive(); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_1::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_019 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_019, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_019"); + std::shared_ptr liteGraph = std::make_shared(); + + float max {0.0f}; + float min {0.0f}; + void* primitive = mindspore::lite::MindIR_Clip_CreatePrimitive(max, min); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_1::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_020 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_020, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_020"); + std::shared_ptr liteGraph = std::make_shared(); + + int64_t axis{0}; + void* primitive = mindspore::lite::MindIR_Concat_CreatePrimitive(axis); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_1::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_021 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_021, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_021"); + std::shared_ptr liteGraph = std::make_shared(); + + int64_t dataType {0}; + std::vector value; + void* primitive = mindspore::lite::MindIR_ConstantOfShape_CreatePrimitive(dataType, value); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_1::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_022 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_022, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_022"); + std::shared_ptr liteGraph = std::make_shared(); + + int64_t group {1}; + int64_t inChannel {0}; + int64_t outChannel {0}; + std::vector kernelSize; + std::vector strides; + std::vector padList; + std::vector dilation; + std::vector outputPaddings; + mindspore::lite::PadMode padMode{mindspore::lite::PAD_MODE_PAD}; + mindspore::lite::ActivationType activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + void* primitive = MindIR_Conv2dTransposeFusion_CreatePrimitive(kernelSize, + strides, dilation, padMode, padList, group, inChannel, outChannel, + activationType, outputPaddings); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_1::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_023 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_023, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_023"); + std::shared_ptr liteGraph = std::make_shared(); + + void* primitive = mindspore::lite::MindIR_Cos_CreatePrimitive(); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_1::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_024 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_024, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_024"); + std::shared_ptr liteGraph = std::make_shared(); + + int64_t axis {0}; + std::vector offset; + void* primitive = mindspore::lite::MindIR_Crop_CreatePrimitive(axis, offset); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_1::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_025 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_025, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_025"); + std::shared_ptr liteGraph = std::make_shared(); + + int64_t blockSize {0}; + std::string mode; + mindspore::lite::Format format {mindspore::lite::FORMAT_NCHW}; + void* primitive = mindspore::lite::MindIR_DepthToSpace_CreatePrimitive(blockSize, format, mode); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_1::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_026 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_026, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_026"); + std::shared_ptr liteGraph = std::make_shared(); + + int64_t inputSize {0}; + std::vector scale; + float nmsIoUThreshold {0.0f}; + float nmsScoreThreshold {0.0f}; + int64_t maxDetections {0}; + int64_t detectionsPerClass {0}; + int64_t maxClassesPerDetection {0}; + int64_t numClasses {0}; + bool useRegularNms {false}; + bool outQuantized {false}; + mindspore::lite::Format format {mindspore::lite::FORMAT_NCHW}; + void* primitive = mindspore::lite::MindIR_DetectionPostProcess_CreatePrimitive(format, inputSize, scale, + nmsIoUThreshold, nmsScoreThreshold, maxDetections, detectionsPerClass, maxClassesPerDetection, + numClasses, useRegularNms, outQuantized); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_1::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_027 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_027, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_027"); + std::shared_ptr liteGraph = std::make_shared(); + + mindspore::lite::EltwiseMode mode {mindspore::lite::ELTWISE_MODE_PROD}; + void* primitive = mindspore::lite::MindIR_Eltwise_CreatePrimitive(mode); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_1::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_028 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_028, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_028"); + std::shared_ptr liteGraph = std::make_shared(); + + void* primitive = mindspore::lite::MindIR_Equal_CreatePrimitive(); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_1::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_029 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_029, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_029"); + std::shared_ptr liteGraph = std::make_shared(); + + void* primitive = mindspore::lite::MindIR_Erf_CreatePrimitive(); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_1::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_030 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_030, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_030"); + std::shared_ptr liteGraph = std::make_shared(); + + float base {-1.0f}; + float scale {1.0f}; + float shift {0.0f}; + void* primitive = mindspore::lite::MindIR_ExpFusion_CreatePrimitive(base, scale, shift); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_1::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_031 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_031, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_031");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_ExpandDims_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_032
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a Fill primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_032, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_032");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Fill_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_033
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a Flatten primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_033, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_033");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    int64_t axis {1};
+    void* primitive = mindspore::lite::MindIR_Flatten_CreatePrimitive(axis);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_034
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a Floor primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_034, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_034");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Floor_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_035
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a FullConnection primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_035, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_035");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    bool hasBias {false};
+    bool useAxis {false};
+    int64_t axis {0};
+    mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION};
+    void* primitive = mindspore::lite::MindIR_FullConnection_CreatePrimitive(hasBias, useAxis,
+        axis, activationType);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_036
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a Gather primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_036, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_036");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Gather_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_037
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a GatherNd primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_037, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_037");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_GatherNd_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_038
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a GELU activation primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_038, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_038");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_GELU};
+    float alpha {0.0f};
+    float minVal {0.0f};
+    float maxVal {0.0f};
+    bool approximate {false};
+    void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType,
+        alpha, minVal, maxVal, approximate);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_039
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a Greater primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_039, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_039");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Greater_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_040
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a GreaterEqual primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_040, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_040");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_GreaterEqual_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_041
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for an HSigmoid activation primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_041, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_041");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_HSIGMOID};
+    float alpha {0.0f};
+    float minVal {0.0f};
+    float maxVal {0.0f};
+    bool approximate {false};
+    void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType,
+        alpha, minVal, maxVal, approximate);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_042
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for an InstanceNorm primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_042, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_042");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    float epsilon {0.0f};
+    void* primitive = mindspore::lite::MindIR_InstanceNorm_CreatePrimitive(epsilon);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_043
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for an InstanceNorm primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_043, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_043");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    float epsilon {0.0f};
+    void* primitive = mindspore::lite::MindIR_InstanceNorm_CreatePrimitive(epsilon);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_044
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for an L2NormalizeFusion primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_044, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_044");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    std::vector<int64_t> axis;
+    float epsilon {1e-6f};
+    mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION};
+    void* primitive = mindspore::lite::MindIR_L2NormalizeFusion_CreatePrimitive(axis, epsilon, activationType);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_045
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a LayerNormFusion primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_045, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_045");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    int64_t beginNormAxis {1};
+    float epsilon {1e-7f};
+    bool elementwiseAffine {true};
+    int64_t beginParamsAxis {1};
+    void* primitive = mindspore::lite::MindIR_LayerNormFusion_CreatePrimitive(beginNormAxis,
+        epsilon, elementwiseAffine, beginParamsAxis);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_046
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a LeakyReLU activation primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_046, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_046");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    float alpha {0.0f};
+    float minVal {0.0f};
+    float maxVal {0.0f};
+    bool approximate {false};
+    mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_LEAKY_RELU};
+    void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, alpha,
+        minVal, maxVal, approximate);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_047
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a Less primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_047, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_047");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Less_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_048
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a LessEqual primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_048, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_048");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_LessEqual_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_049
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a Log primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_049, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_049");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Log_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_050
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a LogSoftmax primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_050, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_050");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    int64_t axis {0};
+    void* primitive = mindspore::lite::MindIR_LogSoftmax_CreatePrimitive(axis);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_051
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a LogicalAnd primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_051, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_051");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_LogicalAnd_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_052
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a LogicalNot primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_052, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_052");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_LogicalNot_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_053
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a LogicalOr primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_053, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_053");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_LogicalOr_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_054
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for an LRN primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_054, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_054");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    int64_t depthRadius {0};
+    float bias {0.0f};
+    float alpha {0.0f};
+    float beta {0.0f};
+    std::string normRegion {"ACROSS_CHANNELS"};
+    void* primitive = mindspore::lite::MindIR_LRN_CreatePrimitive(depthRadius, bias, alpha,
+        beta, normRegion);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_055
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for an LSTM primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_055, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_055");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    bool bidirectional {false};
+    bool hasBias {false};
+    int64_t inputSize {0};
+    int64_t hiddenSize {0};
+    int64_t numLayers {0};
+    int64_t numDirections {0};
+    float dropout {0.0f};
+    float zoneoutCell {0.0f};
+    float zoneoutHidden {0.0f};
+    int64_t projSize {0};
+    void* primitive = mindspore::lite::MindIR_LSTM_CreatePrimitive(bidirectional, hasBias, inputSize,
+        hiddenSize, numLayers, numDirections, dropout, zoneoutCell, zoneoutHidden, projSize);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_056
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a Maximum primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_056, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_056");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Maximum_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_057
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a MaxPoolFusion primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_057, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_057");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    std::vector<int64_t> kernelSize;
+    std::vector<int64_t> pad;
+    std::vector<int64_t> strides;
+    mindspore::lite::PadMode padMode {mindspore::lite::PAD_MODE_PAD};
+    mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION};
+    mindspore::lite::Format format {mindspore::lite::FORMAT_NCHW};
+    bool global {false};
+    void* primitive = mindspore::lite::MindIR_MaxPoolFusion_CreatePrimitive(kernelSize, strides, pad,
+        padMode, format, global, activationType);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_058
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a Minimum primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_058, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_058");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Minimum_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_059
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a Mod primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_059, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_059");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Mod_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_060
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a MulFusion primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_060, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_060");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION};
+    void* primitive = mindspore::lite::MindIR_MulFusion_CreatePrimitive(activationType);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_061
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a Neg primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_061, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_061");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Neg_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_062
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a NotEqual primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_062, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_062");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_NotEqual_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_063
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a OneHot primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_063, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_063");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    int64_t axis {-1};
+    void* primitive = mindspore::lite::MindIR_OneHot_CreatePrimitive(axis);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_064
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a PadFusion primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_064, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_064");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    std::vector<std::vector<int64_t>> paddings;
+    float constantValue {0.0f};
+    mindspore::lite::PaddingMode paddingMode {mindspore::lite::PADDING_MODE_CONSTANT};
+    void* primitive = mindspore::lite::MindIR_PadFusion_CreatePrimitive(paddings, paddingMode, constantValue);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_065
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a PowFusion primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_065, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_065");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    float scale {1.0f};
+    float shift {0.0f};
+    void* primitive = mindspore::lite::MindIR_PowFusion_CreatePrimitive(scale, shift);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_066
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a PReLUFusion primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_066, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_066");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    bool channelShared {false};
+    void* primitive = mindspore::lite::MindIR_PReLUFusion_CreatePrimitive(channelShared);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_067
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a QuantDTypeCast primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_067, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_067");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    uint64_t srcT {0};
+    uint64_t dstT {0};
+    int64_t axis {0};
+    void* primitive = mindspore::lite::MindIR_QuantDTypeCast_CreatePrimitive(srcT, dstT, axis);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_068
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a Range primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_068, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_068");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    int64_t dType {0};
+    int64_t start {0};
+    int64_t limit {0};
+    int64_t delta {1};
+    void* primitive = mindspore::lite::MindIR_Range_CreatePrimitive(dType, start, limit, delta);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_069
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a Rank primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_069, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_069");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Rank_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_070
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a Reciprocal primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_070, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_070");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Reciprocal_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_071
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a ReduceFusion primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_071, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_071");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    mindspore::lite::ReduceMode mode {mindspore::lite::REDUCE_MODE_ALL};
+    float coeff {0.0f};
+    bool reduceToEnd {false};
+    bool keepDims {false};
+    void* primitive = mindspore::lite::MindIR_ReduceFusion_CreatePrimitive(keepDims, mode, reduceToEnd, coeff);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_072
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a ReLU6 activation primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_072, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_072");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    float alpha {0.0f};
+    float minVal {0.0f};
+    float maxVal {0.0f};
+    bool approximate {false};
+    mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_RELU6};
+    void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, alpha,
+        minVal, maxVal, approximate);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_073
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a Reshape primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_073, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_073");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Reshape_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_074
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a Resize primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_074, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_074");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    float cubicCoeff {0.0f};
+    float extrapolationValue {0.0f};
+    mindspore::lite::NearestMode nearestMode {mindspore::lite::NEAREST_MODE_NORMAL};
+    mindspore::lite::ResizeMethod method {mindspore::lite::RESIZE_METHOD_LINEAR};
+    uint64_t newHeight {0};
+    uint64_t newWidth {0};
+    bool preserveAspectRatio {false};
+    mindspore::lite::CoordinateTransformMode coordinateTransformMode {
+        mindspore::lite::COORDINATE_TRANSFORM_MODE_ASYMMETRIC};
+    uint64_t excludeOutside {0};
+    void* primitive = mindspore::lite::MindIR_Resize_CreatePrimitive(method, newHeight, newWidth,
+        preserveAspectRatio, coordinateTransformMode, cubicCoeff, excludeOutside,
+        extrapolationValue, nearestMode);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_075
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a Round primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_075, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_075");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Round_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_076
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a Rsqrt primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_076, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_076");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Rsqrt_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_077
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a ScaleFusion primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_077, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_077");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION};
+    int64_t axis {0};
+    void* primitive = mindspore::lite::MindIR_ScaleFusion_CreatePrimitive(axis, activationType);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_078
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a ScatterNd primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_078, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_078");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_ScatterNd_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_079
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a Select primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_079, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_079");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Select_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
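+/*
+ * Note: the activation-style cases in this file (GELU, HSigmoid, LeakyReLU,
+ * ReLU6, Sigmoid, Swish) all go through MindIR_Activation_CreatePrimitive;
+ * only the ActivationType argument selects the operator, while alpha, minVal,
+ * maxVal, and approximate are left at zero-value defaults.
+ */
+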
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_080
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a Sigmoid activation primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_080, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_080");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    float alpha {0.0f};
+    float minVal {0.0f};
+    float maxVal {0.0f};
+    bool approximate {false};
+    mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_SIGMOID};
+    void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, alpha, minVal,
+        maxVal, approximate);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_081
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a Sin primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_081, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_081");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Sin_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_082
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a SpaceToDepth primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_082, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_082");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    mindspore::lite::Format format {mindspore::lite::FORMAT_NCHW};
+    int64_t blockSize {0};
+    void* primitive = mindspore::lite::MindIR_SpaceToDepth_CreatePrimitive(blockSize, format);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_083
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a SparseToDense primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_083, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_083");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_SparseToDense_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_084
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a Square primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_084, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_084");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Square_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_085
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a Swish activation primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_085, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_085");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    float alpha {0.0f};
+    float minVal {0.0f};
+    float maxVal {0.0f};
+    bool approximate {false};
+    mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_SWISH};
+    void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, alpha,
+        minVal, maxVal, approximate);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_086
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for an Unstack primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_086, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_086");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    int64_t axis {0};
+    void* primitive = mindspore::lite::MindIR_Unstack_CreatePrimitive(axis);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_087
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a Where primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_087, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_087");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Where_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_088
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a Shape primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_088, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_088");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Shape_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_089
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for an Unsqueeze primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_089, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_089");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    std::vector<int64_t> axis;
+    void* primitive = mindspore::lite::MindIR_Unsqueeze_CreatePrimitive(axis);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_090
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a Conv2DFusion primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_090, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_090");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    int64_t inChannel {0};
+    int64_t outChannel {0};
+    std::vector<int64_t> kernelSize;
+    std::vector<int64_t> strides;
+    std::vector<int64_t> pad;
+    std::vector<int64_t> dilation;
+    mindspore::lite::PadMode padMode {mindspore::lite::PAD_MODE_PAD};
+    mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION};
+    void* primitive = mindspore::lite::MindIR_Conv2DFusion_CreatePrimitive(kernelSize, strides,
+        dilation, padMode, pad, inChannel, inChannel, outChannel, activationType);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_091
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a DivFusion primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_091, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_091");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION};
+    void* primitive = mindspore::lite::MindIR_DivFusion_CreatePrimitive(activationType);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_092
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a MatMulFusion primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_092, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_092");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION};
+    bool transposeA {false};
+    bool transposeB {false};
+    void* primitive = mindspore::lite::MindIR_MatMulFusion_CreatePrimitive(transposeA, transposeB, activationType);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_093
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a SliceFusion primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_093, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_093");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    std::vector<int64_t> axes;
+    void* primitive = mindspore::lite::MindIR_SliceFusion_CreatePrimitive(axes);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_094
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a Softmax primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_094, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_094");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    std::vector<int64_t> axis;
+    void* primitive = mindspore::lite::MindIR_Softmax_CreatePrimitive(axis);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_095
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a SpaceToBatchND primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_095, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_095");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    std::vector<std::vector<int64_t>> paddings;
+    std::vector<int64_t> block_shape {};
+    void* primitive = mindspore::lite::MindIR_SpaceToBatchND_CreatePrimitive(block_shape, paddings);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
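+/*
+ * The parameterless cases (Equal, Erf, Sqrt, SquaredDifference, ...) differ
+ * only in the Create function they call, so they could also be table-driven;
+ * a sketch under that assumption, reusing the hypothetical
+ * ConvertSinglePrimitive helper outlined near the top of this file:
+ *
+ *   using PrimitiveCreator = void* (*)();
+ *   const std::vector<PrimitiveCreator> creators {
+ *       mindspore::lite::MindIR_Sqrt_CreatePrimitive,
+ *       mindspore::lite::MindIR_SquaredDifference_CreatePrimitive,
+ *   };
+ *   for (PrimitiveCreator create : creators) {
+ *       EXPECT_NE(nullptr, ConvertSinglePrimitive(create()));
+ *   }
+ */
+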
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_096
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a Split primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_096, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_096");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    int64_t outputNum {0};
+    std::vector<int64_t> sizeSplits;
+    int64_t axis {0};
+    void* primitive = mindspore::lite::MindIR_Split_CreatePrimitive(outputNum, sizeSplits, axis);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_097
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a Sqrt primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_097, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_097");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Sqrt_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_098
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a SquaredDifference primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_098, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_098");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_SquaredDifference_CreatePrimitive();
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_099
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a Squeeze primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_099, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_099");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    std::vector<int64_t> axis;
+    void* primitive = mindspore::lite::MindIR_Squeeze_CreatePrimitive(axis);
+
+    liteGraph->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_100
+ * @tc.desc: Verify that LiteGraph_To_HDIModel returns a non-null model for a Stack primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_100, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_100");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    int64_t axis = {0};
+    void* primitive = mindspore::lite::MindIR_Stack_CreatePrimitive(axis);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_101
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function returns a non-null model for a StridedSlice primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_101, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_101");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    int64_t beginMask = {0};
+    int64_t endMask = {0};
+    int64_t ellipsisMask = {0};
+    int64_t newAxisMask = {0};
+    int64_t shrinkAxisMask = {0};
+    void* primitive = mindspore::lite::MindIR_StridedSlice_CreatePrimitive(beginMask, endMask, ellipsisMask,
+        newAxisMask, shrinkAxisMask);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_102
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function returns a non-null model for a SubFusion primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_102, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_102");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION};
+    void* primitive = mindspore::lite::MindIR_SubFusion_CreatePrimitive(activationType);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_103
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function returns a non-null model for a TileFusion primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_103, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_103");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    std::vector<int64_t> dims {0};
+    void* primitive = mindspore::lite::MindIR_TileFusion_CreatePrimitive(dims);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_104
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function returns a non-null model for a TopKFusion primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_104, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_104");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    int64_t axis {0};
+    bool sorted {true};
+    void* primitive = mindspore::lite::MindIR_TopKFusion_CreatePrimitive(sorted, axis);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_105
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function returns a non-null model for a Transpose primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_105, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_105");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Transpose_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_106
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function returns a non-null model for an Unsqueeze primitive.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_106, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_106");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    std::vector<int64_t> axis;
+    void* primitive = mindspore::lite::MindIR_Unsqueeze_CreatePrimitive(axis);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_107
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function returns a non-null model for an Unsqueeze primitive (same scenario as case 106).
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_107, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_107");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    std::vector<int64_t> axis;
+    void* primitive = mindspore::lite::MindIR_Unsqueeze_CreatePrimitive(axis);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
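+// The next case goes beyond single-node conversion: it attaches a sub-graph
+// plus a null tensor entry, then checks that mmap on the SharedBuffer fd of -1
+// fails with MAP_FAILED, i.e. no shared memory region was actually exported
+// for the converted model.
+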
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_108
+ * @tc.desc: Verify the sub-graph conversion path and that no shared buffer is mapped for an fd of -1.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_litegraph_to_hdimodel_108, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_108");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+    MSLITE::LiteGraph::SubGraph* subGraph = new (std::nothrow) MSLITE::LiteGraph::SubGraph();
+    subGraph->name_ = "NNRt_SubGraph";
+    subGraph->input_indices_ = {1, 1, 1, 1};
+    subGraph->output_indices_ = {1, 1, 1, 1};
+    subGraph->node_indices_ = {1, 1, 1, 1};
+
+    void* tp = MSLITE::MindIR_Tensor_Create();
+
+    liteGraph.get()->all_tensors_.emplace_back(tp);
+    liteGraph.get()->all_tensors_.emplace_back(nullptr);
+    liteGraph.get()->sub_graphs_.emplace_back(subGraph);
+
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 1, 1, 1};
+
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+
+    uint8_t* mmapPtr = static_cast<uint8_t*>(mmap(nullptr,
+        tensorBuffer.bufferSize, PROT_READ | PROT_WRITE, MAP_SHARED, tensorBuffer.fd, 0));
+    EXPECT_EQ(MAP_FAILED, mmapPtr);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_hdimodel_destroy_001
+ * @tc.desc: Verify the HDIModel_Destroy function releases a model produced by LiteGraph_To_HDIModel.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelTest, litegraphtohdimodeltest_hdimodel_destroy_001, TestSize.Level0)
+{
+    LOGE("HDIModel_Destroy litegraphtohdimodeltest_hdimodel_destroy_001");
+    std::shared_ptr<MSLITE::LiteGraph> liteGraph = std::make_shared<MSLITE::LiteGraph>();
+
+    float alpha {0.0f};
+    float minVal {0.0f};
+    float maxVal {0.0f};
+    bool approximate {false};
+    mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_ABS};
+
+    void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, alpha,
+        minVal, maxVal, approximate);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_1::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_1::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+    HDIModel_Destroy(&model);
+}
+} // namespace UnitTest
+} // namespace V1
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
\ No newline at end of file
diff --git a/test/unittest/components/v1_0/neural_network_runtime_test/neural_network_runtime_test.cpp b/test/unittest/components/v1_0/neural_network_runtime_test/neural_network_runtime_test.cpp
index 0e47be67b287289988bafe66d0cccfcc4ed11d53..344f00806be0acd9b2dc99cec0243f59669c9426 100644
--- a/test/unittest/components/v1_0/neural_network_runtime_test/neural_network_runtime_test.cpp
+++ b/test/unittest/components/v1_0/neural_network_runtime_test/neural_network_runtime_test.cpp
@@ -21,6 +21,7 @@
 #include "compilation.h"
 #include "hdi_device_v1_0.h"
 #include "test/unittest/common/v1_0/mock_idevice.h"
+#include "nnexecutor.h"
 
 namespace OHOS {
 namespace NeuralNetworkRuntime {
@@ -245,6 +246,57 @@ void NeuralNetworkRuntimeTest::SetInputAndOutput(Executor& executor)
     EXPECT_EQ(OH_NN_SUCCESS, executor.GetOutputShape(outputIndex, shapeAA, shapeNum));
 }
 
+class MockIPreparedModel : public PreparedModel {
+public:
+    MOCK_METHOD1(ExportModelCache, OH_NN_ReturnCode(std::vector<Buffer>&));
+    MOCK_METHOD4(Run, OH_NN_ReturnCode(const std::vector<IOTensor>&,
+        const std::vector<IOTensor>&,
+        std::vector<std::vector<int32_t>>&,
+        std::vector<bool>&));
+    MOCK_METHOD4(Run, OH_NN_ReturnCode(const std::vector<NN_Tensor*>&,
+        const std::vector<NN_Tensor*>&,
+        std::vector<std::vector<int32_t>>&,
+        std::vector<bool>&));
+    MOCK_CONST_METHOD1(GetModelID, OH_NN_ReturnCode(uint32_t&));
+    MOCK_METHOD2(GetInputDimRanges,
+        OH_NN_ReturnCode(std::vector<std::vector<uint32_t>>&,
+            std::vector<std::vector<uint32_t>>&));
+};
+
+class MockIDevice : public Device {
+public:
+    MOCK_METHOD1(GetDeviceName, OH_NN_ReturnCode(std::string&));
+    MOCK_METHOD1(GetVendorName, OH_NN_ReturnCode(std::string&));
+    MOCK_METHOD1(GetVersion, OH_NN_ReturnCode(std::string&));
+    MOCK_METHOD1(GetDeviceType, OH_NN_ReturnCode(OH_NN_DeviceType&));
+    MOCK_METHOD1(GetDeviceStatus, OH_NN_ReturnCode(DeviceStatus&));
+    MOCK_METHOD2(GetSupportedOperation, OH_NN_ReturnCode(std::shared_ptr<const mindspore::lite::LiteGraph>,
+        std::vector<bool>&));
+    MOCK_METHOD1(IsFloat16PrecisionSupported, OH_NN_ReturnCode(bool&));
+    MOCK_METHOD1(IsPerformanceModeSupported, OH_NN_ReturnCode(bool&));
+    MOCK_METHOD1(IsPrioritySupported, OH_NN_ReturnCode(bool&));
+    MOCK_METHOD1(IsDynamicInputSupported, OH_NN_ReturnCode(bool&));
+    MOCK_METHOD1(IsModelCacheSupported, OH_NN_ReturnCode(bool&));
+    MOCK_METHOD3(PrepareModel, OH_NN_ReturnCode(std::shared_ptr<const mindspore::lite::LiteGraph>,
+        const ModelConfig&,
+        std::shared_ptr<PreparedModel>&));
+    MOCK_METHOD3(PrepareModel, OH_NN_ReturnCode(const void*,
+        const ModelConfig&,
+        std::shared_ptr<PreparedModel>&));
+    MOCK_METHOD4(PrepareModelFromModelCache, OH_NN_ReturnCode(const std::vector<Buffer>&,
+        const ModelConfig&,
+        std::shared_ptr<PreparedModel>&,
+        bool&));
+    MOCK_METHOD3(PrepareOfflineModel, OH_NN_ReturnCode(std::shared_ptr<const mindspore::lite::LiteGraph>,
+        const ModelConfig&,
+        std::shared_ptr<PreparedModel>&));
+    MOCK_METHOD1(AllocateBuffer, void*(size_t));
+    MOCK_METHOD2(AllocateTensorBuffer, void*(size_t, std::shared_ptr<TensorDesc>));
+    MOCK_METHOD2(AllocateTensorBuffer, void*(size_t, std::shared_ptr<NNTensor>));
+    MOCK_METHOD1(ReleaseBuffer, OH_NN_ReturnCode(const void*));
+    MOCK_METHOD2(AllocateBuffer, OH_NN_ReturnCode(size_t, int&));
+    MOCK_METHOD2(ReleaseBuffer, OH_NN_ReturnCode(int, size_t));
+};
+
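+// Minimal gMock stand-ins for the PreparedModel and Device interfaces, so the
+// executor cases below can run without real NNRT hardware. A typical wiring
+// (sketch):
+//     EXPECT_CALL(*mock, GetDeviceName(::testing::_))
+//         .WillRepeatedly(::testing::Return(OH_NN_SUCCESS));
+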
 /*
  * @tc.name: model_construct_001
  * @tc.desc: Verify the return model of the OH_NNModel_Construct function.
  * @tc.type: FUNC
@@ -1225,6 +1277,130 @@ HWTEST_F(NeuralNetworkRuntimeTest, excutor_setinput_005, testing::ext::TestSize.
     EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
 }
 
+/**
+ * @tc.name: excutor_setinput_006
+ * @tc.desc: Verify the OH_NNExecutor_SetInput function returns OH_NN_FAILED when GetInputDimRanges fails.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, excutor_setinput_006, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNExecutor_SetInput excutor_setinput_006");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* executor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
+
+    uint32_t inputIndex = 0;
+    int32_t dims[2] = {3, 4};
+    m_tensor = {OH_NN_FLOAT32, 2, dims, nullptr, OH_NN_TENSOR};
+
+    float input[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
+    const void *buffer = input;
+    size_t length = 12 * sizeof(float);
+    OH_NN_ReturnCode ret = OH_NNExecutor_SetInput(nnExecutor, inputIndex, &m_tensor, buffer, length);
+    EXPECT_EQ(OH_NN_FAILED, ret);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
+}
+
+/**
+ * @tc.name: excutor_setinput_007
+ * @tc.desc: Verify the OH_NNExecutor_SetInput function returns OH_NN_INVALID_PARAMETER for a null tensor operand.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, excutor_setinput_007, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNExecutor_SetInput excutor_setinput_007");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* executor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
+
+    uint32_t inputIndex = 0;
+
+    float input[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
+    const void *buffer = input;
+    size_t length = 12 * sizeof(float);
+    OH_NN_ReturnCode ret = OH_NNExecutor_SetInput(nnExecutor, inputIndex, nullptr, buffer, length);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
+}
+
+/**
+ * @tc.name: excutor_setinput_008
+ * @tc.desc: Verify the OH_NNExecutor_SetInput function returns OH_NN_INVALID_PARAMETER for a null data buffer.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, excutor_setinput_008, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNExecutor_SetInput excutor_setinput_008");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* executor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
+
+    uint32_t inputIndex = 0;
+    int32_t dims[2] = {3, 4};
+    m_tensor = {OH_NN_FLOAT32, 2, dims, nullptr, OH_NN_TENSOR};
+
+    size_t length = 12 * sizeof(float);
+    OH_NN_ReturnCode ret = OH_NNExecutor_SetInput(nnExecutor, inputIndex, &m_tensor, nullptr, length);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
+}
+
+/**
+ * @tc.name: excutor_setinput_009
+ * @tc.desc: Verify the OH_NNExecutor_SetInput function returns OH_NN_INVALID_PARAMETER for a zero data length.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, excutor_setinput_009, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNExecutor_SetInput excutor_setinput_009");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* executor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
+
+    uint32_t inputIndex = 0;
+    int32_t dims[2] = {3, 4};
+    m_tensor = {OH_NN_FLOAT32, 2, dims, nullptr, OH_NN_TENSOR};
+
+    float input[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
+    const void *buffer = input;
+    size_t length = 0;
+    OH_NN_ReturnCode ret = OH_NNExecutor_SetInput(nnExecutor, inputIndex, &m_tensor, buffer, length);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
+}
+
 /**
  * @tc.name: excutor_setoutput_001
  * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_SetOutput function
@@ -1232,6 +1408,7 @@ HWTEST_F(NeuralNetworkRuntimeTest, excutor_setinput_005, testing::ext::TestSize.
  */
 HWTEST_F(NeuralNetworkRuntimeTest, excutor_setoutput_001, testing::ext::TestSize.Level0)
 {
+    LOGE("OH_NNExecutor_SetOutput excutor_setoutput_001");
     uint32_t outputIndex = 0;
     float input[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
     void *buffer = input;
@@ -1300,6 +1477,86 @@ HWTEST_F(NeuralNetworkRuntimeTest, excutor_setoutput_004, testing::ext::TestSize
     EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetOutput(nnExecutor, outputIndex, output, length));
 }
 
+/**
+ * @tc.name: excutor_setoutput_005
+ * @tc.desc: Verify the OH_NNExecutor_SetOutput function returns OH_NN_INVALID_PARAMETER for an out-of-range output index.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, excutor_setoutput_005, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNExecutor_SetOutput excutor_setoutput_005");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* executor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
+
+    uint32_t outputIndex = 0;
+    float output[12];
+    size_t length = 12 * sizeof(float);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetOutput(nnExecutor, outputIndex, output, length));
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
+}
+
+/**
+ * @tc.name: excutor_setoutput_006
+ * @tc.desc: Verify the OH_NNExecutor_SetOutput function returns OH_NN_INVALID_PARAMETER for a null output buffer.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, excutor_setoutput_006, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNExecutor_SetOutput excutor_setoutput_006");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* executor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
+
+    uint32_t outputIndex = 0;
+    size_t length = 12 * sizeof(float);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetOutput(nnExecutor, outputIndex, nullptr, length));
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
+}
+
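+// Pattern shared by the mock-based executor cases: construct an NNExecutor
+// directly around a MockIPreparedModel whose GetInputDimRanges fails, cast it
+// to the public OH_NNExecutor* handle, and drive the C API against it.
+// AllowLeak silences gMock's leak checker for mocks still held by a shared_ptr
+// when the test returns.
+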
+/**
+ * @tc.name: excutor_setoutput_007
+ * @tc.desc: Verify the OH_NNExecutor_SetOutput function returns OH_NN_INVALID_PARAMETER for a zero length.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, excutor_setoutput_007, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNExecutor_SetOutput excutor_setoutput_007");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* executor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
+
+    uint32_t outputIndex = 0;
+    float output[12];
+    size_t length = 0;
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetOutput(nnExecutor, outputIndex, output, length));
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
+}
+
 /**
  * @tc.name: excutor_getoutputshape_001
  * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_GetOutputShape function
@@ -1434,6 +1691,33 @@ HWTEST_F(NeuralNetworkRuntimeTest, excutor_run_002, testing::ext::TestSize.Level
     EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_Run(nnExecutor));
 }
 
+/**
+ * @tc.name: excutor_run_003
+ * @tc.desc: Verify the OH_NNExecutor_Run function succeeds when there are no inputs or outputs to validate.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, excutor_run_003, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNExecutor_Run excutor_run_003");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* executor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
+
+    int32_t inputDims[2] = {3, 4};
+    m_tensor = {OH_NN_FLOAT32, 2, inputDims, nullptr, OH_NN_TENSOR};
+    OH_NN_ReturnCode ret = OH_NNExecutor_Run(nnExecutor);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
+}
+
 /*
  * @tc.name: executor_allocate_input_memory_001
  * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_AllocateInputMemory function.
@@ -1512,6 +1796,108 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_input_memory_004, testing::
     EXPECT_EQ(nullptr, ret);
 }
 
+/*
+ * @tc.name: executor_allocate_input_memory_005
+ * @tc.desc: Verify the OH_NNExecutor_AllocateInputMemory function returns nullptr for an out-of-range input index.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_input_memory_005, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNExecutor_AllocateInputMemory executor_allocate_input_memory_005");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* executor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
+
+    uint32_t inputIndex = 0;
+    size_t length = 9 * sizeof(float);
+
+    OH_NN_Memory* ret = OH_NNExecutor_AllocateInputMemory(nnExecutor, inputIndex, length);
+    EXPECT_EQ(nullptr, ret);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
+}
+
+/*
+ * @tc.name: executor_allocate_input_memory_006
+ * @tc.desc: Verify the OH_NNExecutor_AllocateInputMemory function succeeds when the device allocates a tensor buffer.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_input_memory_006, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNExecutor_AllocateInputMemory executor_allocate_input_memory_006");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
+    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
+    int32_t expectDim[2] = {3, 3};
+    int32_t* ptr = expectDim;
+    uint32_t dimensionCount = 2;
+    tensorDesr->SetShape(ptr, dimensionCount);
+    pair1.first = tensorDesr;
+    pair2.first = tensorDesr;
+    m_inputTensorDescs.emplace_back(pair1);
+    m_inputTensorDescs.emplace_back(pair2);
+    m_outputTensorDescs.emplace_back(pair1);
+    m_outputTensorDescs.emplace_back(pair2);
+
+    size_t length = 9 * sizeof(float);
+    EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_inputTensorDescs[0].first))
+        .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));
+
+    NNExecutor* executor = new (std::nothrow) NNExecutor(
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+    EXPECT_NE(nullptr, executor);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
+
+    uint32_t inputIndex = 0;
+
+    OH_NN_Memory* ret = OH_NNExecutor_AllocateInputMemory(nnExecutor, inputIndex, length);
+    EXPECT_NE(nullptr, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
+/*
+ * @tc.name: executor_allocate_input_memory_007
+ * @tc.desc: Verify the OH_NNExecutor_AllocateInputMemory function returns nullptr for a zero length.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_input_memory_007, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNExecutor_AllocateInputMemory executor_allocate_input_memory_007");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* executor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
+
+    uint32_t inputIndex = 0;
+    size_t length = 0;
+
+    OH_NN_Memory* ret = OH_NNExecutor_AllocateInputMemory(nnExecutor, inputIndex, length);
+    EXPECT_EQ(nullptr, ret);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
+}
+
 /*
  * @tc.name: executor_allocate_output_memory_001
  * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_AllocateOutputMemory function.
@@ -1590,6 +1976,107 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_output_memory_004, testing:
     EXPECT_EQ(nullptr, ret);
 }
 
+/*
+ * @tc.name: executor_allocate_output_memory_005
+ * @tc.desc: Verify the OH_NNExecutor_AllocateOutputMemory function returns nullptr for an out-of-range output index.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_output_memory_005, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNExecutor_AllocateOutputMemory executor_allocate_output_memory_005");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* executor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
+
+    uint32_t outputIndex = 0;
+    size_t length = 9 * sizeof(float);
+
+    OH_NN_Memory* ret = OH_NNExecutor_AllocateOutputMemory(nnExecutor, outputIndex, length);
+    EXPECT_EQ(nullptr, ret);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
+}
+
+/*
+ * @tc.name: executor_allocate_output_memory_006
+ * @tc.desc: Verify the OH_NNExecutor_AllocateOutputMemory function succeeds when the device allocates a tensor buffer.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_output_memory_006, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNExecutor_AllocateOutputMemory executor_allocate_output_memory_006");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+
+    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
+    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
+    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
+    int32_t expectDim[2] = {3, 3};
+    int32_t* ptr = expectDim;
+    uint32_t dimensionCount = 2;
+    tensorDesr->SetShape(ptr, dimensionCount);
+    pair1.first = tensorDesr;
+    pair2.first = tensorDesr;
+    m_inputTensorDescs.emplace_back(pair1);
+    m_inputTensorDescs.emplace_back(pair2);
+    m_outputTensorDescs.emplace_back(pair1);
+    m_outputTensorDescs.emplace_back(pair2);
+
+    size_t length = 9 * sizeof(float);
+    EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_outputTensorDescs[0].first))
+        .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));
+
+    NNExecutor* executor = new (std::nothrow) NNExecutor(
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+    EXPECT_NE(nullptr, executor);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
+
+    uint32_t outputIndex = 0;
+
+    OH_NN_Memory* ret = OH_NNExecutor_AllocateOutputMemory(nnExecutor, outputIndex, length);
+    EXPECT_NE(nullptr, ret);
+
+    testing::Mock::AllowLeak(device.get());
+}
+
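+// AllocateTensorBuffer is stubbed to hand back the dummy address 0x1000, so
+// the wrapper can produce an OH_NN_Memory without a real driver allocation;
+// the test only checks the returned pointer for non-null and never
+// dereferences it.
+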
+/*
+ * @tc.name: executor_allocate_output_memory_007
+ * @tc.desc: Verify the OH_NNExecutor_AllocateOutputMemory function returns nullptr for a zero length.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_output_memory_007, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNExecutor_AllocateOutputMemory executor_allocate_output_memory_007");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* executor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
+
+    uint32_t outputIndex = 0;
+    size_t length = 0;
+
+    OH_NN_Memory* ret = OH_NNExecutor_AllocateOutputMemory(nnExecutor, outputIndex, length);
+    EXPECT_EQ(nullptr, ret);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
+}
 
 /*
  * @tc.name: executor_destroy_input_memory_001
  * @tc.desc: Verify the OH_NNExecutor is nullptr of the OH_NNExecutor_DestroyInputMemory function.
  * @tc.type: FUNC
@@ -1618,17 +2105,24 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_input_memory_001, testing::e
  */
 HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_input_memory_002, testing::ext::TestSize.Level0)
 {
-    InnerModel innerModel;
-    BuildModel(innerModel);
-
-    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(&innerModel);
-    OH_NNCompilation* nnCompilation = OH_NNCompilation_Construct(model);
-    OH_NNExecutor* nnExecutor = OH_NNExecutor_Construct(nnCompilation);
+    LOGE("OH_NNExecutor_DestroyInputMemory executor_destroy_input_memory_002");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* executor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
 
     uint32_t inputIndex = 0;
     OH_NN_Memory** memory = nullptr;
     OH_NNExecutor_DestroyInputMemory(nnExecutor, inputIndex, memory);
     EXPECT_EQ(nullptr, memory);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
 }
 
@@ -1638,18 +2132,25 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_input_memory_002, testing::e
  */
 HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_input_memory_003, testing::ext::TestSize.Level0)
 {
-    InnerModel innerModel;
-    BuildModel(innerModel);
-
-    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(&innerModel);
-    OH_NNCompilation* nnCompilation = OH_NNCompilation_Construct(model);
-    OH_NNExecutor* nnExecutor = OH_NNExecutor_Construct(nnCompilation);
+    LOGE("OH_NNExecutor_DestroyInputMemory executor_destroy_input_memory_003");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* executor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
 
     uint32_t inputIndex = 0;
     OH_NN_Memory* memory = nullptr;
     OH_NN_Memory** pMemory = &memory;
     OH_NNExecutor_DestroyInputMemory(nnExecutor, inputIndex, pMemory);
     EXPECT_EQ(nullptr, memory);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
 }
 
@@ -1659,12 +2160,17 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_input_memory_003, testing::e
  */
 HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_input_memory_004, testing::ext::TestSize.Level0)
 {
-    InnerModel innerModel;
-    BuildModel(innerModel);
-
-    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(&innerModel);
-    OH_NNCompilation* nnCompilation = OH_NNCompilation_Construct(model);
-    OH_NNExecutor* nnExecutor = OH_NNExecutor_Construct(nnCompilation);
+    LOGE("OH_NNExecutor_DestroyInputMemory executor_destroy_input_memory_004");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* executor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
 
     uint32_t inputIndex = 6;
     float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
@@ -1673,29 +2179,8 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_input_memory_004, testing::e
     OH_NN_Memory* pMemory = &memory;
     OH_NNExecutor_DestroyInputMemory(nnExecutor, inputIndex, &pMemory);
     EXPECT_NE(nullptr, pMemory);
-}
-
-/*
- * @tc.name: executor_destroy_input_memory_005
- * @tc.desc: Verify the success of the OH_NNExecutor_DestroyInputMemory function.
- * @tc.type: FUNC
- */
-HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_input_memory_005, testing::ext::TestSize.Level0)
-{
-    InnerModel innerModel;
-    BuildModel(innerModel);
-    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(&innerModel);
-    OH_NNCompilation* nnCompilation = OH_NNCompilation_Construct(model);
-    OH_NNExecutor* nnExecutor = OH_NNExecutor_Construct(nnCompilation);
-
-    uint32_t inputIndex = 0;
-    float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
-    void* const data = dataArry;
-    OH_NN_Memory memory = {data, 9 * sizeof(float)};
-    OH_NN_Memory* pMemory = &memory;
-    OH_NNExecutor_DestroyInputMemory(nnExecutor, inputIndex, &pMemory);
-    EXPECT_NE(nullptr, pMemory);
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
 }
 
 /*
@@ -1722,17 +2207,24 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_001, testing::
  */
 HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_002, testing::ext::TestSize.Level0)
 {
-    InnerModel innerModel;
-    EXPECT_EQ(OH_NN_SUCCESS, BuildModel(innerModel));
-
-    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(&innerModel);
-    OH_NNCompilation* nnCompilation = OH_NNCompilation_Construct(model);
-    OH_NNExecutor* nnExecutor = OH_NNExecutor_Construct(nnCompilation);
+    LOGE("OH_NNExecutor_DestroyOutputMemory executor_destroy_output_memory_002");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* executor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
 
     uint32_t outputIndex = 0;
     OH_NN_Memory** memory = nullptr;
     OH_NNExecutor_DestroyOutputMemory(nnExecutor, outputIndex, memory);
     EXPECT_EQ(nullptr, memory);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
 }
 
@@ -1742,18 +2234,25 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_002, testing::
  */
 HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_003, testing::ext::TestSize.Level0)
 {
-    InnerModel innerModel;
-    EXPECT_EQ(OH_NN_SUCCESS, BuildModel(innerModel));
-
-    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(&innerModel);
-    OH_NNCompilation* nnCompilation = OH_NNCompilation_Construct(model);
-    OH_NNExecutor* nnExecutor = OH_NNExecutor_Construct(nnCompilation);
+    LOGE("OH_NNExecutor_DestroyOutputMemory executor_destroy_output_memory_003");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* executor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
 
     uint32_t outputIndex = 0;
     OH_NN_Memory* memory = nullptr;
     OH_NN_Memory** pMemory = &memory;
     OH_NNExecutor_DestroyOutputMemory(nnExecutor, outputIndex, pMemory);
     EXPECT_EQ(nullptr, memory);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
 }
 
@@ -1763,12 +2262,17 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_003, testing::
  */
 HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_004, testing::ext::TestSize.Level0)
 {
-    InnerModel innerModel;
-    EXPECT_EQ(OH_NN_SUCCESS, BuildModel(innerModel));
-
-    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(&innerModel);
-    OH_NNCompilation* nnCompilation = OH_NNCompilation_Construct(model);
-    OH_NNExecutor* nnExecutor = OH_NNExecutor_Construct(nnCompilation);
+    LOGE("OH_NNExecutor_DestroyOutputMemory executor_destroy_output_memory_004");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* executor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
 
     uint32_t outputIndex = 6;
     float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
@@ -1777,6 +2281,8 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_004, testing::
     OH_NN_Memory* pMemory = &memory;
     OH_NNExecutor_DestroyOutputMemory(nnExecutor, outputIndex, &pMemory);
     EXPECT_NE(nullptr, pMemory);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
 }
 
@@ -1786,12 +2292,17 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_004, testing::
  */
 HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_005, testing::ext::TestSize.Level0)
 {
-    InnerModel innerModel;
-    EXPECT_EQ(OH_NN_SUCCESS, BuildModel(innerModel));
-
-    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(&innerModel);
-    OH_NNCompilation* nnCompilation = OH_NNCompilation_Construct(model);
-    OH_NNExecutor* nnExecutor = OH_NNExecutor_Construct(nnCompilation);
+    LOGE("OH_NNExecutor_DestroyOutputMemory executor_destroy_output_memory_005");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* executor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
 
     float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
     void* const data = dataArry;
@@ -1800,6 +2311,8 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_005, testing::
     uint32_t outputIndex = 0;
     OH_NNExecutor_DestroyOutputMemory(nnExecutor, outputIndex, &pMemory);
     EXPECT_NE(nullptr, pMemory);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
 }
 
 /*
@@ -1829,12 +2342,17 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_set_input_with_memory_001, testing::
  */
 HWTEST_F(NeuralNetworkRuntimeTest, executor_set_input_with_memory_002, testing::ext::TestSize.Level0)
 {
-    InnerModel innerModel;
-    EXPECT_EQ(OH_NN_SUCCESS, BuildModel(innerModel));
-
-    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(&innerModel);
-    OH_NNCompilation* nnCompilation = OH_NNCompilation_Construct(model);
-    OH_NNExecutor* nnExecutor = OH_NNExecutor_Construct(nnCompilation);
+    LOGE("OH_NNExecutor_SetInputWithMemory executor_set_input_with_memory_002");
executor_set_input_with_memory_002"); + size_t m_backendID {0}; + std::shared_ptr m_device {nullptr}; + std::shared_ptr mockIPreparedMode = std::make_shared(); + EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::Return(OH_NN_FAILED)); + std::vector, OH_NN_TensorType>> m_inputTensorDescs; + std::vector, OH_NN_TensorType>> m_outputTensorDescs; + NNExecutor* executor = new (std::nothrow) NNExecutor( + m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs); + OH_NNExecutor* nnExecutor = reinterpret_cast(executor); OH_NN_Tensor* operand = nullptr; @@ -1845,6 +2363,8 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_set_input_with_memory_002, testing:: OH_NN_ReturnCode ret = OH_NNExecutor_SetInputWithMemory(nnExecutor, inputIndex, operand, &memory); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + + testing::Mock::AllowLeak(mockIPreparedMode.get()); } /* @@ -1854,12 +2374,17 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_set_input_with_memory_002, testing:: */ HWTEST_F(NeuralNetworkRuntimeTest, executor_set_input_with_memory_003, testing::ext::TestSize.Level0) { - InnerModel innerModel; - EXPECT_EQ(OH_NN_SUCCESS, BuildModel(innerModel)); - - OH_NNModel* model = reinterpret_cast(&innerModel); - OH_NNCompilation* nnCompilation = OH_NNCompilation_Construct(model); - OH_NNExecutor* nnExecutor = OH_NNExecutor_Construct(nnCompilation); + LOGE("OH_NNExecutor_SetInputWithMemory executor_set_input_with_memory_003"); + size_t m_backendID {0}; + std::shared_ptr m_device {nullptr}; + std::shared_ptr mockIPreparedMode = std::make_shared(); + EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::Return(OH_NN_FAILED)); + std::vector, OH_NN_TensorType>> m_inputTensorDescs; + std::vector, OH_NN_TensorType>> m_outputTensorDescs; + NNExecutor* executor = new (std::nothrow) NNExecutor( + m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs); + OH_NNExecutor* nnExecutor = reinterpret_cast(executor); SetTensor(); @@ -1867,6 +2392,8 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_set_input_with_memory_003, testing:: OH_NN_Memory* memory = nullptr; OH_NN_ReturnCode ret = OH_NNExecutor_SetInputWithMemory(nnExecutor, inputIndex, &m_tensor, memory); EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + + testing::Mock::AllowLeak(mockIPreparedMode.get()); } /* @@ -1876,12 +2403,17 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_set_input_with_memory_003, testing:: */ HWTEST_F(NeuralNetworkRuntimeTest, executor_set_input_with_memory_004, testing::ext::TestSize.Level0) { - InnerModel innerModel; - EXPECT_EQ(OH_NN_SUCCESS, BuildModel(innerModel)); - - OH_NNModel* model = reinterpret_cast(&innerModel); - OH_NNCompilation* nnCompilation = OH_NNCompilation_Construct(model); - OH_NNExecutor* nnExecutor = OH_NNExecutor_Construct(nnCompilation); + LOGE("OH_NNExecutor_SetInputWithMemory executor_set_input_with_memory_004"); + size_t m_backendID {0}; + std::shared_ptr m_device {nullptr}; + std::shared_ptr mockIPreparedMode = std::make_shared(); + EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::Return(OH_NN_FAILED)); + std::vector, OH_NN_TensorType>> m_inputTensorDescs; + std::vector, OH_NN_TensorType>> m_outputTensorDescs; + NNExecutor* executor = new (std::nothrow) NNExecutor( + m_backendID, m_device, mockIPreparedMode, 
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
 
     uint32_t inputIndex = 0;
     int32_t dims[2] = {3, 4};
@@ -1892,7 +2424,9 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_set_input_with_memory_004, testing::
     OH_NN_Memory memory = {data, 12 * sizeof(float)};
 
     OH_NN_ReturnCode ret = OH_NNExecutor_SetInputWithMemory(nnExecutor, inputIndex, &m_tensor, &memory);
-    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+    EXPECT_EQ(OH_NN_FAILED, ret);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
 }
 
 
@@ -1919,17 +2453,24 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_set_output_with_memory_001, testing:
  */
 HWTEST_F(NeuralNetworkRuntimeTest, executor_set_output_with_memory_002, testing::ext::TestSize.Level0)
 {
-    InnerModel innerModel;
-    EXPECT_EQ(OH_NN_SUCCESS, BuildModel(innerModel));
-
-    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(&innerModel);
-    OH_NNCompilation* nnCompilation = OH_NNCompilation_Construct(model);
-    OH_NNExecutor* nnExecutor = OH_NNExecutor_Construct(nnCompilation);
+    LOGE("OH_NNExecutor_SetOutputWithMemory executor_set_output_with_memory_002");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* executor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
 
     uint32_t outputIndex = 0;
     OH_NN_Memory* memory = nullptr;
     OH_NN_ReturnCode ret = OH_NNExecutor_SetOutputWithMemory(nnExecutor, outputIndex, memory);
     EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
 }
 
@@ -1939,12 +2480,17 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_set_output_with_memory_002, testing:
  */
 HWTEST_F(NeuralNetworkRuntimeTest, executor_set_output_with_memory_003, testing::ext::TestSize.Level0)
 {
-    InnerModel innerModel;
-    EXPECT_EQ(OH_NN_SUCCESS, BuildModel(innerModel));
-
-    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(&innerModel);
-    OH_NNCompilation* nnCompilation = OH_NNCompilation_Construct(model);
-    OH_NNExecutor* nnExecutor = OH_NNExecutor_Construct(nnCompilation);
+    LOGE("OH_NNExecutor_SetOutputWithMemory executor_set_output_with_memory_003");
+    size_t m_backendID {0};
+    std::shared_ptr<Device> m_device {nullptr};
+    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
+    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    NNExecutor* executor = new (std::nothrow) NNExecutor(
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
 
     uint32_t outputIndex = 0;
     float dataArry[12] {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
@@ -1952,6 +2498,8 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_set_output_with_memory_003, testing:
     OH_NN_Memory memory = {data, 12 * sizeof(float)};
     OH_NN_ReturnCode ret = OH_NNExecutor_SetOutputWithMemory(nnExecutor, outputIndex, &memory);
     EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+
+    testing::Mock::AllowLeak(mockIPreparedMode.get());
 }
 
 /*
@@ -2163,6 +2711,344 @@ HWTEST_F(NeuralNetworkRuntimeTest, device_get_type_004, testing::ext::TestSize.L
     OH_NN_ReturnCode ret = OH_NNDevice_GetType(deviceID, pDeviceType);
     EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
 }
+
+/*
+ * @tc.name: oh_nnquantparam_create_001
+ * @tc.desc: Verify the OH_NNQuantParam_Create function returns a non-null quant param instance.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, oh_nnquantparam_create_001, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNQuantParam_Create oh_nnquantparam_create_001");
+    NN_QuantParam* ret = OH_NNQuantParam_Create();
+    EXPECT_NE(nullptr, ret);
+}
+
+/*
+ * @tc.name: oh_nnquantparam_setscales_001
+ * @tc.desc: Verify the OH_NNQuantParam_SetScales function returns OH_NN_INVALID_PARAMETER for null quantParams.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, oh_nnquantparam_setscales_001, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNQuantParam_SetScales oh_nnquantparam_setscales_001");
+    size_t quantNum = 1;
+    OH_NN_ReturnCode ret = OH_NNQuantParam_SetScales(nullptr, nullptr, quantNum);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: oh_nnquantparam_setscales_002
+ * @tc.desc: Verify the OH_NNQuantParam_SetScales function returns OH_NN_INVALID_PARAMETER for null scales.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, oh_nnquantparam_setscales_002, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNQuantParam_SetScales oh_nnquantparam_setscales_002");
+    NN_QuantParam* quantParams = OH_NNQuantParam_Create();
+    size_t quantNum = 1;
+    OH_NN_ReturnCode ret = OH_NNQuantParam_SetScales(quantParams, nullptr, quantNum);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: oh_nnquantparam_setscales_003
+ * @tc.desc: Verify the OH_NNQuantParam_SetScales function returns OH_NN_INVALID_PARAMETER for a quantNum of 0.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, oh_nnquantparam_setscales_003, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNQuantParam_SetScales oh_nnquantparam_setscales_003");
+    NN_QuantParam* quantParams = OH_NNQuantParam_Create();
+    double scale = 2;
+    size_t quantNum = 0;
+    OH_NN_ReturnCode ret = OH_NNQuantParam_SetScales(quantParams, &scale, quantNum);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: oh_nnquantparam_setscales_004
+ * @tc.desc: Verify the OH_NNQuantParam_SetScales function succeeds with valid scales and quantNum.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, oh_nnquantparam_setscales_004, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNQuantParam_SetScales oh_nnquantparam_setscales_004");
+    NN_QuantParam* quantParams = OH_NNQuantParam_Create();
+    double scale = 2;
+    size_t quantNum = 2;
+    OH_NN_ReturnCode ret = OH_NNQuantParam_SetScales(quantParams, &scale, quantNum);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/*
+ * @tc.name: oh_nnquantparam_setzeropoints_001
+ * @tc.desc: Verify the OH_NNQuantParam_SetZeroPoints function returns OH_NN_INVALID_PARAMETER for null quantParams.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, oh_nnquantparam_setzeropoints_001, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNQuantParam_SetZeroPoints oh_nnquantparam_setzeropoints_001");
+    size_t quantNum = 2;
+    OH_NN_ReturnCode ret = OH_NNQuantParam_SetZeroPoints(nullptr, nullptr, quantNum);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
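+// End-to-end sketch of the NN_QuantParam API exercised piecewise below
+// (assuming per-tensor quantization with a single channel):
+//     NN_QuantParam* qp = OH_NNQuantParam_Create();
+//     double scale = 0.5;
+//     int32_t zeroPoint = 0;
+//     uint32_t numBits = 8;
+//     OH_NNQuantParam_SetScales(qp, &scale, 1);
+//     OH_NNQuantParam_SetZeroPoints(qp, &zeroPoint, 1);
+//     OH_NNQuantParam_SetNumBits(qp, &numBits, 1);
+//     OH_NNModel_SetTensorQuantParams(model, index, qp);
+//     OH_NNQuantParam_Destroy(&qp);
+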
+/*
+ * @tc.name: oh_nnquantparam_setzeropoints_002
+ * @tc.desc: Verify the OH_NNQuantParam_SetZeroPoints function returns OH_NN_INVALID_PARAMETER for null zeroPoints.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, oh_nnquantparam_setzeropoints_002, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNQuantParam_SetZeroPoints oh_nnquantparam_setzeropoints_002");
+    NN_QuantParam* quantParams = OH_NNQuantParam_Create();
+    size_t quantNum = 2;
+    OH_NN_ReturnCode ret = OH_NNQuantParam_SetZeroPoints(quantParams, nullptr, quantNum);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: oh_nnquantparam_setzeropoints_003
+ * @tc.desc: Verify the OH_NNQuantParam_SetZeroPoints function returns OH_NN_INVALID_PARAMETER for a quantNum of 0.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, oh_nnquantparam_setzeropoints_003, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNQuantParam_SetZeroPoints oh_nnquantparam_setzeropoints_003");
+    NN_QuantParam* quantParams = OH_NNQuantParam_Create();
+    int32_t zeroPoints = 2;
+    size_t quantNum = 0;
+    OH_NN_ReturnCode ret = OH_NNQuantParam_SetZeroPoints(quantParams, &zeroPoints, quantNum);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: oh_nnquantparam_setzeropoints_004
+ * @tc.desc: Verify the OH_NNQuantParam_SetZeroPoints function succeeds with valid zero points and quantNum.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, oh_nnquantparam_setzeropoints_004, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNQuantParam_SetZeroPoints oh_nnquantparam_setzeropoints_004");
+    NN_QuantParam* quantParams = OH_NNQuantParam_Create();
+    int32_t zeroPoints = 2;
+    size_t quantNum = 2;
+    OH_NN_ReturnCode ret = OH_NNQuantParam_SetZeroPoints(quantParams, &zeroPoints, quantNum);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/*
+ * @tc.name: oh_nnquantparam_setnumbits_001
+ * @tc.desc: Verify the OH_NNQuantParam_SetNumBits function returns OH_NN_INVALID_PARAMETER for null quantParams.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, oh_nnquantparam_setnumbits_001, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNQuantParam_SetNumBits oh_nnquantparam_setnumbits_001");
+    size_t quantNum = 2;
+    OH_NN_ReturnCode ret = OH_NNQuantParam_SetNumBits(nullptr, nullptr, quantNum);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: oh_nnquantparam_setnumbits_002
+ * @tc.desc: Verify the OH_NNQuantParam_SetNumBits function returns OH_NN_INVALID_PARAMETER for null numBits.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, oh_nnquantparam_setnumbits_002, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNQuantParam_SetNumBits oh_nnquantparam_setnumbits_002");
+    NN_QuantParam* quantParams = OH_NNQuantParam_Create();
+    size_t quantNum = 2;
+    OH_NN_ReturnCode ret = OH_NNQuantParam_SetNumBits(quantParams, nullptr, quantNum);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: oh_nnquantparam_setnumbits_003
+ * @tc.desc: Verify the OH_NNQuantParam_SetNumBits function returns OH_NN_INVALID_PARAMETER for a quantNum of 0.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, oh_nnquantparam_setnumbits_003, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNQuantParam_SetNumBits oh_nnquantparam_setnumbits_003");
+    NN_QuantParam* quantParams = OH_NNQuantParam_Create();
+    uint32_t numBits = 2;
+    size_t quantNum = 0;
+    OH_NN_ReturnCode ret = OH_NNQuantParam_SetNumBits(quantParams, &numBits, quantNum);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: oh_nnquantparam_setnumbits_004
+ * @tc.desc: Verify the OH_NNQuantParam_SetNumBits function succeeds with valid numBits and quantNum.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, oh_nnquantparam_setnumbits_004, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNQuantParam_SetNumBits oh_nnquantparam_setnumbits_004");
+    NN_QuantParam* quantParams = OH_NNQuantParam_Create();
+    uint32_t numBits = 2;
+    size_t quantNum = 2;
+    OH_NN_ReturnCode ret = OH_NNQuantParam_SetNumBits(quantParams, &numBits, quantNum);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/*
+ * @tc.name: oh_nnquantparam_destroy_001
+ * @tc.desc: Verify the OH_NNQuantParam_Destroy function returns OH_NN_INVALID_PARAMETER for a null pointer.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, oh_nnquantparam_destroy_001, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNQuantParam_Destroy oh_nnquantparam_destroy_001");
+    OH_NN_ReturnCode ret = OH_NNQuantParam_Destroy(nullptr);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: oh_nnquantparam_destroy_002
+ * @tc.desc: Verify the OH_NNQuantParam_Destroy function returns OH_NN_INVALID_PARAMETER when the pointed-to quant param is null.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, oh_nnquantparam_destroy_002, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNQuantParam_Destroy oh_nnquantparam_destroy_002");
+    NN_QuantParam* quantParams = OH_NNQuantParam_Create();
+    NN_QuantParam** quantParamsDex = &quantParams;
+    *quantParamsDex = nullptr;
+    OH_NN_ReturnCode ret = OH_NNQuantParam_Destroy(quantParamsDex);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: oh_nnquantparam_destroy_003
+ * @tc.desc: Verify the OH_NNQuantParam_Destroy function succeeds for a valid quant param instance.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, oh_nnquantparam_destroy_003, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNQuantParam_Destroy oh_nnquantparam_destroy_003");
+    NN_QuantParam* quantParams = OH_NNQuantParam_Create();
+    NN_QuantParam** quantParamsDex = &quantParams;
+    OH_NN_ReturnCode ret = OH_NNQuantParam_Destroy(quantParamsDex);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/*
+ * @tc.name: oh_nnmodel_addtensortomodel_001
+ * @tc.desc: Verify the OH_NNModel_AddTensorToModel function returns OH_NN_INVALID_PARAMETER for a null model.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, oh_nnmodel_addtensortomodel_001, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNModel_AddTensorToModel oh_nnmodel_addtensortomodel_001");
+    TensorDesc* tensorDescImpl = new (std::nothrow) TensorDesc();
+    NN_TensorDesc* tensor = reinterpret_cast<NN_TensorDesc*>(tensorDescImpl);
+    OH_NN_ReturnCode ret = OH_NNModel_AddTensorToModel(nullptr, tensor);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: oh_nnmodel_addtensortomodel_002
+ * @tc.desc: Verify the OH_NNModel_AddTensorToModel function returns OH_NN_INVALID_PARAMETER for a null tensor desc.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, oh_nnmodel_addtensortomodel_002, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNModel_AddTensorToModel oh_nnmodel_addtensortomodel_002");
+    OH_NNModel* model = OH_NNModel_Construct();
+    OH_NN_ReturnCode ret = OH_NNModel_AddTensorToModel(model, nullptr);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
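+// NN_TensorDesc is an opaque handle for the internal TensorDesc class, so the
+// cases below materialize a TensorDesc directly and hand it to the C API via
+// reinterpret_cast, much as OH_NNTensorDesc_Create is expected to do
+// internally.
+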
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, oh_nnmodel_addtensortomodel_003, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNModel_AddTensorToModel oh_nnmodel_addtensortomodel_003");
+    OH_NNModel* model = OH_NNModel_Construct();
+    TensorDesc* tensorDescImpl = new (std::nothrow) TensorDesc();
+    NN_TensorDesc* tensor = reinterpret_cast<NN_TensorDesc*>(tensorDescImpl);
+    OH_NN_ReturnCode ret = OH_NNModel_AddTensorToModel(model, tensor);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/*
+ * @tc.name: oh_nnmodel_settensorquantparams_001
+ * @tc.desc: Verify the OH_NNModel_SetTensorQuantParams function return invalid parameter in case of nullptr model.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, oh_nnmodel_settensorquantparams_001, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNModel_SetTensorQuantParams oh_nnmodel_settensorquantparams_001");
+    NN_QuantParam* quantParams = OH_NNQuantParam_Create();
+    uint32_t index = 10;
+    OH_NN_ReturnCode ret = OH_NNModel_SetTensorQuantParams(nullptr, index, quantParams);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: oh_nnmodel_settensorquantparams_002
+ * @tc.desc: Verify the OH_NNModel_SetTensorQuantParams function return invalid parameter in case of nullptr quantParams.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, oh_nnmodel_settensorquantparams_002, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNModel_SetTensorQuantParams oh_nnmodel_settensorquantparams_002");
+    OH_NNModel* model = OH_NNModel_Construct();
+    uint32_t index = 10;
+    OH_NN_ReturnCode ret = OH_NNModel_SetTensorQuantParams(model, index, nullptr);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: oh_nnmodel_settensorquantparams_003
+ * @tc.desc: Verify the OH_NNModel_SetTensorQuantParams function return invalid parameter in case of invalid tensor index.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, oh_nnmodel_settensorquantparams_003, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNModel_SetTensorQuantParams oh_nnmodel_settensorquantparams_003");
+    OH_NNModel* model = OH_NNModel_Construct();
+    NN_QuantParam* quantParams = OH_NNQuantParam_Create();
+    uint32_t index = 10;
+    OH_NN_ReturnCode ret = OH_NNModel_SetTensorQuantParams(model, index, quantParams);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: oh_nnmodel_settensortype_001
+ * @tc.desc: Verify the OH_NNModel_SetTensorType function return invalid parameter in case of nullptr model.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeTest, oh_nnmodel_settensortype_001, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNModel_SetTensorType oh_nnmodel_settensortype_001");
+    OH_NN_TensorType tensorType = OH_NN_REDUCE_MIN_KEEP_DIMS;
+    uint32_t index = 10;
+    OH_NN_ReturnCode ret = OH_NNModel_SetTensorType(nullptr, index, tensorType);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
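+
+// Index 10 refers to a tensor that was never added to the freshly constructed
+// model, so the invalid-parameter error is expected here as well.
+/*
+ * @tc.name: oh_nnmodel_settensortype_002
+ * @tc.desc: Verify the OH_NNModel_SetTensorType function return invalid parameter in case of invalid tensor index.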
+ * @tc.type: FUNC + */ +HWTEST_F(NeuralNetworkRuntimeTest, oh_nnmodel_settensortype_002, testing::ext::TestSize.Level0) +{ + LOGE("OH_NNModel_SetTensorType oh_nnmodel_settensortype_002"); + OH_NNModel* model = OH_NNModel_Construct(); + OH_NN_TensorType tensorType = OH_NN_REDUCE_MIN_COEFF; + uint32_t index = 10; + OH_NN_ReturnCode ret = OH_NNModel_SetTensorType(model, index, tensorType); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} } // namespace Unittest } // namespace NeuralNetworkRuntime } // namespace OHOS diff --git a/test/unittest/components/v2_0/hdi_device/hdi_device_test.cpp b/test/unittest/components/v2_0/hdi_device/hdi_device_test.cpp index 50fbd282025b290d8b371ffb06b95cfa7047c96b..12156f0657c150f0f91bf019e81a6d950661260d 100644 --- a/test/unittest/components/v2_0/hdi_device/hdi_device_test.cpp +++ b/test/unittest/components/v2_0/hdi_device/hdi_device_test.cpp @@ -13,6 +13,7 @@ * limitations under the License. */ +#include #include #include #include @@ -25,12 +26,98 @@ #include #include "hdi_device_v2_0.h" +#include "hdi_device_v2_1.h" +#include "hdi_prepared_model_v2_1.h" #include "test/unittest/common/v2_0/mock_idevice.h" #include "test/unittest/common/file_utils.h" +#include "lite_graph_to_hdi_model_v2_0.h" +#include "lite_graph_to_hdi_model_v2_1.h" +#include "device.h" +#include "interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h" +#include "common/log.h" +#include "nnbackend.h" +#include "ops_registry.h" +#include "transform.h" + using namespace testing; using namespace testing::ext; using namespace OHOS::NeuralNetworkRuntime; + +namespace MSLITE = mindspore::lite; + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V2_1 { +class MockIDevice : public INnrtDevice { +public: + MOCK_METHOD1(GetDeviceName, int32_t(std::string&)); + MOCK_METHOD1(GetVendorName, int32_t(std::string&)); + MOCK_METHOD1(GetDeviceType, int32_t(DeviceType&)); + MOCK_METHOD1(GetDeviceStatus, int32_t(DeviceStatus&)); + MOCK_METHOD2(GetSupportedOperation, int32_t(const Model&, std::vector&)); + MOCK_METHOD1(IsFloat16PrecisionSupported, int32_t(bool&)); + MOCK_METHOD1(IsPerformanceModeSupported, int32_t(bool&)); + MOCK_METHOD1(IsPrioritySupported, int32_t(bool&)); + MOCK_METHOD1(IsDynamicInputSupported, int32_t(bool&)); + MOCK_METHOD3(PrepareModel, int32_t(const Model&, const ModelConfig&, OHOS::sptr&)); + MOCK_METHOD1(IsModelCacheSupported, int32_t(bool&)); + MOCK_METHOD3(PrepareModelFromModelCache, int32_t(const std::vector&, const ModelConfig&, + OHOS::sptr&)); + MOCK_METHOD3(PrepareOfflineModel, int32_t(const std::vector&, const ModelConfig&, + sptr&)); + MOCK_METHOD2(AllocateBuffer, int32_t(uint32_t, SharedBuffer&)); + MOCK_METHOD1(ReleaseBuffer, int32_t(const SharedBuffer&)); + MOCK_METHOD2(GetVersion, int32_t(uint32_t&, uint32_t&)); +}; + +class MockIPreparedModel : public IPreparedModel { +public: + MOCK_METHOD1(ExportModelCache, int32_t(std::vector&)); + MOCK_METHOD3(Run, int32_t(const std::vector&, const std::vector&, + std::vector>&)); + MOCK_METHOD2(GetInputDimRanges, int32_t(std::vector>&, std::vector>&)); + MOCK_METHOD2(GetVersion, int32_t(uint32_t&, uint32_t&)); + + static OH_NN_ReturnCode m_ExpectRetCode; +}; + + +sptr INnrtDevice::Get(bool isStub) +{ + return INnrtDevice::Get("device_service", isStub); +} + +sptr INnrtDevice::Get(const std::string& serviceName, bool isStub) +{ + if (isStub) { + return nullptr; + } + + sptr mockIDevice = sptr(new (std::nothrow) MockIDevice()); + if (mockIDevice == nullptr) { + return nullptr; + } + std::string deviceName = 
"MockDevice"; + EXPECT_CALL(*((V2_1::MockIDevice*)mockIDevice.GetRefPtr()), GetDeviceName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceName), ::testing::Return(HDF_SUCCESS))); + + std::string vendorName = "MockVendor"; + EXPECT_CALL(*((V2_1::MockIDevice*)mockIDevice.GetRefPtr()), GetVendorName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(vendorName), ::testing::Return(HDF_SUCCESS))); + + V2_1::DeviceStatus deviceStatus = V2_1::DeviceStatus::AVAILABLE; + EXPECT_CALL(*((V2_1::MockIDevice*)mockIDevice.GetRefPtr()), GetDeviceStatus(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceStatus), ::testing::Return(HDF_SUCCESS))); + + return mockIDevice; +} +} // V2_1 +} // Nnrt +} // HDI +} // OHOS + namespace mindspore { namespace lite { OHOS::HDI::Nnrt::V2_0::Model* MindIR_LiteGraph_To_Model(const LiteGraph* lite_graph, @@ -90,6 +177,7 @@ class HDIDeviceTest : public testing::Test { protected: void GetBuffer(void*& buffer, size_t length); OH_NN_ReturnCode PrepareModel(int32_t allocBufferType, int32_t prepareType); + OH_NN_ReturnCode PrepareModelv2(int32_t allocBufferType, int32_t prepareType); }; void HDIDeviceTest::GetBuffer(void*& buffer, size_t length) @@ -144,6 +232,48 @@ OH_NN_ReturnCode HDIDeviceTest::PrepareModel(int32_t allocBufferType, int32_t pr return result; } +OH_NN_ReturnCode HDIDeviceTest::PrepareModelv2(int32_t allocBufferType, int32_t prepareType) +{ + std::shared_ptr model = std::make_shared(); + OHOS::sptr sp = OHOS::sptr(new (std::nothrow) V2_1::MockIDevice()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr hdiDevice = std::make_unique(sp); + EXPECT_NE(hdiDevice, nullptr); + + V2_1::SharedBuffer buffer {1, 1, 0, 1}; + EXPECT_CALL(*sp, AllocateBuffer(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(buffer), ::testing::Return(allocBufferType))); + + std::shared_ptr preparedModel; + const int position = 2; + OHOS::sptr iPreparedModel = + OHOS::sptr(new (std::nothrow) V2_1::MockIPreparedModel()); + EXPECT_CALL(*sp, PrepareModel(::testing::_, ::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee(iPreparedModel), + ::testing::Return(prepareType))); + + ModelConfig config; + OH_NN_ReturnCode result = hdiDevice->PrepareModel(model, config, preparedModel); + return result; +} + +class MockPreparedModel : public PreparedModel { +public: + MOCK_METHOD1(ExportModelCache, OH_NN_ReturnCode(std::vector&)); + MOCK_METHOD4(Run, OH_NN_ReturnCode(const std::vector&, + const std::vector&, + std::vector>&, + std::vector&)); + MOCK_METHOD4(Run, OH_NN_ReturnCode(const std::vector&, + const std::vector&, + std::vector>&, + std::vector&)); + MOCK_CONST_METHOD1(GetModelID, OH_NN_ReturnCode(uint32_t&)); + MOCK_METHOD2(GetInputDimRanges, OH_NN_ReturnCode(std::vector>&, + std::vector>&)); +}; + /* * * @tc.name: hdidevice_constructor_001 * @tc.desc: Verify the Constructor function return object success. @@ -233,6 +363,45 @@ HWTEST_F(HDIDeviceTest, hdidevice_getvendorname_002, TestSize.Level0) EXPECT_EQ(OH_NN_UNAVAILABLE_DEVICE, result); } +/* * + * @tc.name: hdidevice_getversion_001 + * @tc.desc: Verify the GetVersion function validate vendor name success. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_getversion_001, TestSize.Level0)
+{
+    LOGE("GetVersion hdidevice_getversion_001");
+    OHOS::sptr<V2_0::INnrtDevice> device = V2_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_0> hdiDevice = std::make_unique<HDIDeviceV2_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+    EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), GetVersion(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(HDF_SUCCESS));
+
+    std::string newVendorName = "";
+    OH_NN_ReturnCode result = hdiDevice->GetVersion(newVendorName);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+}
+
+/* *
+ * @tc.name: hdidevice_getversion_002
+ * @tc.desc: Verify the GetVersion function return unavailable device.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_getversion_002, TestSize.Level0)
+{
+    LOGE("GetVersion hdidevice_getversion_002");
+    OHOS::sptr<V2_0::INnrtDevice> device = V2_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_0> hdiDevice = std::make_unique<HDIDeviceV2_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+    std::string vendorName = "MockVendor";
+    EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), GetVersion(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(HDF_FAILURE));
+    OH_NN_ReturnCode result = hdiDevice->GetVersion(vendorName);
+    EXPECT_EQ(OH_NN_UNAVAILABLE_DEVICE, result);
+}
+
 /* *
  * @tc.name: hdidevice_getdevicetype_001
  * @tc.desc: Verify the GetDeviceType function validate device type success.
@@ -273,6 +442,75 @@ HWTEST_F(HDIDeviceTest, hdidevice_getdevicetype_002, TestSize.Level0)
     EXPECT_EQ(OH_NN_UNAVAILABLE_DEVICE, result);
 }
 
+/* *
+ * @tc.name: hdidevice_getdevicetype_003
+ * @tc.desc: Verify the GetDeviceType function return success in case of GPU device type.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_getdevicetype_003, TestSize.Level0)
+{
+    LOGE("GetDeviceType hdidevice_getdevicetype_003");
+    OHOS::sptr<V2_0::INnrtDevice> device = V2_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_0> hdiDevice = std::make_unique<HDIDeviceV2_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    OH_NN_DeviceType deviceType = OH_NN_CPU;
+    EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), GetDeviceType(::testing::_))
+        .WillRepeatedly(Invoke([](V2_0::DeviceType& type) {
+            // Write the device type through the reference parameter directly
+            type = V2_0::DeviceType::GPU;
+            return OH_NN_SUCCESS; // status code assumed to indicate success
+        }));
+    OH_NN_ReturnCode result = hdiDevice->GetDeviceType(deviceType);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+}
+
+/* *
+ * @tc.name: hdidevice_getdevicetype_004
+ * @tc.desc: Verify the GetDeviceType function return success in case of ACCELERATOR device type.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_getdevicetype_004, TestSize.Level0)
+{
+    LOGE("GetDeviceType hdidevice_getdevicetype_004");
+    OHOS::sptr<V2_0::INnrtDevice> device = V2_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_0> hdiDevice = std::make_unique<HDIDeviceV2_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    OH_NN_DeviceType deviceType = OH_NN_CPU;
+    EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), GetDeviceType(::testing::_))
+        .WillRepeatedly(Invoke([](V2_0::DeviceType& type) {
+            // Write the device type through the reference parameter directly
+            type = V2_0::DeviceType::ACCELERATOR;
+            return OH_NN_SUCCESS; // status code assumed to indicate success
+        }));
+    OH_NN_ReturnCode result = hdiDevice->GetDeviceType(deviceType);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+}
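+
+// Same Invoke pattern as getdevicetype_003/004: the lambda writes the enum
+// value through the reference parameter so each V2_0::DeviceType branch is hit.
+/* *
+ * @tc.name: hdidevice_getdevicetype_005
+ * @tc.desc: Verify the GetDeviceType function return success in case of OTHER device type.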
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_getdevicetype_005, TestSize.Level0)
+{
+    LOGE("GetDeviceType hdidevice_getdevicetype_005");
+    OHOS::sptr<V2_0::INnrtDevice> device = V2_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_0> hdiDevice = std::make_unique<HDIDeviceV2_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    OH_NN_DeviceType deviceType = OH_NN_CPU;
+    EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), GetDeviceType(::testing::_))
+        .WillRepeatedly(Invoke([](V2_0::DeviceType& type) {
+            // Write the device type through the reference parameter directly
+            type = V2_0::DeviceType::OTHER;
+            return OH_NN_SUCCESS; // status code assumed to indicate success
+        }));
+    OH_NN_ReturnCode result = hdiDevice->GetDeviceType(deviceType);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+}
+
 /* *
  * @tc.name: hdidevice_getdevicestatus_001
  * @tc.desc: Verify the GetDeviceStatus function validate device status success.
@@ -313,6 +551,78 @@ HWTEST_F(HDIDeviceTest, hdidevice_getdevicestatus_002, TestSize.Level0)
     EXPECT_EQ(OH_NN_UNAVAILABLE_DEVICE, result);
 }
 
+/* *
+ * @tc.name: hdidevice_getdevicestatus_003
+ * @tc.desc: Verify the GetDeviceStatus function return success in case of BUSY device status.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_getdevicestatus_003, TestSize.Level0)
+{
+    LOGE("GetDeviceStatus hdidevice_getdevicestatus_003");
+    OHOS::sptr<V2_0::INnrtDevice> device = V2_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_0> hdiDevice = std::make_unique<HDIDeviceV2_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), GetDeviceStatus(::testing::_))
+        .WillRepeatedly(Invoke([](V2_0::DeviceStatus& status) {
+            // Write the device status through the reference parameter directly
+            status = V2_0::DeviceStatus::BUSY;
+            return OH_NN_SUCCESS; // status code assumed to indicate success
+        }));
+
+    DeviceStatus newDeviceStatus = AVAILABLE;
+    OH_NN_ReturnCode result = hdiDevice->GetDeviceStatus(newDeviceStatus);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+}
+
+/* *
+ * @tc.name: hdidevice_getdevicestatus_004
+ * @tc.desc: Verify the GetDeviceStatus function return success in case of OFFLINE device status.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_getdevicestatus_004, TestSize.Level0)
+{
+    LOGE("GetDeviceStatus hdidevice_getdevicestatus_004");
+    OHOS::sptr<V2_0::INnrtDevice> device = V2_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_0> hdiDevice = std::make_unique<HDIDeviceV2_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), GetDeviceStatus(::testing::_))
+        .WillRepeatedly(Invoke([](V2_0::DeviceStatus& status) {
+            // Write the device status through the reference parameter directly
+            status = V2_0::DeviceStatus::OFFLINE;
+            return OH_NN_SUCCESS; // status code assumed to indicate success
+        }));
+
+    DeviceStatus newDeviceStatus = AVAILABLE;
+    OH_NN_ReturnCode result = hdiDevice->GetDeviceStatus(newDeviceStatus);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+}
+
+/* *
+ * @tc.name: hdidevice_getdevicestatus_005
+ * @tc.desc: Verify the GetDeviceStatus function return success in case of UNKNOWN device status.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_getdevicestatus_005, TestSize.Level0)
+{
+    LOGE("GetDeviceStatus hdidevice_getdevicestatus_005");
+    OHOS::sptr<V2_0::INnrtDevice> device = V2_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_0> hdiDevice = std::make_unique<HDIDeviceV2_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), GetDeviceStatus(::testing::_))
+        .WillRepeatedly(Invoke([](V2_0::DeviceStatus& status) {
+            // Write the device status through the reference parameter directly
+            status = V2_0::DeviceStatus::UNKNOWN;
+            return OH_NN_SUCCESS; // status code assumed to indicate success
+        }));
+
+    DeviceStatus newDeviceStatus = AVAILABLE;
+    OH_NN_ReturnCode result = hdiDevice->GetDeviceStatus(newDeviceStatus);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+}
+
 /* *
  * @tc.name: hdidevice_getsupportedoperation_001
  * @tc.desc: Verify the GetSupportedOperation function return success.
@@ -668,6 +978,28 @@ HWTEST_F(HDIDeviceTest, hdidevice_preparemodel_004, TestSize.Level0)
     EXPECT_EQ(OH_NN_FAILED, result);
 }
 
+/* *
+ * @tc.name: hdidevice_preparemodel_005
+ * @tc.desc: Verify the PrepareModel function return operation forbidden in case of a raw model buffer.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_preparemodel_005, TestSize.Level0)
+{
+    LOGE("PrepareModel hdidevice_preparemodel_005");
+    OHOS::sptr<V2_0::MockIDevice> sp = OHOS::sptr<V2_0::MockIDevice>(new (std::nothrow) V2_0::MockIDevice());
+    EXPECT_NE(sp, nullptr);
+
+    std::unique_ptr<HDIDeviceV2_0> hdiDevice = std::make_unique<HDIDeviceV2_0>(sp);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    float dataArray[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    void* data = dataArray;
+    ModelConfig config;
+    std::shared_ptr<PreparedModel> preparedModel;
+    OH_NN_ReturnCode result = hdiDevice->PrepareModel(data, config, preparedModel);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, result);
+}
+
 /* *
  * @tc.name: hdidevice_preparemodelfrommodelcache_001
  * @tc.desc: Verify the PrepareModelFromModelCache function return success.
@@ -757,75 +1089,286 @@ HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_003, TestSize.Level
 }
 
 /* *
- * @tc.name: hdidevice_allocatebuffer_001
- * @tc.desc: Verify the AllocateBuffer function return nullptr.
+ * @tc.name: hdidevice_preparemodelfrommodelcache_004
+ * @tc.desc: Verify the PrepareModelFromModelCache function return failed in case of HDF_FAILURE.
  * @tc.type: FUNC
  */
-HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_001, TestSize.Level0)
+HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_004, TestSize.Level0)
 {
-    OHOS::sptr<V2_0::INnrtDevice> device = V2_0::INnrtDevice::Get(false);
-    std::unique_ptr<HDIDeviceV2_0> hdiDevice = std::make_unique<HDIDeviceV2_0>(device);
-    EXPECT_NE(hdiDevice, nullptr);
-
-    V2_0::SharedBuffer buffer;
-    EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_))
-        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(buffer), ::testing::Return(HDF_FAILURE)));
+    LOGE("PrepareModelFromModelCache hdidevice_preparemodelfrommodelcache_004");
+    size_t length = 100;
+    void *buffer = nullptr;
+    GetBuffer(buffer, length);
 
-    size_t length = 8;
-    void *result = hdiDevice->AllocateBuffer(length);
-    EXPECT_EQ(nullptr, result);
-    hdiDevice->ReleaseBuffer(result);
-}
+    OHOS::sptr<V2_0::MockIDevice> sp = OHOS::sptr<V2_0::MockIDevice>(new (std::nothrow) V2_0::MockIDevice());
+    EXPECT_NE(sp, nullptr);
 
-/* *
- * @tc.name: hdidevice_allocatebuffer_002
- * @tc.desc: Verify the AllocateBuffer function return nullptr and HDF_FAILURE.
- * @tc.type: FUNC
- */
-HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_002, TestSize.Level0)
-{
-    OHOS::sptr<V2_0::INnrtDevice> device = V2_0::INnrtDevice::Get(false);
-    std::unique_ptr<HDIDeviceV2_0> hdiDevice = std::make_unique<HDIDeviceV2_0>(device);
+    std::unique_ptr<HDIDeviceV2_0> hdiDevice = std::make_unique<HDIDeviceV2_0>(sp);
     EXPECT_NE(hdiDevice, nullptr);
 
-    size_t length = 8;
-    void *result = hdiDevice->AllocateBuffer(length);
-    EXPECT_EQ(nullptr, result);
-    hdiDevice->ReleaseBuffer(result);
-}
+    std::vector<Buffer> modelCache = { { buffer, 100 } };
+    ModelConfig config;
+    config.mode = OH_NN_PERFORMANCE_LOW;
+    config.priority = OH_NN_PRIORITY_LOW;
+    OHOS::sptr<V2_0::MockIPreparedModel> preModel =
+        OHOS::sptr<V2_0::MockIPreparedModel>(new (std::nothrow) V2_0::MockIPreparedModel());
+    EXPECT_NE(preModel, nullptr);
 
-/* *
- * @tc.name: hdidevice_allocatebuffer_003
- * @tc.desc: Verify the AllocateBuffer function return nullptr in case of 0 size.
- * @tc.type: FUNC
- */
-HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_003, TestSize.Level0)
-{
-    OHOS::sptr<V2_0::INnrtDevice> device = V2_0::INnrtDevice::Get(false);
-    std::unique_ptr<HDIDeviceV2_0> hdiDevice = std::make_unique<HDIDeviceV2_0>(device);
-    EXPECT_NE(hdiDevice, nullptr);
+    std::shared_ptr<PreparedModel> preparedModel = std::make_shared<HDIPreparedModelV2_0>(preModel);
 
-    size_t length = 0;
-    void *result = hdiDevice->AllocateBuffer(length);
-    EXPECT_EQ(nullptr, result);
+    OHOS::sptr<V2_0::IPreparedModel> iPreparedModel =
+        OHOS::sptr<V2_0::IPreparedModel>(new (std::nothrow) V2_0::MockIPreparedModel);
+    EXPECT_CALL(*sp, PrepareModelFromModelCache(::testing::_, ::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<2>(iPreparedModel), ::testing::Return(HDF_FAILURE)));
+
+    bool isUpdatable = false;
+    OH_NN_ReturnCode result = hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel, isUpdatable);
+    EXPECT_EQ(OH_NN_FAILED, result);
 }
 
 /* *
- * @tc.name: hdidevice_releasebuffer_001
- * @tc.desc: Verify the ReleaseBuffer function validate buffer success.
+ * @tc.name: hdidevice_preparemodelfrommodelcache_005
+ * @tc.desc: Verify the PrepareModelFromModelCache function return failed in case of HDF_FAILURE.
  * @tc.type: FUNC
  */
-HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_001, TestSize.Level0)
+HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_005, TestSize.Level0)
 {
+    LOGE("PrepareModelFromModelCache hdidevice_preparemodelfrommodelcache_005");
     size_t length = 100;
     void *buffer = nullptr;
     GetBuffer(buffer, length);
 
-    OHOS::sptr<V2_0::INnrtDevice> device = V2_0::INnrtDevice::Get(false);
-    std::unique_ptr<HDIDeviceV2_0> hdiDevice = std::make_unique<HDIDeviceV2_0>(device);
+    OHOS::sptr<V2_0::MockIDevice> sp = OHOS::sptr<V2_0::MockIDevice>(new (std::nothrow) V2_0::MockIDevice());
+    EXPECT_NE(sp, nullptr);
 
-    EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), ReleaseBuffer(::testing::_))
-        .WillRepeatedly(::testing::Return(HDF_SUCCESS));
+    std::unique_ptr<HDIDeviceV2_0> hdiDevice = std::make_unique<HDIDeviceV2_0>(sp);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    std::vector<Buffer> modelCache = { { buffer, 100 } };
+    ModelConfig config;
+    config.mode = OH_NN_PERFORMANCE_MEDIUM;
+    config.priority = OH_NN_PRIORITY_MEDIUM;
+    OHOS::sptr<V2_0::MockIPreparedModel> preModel =
+        OHOS::sptr<V2_0::MockIPreparedModel>(new (std::nothrow) V2_0::MockIPreparedModel());
+    EXPECT_NE(preModel, nullptr);
+
+    std::shared_ptr<PreparedModel> preparedModel = std::make_shared<HDIPreparedModelV2_0>(preModel);
+
+    OHOS::sptr<V2_0::IPreparedModel> iPreparedModel =
+        OHOS::sptr<V2_0::IPreparedModel>(new (std::nothrow) V2_0::MockIPreparedModel);
+    EXPECT_CALL(*sp, PrepareModelFromModelCache(::testing::_, ::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<2>(iPreparedModel), ::testing::Return(HDF_FAILURE)));
+
+    bool isUpdatable = false;
+    OH_NN_ReturnCode result = hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel, isUpdatable);
+    EXPECT_EQ(OH_NN_FAILED, result);
+}
+
+/* *
+ * @tc.name: hdidevice_preparemodelfrommodelcache_006
+ * @tc.desc: Verify the PrepareModelFromModelCache function return failed in case of HDF_FAILURE.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_006, TestSize.Level0)
+{
+    LOGE("PrepareModelFromModelCache hdidevice_preparemodelfrommodelcache_006");
+    size_t length = 100;
+    void *buffer = nullptr;
+    GetBuffer(buffer, length);
+
+    OHOS::sptr<V2_0::MockIDevice> sp = OHOS::sptr<V2_0::MockIDevice>(new (std::nothrow) V2_0::MockIDevice());
+    EXPECT_NE(sp, nullptr);
+
+    std::unique_ptr<HDIDeviceV2_0> hdiDevice = std::make_unique<HDIDeviceV2_0>(sp);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    std::vector<Buffer> modelCache = { { buffer, 100 } };
+    ModelConfig config;
+    config.mode = OH_NN_PERFORMANCE_HIGH;
+    config.priority = OH_NN_PRIORITY_HIGH;
+    OHOS::sptr<V2_0::MockIPreparedModel> preModel =
+        OHOS::sptr<V2_0::MockIPreparedModel>(new (std::nothrow) V2_0::MockIPreparedModel());
+    EXPECT_NE(preModel, nullptr);
+
+    std::shared_ptr<PreparedModel> preparedModel = std::make_shared<HDIPreparedModelV2_0>(preModel);
+
+    OHOS::sptr<V2_0::IPreparedModel> iPreparedModel =
+        OHOS::sptr<V2_0::IPreparedModel>(new (std::nothrow) V2_0::MockIPreparedModel);
+    EXPECT_CALL(*sp, PrepareModelFromModelCache(::testing::_, ::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<2>(iPreparedModel), ::testing::Return(HDF_FAILURE)));
+
+    bool isUpdatable = false;
+    OH_NN_ReturnCode result = hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel, isUpdatable);
+    EXPECT_EQ(OH_NN_FAILED, result);
+}
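+
+// preparemodelfrommodelcache_004 to _007 sweep the ModelConfig performance and
+// priority enum values through the same mocked HDF_FAILURE path.
+/* *
+ * @tc.name: hdidevice_preparemodelfrommodelcache_007
+ * @tc.desc: Verify the PrepareModelFromModelCache function return failed in case of HDF_FAILURE.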
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_preparemodelfrommodelcache_007, TestSize.Level0)
+{
+    LOGE("PrepareModelFromModelCache hdidevice_preparemodelfrommodelcache_007");
+    size_t length = 100;
+    void *buffer = nullptr;
+    GetBuffer(buffer, length);
+
+    OHOS::sptr<V2_0::MockIDevice> sp = OHOS::sptr<V2_0::MockIDevice>(new (std::nothrow) V2_0::MockIDevice());
+    EXPECT_NE(sp, nullptr);
+
+    std::unique_ptr<HDIDeviceV2_0> hdiDevice = std::make_unique<HDIDeviceV2_0>(sp);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    std::vector<Buffer> modelCache = { { buffer, 100 } };
+    ModelConfig config;
+    config.mode = OH_NN_PERFORMANCE_EXTREME;
+    OHOS::sptr<V2_0::MockIPreparedModel> preModel =
+        OHOS::sptr<V2_0::MockIPreparedModel>(new (std::nothrow) V2_0::MockIPreparedModel());
+    EXPECT_NE(preModel, nullptr);
+
+    std::shared_ptr<PreparedModel> preparedModel = std::make_shared<HDIPreparedModelV2_0>(preModel);
+
+    OHOS::sptr<V2_0::IPreparedModel> iPreparedModel =
+        OHOS::sptr<V2_0::IPreparedModel>(new (std::nothrow) V2_0::MockIPreparedModel);
+    EXPECT_CALL(*sp, PrepareModelFromModelCache(::testing::_, ::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<2>(iPreparedModel), ::testing::Return(HDF_FAILURE)));
+
+    bool isUpdatable = false;
+    OH_NN_ReturnCode result = hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel, isUpdatable);
+    EXPECT_EQ(OH_NN_FAILED, result);
+}
+
+/* *
+ * @tc.name: hdidevice_allocatebuffer_001
+ * @tc.desc: Verify the AllocateBuffer function return nullptr.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_001, TestSize.Level0)
+{
+    OHOS::sptr<V2_0::INnrtDevice> device = V2_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_0> hdiDevice = std::make_unique<HDIDeviceV2_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    V2_0::SharedBuffer buffer;
+    EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(buffer), ::testing::Return(HDF_FAILURE)));
+
+    size_t length = 8;
+    void *result = hdiDevice->AllocateBuffer(length);
+    EXPECT_EQ(nullptr, result);
+    hdiDevice->ReleaseBuffer(result);
+}
+
+/* *
+ * @tc.name: hdidevice_allocatebuffer_002
+ * @tc.desc: Verify the AllocateBuffer function return nullptr and HDF_FAILURE.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_002, TestSize.Level0)
+{
+    OHOS::sptr<V2_0::INnrtDevice> device = V2_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_0> hdiDevice = std::make_unique<HDIDeviceV2_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    size_t length = 8;
+    void *result = hdiDevice->AllocateBuffer(length);
+    EXPECT_EQ(nullptr, result);
+    hdiDevice->ReleaseBuffer(result);
+}
+
+/* *
+ * @tc.name: hdidevice_allocatebuffer_003
+ * @tc.desc: Verify the AllocateBuffer function return nullptr in case of 0 size.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_003, TestSize.Level0)
+{
+    OHOS::sptr<V2_0::INnrtDevice> device = V2_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_0> hdiDevice = std::make_unique<HDIDeviceV2_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    size_t length = 0;
+    void *result = hdiDevice->AllocateBuffer(length);
+    EXPECT_EQ(nullptr, result);
+}
+
+/* *
+ * @tc.name: hdidevice_allocatebuffer_004
+ * @tc.desc: Verify the AllocateBuffer function return invalid parameter in case of 0 length.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_004, TestSize.Level0)
+{
+    LOGE("AllocateBuffer hdidevice_allocatebuffer_004");
+    OHOS::sptr<V2_0::INnrtDevice> device = V2_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_0> hdiDevice = std::make_unique<HDIDeviceV2_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    size_t length = 0;
+    int fd = 0;
+    OH_NN_ReturnCode result = hdiDevice->AllocateBuffer(length, fd);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, result);
+}
+
+/* *
+ * @tc.name: hdidevice_allocatebuffer_005
+ * @tc.desc: Verify the AllocateBuffer function return memory error in case of HDF_FAILURE.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_005, TestSize.Level0)
+{
+    LOGE("AllocateBuffer hdidevice_allocatebuffer_005");
+    OHOS::sptr<V2_0::INnrtDevice> device = V2_0::INnrtDevice::Get(false);
+    EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(HDF_FAILURE));
+
+    std::unique_ptr<HDIDeviceV2_0> hdiDevice = std::make_unique<HDIDeviceV2_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    size_t length = 1;
+    int fd = 0;
+    OH_NN_ReturnCode result = hdiDevice->AllocateBuffer(length, fd);
+    EXPECT_EQ(OH_NN_MEMORY_ERROR, result);
+}
+
+/* *
+ * @tc.name: hdidevice_allocatebuffer_006
+ * @tc.desc: Verify the AllocateBuffer function return success.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_allocatebuffer_006, TestSize.Level0)
+{
+    LOGE("AllocateBuffer hdidevice_allocatebuffer_006");
+    OHOS::sptr<V2_0::INnrtDevice> device = V2_0::INnrtDevice::Get(false);
+    EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(HDF_SUCCESS));
+
+    std::unique_ptr<HDIDeviceV2_0> hdiDevice = std::make_unique<HDIDeviceV2_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    size_t length = 1;
+    int fd = 0;
+    OH_NN_ReturnCode result = hdiDevice->AllocateBuffer(length, fd);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+}
+
+/* *
+ * @tc.name: hdidevice_releasebuffer_001
+ * @tc.desc: Verify the ReleaseBuffer function validate buffer success.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_001, TestSize.Level0)
+{
+    size_t length = 100;
+    void *buffer = nullptr;
+    GetBuffer(buffer, length);
+
+    OHOS::sptr<V2_0::INnrtDevice> device = V2_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_0> hdiDevice = std::make_unique<HDIDeviceV2_0>(device);
+
+    EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), ReleaseBuffer(::testing::_))
+        .WillRepeatedly(::testing::Return(HDF_SUCCESS));
     EXPECT_NE(hdiDevice, nullptr);
     hdiDevice->ReleaseBuffer(buffer);
@@ -911,6 +1454,1569 @@ HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_005, TestSize.Level0)
     const auto &memoryManager = MemoryManager::GetInstance();
     memoryManager->UnMapMemory(buffer);
 }
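+
+// releasebuffer_007/008 below cover the (fd, length) overload of ReleaseBuffer
+// on the failure and success paths of the underlying HDI call.
+/* *
+ * @tc.name: hdidevice_releasebuffer_007
+ * @tc.desc: Verify the ReleaseBuffer function validate mock object's ReleaseBuffer return failure.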
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_007, TestSize.Level0)
+{
+    LOGE("ReleaseBuffer hdidevice_releasebuffer_007");
+    OHOS::sptr<V2_0::INnrtDevice> device = V2_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_0> hdiDevice = std::make_unique<HDIDeviceV2_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), ReleaseBuffer(::testing::_))
+        .WillRepeatedly(::testing::Return(HDF_FAILURE));
+
+    int fd = 0;
+    size_t length = 1;
+    OH_NN_ReturnCode ret = hdiDevice->ReleaseBuffer(fd, length);
+    EXPECT_EQ(OH_NN_FAILED, ret);
+}
+
+/* *
+ * @tc.name: hdidevice_releasebuffer_008
+ * @tc.desc: Verify the ReleaseBuffer function validate mock object's ReleaseBuffer return success.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_releasebuffer_008, TestSize.Level0)
+{
+    LOGE("ReleaseBuffer hdidevice_releasebuffer_008");
+    OHOS::sptr<V2_0::INnrtDevice> device = V2_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_0> hdiDevice = std::make_unique<HDIDeviceV2_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    EXPECT_CALL(*((V2_0::MockIDevice *)device.GetRefPtr()), ReleaseBuffer(::testing::_))
+        .WillRepeatedly(::testing::Return(HDF_SUCCESS));
+
+    int fd = 0;
+    size_t length = 1;
+    OH_NN_ReturnCode ret = hdiDevice->ReleaseBuffer(fd, length);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/* *
+ * @tc.name: hdidevice_allocatetensorbuffer_001
+ * @tc.desc: Verify the AllocateTensorBuffer function return nullptr in case of 0 length with a TensorDesc.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_allocatetensorbuffer_001, TestSize.Level0)
+{
+    LOGE("AllocateTensorBuffer hdidevice_allocatetensorbuffer_001");
+    OHOS::sptr<V2_0::INnrtDevice> device = V2_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_0> hdiDevice = std::make_unique<HDIDeviceV2_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    size_t length = 0;
+    std::shared_ptr<TensorDesc> tensor;
+    void* ret = hdiDevice->AllocateTensorBuffer(length, tensor);
+    EXPECT_EQ(nullptr, ret);
+}
+
+/* *
+ * @tc.name: hdidevice_allocatetensorbuffer_002
+ * @tc.desc: Verify the AllocateTensorBuffer function return nullptr in case of 0 length with an NNTensor.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_allocatetensorbuffer_002, TestSize.Level0)
+{
+    LOGE("AllocateTensorBuffer hdidevice_allocatetensorbuffer_002");
+    OHOS::sptr<V2_0::INnrtDevice> device = V2_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_0> hdiDevice = std::make_unique<HDIDeviceV2_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    size_t length = 0;
+    std::shared_ptr<NNTensor> tensor;
+    void* ret = hdiDevice->AllocateTensorBuffer(length, tensor);
+    EXPECT_EQ(nullptr, ret);
+}
+
+/* *
+ * @tc.name: hdidevice_prepareofflinemodel_001
+ * @tc.desc: Verify the PrepareOfflineModel function return null pointer error in case of nullptr model.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_prepareofflinemodel_001, TestSize.Level0)
+{
+    LOGE("PrepareOfflineModel hdidevice_prepareofflinemodel_001");
+    OHOS::sptr<V2_0::INnrtDevice> device = V2_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_0> hdiDevice = std::make_unique<HDIDeviceV2_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    ModelConfig config;
+    std::shared_ptr<PreparedModel> preparedModel;
+    OH_NN_ReturnCode ret = hdiDevice->PrepareOfflineModel(nullptr, config, preparedModel);
+    EXPECT_EQ(OH_NN_NULL_PTR, ret);
+}
+
+/* *
+ * @tc.name: hdidevice_prepareofflinemodel_002
+ * @tc.desc: Verify the PrepareOfflineModel function return invalid parameter in case of a graph without tensors.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_prepareofflinemodel_002, TestSize.Level0)
+{
+    LOGE("PrepareOfflineModel hdidevice_prepareofflinemodel_002");
+    OHOS::sptr<V2_0::INnrtDevice> device = V2_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_0> hdiDevice = std::make_unique<HDIDeviceV2_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    std::shared_ptr<mindspore::lite::LiteGraph> model = std::make_shared<mindspore::lite::LiteGraph>();
+    mindspore::lite::LiteGraph::Node node;
+    mindspore::lite::LiteGraph::Node* testNode = &node;
+    model->all_nodes_.emplace_back(testNode);
+    ModelConfig config;
+    std::shared_ptr<PreparedModel> preparedModel;
+    OH_NN_ReturnCode ret = hdiDevice->PrepareOfflineModel(model, config, preparedModel);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/* *
+ * @tc.name: hdidevice_prepareofflinemodel_003
+ * @tc.desc: Verify the PrepareOfflineModel function return invalid parameter in case of invalid offline model tensors.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_prepareofflinemodel_003, TestSize.Level0)
+{
+    LOGE("PrepareOfflineModel hdidevice_prepareofflinemodel_003");
+    OHOS::sptr<V2_0::INnrtDevice> device = V2_0::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_0> hdiDevice = std::make_unique<HDIDeviceV2_0>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    std::shared_ptr<mindspore::lite::LiteGraph> model = std::make_shared<mindspore::lite::LiteGraph>();
+    mindspore::lite::LiteGraph::Node node;
+    uint32_t indice = 0;
+    node.input_indices_.emplace_back(indice);
+    node.input_indices_.emplace_back(indice);
+    mindspore::lite::LiteGraph::Node* testNode = &node;
+    model->all_nodes_.emplace_back(testNode);
+    model->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create());
+    model->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create());
+    ModelConfig config;
+    std::shared_ptr<PreparedModel> preparedModel;
+    OH_NN_ReturnCode ret = hdiDevice->PrepareOfflineModel(model, config, preparedModel);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/* *
+ * @tc.name: hdidevice_prepareofflinemodel_004
+ * @tc.desc: Verify the PrepareOfflineModel function return memory error in case of an invalid offline model buffer.
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_prepareofflinemodel_004, TestSize.Level0) +{ + LOGE("PrepareOfflineModel hdidevice_prepareofflinemodel_004"); + OHOS::sptr device = V2_0::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + std::shared_ptr model = std::make_shared(); + mindspore::lite::LiteGraph::Node node; + uint32_t indice = 0; + node.input_indices_.emplace_back(indice); + node.input_indices_.emplace_back(indice); + mindspore::lite::LiteGraph::Node* testNode = &node; + model->all_nodes_.emplace_back(testNode); + + char a = 'a'; + mindspore::lite::DataType data_type = mindspore::lite::DataType::DATA_TYPE_INT32; + int dim = 1; + int32_t *dims = &dim; + uint32_t dims_size = 1; + mindspore::lite::Format format = mindspore::lite::Format::FORMAT_HWCK; + uint8_t datas = 0; + uint8_t *data = &datas; + uint32_t data_size = 2; + mindspore::lite::QuantParam quant_params; + uint32_t quant_params_size = 0; + mindspore::lite::TensorPtr ptr2 = mindspore::lite::MindIR_Tensor_Create(&a, data_type, dims, dims_size, + format, data, data_size, + &quant_params, quant_params_size); + std::vector offlineModel2 = mindspore::lite::MindIR_Tensor_GetData(ptr2); + + model->all_tensors_.emplace_back(ptr2); + model->all_tensors_.emplace_back(ptr2); + ModelConfig config; + std::shared_ptr preparedModel; + OH_NN_ReturnCode ret = hdiDevice->PrepareOfflineModel(model, config, preparedModel); + EXPECT_EQ(OH_NN_MEMORY_ERROR, ret); + + testing::Mock::AllowLeak(device.GetRefPtr()); +} + +/* * + * @tc.name: hdidevice_V2_1_constructor_001 + * @tc.desc: Verify the Constructor function return object success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_V2_1_constructor_001, TestSize.Level0) +{ + OHOS::sptr device = V2_1::INnrtDevice::Get(false); + EXPECT_NE(device, nullptr); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); +} + +/* * + * @tc.name: hdidevice_V2_1_getdevicename_001 + * @tc.desc: Verify the GetDeviceName function validate device name success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_V2_1_getdevicename_001, TestSize.Level0) +{ + OHOS::sptr device = V2_1::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + std::string deviceName = "MockDevice"; + EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), GetDeviceName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceName), ::testing::Return(HDF_SUCCESS))); + + const std::string expectDeviceName = "MockDevice"; + std::string newDeviceName = ""; + OH_NN_ReturnCode result = hdiDevice->GetDeviceName(newDeviceName); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(expectDeviceName, newDeviceName); +} + +/* * + * @tc.name: hdidevice_V2_1_getdevicename_002 + * @tc.desc: Verify the GetDeviceName function return unavailable device. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_V2_1_getdevicename_002, TestSize.Level0) +{ + OHOS::sptr device = V2_1::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + std::string deviceName = "MockDevice"; + EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), GetDeviceName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceName), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->GetDeviceName(deviceName); + EXPECT_EQ(OH_NN_UNAVAILABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_V2_1_getvendorname_001 + * @tc.desc: Verify the GetVendorName function validate vendor name success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_V2_1_getvendorname_001, TestSize.Level0) +{ + OHOS::sptr device = V2_1::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + std::string vendorName = "MockVendor"; + EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), GetVendorName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(vendorName), ::testing::Return(HDF_SUCCESS))); + + const std::string expectDeviceName = "MockVendor"; + std::string newVendorName = ""; + OH_NN_ReturnCode result = hdiDevice->GetVendorName(newVendorName); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(expectDeviceName, newVendorName); +} + +/* * + * @tc.name: hdidevice_V2_1_getvendorname_002 + * @tc.desc: Verify the GetVendorName function return unavailable device. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_V2_1_getvendorname_002, TestSize.Level0) +{ + OHOS::sptr device = V2_1::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + std::string vendorName = "MockVendor"; + EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), GetVendorName(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(vendorName), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->GetVendorName(vendorName); + EXPECT_EQ(OH_NN_UNAVAILABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_V2_1_getversion_001 + * @tc.desc: Verify the GetVersion function validate vendor name success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_V2_1_getversion_001, TestSize.Level0) +{ + LOGE("GetVersion hdidevice_V2_1_getversion_001"); + OHOS::sptr device = V2_1::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + std::string vendorName = "MockVendor"; + EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), GetVersion(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::Return(HDF_SUCCESS)); + + const std::string expectDeviceName = "MockVendor"; + std::string newVendorName = ""; + OH_NN_ReturnCode result = hdiDevice->GetVersion(newVendorName); + EXPECT_EQ(OH_NN_SUCCESS, result); +} + +/* * + * @tc.name: hdidevice_V2_1_getversion_002 + * @tc.desc: Verify the GetVersion function return unavailable device. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_getversion_002, TestSize.Level0)
+{
+    LOGE("GetVersion hdidevice_V2_1_getversion_002");
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+    std::string vendorName = "MockVendor";
+    EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), GetVersion(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(HDF_FAILURE));
+    OH_NN_ReturnCode result = hdiDevice->GetVersion(vendorName);
+    EXPECT_EQ(OH_NN_UNAVAILABLE_DEVICE, result);
+}
+
+/* *
+ * @tc.name: hdidevice_V2_1_getdevicetype_001
+ * @tc.desc: Verify the GetDeviceType function validate device type success.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_getdevicetype_001, TestSize.Level0)
+{
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+    V2_1::DeviceType iDeviceType = V2_1::DeviceType::CPU;
+    EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), GetDeviceType(::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(iDeviceType), ::testing::Return(HDF_SUCCESS)));
+
+    OH_NN_DeviceType expectDeviceType = OH_NN_CPU;
+    OH_NN_DeviceType newDeviceType = OH_NN_CPU;
+    OH_NN_ReturnCode result = hdiDevice->GetDeviceType(newDeviceType);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+    EXPECT_EQ(expectDeviceType, newDeviceType);
+}
+
+/* *
+ * @tc.name: hdidevice_V2_1_getdevicetype_002
+ * @tc.desc: Verify the GetDeviceType function return unavailable device.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_getdevicetype_002, TestSize.Level0)
+{
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    OH_NN_DeviceType deviceType = OH_NN_CPU;
+    V2_1::DeviceType iDeviceType = V2_1::DeviceType::CPU;
+    EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), GetDeviceType(::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(iDeviceType), ::testing::Return(HDF_FAILURE)));
+    OH_NN_ReturnCode result = hdiDevice->GetDeviceType(deviceType);
+    EXPECT_EQ(OH_NN_UNAVAILABLE_DEVICE, result);
+}
+
+/* *
+ * @tc.name: hdidevice_V2_1_getdevicetype_003
+ * @tc.desc: Verify the GetDeviceType function return success in case of GPU device type.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_getdevicetype_003, TestSize.Level0)
+{
+    LOGE("GetDeviceType hdidevice_V2_1_getdevicetype_003");
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    OH_NN_DeviceType deviceType = OH_NN_CPU;
+    EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), GetDeviceType(::testing::_))
+        .WillRepeatedly(Invoke([](V2_1::DeviceType& type) {
+            // Write the device type through the reference parameter directly
+            type = V2_1::DeviceType::GPU;
+            return OH_NN_SUCCESS; // status code assumed to indicate success
+        }));
+    OH_NN_ReturnCode result = hdiDevice->GetDeviceType(deviceType);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+}
+
+/* *
+ * @tc.name: hdidevice_V2_1_getdevicetype_004
+ * @tc.desc: Verify the GetDeviceType function return success in case of ACCELERATOR device type.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_getdevicetype_004, TestSize.Level0)
+{
+    LOGE("GetDeviceType hdidevice_V2_1_getdevicetype_004");
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    OH_NN_DeviceType deviceType = OH_NN_CPU;
+    EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), GetDeviceType(::testing::_))
+        .WillRepeatedly(Invoke([](V2_1::DeviceType& type) {
+            // Write the device type through the reference parameter directly
+            type = V2_1::DeviceType::ACCELERATOR;
+            return OH_NN_SUCCESS; // status code assumed to indicate success
+        }));
+    OH_NN_ReturnCode result = hdiDevice->GetDeviceType(deviceType);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+}
+
+/* *
+ * @tc.name: hdidevice_V2_1_getdevicetype_005
+ * @tc.desc: Verify the GetDeviceType function return success in case of OTHER device type.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_getdevicetype_005, TestSize.Level0)
+{
+    LOGE("GetDeviceType hdidevice_V2_1_getdevicetype_005");
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    OH_NN_DeviceType deviceType = OH_NN_CPU;
+    EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), GetDeviceType(::testing::_))
+        .WillRepeatedly(Invoke([](V2_1::DeviceType& type) {
+            // Write the device type through the reference parameter directly
+            type = V2_1::DeviceType::OTHER;
+            return OH_NN_SUCCESS; // status code assumed to indicate success
+        }));
+    OH_NN_ReturnCode result = hdiDevice->GetDeviceType(deviceType);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+}
+
+/* *
+ * @tc.name: hdidevice_V2_1_getdevicestatus_001
+ * @tc.desc: Verify the GetDeviceStatus function validate device status success.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_getdevicestatus_001, TestSize.Level0)
+{
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    V2_1::DeviceStatus iDeviceStatus = V2_1::DeviceStatus::AVAILABLE;
+    EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), GetDeviceStatus(::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(iDeviceStatus), ::testing::Return(HDF_SUCCESS)));
+
+    const DeviceStatus expectDeviceStatus = AVAILABLE;
+    DeviceStatus newDeviceStatus = AVAILABLE;
+    OH_NN_ReturnCode result = hdiDevice->GetDeviceStatus(newDeviceStatus);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+    EXPECT_EQ(expectDeviceStatus, newDeviceStatus);
+}
+
+/* *
+ * @tc.name: hdidevice_V2_1_getdevicestatus_002
+ * @tc.desc: Verify the GetDeviceStatus function return unavailable device.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_getdevicestatus_002, TestSize.Level0)
+{
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+    DeviceStatus deviceStatus = AVAILABLE;
+    V2_1::DeviceStatus iDeviceStatus = V2_1::DeviceStatus::AVAILABLE;
+    EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), GetDeviceStatus(::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(iDeviceStatus), ::testing::Return(HDF_FAILURE)));
+    OH_NN_ReturnCode result = hdiDevice->GetDeviceStatus(deviceStatus);
+    EXPECT_EQ(OH_NN_UNAVAILABLE_DEVICE, result);
+}
+
+/* *
+ * @tc.name: hdidevice_V2_1_getdevicestatus_003
+ * @tc.desc: Verify the GetDeviceStatus function return success in case of BUSY device status.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_getdevicestatus_003, TestSize.Level0)
+{
+    LOGE("GetDeviceStatus hdidevice_V2_1_getdevicestatus_003");
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), GetDeviceStatus(::testing::_))
+        .WillRepeatedly(Invoke([](V2_1::DeviceStatus& status) {
+            // Write the device status through the reference parameter directly
+            status = V2_1::DeviceStatus::BUSY;
+            return OH_NN_SUCCESS; // status code assumed to indicate success
+        }));
+
+    DeviceStatus newDeviceStatus = AVAILABLE;
+    OH_NN_ReturnCode result = hdiDevice->GetDeviceStatus(newDeviceStatus);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+}
+
+/* *
+ * @tc.name: hdidevice_V2_1_getdevicestatus_004
+ * @tc.desc: Verify the GetDeviceStatus function return success in case of OFFLINE device status.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_getdevicestatus_004, TestSize.Level0)
+{
+    LOGE("GetDeviceStatus hdidevice_V2_1_getdevicestatus_004");
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), GetDeviceStatus(::testing::_))
+        .WillRepeatedly(Invoke([](V2_1::DeviceStatus& status) {
+            // Write the device status through the reference parameter directly
+            status = V2_1::DeviceStatus::OFFLINE;
+            return OH_NN_SUCCESS; // status code assumed to indicate success
+        }));
+
+    DeviceStatus newDeviceStatus = AVAILABLE;
+    OH_NN_ReturnCode result = hdiDevice->GetDeviceStatus(newDeviceStatus);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+}
+
+/* *
+ * @tc.name: hdidevice_V2_1_getdevicestatus_005
+ * @tc.desc: Verify the GetDeviceStatus function return success in case of UNKNOWN device status.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_getdevicestatus_005, TestSize.Level0)
+{
+    LOGE("GetDeviceStatus hdidevice_V2_1_getdevicestatus_005");
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), GetDeviceStatus(::testing::_))
+        .WillRepeatedly(Invoke([](V2_1::DeviceStatus& status) {
+            // Write the device status through the reference parameter directly
+            status = V2_1::DeviceStatus::UNKNOWN;
+            return OH_NN_SUCCESS; // status code assumed to indicate success
+        }));
+
+    DeviceStatus newDeviceStatus = AVAILABLE;
+    OH_NN_ReturnCode result = hdiDevice->GetDeviceStatus(newDeviceStatus);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+}
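+
+// OH_NN_FAILED is expected below even though both mocked HDI calls report
+// HDF_SUCCESS, presumably because the tiny SharedBuffer {1, 1, 0, 1} cannot
+// hold the serialized LiteGraph (assumption based on the buffer size alone).
+/* *
+ * @tc.name: hdidevice_V2_1_getsupportedoperation_001
+ * @tc.desc: Verify the GetSupportedOperation function return failed.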
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_getsupportedoperation_001, TestSize.Level0)
+{
+    std::vector<bool> ops {true};
+    std::shared_ptr<mindspore::lite::LiteGraph> model = std::make_shared<mindspore::lite::LiteGraph>();
+    EXPECT_NE(nullptr, model);
+    BuildLiteGraph(model);
+
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    V2_1::SharedBuffer buffer {1, 1, 0, 1};
+    EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(buffer), ::testing::Return(HDF_SUCCESS)));
+
+    EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), GetSupportedOperation(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(ops), ::testing::Return(HDF_SUCCESS)));
+
+    std::vector<bool> newOps {true};
+    const std::vector<bool> expectOps {true};
+    OH_NN_ReturnCode result = hdiDevice->GetSupportedOperation(model, newOps);
+    EXPECT_EQ(OH_NN_FAILED, result);
+    auto expectOpsSize = expectOps.size();
+    for (size_t i = 0; i < expectOpsSize; ++i) {
+        EXPECT_EQ(expectOps[i], newOps[i]);
+    }
+}
+
+/* *
+ * @tc.name: hdidevice_V2_1_getsupportedoperation_002
+ * @tc.desc: Verify the GetSupportedOperation function return failed in case of allocate buffer failure.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_getsupportedoperation_002, TestSize.Level0)
+{
+    std::vector<bool> ops;
+    std::shared_ptr<mindspore::lite::LiteGraph> model = std::make_shared<mindspore::lite::LiteGraph>();
+    EXPECT_NE(nullptr, model);
+    BuildLiteGraph(model);
+
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    V2_1::SharedBuffer buffer {1, 1, 0, 1};
+    EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(buffer), ::testing::Return(HDF_FAILURE)));
+
+    OH_NN_ReturnCode result = hdiDevice->GetSupportedOperation(model, ops);
+    EXPECT_EQ(OH_NN_FAILED, result);
+}
+
+/* *
+ * @tc.name: hdidevice_V2_1_getsupportedoperation_003
+ * @tc.desc: Verify the GetSupportedOperation function return null pointer error in case of nullptr model.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_getsupportedoperation_003, TestSize.Level0)
+{
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    std::shared_ptr<mindspore::lite::LiteGraph> model = nullptr;
+    std::vector<bool> ops;
+    OH_NN_ReturnCode result = hdiDevice->GetSupportedOperation(model, ops);
+    EXPECT_EQ(OH_NN_NULL_PTR, result);
+}
+
+/* *
+ * @tc.name: hdidevice_V2_1_getsupportedoperation_004
+ * @tc.desc: Verify the GetSupportedOperation function return failed in case of unavailable device.
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_V2_1_getsupportedoperation_004, TestSize.Level0) +{ + std::vector ops {true}; + std::shared_ptr model = std::make_shared(); + EXPECT_NE(nullptr, model); + BuildLiteGraph(model); + + OHOS::sptr device = V2_1::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + V2_1::SharedBuffer buffer {2, 1, 0, 1}; + EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(buffer), ::testing::Return(HDF_SUCCESS))); + + EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), GetSupportedOperation(::testing::_, ::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(ops), ::testing::Return(HDF_FAILURE))); + + std::vector newOps {true}; + OH_NN_ReturnCode result = hdiDevice->GetSupportedOperation(model, newOps); + EXPECT_EQ(OH_NN_FAILED, result); +} + +/* * + * @tc.name: hdidevice_V2_1_isfloat16precisionsupported_001 + * @tc.desc: Verify the IsFloat16PrecisionSupported function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_V2_1_isfloat16precisionsupported_001, TestSize.Level0) +{ + OHOS::sptr device = V2_1::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), IsFloat16PrecisionSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_SUCCESS))); + OH_NN_ReturnCode result = hdiDevice->IsFloat16PrecisionSupported(isSupported); + EXPECT_EQ(OH_NN_SUCCESS, result); +} + +/* * + * @tc.name: hdidevice_V2_1_isfloat16precisionsupported_002 + * @tc.desc: Verify the IsFloat16PrecisionSupported function return unavailable device. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_V2_1_isfloat16precisionsupported_002, TestSize.Level0) +{ + OHOS::sptr device = V2_1::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), IsFloat16PrecisionSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->IsFloat16PrecisionSupported(isSupported); + EXPECT_EQ(OH_NN_UNAVAILABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_V2_1_isperformancemodesupported_001 + * @tc.desc: Verify the IsPerformanceModeSupported function return success. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_V2_1_isperformancemodesupported_001, TestSize.Level0) +{ + OHOS::sptr device = V2_1::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), IsPerformanceModeSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_SUCCESS))); + + bool newIsSupported = false; + const bool expectIsSupported = false; + OH_NN_ReturnCode result = hdiDevice->IsPerformanceModeSupported(newIsSupported); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(expectIsSupported, newIsSupported); +} + +/* * + * @tc.name: hdidevice_V2_1_isperformancemodesupported_002 + * @tc.desc: Verify the IsPerformanceModeSupported function return unavailable device. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_V2_1_isperformancemodesupported_002, TestSize.Level0) +{ + OHOS::sptr device = V2_1::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), IsPerformanceModeSupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->IsPerformanceModeSupported(isSupported); + EXPECT_EQ(OH_NN_UNAVAILABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_V2_1_isprioritysupported_001 + * @tc.desc: Verify the IsPrioritySupported function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_V2_1_isprioritysupported_001, TestSize.Level0) +{ + OHOS::sptr device = V2_1::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), IsPrioritySupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_SUCCESS))); + + bool newIsSupported = false; + bool expectIsSupported = false; + OH_NN_ReturnCode result = hdiDevice->IsPrioritySupported(newIsSupported); + EXPECT_EQ(OH_NN_SUCCESS, result); + EXPECT_EQ(newIsSupported, expectIsSupported); +} + +/* * + * @tc.name: hdidevice_V2_1_isprioritysupported_002 + * @tc.desc: Verify the IsPrioritySupported function return unavailable device. + * @tc.type: FUNC + */ +HWTEST_F(HDIDeviceTest, hdidevice_V2_1_isprioritysupported_002, TestSize.Level0) +{ + OHOS::sptr device = V2_1::INnrtDevice::Get(false); + std::unique_ptr hdiDevice = std::make_unique(device); + EXPECT_NE(hdiDevice, nullptr); + + bool isSupported = false; + EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), IsPrioritySupported(::testing::_)) + .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_FAILURE))); + OH_NN_ReturnCode result = hdiDevice->IsPrioritySupported(isSupported); + EXPECT_EQ(OH_NN_UNAVAILABLE_DEVICE, result); +} + +/* * + * @tc.name: hdidevice_V2_1_isdynamicinputsupported_001 + * @tc.desc: Verify the IsDynamicInputSupported function return success. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_isdynamicinputsupported_001, TestSize.Level0)
+{
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    bool isSupported = false;
+    EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), IsDynamicInputSupported(::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_SUCCESS)));
+
+    bool newIsSupported = false;
+    bool expectIsSupported = false;
+    OH_NN_ReturnCode result = hdiDevice->IsDynamicInputSupported(newIsSupported);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+    EXPECT_EQ(newIsSupported, expectIsSupported);
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_isdynamicinputsupported_002
+ * @tc.desc: Verify the IsDynamicInputSupported function return unavailable device.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_isdynamicinputsupported_002, TestSize.Level0)
+{
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    bool isSupported = false;
+    EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), IsDynamicInputSupported(::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_FAILURE)));
+    OH_NN_ReturnCode result = hdiDevice->IsDynamicInputSupported(isSupported);
+    EXPECT_EQ(OH_NN_UNAVAILABLE_DEVICE, result);
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_ismodelcachesupported_001
+ * @tc.desc: Verify the IsModelCacheSupported function return success.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_ismodelcachesupported_001, TestSize.Level0)
+{
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    bool isSupported = false;
+    EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), IsModelCacheSupported(::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_SUCCESS)));
+
+    bool newIsSupported = false;
+    bool expectIsSupported = false;
+    OH_NN_ReturnCode result = hdiDevice->IsModelCacheSupported(newIsSupported);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+    EXPECT_EQ(expectIsSupported, newIsSupported);
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_ismodelcachesupported_002
+ * @tc.desc: Verify the IsModelCacheSupported function return unavailable device.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_ismodelcachesupported_002, TestSize.Level0)
+{
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    bool isSupported = false;
+    EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), IsModelCacheSupported(::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(isSupported), ::testing::Return(HDF_FAILURE)));
+    OH_NN_ReturnCode result = hdiDevice->IsModelCacheSupported(isSupported);
+    EXPECT_EQ(OH_NN_UNAVAILABLE_DEVICE, result);
+}
+
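All of the capability-query cases in this file lean on one gmock pattern: DoAll(SetArgReferee<0>(...), Return(...)) first writes the query's output parameter and then picks the HDI return code, so a single expectation drives both the success branch and the unavailable-device branch of the wrapper. A minimal, self-contained sketch of that pattern (Queryable, MockQueryable and StubIsSupported are illustrative names, not part of this patch):

#include <cstdint>
#include <gmock/gmock.h>

// Hypothetical stand-in for the query methods of V2_1::INnrtDevice.
class Queryable {
public:
    virtual ~Queryable() = default;
    virtual int32_t IsSupported(bool& isSupported) = 0;
};

class MockQueryable : public Queryable {
public:
    MOCK_METHOD1(IsSupported, int32_t(bool&));
};

// Writes `value` into the output parameter and returns `hdiRet`, mirroring
// the DoAll(SetArgReferee<0>(...), Return(...)) expectations used above.
void StubIsSupported(MockQueryable& mock, bool value, int32_t hdiRet)
{
    EXPECT_CALL(mock, IsSupported(::testing::_))
        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(value),
                                         ::testing::Return(hdiRet)));
}

+/**
+ * @tc.name: hdidevice_V2_1_preparemodel_001
+ * @tc.desc: Verify the PrepareModel function return failed.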
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_preparemodel_001, TestSize.Level0)
+{
+    int32_t allocBufferType = HDF_SUCCESS;
+    int32_t prepareType = HDF_SUCCESS;
+    OH_NN_ReturnCode result = PrepareModelv2(allocBufferType, prepareType);
+    EXPECT_EQ(OH_NN_FAILED, result);
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_preparemodel_002
+ * @tc.desc: Verify the PrepareModel function return invalid parameter.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_preparemodel_002, TestSize.Level0)
+{
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    std::shared_ptr<const mindspore::lite::LiteGraph> model = nullptr;
+    ModelConfig config;
+    std::shared_ptr<PreparedModel> preparedModel;
+    OH_NN_ReturnCode result = hdiDevice->PrepareModel(model, config, preparedModel);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, result);
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_preparemodel_003
+ * @tc.desc: Verify the PrepareModel function return failed.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_preparemodel_003, TestSize.Level0)
+{
+    int32_t allocBufferType = HDF_SUCCESS;
+    int32_t prepareType = HDF_FAILURE;
+    OH_NN_ReturnCode result = PrepareModelv2(allocBufferType, prepareType);
+    EXPECT_EQ(OH_NN_FAILED, result);
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_preparemodel_004
+ * @tc.desc: Verify the PrepareModel function return failed.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_preparemodel_004, TestSize.Level0)
+{
+    int32_t allocBufferType = HDF_FAILURE;
+    int32_t prepareType = HDF_FAILURE;
+    OH_NN_ReturnCode result = PrepareModelv2(allocBufferType, prepareType);
+    EXPECT_EQ(OH_NN_FAILED, result);
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_preparemodel_005
+ * @tc.desc: Verify the PrepareModel function return operation forbidden.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_preparemodel_005, TestSize.Level0)
+{
+    LOGE("PrepareModel hdidevice_V2_1_preparemodel_005");
+    OHOS::sptr<V2_1::MockIDevice> sp = OHOS::sptr<V2_1::MockIDevice>(new (std::nothrow) V2_1::MockIDevice());
+    EXPECT_NE(sp, nullptr);
+
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(sp);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    void* data = dataArry;
+    ModelConfig config;
+    std::shared_ptr<PreparedModel> preparedModel;
+    OH_NN_ReturnCode result = hdiDevice->PrepareModel(data, config, preparedModel);
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, result);
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_preparemodelfrommodelcache_001
+ * @tc.desc: Verify the PrepareModelFromModelCache function return success.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_preparemodelfrommodelcache_001, TestSize.Level0)
+{
+    size_t length = 100;
+    void *buffer = nullptr;
+    GetBuffer(buffer, length);
+
+    std::vector<Buffer> modelCache = { { buffer, 100 } };
+    ModelConfig config;
+
+    OHOS::sptr<V2_1::MockIDevice> sp = OHOS::sptr<V2_1::MockIDevice>(new (std::nothrow) V2_1::MockIDevice());
+    EXPECT_NE(sp, nullptr);
+
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(sp);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    std::shared_ptr<PreparedModel> preparedModel;
+
+    OHOS::sptr<V2_1::IPreparedModel> iPreparedModel =
+        OHOS::sptr<V2_1::MockIPreparedModel>(new (std::nothrow) V2_1::MockIPreparedModel());
+    EXPECT_CALL(*sp, PrepareModelFromModelCache(::testing::_, ::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<2>(iPreparedModel), ::testing::Return(HDF_SUCCESS)));
+
+    bool isUpdatable = false;
+    OH_NN_ReturnCode result = hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel, isUpdatable);
+    const auto &memoryManager = MemoryManager::GetInstance();
+    memoryManager->UnMapMemory(buffer);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_preparemodelfrommodelcache_002
+ * @tc.desc: Verify the PrepareModelFromModelCache function return failed.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_preparemodelfrommodelcache_002, TestSize.Level0)
+{
+    size_t length = 100;
+    void *buffer = nullptr;
+    GetBuffer(buffer, length);
+
+    OHOS::sptr<V2_1::MockIDevice> sp = OHOS::sptr<V2_1::MockIDevice>(new (std::nothrow) V2_1::MockIDevice());
+    EXPECT_NE(sp, nullptr);
+
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(sp);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    std::vector<Buffer> modelCache = { { buffer, 100 } };
+    ModelConfig config;
+    OHOS::sptr<V2_1::IPreparedModel> preModel =
+        OHOS::sptr<V2_1::MockIPreparedModel>(new (std::nothrow) V2_1::MockIPreparedModel());
+    EXPECT_NE(preModel, nullptr);
+
+    std::shared_ptr<PreparedModel> preparedModel = std::make_shared<HDIPreparedModelV2_1>(preModel);
+
+    OHOS::sptr<V2_1::IPreparedModel> iPreparedModel =
+        OHOS::sptr<V2_1::MockIPreparedModel>(new (std::nothrow) V2_1::MockIPreparedModel);
+    EXPECT_CALL(*sp, PrepareModelFromModelCache(::testing::_, ::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<2>(iPreparedModel), ::testing::Return(HDF_FAILURE)));
+
+    bool isUpdatable = false;
+    OH_NN_ReturnCode result = hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel, isUpdatable);
+    EXPECT_EQ(OH_NN_FAILED, result);
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_preparemodelfrommodelcache_003
+ * @tc.desc: Verify the PrepareModelFromModelCache function return null pointer.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_preparemodelfrommodelcache_003, TestSize.Level0)
+{
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    std::vector<Buffer> modelCache = { { nullptr, 0 } };
+    ModelConfig config;
+    std::shared_ptr<PreparedModel> preparedModel;
+    bool isUpdatable = false;
+    OH_NN_ReturnCode result = hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel, isUpdatable);
+    EXPECT_EQ(OH_NN_NULL_PTR, result);
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_preparemodelfrommodelcache_004
+ * @tc.desc: Verify the PrepareModelFromModelCache function return failed.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_preparemodelfrommodelcache_004, TestSize.Level0)
+{
+    LOGE("PrepareModelFromModelCache hdidevice_V2_1_preparemodelfrommodelcache_004");
+    size_t length = 100;
+    void *buffer = nullptr;
+    GetBuffer(buffer, length);
+
+    OHOS::sptr<V2_1::MockIDevice> sp = OHOS::sptr<V2_1::MockIDevice>(new (std::nothrow) V2_1::MockIDevice());
+    EXPECT_NE(sp, nullptr);
+
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(sp);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    std::vector<Buffer> modelCache = { { buffer, 100 } };
+    ModelConfig config;
+    config.mode = OH_NN_PERFORMANCE_LOW;
+    config.priority = OH_NN_PRIORITY_LOW;
+    OHOS::sptr<V2_1::IPreparedModel> preModel =
+        OHOS::sptr<V2_1::MockIPreparedModel>(new (std::nothrow) V2_1::MockIPreparedModel());
+    EXPECT_NE(preModel, nullptr);
+
+    std::shared_ptr<PreparedModel> preparedModel = std::make_shared<HDIPreparedModelV2_1>(preModel);
+
+    OHOS::sptr<V2_1::IPreparedModel> iPreparedModel =
+        OHOS::sptr<V2_1::MockIPreparedModel>(new (std::nothrow) V2_1::MockIPreparedModel);
+    EXPECT_CALL(*sp, PrepareModelFromModelCache(::testing::_, ::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<2>(iPreparedModel), ::testing::Return(HDF_FAILURE)));
+
+    bool isUpdatable = false;
+    OH_NN_ReturnCode result = hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel, isUpdatable);
+    EXPECT_EQ(OH_NN_FAILED, result);
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_preparemodelfrommodelcache_005
+ * @tc.desc: Verify the PrepareModelFromModelCache function return failed.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_preparemodelfrommodelcache_005, TestSize.Level0)
+{
+    LOGE("PrepareModelFromModelCache hdidevice_V2_1_preparemodelfrommodelcache_005");
+    size_t length = 100;
+    void *buffer = nullptr;
+    GetBuffer(buffer, length);
+
+    OHOS::sptr<V2_1::MockIDevice> sp = OHOS::sptr<V2_1::MockIDevice>(new (std::nothrow) V2_1::MockIDevice());
+    EXPECT_NE(sp, nullptr);
+
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(sp);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    std::vector<Buffer> modelCache = { { buffer, 100 } };
+    ModelConfig config;
+    config.mode = OH_NN_PERFORMANCE_MEDIUM;
+    config.priority = OH_NN_PRIORITY_MEDIUM;
+    OHOS::sptr<V2_1::IPreparedModel> preModel =
+        OHOS::sptr<V2_1::MockIPreparedModel>(new (std::nothrow) V2_1::MockIPreparedModel());
+    EXPECT_NE(preModel, nullptr);
+
+    std::shared_ptr<PreparedModel> preparedModel = std::make_shared<HDIPreparedModelV2_1>(preModel);
+
+    OHOS::sptr<V2_1::IPreparedModel> iPreparedModel =
+        OHOS::sptr<V2_1::MockIPreparedModel>(new (std::nothrow) V2_1::MockIPreparedModel);
+    EXPECT_CALL(*sp, PrepareModelFromModelCache(::testing::_, ::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<2>(iPreparedModel), ::testing::Return(HDF_FAILURE)));
+
+    bool isUpdatable = false;
+    OH_NN_ReturnCode result = hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel, isUpdatable);
+    EXPECT_EQ(OH_NN_FAILED, result);
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_preparemodelfrommodelcache_006
+ * @tc.desc: Verify the PrepareModelFromModelCache function return failed.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_preparemodelfrommodelcache_006, TestSize.Level0)
+{
+    LOGE("PrepareModelFromModelCache hdidevice_V2_1_preparemodelfrommodelcache_006");
+    size_t length = 100;
+    void *buffer = nullptr;
+    GetBuffer(buffer, length);
+
+    OHOS::sptr<V2_1::MockIDevice> sp = OHOS::sptr<V2_1::MockIDevice>(new (std::nothrow) V2_1::MockIDevice());
+    EXPECT_NE(sp, nullptr);
+
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(sp);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    std::vector<Buffer> modelCache = { { buffer, 100 } };
+    ModelConfig config;
+    config.mode = OH_NN_PERFORMANCE_HIGH;
+    config.priority = OH_NN_PRIORITY_HIGH;
+    OHOS::sptr<V2_1::IPreparedModel> preModel =
+        OHOS::sptr<V2_1::MockIPreparedModel>(new (std::nothrow) V2_1::MockIPreparedModel());
+    EXPECT_NE(preModel, nullptr);
+
+    std::shared_ptr<PreparedModel> preparedModel = std::make_shared<HDIPreparedModelV2_1>(preModel);
+
+    OHOS::sptr<V2_1::IPreparedModel> iPreparedModel =
+        OHOS::sptr<V2_1::MockIPreparedModel>(new (std::nothrow) V2_1::MockIPreparedModel);
+    EXPECT_CALL(*sp, PrepareModelFromModelCache(::testing::_, ::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<2>(iPreparedModel), ::testing::Return(HDF_FAILURE)));
+
+    bool isUpdatable = false;
+    OH_NN_ReturnCode result = hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel, isUpdatable);
+    EXPECT_EQ(OH_NN_FAILED, result);
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_preparemodelfrommodelcache_007
+ * @tc.desc: Verify the PrepareModelFromModelCache function return failed.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_preparemodelfrommodelcache_007, TestSize.Level0)
+{
+    LOGE("PrepareModelFromModelCache hdidevice_V2_1_preparemodelfrommodelcache_007");
+    size_t length = 100;
+    void *buffer = nullptr;
+    GetBuffer(buffer, length);
+
+    OHOS::sptr<V2_1::MockIDevice> sp = OHOS::sptr<V2_1::MockIDevice>(new (std::nothrow) V2_1::MockIDevice());
+    EXPECT_NE(sp, nullptr);
+
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(sp);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    std::vector<Buffer> modelCache = { { buffer, 100 } };
+    ModelConfig config;
+    config.mode = OH_NN_PERFORMANCE_EXTREME;
+    OHOS::sptr<V2_1::IPreparedModel> preModel =
+        OHOS::sptr<V2_1::MockIPreparedModel>(new (std::nothrow) V2_1::MockIPreparedModel());
+    EXPECT_NE(preModel, nullptr);
+
+    std::shared_ptr<PreparedModel> preparedModel = std::make_shared<HDIPreparedModelV2_1>(preModel);
+
+    OHOS::sptr<V2_1::IPreparedModel> iPreparedModel =
+        OHOS::sptr<V2_1::MockIPreparedModel>(new (std::nothrow) V2_1::MockIPreparedModel);
+    EXPECT_CALL(*sp, PrepareModelFromModelCache(::testing::_, ::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<2>(iPreparedModel), ::testing::Return(HDF_FAILURE)));
+
+    bool isUpdatable = false;
+    OH_NN_ReturnCode result = hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel, isUpdatable);
+    EXPECT_EQ(OH_NN_FAILED, result);
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_allocatebuffer_001
+ * @tc.desc: Verify the AllocateBuffer function return nullptr.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_allocatebuffer_001, TestSize.Level0)
+{
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    V2_1::SharedBuffer buffer;
+    EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(buffer), ::testing::Return(HDF_FAILURE)));
+
+    size_t length = 8;
+    void *result = hdiDevice->AllocateBuffer(length);
+    EXPECT_EQ(nullptr, result);
+    hdiDevice->ReleaseBuffer(result);
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_allocatebuffer_002
+ * @tc.desc: Verify the AllocateBuffer function return nullptr in case of allocation failure.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_allocatebuffer_002, TestSize.Level0)
+{
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    size_t length = 8;
+    void *result = hdiDevice->AllocateBuffer(length);
+    EXPECT_EQ(nullptr, result);
+    hdiDevice->ReleaseBuffer(result);
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_allocatebuffer_003
+ * @tc.desc: Verify the AllocateBuffer function return nullptr in case of 0 size.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_allocatebuffer_003, TestSize.Level0)
+{
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    size_t length = 0;
+    void *result = hdiDevice->AllocateBuffer(length);
+    EXPECT_EQ(nullptr, result);
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_allocatebuffer_004
+ * @tc.desc: Verify the AllocateBuffer function return invalid parameter in case of 0 size.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_allocatebuffer_004, TestSize.Level0)
+{
+    LOGE("AllocateBuffer hdidevice_V2_1_allocatebuffer_004");
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    size_t length = 0;
+    int fd = 0;
+    OH_NN_ReturnCode result = hdiDevice->AllocateBuffer(length, fd);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, result);
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_allocatebuffer_005
+ * @tc.desc: Verify the AllocateBuffer function return memory error in case of allocation failure.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_allocatebuffer_005, TestSize.Level0)
+{
+    LOGE("AllocateBuffer hdidevice_V2_1_allocatebuffer_005");
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(HDF_FAILURE));
+
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    size_t length = 1;
+    int fd = 0;
+    OH_NN_ReturnCode result = hdiDevice->AllocateBuffer(length, fd);
+    EXPECT_EQ(OH_NN_MEMORY_ERROR, result);
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_allocatebuffer_006
+ * @tc.desc: Verify the AllocateBuffer function return success.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_allocatebuffer_006, TestSize.Level0)
+{
+    LOGE("AllocateBuffer hdidevice_V2_1_allocatebuffer_006");
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::Return(HDF_SUCCESS));
+
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    size_t length = 1;
+    int fd = 0;
+    OH_NN_ReturnCode result = hdiDevice->AllocateBuffer(length, fd);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+}
+
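Cases allocatebuffer_004 through allocatebuffer_006 above pin down the error mapping of the fd-returning overload: a zero length is rejected before any HDI call, an HDI failure surfaces as a memory error, and success hands back the shared-memory fd. A hedged sketch of the shape those expectations imply (AllocateBufferSketch is illustrative, assumes the AllocateBuffer(size, SharedBuffer&) HDI signature stubbed by the mocks, and is not the real HDIDeviceV2_1 implementation):

OH_NN_ReturnCode AllocateBufferSketch(V2_1::INnrtDevice& device, size_t length, int& fd)
{
    if (length == 0) {
        return OH_NN_INVALID_PARAMETER;  // rejected before any HDI call (case 004)
    }

    V2_1::SharedBuffer buffer;
    if (device.AllocateBuffer(length, buffer) != HDF_SUCCESS) {
        return OH_NN_MEMORY_ERROR;       // HDI failure surfaces as a memory error (case 005)
    }

    fd = buffer.fd;
    return OH_NN_SUCCESS;                // case 006
}

+/**
+ * @tc.name: hdidevice_V2_1_releasebuffer_001
+ * @tc.desc: Verify the ReleaseBuffer function validate buffer success.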
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_releasebuffer_001, TestSize.Level0)
+{
+    size_t length = 100;
+    void *buffer = nullptr;
+    GetBuffer(buffer, length);
+
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+
+    EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), ReleaseBuffer(::testing::_))
+        .WillRepeatedly(::testing::Return(HDF_SUCCESS));
+
+    EXPECT_NE(hdiDevice, nullptr);
+    hdiDevice->ReleaseBuffer(buffer);
+    const auto &memoryManager = MemoryManager::GetInstance();
+    memoryManager->UnMapMemory(buffer);
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_releasebuffer_002
+ * @tc.desc: Verify the ReleaseBuffer function validate AllocateBuffer return nullptr.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_releasebuffer_002, TestSize.Level0)
+{
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    V2_1::SharedBuffer sharedbuffer;
+    EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), AllocateBuffer(::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<1>(sharedbuffer), ::testing::Return(HDF_FAILURE)));
+
+    EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), ReleaseBuffer(::testing::_))
+        .WillRepeatedly(::testing::Return(HDF_FAILURE));
+
+    size_t length = 8;
+    void *buffer = hdiDevice->AllocateBuffer(length);
+    hdiDevice->ReleaseBuffer(buffer);
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_releasebuffer_003
+ * @tc.desc: Verify the ReleaseBuffer function validate param buffer is nullptr.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_releasebuffer_003, TestSize.Level0)
+{
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    void *buffer = nullptr;
+    hdiDevice->ReleaseBuffer(buffer);
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_releasebuffer_004
+ * @tc.desc: Verify the ReleaseBuffer function validate invalid buffer.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_releasebuffer_004, TestSize.Level0)
+{
+    const size_t length = 100;
+    auto* buffer = new(std::nothrow) char[length];
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    hdiDevice->ReleaseBuffer(buffer);
+    delete[] buffer;
+    buffer = nullptr;
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_releasebuffer_005
+ * @tc.desc: Verify the ReleaseBuffer function validate mock object's ReleaseBuffer return failure.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_releasebuffer_005, TestSize.Level0)
+{
+    size_t length = 100;
+    void *buffer = nullptr;
+    GetBuffer(buffer, length);
+
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), ReleaseBuffer(::testing::_))
+        .WillRepeatedly(::testing::Return(HDF_FAILURE));
+
+    hdiDevice->ReleaseBuffer(buffer);
+    const auto &memoryManager = MemoryManager::GetInstance();
+    memoryManager->UnMapMemory(buffer);
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_releasebuffer_007
+ * @tc.desc: Verify the ReleaseBuffer function validate mock object's ReleaseBuffer return failure.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_releasebuffer_007, TestSize.Level0)
+{
+    LOGE("ReleaseBuffer hdidevice_V2_1_releasebuffer_007");
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), ReleaseBuffer(::testing::_))
+        .WillRepeatedly(::testing::Return(HDF_FAILURE));
+
+    int fd = 0;
+    size_t length = 1;
+    OH_NN_ReturnCode ret = hdiDevice->ReleaseBuffer(fd, length);
+    EXPECT_EQ(OH_NN_FAILED, ret);
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_releasebuffer_008
+ * @tc.desc: Verify the ReleaseBuffer function validate mock object's ReleaseBuffer return success.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_releasebuffer_008, TestSize.Level0)
+{
+    LOGE("ReleaseBuffer hdidevice_V2_1_releasebuffer_008");
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    EXPECT_CALL(*((V2_1::MockIDevice *)device.GetRefPtr()), ReleaseBuffer(::testing::_))
+        .WillRepeatedly(::testing::Return(HDF_SUCCESS));
+
+    int fd = 0;
+    size_t length = 1;
+    OH_NN_ReturnCode ret = hdiDevice->ReleaseBuffer(fd, length);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_allocatetensorbuffer_001
+ * @tc.desc: Verify the AllocateTensorBuffer function return nullptr in case of 0 size.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_allocatetensorbuffer_001, TestSize.Level0)
+{
+    LOGE("AllocateTensorBuffer hdidevice_V2_1_allocatetensorbuffer_001");
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    size_t length = 0;
+    std::shared_ptr<TensorDesc> tensor;
+    void* ret = hdiDevice->AllocateTensorBuffer(length, tensor);
+    EXPECT_EQ(nullptr, ret);
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_allocatetensorbuffer_002
+ * @tc.desc: Verify the AllocateTensorBuffer function return nullptr in case of 0 size.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_allocatetensorbuffer_002, TestSize.Level0)
+{
+    LOGE("AllocateTensorBuffer hdidevice_V2_1_allocatetensorbuffer_002");
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    size_t length = 0;
+    std::shared_ptr<NNTensor> tensor;
+    void* ret = hdiDevice->AllocateTensorBuffer(length, tensor);
+    EXPECT_EQ(nullptr, ret);
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_prepareofflinemodel_001
+ * @tc.desc: Verify the PrepareOfflineModel function return null pointer in case of model is nullptr.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_prepareofflinemodel_001, TestSize.Level0)
+{
+    LOGE("PrepareOfflineModel hdidevice_V2_1_prepareofflinemodel_001");
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    ModelConfig config;
+    std::shared_ptr<PreparedModel> preparedModel;
+    OH_NN_ReturnCode ret = hdiDevice->PrepareOfflineModel(nullptr, config, preparedModel);
+    EXPECT_EQ(OH_NN_NULL_PTR, ret);
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_prepareofflinemodel_002
+ * @tc.desc: Verify the PrepareOfflineModel function return invalid parameter.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_prepareofflinemodel_002, TestSize.Level0)
+{
+    LOGE("PrepareOfflineModel hdidevice_V2_1_prepareofflinemodel_002");
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    std::shared_ptr<mindspore::lite::LiteGraph> model = std::make_shared<mindspore::lite::LiteGraph>();
+    mindspore::lite::LiteGraph::Node node;
+    mindspore::lite::LiteGraph::Node* testNode = &node;
+    model->all_nodes_.emplace_back(testNode);
+    ModelConfig config;
+    std::shared_ptr<PreparedModel> preparedModel;
+    OH_NN_ReturnCode ret = hdiDevice->PrepareOfflineModel(model, config, preparedModel);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_prepareofflinemodel_003
+ * @tc.desc: Verify the PrepareOfflineModel function return invalid parameter.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_prepareofflinemodel_003, TestSize.Level0)
+{
+    LOGE("PrepareOfflineModel hdidevice_V2_1_prepareofflinemodel_003");
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    std::shared_ptr<mindspore::lite::LiteGraph> model = std::make_shared<mindspore::lite::LiteGraph>();
+    mindspore::lite::LiteGraph::Node node;
+    uint32_t indice = 0;
+    node.input_indices_.emplace_back(indice);
+    node.input_indices_.emplace_back(indice);
+    mindspore::lite::LiteGraph::Node* testNode = &node;
+    model->all_nodes_.emplace_back(testNode);
+    model->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create());
+    model->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create());
+    ModelConfig config;
+    std::shared_ptr<PreparedModel> preparedModel;
+    OH_NN_ReturnCode ret = hdiDevice->PrepareOfflineModel(model, config, preparedModel);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: hdidevice_V2_1_prepareofflinemodel_004
+ * @tc.desc: Verify the PrepareOfflineModel function return memory error.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIDeviceTest, hdidevice_V2_1_prepareofflinemodel_004, TestSize.Level0)
+{
+    LOGE("PrepareOfflineModel hdidevice_V2_1_prepareofflinemodel_004");
+    OHOS::sptr<V2_1::INnrtDevice> device = V2_1::INnrtDevice::Get(false);
+    std::unique_ptr<HDIDeviceV2_1> hdiDevice = std::make_unique<HDIDeviceV2_1>(device);
+    EXPECT_NE(hdiDevice, nullptr);
+
+    std::shared_ptr<mindspore::lite::LiteGraph> model = std::make_shared<mindspore::lite::LiteGraph>();
+    mindspore::lite::LiteGraph::Node node;
+    uint32_t indice = 0;
+    node.input_indices_.emplace_back(indice);
+    node.input_indices_.emplace_back(indice);
+    mindspore::lite::LiteGraph::Node* testNode = &node;
+    model->all_nodes_.emplace_back(testNode);
+
+    char a = 'a';
+    mindspore::lite::DataType data_type = mindspore::lite::DataType::DATA_TYPE_INT32;
+    int dim = 1;
+    int32_t *dims = &dim;
+    uint32_t dims_size = 1;
+    mindspore::lite::Format format = mindspore::lite::Format::FORMAT_HWCK;
+    uint8_t datas = 0;
+    uint8_t *data = &datas;
+    uint32_t data_size = 2;
+    mindspore::lite::QuantParam quant_params;
+    uint32_t quant_params_size = 0;
+    mindspore::lite::TensorPtr ptr2 = mindspore::lite::MindIR_Tensor_Create(&a, data_type, dims, dims_size,
+                                                                            format, data, data_size,
+                                                                            &quant_params, quant_params_size);
+    std::vector<uint8_t> offlineModel2 = mindspore::lite::MindIR_Tensor_GetData(ptr2);
+
+    model->all_tensors_.emplace_back(ptr2);
+    model->all_tensors_.emplace_back(ptr2);
+    ModelConfig config;
+    std::shared_ptr<PreparedModel> preparedModel;
+    OH_NN_ReturnCode ret = hdiDevice->PrepareOfflineModel(model, config, preparedModel);
+    EXPECT_EQ(OH_NN_MEMORY_ERROR, ret);
+
+    testing::Mock::AllowLeak(device.GetRefPtr());
+}
 } // namespace UnitTest
 } // namespace NeuralNetworkRuntime
-} // namespace OHOS
+} // namespace OHOS
\ No newline at end of file
diff --git a/test/unittest/components/v2_0/hdi_prepared_model/hdi_prepared_model_test.cpp b/test/unittest/components/v2_0/hdi_prepared_model/hdi_prepared_model_test.cpp
index b394f419adcb3230b89bf5c6b57f6edf4ecb80b9..343ea3137962140bc2bbea6eb982c190655f0002 100644
--- a/test/unittest/components/v2_0/hdi_prepared_model/hdi_prepared_model_test.cpp
+++ b/test/unittest/components/v2_0/hdi_prepared_model/hdi_prepared_model_test.cpp
@@ -26,6 +26,8 @@
 #include "transform.h"
 #include "test/unittest/common/v2_0/mock_idevice.h"
 #include "test/unittest/common/file_utils.h"
+#include "tensor.h"
+#include "nntensor.h"
 
 using namespace testing;
 using namespace testing::ext;
@@ -38,6 +40,21 @@ protected:
     void GetBuffer(void*& buffer, size_t length);
     void InitTensor(std::vector<IOTensor>& inputs, void* buffer, size_t length);
     OH_NN_ReturnCode Run(std::vector<IOTensor>& inputs);
+    OH_NN_ReturnCode RunFail(std::vector<IOTensor>& inputs);
+};
+
+class MockTensor : public Tensor {
+public:
+    MOCK_METHOD1(SetTensorDesc, OH_NN_ReturnCode(const TensorDesc*));
+    MOCK_METHOD0(CreateData, OH_NN_ReturnCode());
+    MOCK_METHOD1(CreateData, OH_NN_ReturnCode(size_t));
+    MOCK_METHOD3(CreateData, OH_NN_ReturnCode(int, size_t, size_t));
+    MOCK_CONST_METHOD0(GetTensorDesc, TensorDesc*());
+    MOCK_CONST_METHOD0(GetData, void*());
+    MOCK_CONST_METHOD0(GetFd, int());
+    MOCK_CONST_METHOD0(GetSize, size_t());
+    MOCK_CONST_METHOD0(GetOffset, size_t());
+    MOCK_CONST_METHOD0(GetBackendID, size_t());
+};
 
 void HDIPreparedModelTest::GetBuffer(void*& buffer, size_t length)
@@ -202,6 +219,27 @@ HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_exportmodelcache_004, TestSize.L
     EXPECT_EQ(OH_NN_SAVE_CACHE_EXCEPTION, result);
 }
 
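The MockTensor helper added above drives the failure paths of the Run cases later in this file purely through gmock defaults: an unstubbed mock returns nullptr or zero from its const getters, which the prepared model's Run rejects early. Making those defaults explicit (an illustrative stub, not part of the patch) shows what hidpreparedmodel_run_020 actually relies on:

// Equivalent explicit stubbing; a default-constructed MockTensor already
// behaves this way without any EXPECT_CALL.
void StubEmptyTensor(MockTensor& tensor)
{
    EXPECT_CALL(tensor, GetTensorDesc()).WillRepeatedly(::testing::ReturnNull());
    EXPECT_CALL(tensor, GetData()).WillRepeatedly(::testing::ReturnNull());
    EXPECT_CALL(tensor, GetSize()).WillRepeatedly(::testing::Return(0));
}

+/**
+ * @tc.name: hidpreparedmodel_exportmodelcache_005
+ * @tc.desc: Verify the ExportModelCache function return invalid parameter.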
+ * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_exportmodelcache_005, TestSize.Level0) +{ + LOGE("ExportModelCache hidpreparedmodel_exportmodelcache_005"); + std::vector bufferVect = {{100, 100, 0, 100}}; + OHOS::sptr mockPreparedModel = + OHOS::sptr(new (std::nothrow) V2_0::MockIPreparedModel()); + EXPECT_NE(mockPreparedModel, nullptr); + + std::unique_ptr preparedModel = std::make_unique(mockPreparedModel); + + std::vector modelCache; + Buffer buffer; + modelCache.emplace_back(buffer); + OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} /** * @tc.name: hidpreparedmodel_run_001 * @tc.desc: Verify the Run function return invalid parameter. @@ -336,6 +374,416 @@ HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_005, TestSize.Level0) const auto& memoryManager = MemoryManager::GetInstance(); memoryManager->UnMapMemory(buffer); } + +/** + * @tc.name: hidpreparedmodel_run_006 + * @tc.desc: Verify the Run function return success. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_006, TestSize.Level0) +{ + LOGE("Run hidpreparedmodel_run_006"); + const size_t length = 100; + void* buffer = nullptr; + GetBuffer(buffer, length); + + std::vector inputs; + std::vector outputs; + InitTensor(inputs, buffer, length); + InitTensor(outputs, buffer, length); + + std::vector> outputsDims {{0}}; + std::vector isOutputBufferEnough {}; + + OHOS::sptr sp = + OHOS::sptr(new (std::nothrow) V2_0::MockIPreparedModel()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr preparedModel = std::make_unique(sp); + EXPECT_CALL(*sp, Run(::testing::_, ::testing::_, ::testing::_)) + .WillRepeatedly( + ::testing::DoAll( + ::testing::SetArgReferee<2>(outputsDims), + ::testing::Return(HDF_SUCCESS) + ) + ); + + OH_NN_ReturnCode result = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough); + EXPECT_EQ(OH_NN_SUCCESS, result); + + const auto& memoryManager = MemoryManager::GetInstance(); + memoryManager->UnMapMemory(buffer); +} + +OH_NN_ReturnCode HDIPreparedModelTest::RunFail(std::vector& inputs) +{ + std::vector outputs; + std::vector> outputsDims {}; + std::vector isOutputBufferEnough {}; + + OHOS::sptr sp = + OHOS::sptr(new (std::nothrow) V2_0::MockIPreparedModel()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr preparedModel = std::make_unique(sp); + + OH_NN_ReturnCode result = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough); + return result; +} + +/** + * @tc.name: hidpreparedmodel_run_007 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_007, TestSize.Level0) +{ + LOGE("Run hidpreparedmodel_run_007"); + std::vector inputs; + IOTensor inputTensor; + inputTensor.dataType = OH_NN_BOOL; + inputs.emplace_back(std::move(inputTensor)); + + OH_NN_ReturnCode result = RunFail(inputs); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_008 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_008, TestSize.Level0) +{ + LOGE("Run hidpreparedmodel_run_008"); + std::vector inputs; + IOTensor inputTensor; + inputTensor.dataType = OH_NN_INT16; + inputs.emplace_back(std::move(inputTensor)); + + OH_NN_ReturnCode result = RunFail(inputs); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_009 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_009, TestSize.Level0) +{ + LOGE("Run hidpreparedmodel_run_009"); + std::vector inputs; + IOTensor inputTensor; + inputTensor.dataType = OH_NN_INT64; + inputs.emplace_back(std::move(inputTensor)); + + OH_NN_ReturnCode result = RunFail(inputs); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_010 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_010, TestSize.Level0) +{ + LOGE("Run hidpreparedmodel_run_010"); + std::vector inputs; + IOTensor inputTensor; + inputTensor.dataType = OH_NN_UINT8; + inputs.emplace_back(std::move(inputTensor)); + + OH_NN_ReturnCode result = RunFail(inputs); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_011 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_011, TestSize.Level0) +{ + LOGE("Run hidpreparedmodel_run_011"); + std::vector inputs; + IOTensor inputTensor; + inputTensor.dataType = OH_NN_UINT16; + inputs.emplace_back(std::move(inputTensor)); + + OH_NN_ReturnCode result = RunFail(inputs); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_012 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_012, TestSize.Level0) +{ + LOGE("Run hidpreparedmodel_run_012"); + std::vector inputs; + IOTensor inputTensor; + inputTensor.dataType = OH_NN_UINT32; + inputs.emplace_back(std::move(inputTensor)); + + OH_NN_ReturnCode result = RunFail(inputs); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_013 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_013, TestSize.Level0) +{ + LOGE("Run hidpreparedmodel_run_013"); + std::vector inputs; + IOTensor inputTensor; + inputTensor.dataType = OH_NN_UINT64; + inputs.emplace_back(std::move(inputTensor)); + + OH_NN_ReturnCode result = RunFail(inputs); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_014 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_014, TestSize.Level0) +{ + LOGE("Run hidpreparedmodel_run_014"); + std::vector inputs; + IOTensor inputTensor; + inputTensor.dataType = OH_NN_FLOAT16; + inputs.emplace_back(std::move(inputTensor)); + + OH_NN_ReturnCode result = RunFail(inputs); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_015 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. 
+ * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_015, TestSize.Level0) +{ + LOGE("Run hidpreparedmodel_run_015"); + std::vector inputs; + IOTensor inputTensor; + inputTensor.dataType = OH_NN_FLOAT32; + inputs.emplace_back(std::move(inputTensor)); + + OH_NN_ReturnCode result = RunFail(inputs); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_016 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_016, TestSize.Level0) +{ + LOGE("Run hidpreparedmodel_run_016"); + std::vector inputs; + IOTensor inputTensor; + inputTensor.dataType = OH_NN_FLOAT64; + inputTensor.format = OH_NN_FORMAT_NHWC; + inputs.emplace_back(std::move(inputTensor)); + + OH_NN_ReturnCode result = RunFail(inputs); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_017 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_017, TestSize.Level0) +{ + LOGE("Run hidpreparedmodel_run_017"); + std::vector inputs; + IOTensor inputTensor; + inputTensor.dataType = OH_NN_UNKNOWN; + inputTensor.format = OH_NN_FORMAT_NONE; + inputs.emplace_back(std::move(inputTensor)); + + OH_NN_ReturnCode result = RunFail(inputs); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_018 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_018, TestSize.Level0) +{ + LOGE("Run hidpreparedmodel_run_018"); + std::vector inputs; + IOTensor inputTensor; + inputTensor.dataType = OH_NN_INT32; + inputs.emplace_back(std::move(inputTensor)); + + OH_NN_ReturnCode result = RunFail(inputs); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, result); +} + +/** + * @tc.name: hidpreparedmodel_run_019 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. + * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_019, TestSize.Level0) +{ + LOGE("Run hidpreparedmodel_run_019"); + std::vector inputs; + std::vector outputs; + std::vector> outputsDims {}; + std::vector isOutputBufferEnough {}; + + inputs.emplace_back(nullptr); + + OHOS::sptr sp = + OHOS::sptr(new (std::nothrow) V2_0::MockIPreparedModel()); + EXPECT_NE(sp, nullptr); + + std::unique_ptr preparedModel = std::make_unique(sp); + OH_NN_ReturnCode ret = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough); + EXPECT_EQ(OH_NN_FAILED, ret); +} + +/** + * @tc.name: hidpreparedmodel_run_020 + * @tc.desc: Verify the Run function return invalid parameter in case of output invalid. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_020, TestSize.Level0)
+{
+    LOGE("Run hidpreparedmodel_run_020");
+    std::vector<NN_Tensor*> inputs;
+    std::vector<NN_Tensor*> outputs;
+    std::vector<std::vector<int32_t>> outputsDims {};
+    std::vector<bool> isOutputBufferEnough {};
+
+    MockTensor* tensorImpl = new (std::nothrow) MockTensor();
+    NN_Tensor* tensor = reinterpret_cast<NN_Tensor*>(tensorImpl);
+    inputs.emplace_back(tensor);
+
+    OHOS::sptr<V2_0::MockIPreparedModel> sp =
+        OHOS::sptr<V2_0::MockIPreparedModel>(new (std::nothrow) V2_0::MockIPreparedModel());
+    EXPECT_NE(sp, nullptr);
+
+    std::unique_ptr<HDIPreparedModelV2_0> preparedModel = std::make_unique<HDIPreparedModelV2_0>(sp);
+    OH_NN_ReturnCode ret = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough);
+    EXPECT_EQ(OH_NN_FAILED, ret);
+
+    testing::Mock::AllowLeak(tensorImpl);
+}
+
+/**
+ * @tc.name: hidpreparedmodel_run_021
+ * @tc.desc: Verify the Run function return failed in case of tensor data is not set.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_021, TestSize.Level0)
+{
+    LOGE("Run hidpreparedmodel_run_021");
+    std::vector<NN_Tensor*> inputs;
+    std::vector<NN_Tensor*> outputs;
+    std::vector<std::vector<int32_t>> outputsDims {};
+    std::vector<bool> isOutputBufferEnough {};
+
+    size_t deviceId = 1;
+    NNTensor2_0* tensorImpl = new (std::nothrow) NNTensor2_0(deviceId);
+    TensorDesc tensorDesc;
+
+    tensorImpl->SetTensorDesc(&tensorDesc);
+    NN_Tensor* tensor = reinterpret_cast<NN_Tensor*>(tensorImpl);
+    inputs.emplace_back(tensor);
+
+    OHOS::sptr<V2_0::MockIPreparedModel> sp =
+        OHOS::sptr<V2_0::MockIPreparedModel>(new (std::nothrow) V2_0::MockIPreparedModel());
+    EXPECT_NE(sp, nullptr);
+
+    std::unique_ptr<HDIPreparedModelV2_0> preparedModel = std::make_unique<HDIPreparedModelV2_0>(sp);
+    OH_NN_ReturnCode ret = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough);
+    EXPECT_EQ(OH_NN_FAILED, ret);
+}
+
+/**
+ * @tc.name: hidpreparedmodel_run_022
+ * @tc.desc: Verify the Run function return unavailable device.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_022, TestSize.Level0)
+{
+    LOGE("Run hidpreparedmodel_run_022");
+    std::vector<NN_Tensor*> inputs;
+    std::vector<NN_Tensor*> outputs;
+    std::vector<std::vector<int32_t>> outputsDims {};
+    std::vector<bool> isOutputBufferEnough {};
+
+    size_t backendId = 1;
+    NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId);
+    EXPECT_NE(nullptr, nnTensor);
+
+    TensorDesc tensorDesc;
+    char name = 'a';
+    tensorDesc.SetName(&name);
+    tensorDesc.SetDataType(OH_NN_UINT32);
+    tensorDesc.SetFormat(OH_NN_FORMAT_NCHW);
+    int32_t expectDim[2] = {3, 3};
+    int32_t* ptr = expectDim;
+    uint32_t dimensionCount = 2;
+    tensorDesc.SetShape(ptr, dimensionCount);
+
+    OH_NN_ReturnCode retSetTensorDesc = nnTensor->SetTensorDesc(&tensorDesc);
+    EXPECT_EQ(OH_NN_SUCCESS, retSetTensorDesc);
+
+    nnTensor->SetSize(200);
+    nnTensor->SetOffset(0);
+    float m_dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    void* buffer = m_dataArry;
+    nnTensor->SetData(buffer);
+
+    NN_Tensor* tensor = reinterpret_cast<NN_Tensor*>(nnTensor);
+    inputs.emplace_back(tensor);
+
+    OHOS::sptr<V2_0::MockIPreparedModel> sp =
+        OHOS::sptr<V2_0::MockIPreparedModel>(new (std::nothrow) V2_0::MockIPreparedModel());
+    EXPECT_NE(sp, nullptr);
+
+    std::unique_ptr<HDIPreparedModelV2_0> preparedModel = std::make_unique<HDIPreparedModelV2_0>(sp);
+    OH_NN_ReturnCode ret = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough);
+    EXPECT_EQ(OH_NN_UNAVAILABLE_DEVICE, ret);
+}
+
+/**
+ * @tc.name: hidpreparedmodel_getmodelid_001
+ * @tc.desc: Verify the GetModelID function return success.
+ * @tc.type: FUNC + */ +HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_getmodelid_001, TestSize.Level0) +{ + LOGE("GetModelID hidpreparedmodel_getmodelid_001"); + OHOS::sptr sp = + OHOS::sptr(new (std::nothrow) V2_0::MockIPreparedModel()); + EXPECT_NE(sp, nullptr); + + uint32_t index = 0; + std::unique_ptr preparedModel = std::make_unique(sp); + OH_NN_ReturnCode ret = preparedModel->GetModelID(index); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} } // namespace UnitTest } // namespace NeuralNetworkRuntime } // namespace OHOS diff --git a/test/unittest/components/v2_0/inner_model/inner_model_test.cpp b/test/unittest/components/v2_0/inner_model/inner_model_test.cpp index da5a5d0670cf6989ba4909f84efb8cc6f7f393fd..ccdaa28d5a4646cad889db81d2a85d174bccc6c7 100644 --- a/test/unittest/components/v2_0/inner_model/inner_model_test.cpp +++ b/test/unittest/components/v2_0/inner_model/inner_model_test.cpp @@ -21,10 +21,21 @@ #include "nn_tensor.h" #include "inner_model.h" +#include + +#include "lite_graph_to_hdi_model_v2_0.h" +#include "device.h" +#include "interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h" +#include "nnbackend.h" +#include "ops_registry.h" +#include "transform.h" + using namespace testing; using namespace testing::ext; using namespace OHOS::NeuralNetworkRuntime; +namespace MSLITE = mindspore::lite; + namespace NNRT { namespace UnitTest { class InnerModelTest : public testing::Test { @@ -879,3 +890,2238 @@ HWTEST_F(InnerModelTest, inner_model_get_supported_operation_004, TestSize.Level } } // namespace UnitTest } // namespace NNRT + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace V2 { +namespace UnitTest { +class LiteGraphToHDIModelV2Test : public testing::Test { +public: + LiteGraphToHDIModelV2Test() = default; + ~LiteGraphToHDIModelV2Test() = default; +public: + std::vector m_inputs{0, 1}; + std::vector m_outputs{2}; + std::vector m_param{3}; + std::vector m_input_dim{3, 3}; + std::vector m_output_dim{3, 3}; + std::vector m_param_dim{}; +}; + +MSLITE::LiteGraph::Node* getNode(void* primitive) +{ + MSLITE::LiteGraph::Node* node = new(std::nothrow) MSLITE::LiteGraph::Node(); + node->name_ = "NNRt_SubGraph"; + node->quant_type_ = 1; + node->primitive_ = primitive; + node->input_indices_ = {1, 1, 1, 1}; + node->output_indices_ = {1, 1, 1, 1}; + return node; +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_001 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_001, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_001"); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(nullptr, tensorBuffer); + EXPECT_EQ(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_002 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_002, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_002"); + std::shared_ptr liteGraph = std::make_shared(); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {0, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_EQ(nullptr, model); + + uint8_t *mmapPtr = static_cast(mmap(nullptr, + tensorBuffer.bufferSize, PROT_READ | PROT_WRITE, MAP_SHARED, tensorBuffer.fd, 0)); + EXPECT_EQ(MAP_FAILED, mmapPtr); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_003 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_003, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_003"); + std::shared_ptr liteGraph = std::make_shared(); + MSLITE::LiteGraph::SubGraph* subGraph = new (std::nothrow) MSLITE::LiteGraph::SubGraph(); + subGraph->name_ = "NNRt_SubGraph"; + subGraph->input_indices_ = {1, 1, 1, 1}; + subGraph->output_indices_ = {1, 1, 1, 1}; + subGraph->node_indices_ = {1, 1, 1, 1}; + + void* tp = MSLITE::MindIR_Tensor_Create(); + + liteGraph.get()->all_tensors_.emplace_back(tp); + liteGraph.get()->sub_graphs_.emplace_back(subGraph); + + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 1, 1, 1}; + + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); + + uint8_t *mmapPtr = static_cast(mmap(nullptr, + tensorBuffer.bufferSize, PROT_READ | PROT_WRITE, MAP_SHARED, tensorBuffer.fd, 0)); + EXPECT_EQ(MAP_FAILED, mmapPtr); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_004 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_004, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_004"); + std::shared_ptr liteGraph = std::make_shared(); + liteGraph.get()->all_nodes_.emplace_back(nullptr); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_EQ(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_005 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_005, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_005"); + std::shared_ptr liteGraph = std::make_shared(); + MSLITE::LiteGraph::Node* node = new(std::nothrow) MSLITE::LiteGraph::Node(); + liteGraph.get()->all_nodes_.emplace_back(node); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_EQ(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_006 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_006, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_006"); + std::shared_ptr liteGraph = std::make_shared(); + + float alpha {0.0f}; + float minVal {0.0f}; + float maxVal {0.0f}; + bool approximate {false}; + mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_ABS}; + + void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, alpha, + minVal, maxVal, approximate); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_007 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_007, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_007"); + std::shared_ptr liteGraph = std::make_shared(); + + int8_t num = 1; + int8_t* fuseData = # + mindspore::lite::ActivationType type = NNToMS::TransfromFusionType(static_cast(*fuseData)); + void* primitive = mindspore::lite::MindIR_AddFusion_CreatePrimitive(type); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_008 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_008, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_008"); + std::shared_ptr liteGraph = std::make_shared(); + + int64_t keepDims {0}; + void* primitive = mindspore::lite::MindIR_All_CreatePrimitive(keepDims); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_009 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_009, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_009"); + std::shared_ptr liteGraph = std::make_shared(); + + int64_t axis {-1}; + int64_t topK {1}; + bool keepDims {false}; + bool outMaxValue {false}; + void* primitive = mindspore::lite::MindIR_ArgMaxFusion_CreatePrimitive(axis, topK, keepDims, outMaxValue); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_010 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_010, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_010"); + std::shared_ptr liteGraph = std::make_shared(); + + int64_t summarize {0}; + void* primitive = mindspore::lite::MindIR_Assert_CreatePrimitive(summarize); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_011 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_011, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_011"); + std::shared_ptr liteGraph = std::make_shared(); + + std::vector kernelSize; + std::vector pad; + std::vector strides; + mindspore::lite::PadMode padMode {mindspore::lite::PAD_MODE_PAD}; + mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + mindspore::lite::RoundMode roundMode {mindspore::lite::ROUND_MODE_FLOOR}; + mindspore::lite::Format format {mindspore::lite::FORMAT_NCHW}; + bool global {false}; + void* primitive = mindspore::lite::MindIR_AvgPoolFusion_CreatePrimitive(kernelSize, strides, pad, + padMode, roundMode, format, global, activationType); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_012 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_012, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_012"); + std::shared_ptr liteGraph = std::make_shared(); + + std::vector blockSize; + std::vector> crops; + void* primitive = mindspore::lite::MindIR_BatchToSpaceND_CreatePrimitive(blockSize, crops); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_013 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_013, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_013"); + std::shared_ptr liteGraph = std::make_shared(); + + float epsilon {0.0001f}; + void* primitive = mindspore::lite::MindIR_FusedBatchNorm_CreatePrimitive(epsilon); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_014 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_014, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_014"); + std::shared_ptr liteGraph = std::make_shared(); + + float epsilon {0.0001f}; + void* primitive = mindspore::lite::MindIR_FusedBatchNorm_CreatePrimitive(epsilon); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_015 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_015, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_015"); + std::shared_ptr liteGraph = std::make_shared(); + + void* primitive = mindspore::lite::MindIR_BiasAdd_CreatePrimitive(); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_016 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_016, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_016"); + std::shared_ptr liteGraph = std::make_shared(); + + std::vector shape; + void* primitive = mindspore::lite::MindIR_BroadcastTo_CreatePrimitive(shape); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_017 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_017
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Cast node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_017, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_017");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Cast_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_018
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Ceil node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_018, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_018");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Ceil_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_019
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Clip node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_019, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_019");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    float max {0.0f};
+    float min {0.0f};
+    void* primitive = mindspore::lite::MindIR_Clip_CreatePrimitive(max, min);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_020
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Concat node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_020, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_020");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    int64_t axis{0};
+    void* primitive = mindspore::lite::MindIR_Concat_CreatePrimitive(axis);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
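+
+// None of these cases release the returned model, which a short-lived test process
+// tolerates but which leaks by design. A cleanup step would look like the sketch below,
+// assuming the HDIModel_Destroy helper declared alongside LiteGraph_To_HDIModel; if no
+// such helper is available, the model must be freed however the conversion API documents:
+//     OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+//     EXPECT_NE(nullptr, model);
+//     OHOS::HDI::Nnrt::V2_0::HDIModel_Destroy(&model);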
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_021
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single ConstantOfShape node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_021, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_021");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    int64_t dataType {0};
+    std::vector<float> value;
+    void* primitive = mindspore::lite::MindIR_ConstantOfShape_CreatePrimitive(dataType, value);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_022
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Conv2dTransposeFusion node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_022, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_022");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    int64_t group {1};
+    int64_t inChannel {0};
+    int64_t outChannel {0};
+    std::vector<int64_t> kernelSize;
+    std::vector<int64_t> strides;
+    std::vector<int64_t> padList;
+    std::vector<int64_t> dilation;
+    std::vector<int64_t> outputPaddings;
+    mindspore::lite::PadMode padMode{mindspore::lite::PAD_MODE_PAD};
+    mindspore::lite::ActivationType activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION};
+    void* primitive = MindIR_Conv2dTransposeFusion_CreatePrimitive(kernelSize,
+        strides, dilation, padMode, padList, group, inChannel, outChannel,
+        activationType, outputPaddings);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_023
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Cos node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_023, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_023");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Cos_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_024
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Crop node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_024, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_024");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    int64_t axis {0};
+    std::vector<int64_t> offset;
+    void* primitive = mindspore::lite::MindIR_Crop_CreatePrimitive(axis, offset);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_025
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single DepthToSpace node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_025, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_025");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    int64_t blockSize {0};
+    std::string mode;
+    mindspore::lite::Format format {mindspore::lite::FORMAT_NCHW};
+    void* primitive = mindspore::lite::MindIR_DepthToSpace_CreatePrimitive(blockSize, format, mode);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_026
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single DetectionPostProcess node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_026, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_026");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    int64_t inputSize {0};
+    std::vector<float> scale;
+    float nmsIoUThreshold {0.0f};
+    float nmsScoreThreshold {0.0f};
+    int64_t maxDetections {0};
+    int64_t detectionsPerClass {0};
+    int64_t maxClassesPerDetection {0};
+    int64_t numClasses {0};
+    bool useRegularNms {false};
+    bool outQuantized {false};
+    mindspore::lite::Format format {mindspore::lite::FORMAT_NCHW};
+    void* primitive = mindspore::lite::MindIR_DetectionPostProcess_CreatePrimitive(format, inputSize, scale,
+        nmsIoUThreshold, nmsScoreThreshold, maxDetections, detectionsPerClass, maxClassesPerDetection,
+        numClasses, useRegularNms, outQuantized);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_027
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Eltwise node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_027, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_027");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    mindspore::lite::EltwiseMode mode {mindspore::lite::ELTWISE_MODE_PROD};
+    void* primitive = mindspore::lite::MindIR_Eltwise_CreatePrimitive(mode);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_028
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Equal node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_028, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_028");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Equal_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_029
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Erf node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_029, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_029");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Erf_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_030
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single ExpFusion node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_030, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_030");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    float base {-1.0f};
+    float scale {1.0f};
+    float shift {0.0f};
+    void* primitive = mindspore::lite::MindIR_ExpFusion_CreatePrimitive(base, scale, shift);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_031
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single ExpandDims node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_031, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_031");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_ExpandDims_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_032
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Fill node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_032, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_032");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Fill_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_033
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Flatten node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_033, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_033");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    int64_t axis {1};
+    void* primitive = mindspore::lite::MindIR_Flatten_CreatePrimitive(axis);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_034
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Floor node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_034, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_034");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Floor_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_035
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single FullConnection node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_035, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_035");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    bool hasBias {false};
+    bool useAxis {false};
+    int64_t axis {0};
+    mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION};
+    void* primitive = mindspore::lite::MindIR_FullConnection_CreatePrimitive(hasBias, useAxis,
+        axis, activationType);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_036
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Gather node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_036, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_036");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Gather_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_037
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single GatherNd node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_037, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_037");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_GatherNd_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_038
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Activation (GELU) node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_038, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_038");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    mindspore::lite::ActivationType activationType = mindspore::lite::ACTIVATION_TYPE_GELU;
+    float alpha = 0.0f;
+    float minVal = 0.0f;
+    float maxVal = 0.0f;
+    bool approximate = false;
+    void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType,
+        alpha, minVal, maxVal, approximate);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
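+
+// Cases 038, 041, 046, 072, 080 and 085 all go through the shared
+// MindIR_Activation_CreatePrimitive entry point; only the ActivationType differs, while
+// alpha, minVal, maxVal and approximate stay at neutral values. Each case therefore
+// checks that the converter accepts the activation kind itself, not its parameters.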
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_039
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Greater node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_039, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_039");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Greater_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_040
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single GreaterEqual node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_040, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_040");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_GreaterEqual_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_041
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Activation (HSIGMOID) node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_041, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_041");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    mindspore::lite::ActivationType activationType = mindspore::lite::ACTIVATION_TYPE_HSIGMOID;
+    float alpha = 0.0f;
+    float minVal = 0.0f;
+    float maxVal = 0.0f;
+    bool approximate = false;
+    void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType,
+        alpha, minVal, maxVal, approximate);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_042
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single InstanceNorm node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_042, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_042");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    float epsilon {0.0f};
+    void* primitive = mindspore::lite::MindIR_InstanceNorm_CreatePrimitive(epsilon);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_043
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single InstanceNorm node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_043, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_043");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    float epsilon {0.0f};
+    void* primitive = mindspore::lite::MindIR_InstanceNorm_CreatePrimitive(epsilon);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_044
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single L2NormalizeFusion node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_044, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_044");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    std::vector<int64_t> axis;
+    float epsilon {1e-6f};
+    mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION};
+    void* primitive = mindspore::lite::MindIR_L2NormalizeFusion_CreatePrimitive(axis, epsilon, activationType);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_045
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single LayerNormFusion node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_045, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_045");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    int64_t beginNormAxis {1};
+    float epsilon {1e-7f};
+    bool elementwiseAffine {true};
+    int64_t beginParamsAxis {1};
+    void* primitive = mindspore::lite::MindIR_LayerNormFusion_CreatePrimitive(beginNormAxis,
+        epsilon, elementwiseAffine, beginParamsAxis);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_046
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Activation (LEAKY_RELU) node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_046, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_046");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    float alpha {0.0f};
+    float minVal {0.0f};
+    float maxVal {0.0f};
+    bool approximate {false};
+    mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_LEAKY_RELU};
+    void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, alpha,
+        minVal, maxVal, approximate);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_047
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Less node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_047, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_047");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Less_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_048
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single LessEqual node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_048, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_048");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_LessEqual_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_049
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Log node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_049, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_049");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Log_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_050
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single LogSoftmax node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_050, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_050");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    int64_t axis {0};
+    void* primitive = mindspore::lite::MindIR_LogSoftmax_CreatePrimitive(axis);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_051
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single LogicalAnd node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_051, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_051");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_LogicalAnd_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_052
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single LogicalNot node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_052, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_052");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_LogicalNot_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_053
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single LogicalOr node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_053, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_053");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_LogicalOr_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_054
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single LRN node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_054, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_054");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    int64_t depthRadius {0};
+    float bias {0.0f};
+    float alpha {0.0f};
+    float beta {0.0f};
+    std::string normRegion {"ACROSS_CHANNELS"};
+    void* primitive = mindspore::lite::MindIR_LRN_CreatePrimitive(depthRadius, bias, alpha,
+        beta, normRegion);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_055
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single LSTM node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_055, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_055");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    bool bidirectional {false};
+    bool hasBias {false};
+    int64_t inputSize {0};
+    int64_t hiddenSize {0};
+    int64_t numLayers {0};
+    int64_t numDirections {0};
+    float dropout {0.0f};
+    float zoneoutCell {0.0f};
+    float zoneoutHidden {0.0f};
+    int64_t projSize {0};
+    void* primitive = mindspore::lite::MindIR_LSTM_CreatePrimitive(bidirectional, hasBias, inputSize,
+        hiddenSize, numLayers, numDirections, dropout, zoneoutCell, zoneoutHidden, projSize);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_056
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Maximum node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_056, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_056");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Maximum_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
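+
+// Note the asymmetry between the two pooling creators used in this file: the
+// AvgPoolFusion call in case 011 takes a RoundMode argument, while the MaxPoolFusion
+// call in the next case does not, which is why no RoundMode is declared there.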
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_057
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single MaxPoolFusion node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_057, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_057");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    std::vector<int64_t> kernelSize;
+    std::vector<int64_t> pad;
+    std::vector<int64_t> strides;
+    mindspore::lite::PadMode padMode {mindspore::lite::PAD_MODE_PAD};
+    mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION};
+    mindspore::lite::Format format {mindspore::lite::FORMAT_NCHW};
+    bool global {false};
+    void* primitive = MindIR_MaxPoolFusion_CreatePrimitive(kernelSize, strides, pad,
+        padMode, format, global, activationType);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_058
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Minimum node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_058, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_058");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Minimum_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_059
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Mod node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_059, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_059");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Mod_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_060
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single MulFusion node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_060, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_060");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION};
+    void* primitive = mindspore::lite::MindIR_MulFusion_CreatePrimitive(activationType);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_061
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Neg node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_061, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_061");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Neg_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_062
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single NotEqual node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_062, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_062");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_NotEqual_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_063
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single OneHot node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_063, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_063");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    int64_t axis {-1};
+    void* primitive = mindspore::lite::MindIR_OneHot_CreatePrimitive(axis);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_064
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single PadFusion node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_064, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_064");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    std::vector<std::vector<int64_t>> paddings;
+    float constantValue {0.0f};
+    mindspore::lite::PaddingMode paddingMode {mindspore::lite::PADDING_MODE_CONSTANT};
+    void* primitive = MindIR_PadFusion_CreatePrimitive(paddings, paddingMode, constantValue);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_065
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single PowFusion node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_065, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_065");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    float scale {1.0f};
+    float shift {0.0f};
+    void* primitive = mindspore::lite::MindIR_PowFusion_CreatePrimitive(scale, shift);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_066
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single PReLUFusion node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_066, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_066");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    bool channelShared{false};
+    void* primitive = mindspore::lite::MindIR_PReLUFusion_CreatePrimitive(channelShared);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_067
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single QuantDTypeCast node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_067, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_067");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    uint64_t srcT {0};
+    uint64_t dstT {0};
+    int64_t axis {0};
+    void* primitive = mindspore::lite::MindIR_QuantDTypeCast_CreatePrimitive(srcT, dstT, axis);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_068
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Range node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_068, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_068");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    int64_t dType {0};
+    int64_t start {0};
+    int64_t limit {0};
+    int64_t delta {1};
+    void* primitive = mindspore::lite::MindIR_Range_CreatePrimitive(dType, start, limit, delta);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
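+
+// In case 067 above, srcT/dstT are left at 0; in a real graph they would carry the
+// data-type ids of the tensor before and after the cast (an assumption based on the
+// usual QuantDTypeCast semantics, not on anything this file checks). Case 068's
+// start/limit/delta of 0/0/1 likewise describe an empty half-open range; only the
+// conversion result matters here.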
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_069
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Rank node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_069, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_069");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Rank_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_070
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Reciprocal node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_070, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_070");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Reciprocal_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_071
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single ReduceFusion node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_071, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_071");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    mindspore::lite::ReduceMode mode {mindspore::lite::REDUCE_MODE_ALL};
+    float coeff {0.0f};
+    bool reduceToEnd {false};
+    bool keepDims {false};
+    void* primitive = mindspore::lite::MindIR_ReduceFusion_CreatePrimitive(keepDims, mode, reduceToEnd, coeff);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_072
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Activation (RELU6) node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_072, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_072");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    float alpha{0.0f};
+    float minVal{0.0f};
+    float maxVal{0.0f};
+    bool approximate{false};
+    mindspore::lite::ActivationType activationType{mindspore::lite::ACTIVATION_TYPE_RELU6};
+    void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, alpha,
+        minVal, maxVal, approximate);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_073
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Reshape node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_073, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_073");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Reshape_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_074
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Resize node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_074, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_074");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    float cubicCoeff{0.0f};
+    float extrapolationValue{0.0f};
+    mindspore::lite::NearestMode nearestMode{mindspore::lite::NEAREST_MODE_NORMAL};
+    mindspore::lite::ResizeMethod method {mindspore::lite::RESIZE_METHOD_LINEAR};
+    uint64_t newHeight{0};
+    uint64_t newWidth{0};
+    bool preserveAspectRatio{false};
+    mindspore::lite::CoordinateTransformMode coordinateTransformMode {
+        mindspore::lite::COORDINATE_TRANSFORM_MODE_ASYMMETRIC};
+    uint64_t excludeOutside{0};
+    void* primitive = mindspore::lite::MindIR_Resize_CreatePrimitive(method, newHeight, newWidth,
+        preserveAspectRatio, coordinateTransformMode, cubicCoeff, excludeOutside,
+        extrapolationValue, nearestMode);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_075
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Round node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_075, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_075");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Round_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
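+
+// The Resize case above (074) sets newHeight/newWidth to 0 and picks arbitrary
+// coordinate-transform and nearest modes: the values would not describe a usable
+// resize, which is deliberate, since only the primitive-to-HDI conversion is under test.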
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_076
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Rsqrt node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_076, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_076");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Rsqrt_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_077
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single ScaleFusion node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_077, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_077");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    mindspore::lite::ActivationType activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION};
+    int64_t axis {0};
+    void* primitive = mindspore::lite::MindIR_ScaleFusion_CreatePrimitive(axis, activationType);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_078
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single ScatterNd node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_078, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_078");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_ScatterNd_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_079
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Select node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_079, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_079");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Select_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_080
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Activation (SIGMOID) node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_080, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_080");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    float alpha{0.0f};
+    float minVal{0.0f};
+    float maxVal{0.0f};
+    bool approximate{false};
+    mindspore::lite::ActivationType activationType{mindspore::lite::ACTIVATION_TYPE_SIGMOID};
+    void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, alpha, minVal,
+        maxVal, approximate);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_081
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Sin node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_081, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_081");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Sin_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_082
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single SpaceToDepth node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_082, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_082");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    mindspore::lite::Format format {mindspore::lite::FORMAT_NCHW};
+    int64_t blockSize {0};
+    void* primitive = mindspore::lite::MindIR_SpaceToDepth_CreatePrimitive(blockSize, format);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_083
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single SparseToDense node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_083, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_083");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_SparseToDense_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_084
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Square node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_084, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_084");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Square_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_085
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Activation (SWISH) node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_085, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_085");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    float alpha {0.0f};
+    float minVal {0.0f};
+    float maxVal {0.0f};
+    bool approximate {false};
+    mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_SWISH};
+    void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, alpha,
+        minVal, maxVal, approximate);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_086
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Unstack node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_086, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_086");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    int64_t axis {0};
+    void* primitive = mindspore::lite::MindIR_Unstack_CreatePrimitive(axis);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_087
+ * @tc.desc: Verify that LiteGraph_To_HDIModel builds a non-null model from a graph with a single Where node.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_087, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_087");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    void* primitive = mindspore::lite::MindIR_Where_CreatePrimitive();
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model* model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+}
+ * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_088, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_088"); + std::shared_ptr liteGraph = std::make_shared(); + + void* primitive = mindspore::lite::MindIR_Shape_CreatePrimitive(); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_089 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_089, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_089"); + std::shared_ptr liteGraph = std::make_shared(); + + std::vector axis; + void* primitive = mindspore::lite::MindIR_Unsqueeze_CreatePrimitive(axis); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_090 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_090, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_090"); + std::shared_ptr liteGraph = std::make_shared(); + + int64_t inChannel{0}; + int64_t outChannel{0}; + std::vector kernelSize; + std::vector strides; + std::vector pad; + std::vector dilation; + mindspore::lite::PadMode padMode{mindspore::lite::PAD_MODE_PAD}; + mindspore::lite::ActivationType activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + void* primitive = mindspore::lite::MindIR_Conv2DFusion_CreatePrimitive(kernelSize, strides, + dilation, padMode, pad, inChannel, inChannel, outChannel, activationType); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_091 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_091, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_091"); + std::shared_ptr liteGraph = std::make_shared(); + + mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + void* primitive = mindspore::lite::MindIR_DivFusion_CreatePrimitive(activationType); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_092 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_092, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_092"); + std::shared_ptr liteGraph = std::make_shared(); + + mindspore::lite::ActivationType activationType{mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + bool transposeA{false}; + bool transposeB{false}; + void* primitive = mindspore::lite::MindIR_MatMulFusion_CreatePrimitive(transposeA, transposeB, activationType); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_093 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_093, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_093"); + std::shared_ptr liteGraph = std::make_shared(); + + std::vector axes; + void* primitive = mindspore::lite::MindIR_SliceFusion_CreatePrimitive(axes); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_094 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_094, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_094"); + std::shared_ptr liteGraph = std::make_shared(); + + std::vector axis; + void* primitive = mindspore::lite::MindIR_Softmax_CreatePrimitive(axis); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_095 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_095, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_095"); + std::shared_ptr liteGraph = std::make_shared(); + + std::vector> paddings; + std::vector block_shape {}; + void* primitive = mindspore::lite::MindIR_SpaceToBatchND_CreatePrimitive(block_shape, paddings); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_096 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_096, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_096"); + std::shared_ptr liteGraph = std::make_shared(); + + int64_t outputNum {0}; + std::vector sizeSplits; + int64_t axis {0}; + void* primitive = mindspore::lite::MindIR_Split_CreatePrimitive(outputNum, sizeSplits, axis); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_097 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_097, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_097"); + std::shared_ptr liteGraph = std::make_shared(); + + void* primitive = mindspore::lite::MindIR_Sqrt_CreatePrimitive(); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_098 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_098, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_098"); + std::shared_ptr liteGraph = std::make_shared(); + + void* primitive = mindspore::lite::MindIR_SquaredDifference_CreatePrimitive(); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_099 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_099, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_099"); + std::shared_ptr liteGraph = std::make_shared(); + + std::vector axis; + void* primitive = mindspore::lite::MindIR_Squeeze_CreatePrimitive(axis); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_100 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_100, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_100"); + std::shared_ptr liteGraph = std::make_shared(); + + int64_t axis = {0}; + void* primitive = mindspore::lite::MindIR_Stack_CreatePrimitive(axis); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_101 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_101, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_101"); + std::shared_ptr liteGraph = std::make_shared(); + + int64_t beginMask = {0}; + int64_t endMask = {0}; + int64_t ellipsisMask = {0}; + int64_t newAxisMask = {0}; + int64_t shrinkAxisMask = {0}; + void* primitive = mindspore::lite::MindIR_StridedSlice_CreatePrimitive(beginMask, endMask, ellipsisMask, + newAxisMask, shrinkAxisMask); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_102 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_102, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_102"); + std::shared_ptr liteGraph = std::make_shared(); + + mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_NO_ACTIVATION}; + void* primitive = mindspore::lite::MindIR_SubFusion_CreatePrimitive(activationType); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_103 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_103, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_103"); + std::shared_ptr liteGraph = std::make_shared(); + + std::vector dims {0}; + void* primitive = mindspore::lite::MindIR_TileFusion_CreatePrimitive(dims); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_104 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
+ * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_104, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_104"); + std::shared_ptr liteGraph = std::make_shared(); + + int64_t axis {0}; + bool sorted {true}; + void* primitive = mindspore::lite::MindIR_TopKFusion_CreatePrimitive(sorted, axis); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_105 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_105, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_105"); + std::shared_ptr liteGraph = std::make_shared(); + + void* primitive = mindspore::lite::MindIR_Transpose_CreatePrimitive(); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_106 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_106, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_106"); + std::shared_ptr liteGraph = std::make_shared(); + + std::vector axis; + void* primitive = mindspore::lite::MindIR_Unsqueeze_CreatePrimitive(axis); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_107 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. + * @tc.type: FUNC + */ +HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_107, TestSize.Level0) +{ + LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_107"); + std::shared_ptr liteGraph = std::make_shared(); + + std::vector axis; + void* primitive = mindspore::lite::MindIR_Unsqueeze_CreatePrimitive(axis); + + liteGraph.get()->all_nodes_.emplace_back(getNode(primitive)); + OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0}; + OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer); + EXPECT_NE(nullptr, model); +} + +/** + * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_108 + * @tc.desc: Verify the QuantParams function return nullptr in case of fd -1. 
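+// The next case differs from the single-primitive conversions above: it
+// attaches a sub graph and a tensor list, then checks that the shared buffer
+// was never made mappable. Since tensorBuffer.fd is -1, the mmap call in the
+// test must fail: mapping a negative descriptor without MAP_ANONYMOUS returns
+// MAP_FAILED (typically with errno EBADF).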
+/**
+ * @tc.name: litegraphtohdimodeltest_litegraph_to_hdimodel_108
+ * @tc.desc: Verify the LiteGraph_To_HDIModel function returns a valid model in case of a sub graph and fd -1.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_litegraph_to_hdimodel_108, TestSize.Level0)
+{
+    LOGE("LiteGraph_To_HDIModel litegraphtohdimodeltest_litegraph_to_hdimodel_108");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+    MSLITE::LiteGraph::SubGraph* subGraph = new (std::nothrow) MSLITE::LiteGraph::SubGraph();
+    EXPECT_NE(nullptr, subGraph);
+    subGraph->name_ = "NNRt_SubGraph";
+    subGraph->input_indices_ = {1, 1, 1, 1};
+    subGraph->output_indices_ = {1, 1, 1, 1};
+    subGraph->node_indices_ = {1, 1, 1, 1};
+
+    void* tp = MSLITE::MindIR_Tensor_Create();
+
+    liteGraph.get()->all_tensors_.emplace_back(tp);
+    liteGraph.get()->all_tensors_.emplace_back(nullptr);
+    liteGraph.get()->sub_graphs_.emplace_back(subGraph);
+
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 1, 1, 1};
+
+    OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+
+    uint8_t *mmapPtr = static_cast<uint8_t *>(mmap(nullptr,
+        tensorBuffer.bufferSize, PROT_READ | PROT_WRITE, MAP_SHARED, tensorBuffer.fd, 0));
+    EXPECT_EQ(MAP_FAILED, mmapPtr);
+}
+
+/**
+ * @tc.name: litegraphtohdimodeltest_hdimodel_destroy_001
+ * @tc.desc: Verify the HDIModel_Destroy function releases a model built by LiteGraph_To_HDIModel.
+ * @tc.type: FUNC
+ */
+HWTEST_F(LiteGraphToHDIModelV2Test, litegraphtohdimodeltest_hdimodel_destroy_001, TestSize.Level0)
+{
+    LOGE("HDIModel_Destroy litegraphtohdimodeltest_hdimodel_destroy_001");
+    std::shared_ptr<mindspore::lite::LiteGraph> liteGraph = std::make_shared<mindspore::lite::LiteGraph>();
+
+    float alpha {0.0f};
+    float minVal {0.0f};
+    float maxVal {0.0f};
+    bool approximate {false};
+    mindspore::lite::ActivationType activationType {mindspore::lite::ACTIVATION_TYPE_ABS};
+
+    void* primitive = mindspore::lite::MindIR_Activation_CreatePrimitive(activationType, alpha,
+        minVal, maxVal, approximate);
+
+    liteGraph.get()->all_nodes_.emplace_back(getNode(primitive));
+    OHOS::HDI::Nnrt::V2_0::SharedBuffer tensorBuffer {-1, 0, 0, 0};
+    OHOS::HDI::Nnrt::V2_0::Model * model = LiteGraph_To_HDIModel(liteGraph.get(), tensorBuffer);
+    EXPECT_NE(nullptr, model);
+    HDIModel_Destroy(&model);
+}
+} // namespace UnitTest
+} // namespace V1
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
\ No newline at end of file
diff --git a/test/unittest/components/v2_1/hdi_prepared_model/hdi_prepared_model_test.cpp b/test/unittest/components/v2_1/hdi_prepared_model/hdi_prepared_model_test.cpp
index 82a6c275cc452bb202d8780998ececae60e2f9a7..d60f38f9130b5d535279171d216f0ef195041d23 100644
--- a/test/unittest/components/v2_1/hdi_prepared_model/hdi_prepared_model_test.cpp
+++ b/test/unittest/components/v2_1/hdi_prepared_model/hdi_prepared_model_test.cpp
@@ -26,6 +26,8 @@
 #include "transform.h"
 #include "test/unittest/common/v2_1/mock_idevice.h"
 #include "test/unittest/common/file_utils.h"
+#include "tensor.h"
+#include "nntensor.h"
 
 using namespace testing;
 using namespace testing::ext;
@@ -38,6 +40,21 @@ protected:
     void GetBuffer(void*& buffer, size_t length);
     void InitTensor(std::vector<IOTensor>& inputs, void* buffer, size_t length);
     OH_NN_ReturnCode Run(std::vector<IOTensor>& inputs);
+    OH_NN_ReturnCode RunFail(std::vector<IOTensor>& inputs);
+};
+
+class MockTensor : public Tensor {
+public:
+    MOCK_METHOD1(SetTensorDesc, OH_NN_ReturnCode(const TensorDesc*));
+    MOCK_METHOD0(CreateData, OH_NN_ReturnCode());
+    MOCK_METHOD1(CreateData, OH_NN_ReturnCode(size_t));
+    MOCK_METHOD3(CreateData, OH_NN_ReturnCode(int, size_t, size_t));
+    MOCK_CONST_METHOD0(GetTensorDesc, TensorDesc*());
+    MOCK_CONST_METHOD0(GetData, void*());
+    MOCK_CONST_METHOD0(GetFd, int());
+    MOCK_CONST_METHOD0(GetSize, size_t());
+    MOCK_CONST_METHOD0(GetOffset, size_t());
+    MOCK_CONST_METHOD0(GetBackendID, size_t());
 };
 
 void HDIPreparedModelTest::GetBuffer(void*& buffer, size_t length)
@@ -202,6 +219,28 @@ HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_exportmodelcache_004, TestSize.L
     EXPECT_EQ(OH_NN_SAVE_CACHE_EXCEPTION, result);
 }
 
+/**
+ * @tc.name: hidpreparedmodel_exportmodelcache_005
+ * @tc.desc: Verify the ExportModelCache function returns invalid parameter in case of a non-empty modelCache.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_exportmodelcache_005, TestSize.Level0)
+{
+    LOGE("ExportModelCache hidpreparedmodel_exportmodelcache_005");
+    std::vector<V2_1::SharedBuffer> bufferVect = {{100, 100, 0, 100}};
+    OHOS::sptr<V2_1::MockIPreparedModel> mockPreparedModel =
+        OHOS::sptr<V2_1::MockIPreparedModel>(new (std::nothrow) V2_1::MockIPreparedModel());
+    EXPECT_NE(mockPreparedModel, nullptr);
+
+    std::unique_ptr<HDIPreparedModelV2_1> preparedModel = std::make_unique<HDIPreparedModelV2_1>(mockPreparedModel);
+
+    std::vector<Buffer> modelCache;
+    Buffer buffer;
+    modelCache.emplace_back(buffer);
+    OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, result);
+}
+
 /**
  * @tc.name: hidpreparedmodel_run_001
  * @tc.desc: Verify the Run function return invalid parameter.
@@ -336,6 +375,414 @@ HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_005, TestSize.Level0)
     const auto& memoryManager = MemoryManager::GetInstance();
     memoryManager->UnMapMemory(buffer);
 }
+
+/**
+ * @tc.name: hidpreparedmodel_run_006
+ * @tc.desc: Verify the Run function returns success.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_006, TestSize.Level0)
+{
+    LOGE("Run hidpreparedmodel_run_006");
+    const size_t length = 100;
+    void* buffer = nullptr;
+    GetBuffer(buffer, length);
+
+    std::vector<IOTensor> inputs;
+    std::vector<IOTensor> outputs;
+    InitTensor(inputs, buffer, length);
+    InitTensor(outputs, buffer, length);
+
+    std::vector<std::vector<int32_t>> outputsDims {{0}};
+    std::vector<bool> isOutputBufferEnough {};
+
+    OHOS::sptr<V2_1::MockIPreparedModel> sp =
+        OHOS::sptr<V2_1::MockIPreparedModel>(new (std::nothrow) V2_1::MockIPreparedModel());
+    EXPECT_NE(sp, nullptr);
+
+    std::unique_ptr<HDIPreparedModelV2_1> preparedModel = std::make_unique<HDIPreparedModelV2_1>(sp);
+    EXPECT_CALL(*sp, Run(::testing::_, ::testing::_, ::testing::_))
+        .WillRepeatedly(::testing::DoAll(
+            ::testing::SetArgReferee<2>(outputsDims),
+            ::testing::Return(HDF_SUCCESS))
+        );
+
+    OH_NN_ReturnCode result = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough);
+    EXPECT_EQ(OH_NN_SUCCESS, result);
+
+    const auto& memoryManager = MemoryManager::GetInstance();
+    memoryManager->UnMapMemory(buffer);
+}
+
+OH_NN_ReturnCode HDIPreparedModelTest::RunFail(std::vector<IOTensor>& inputs)
+{
+    std::vector<IOTensor> outputs;
+    std::vector<std::vector<int32_t>> outputsDims {};
+    std::vector<bool> isOutputBufferEnough {};
+
+    OHOS::sptr<V2_1::MockIPreparedModel> sp =
+        OHOS::sptr<V2_1::MockIPreparedModel>(new (std::nothrow) V2_1::MockIPreparedModel());
+    EXPECT_NE(sp, nullptr);
+
+    std::unique_ptr<HDIPreparedModelV2_1> preparedModel = std::make_unique<HDIPreparedModelV2_1>(sp);
+
+    OH_NN_ReturnCode result = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough);
+    return result;
+}
+
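+// RunFail wraps a fresh mock in a prepared model and forwards one input vector
+// to Run(). The cases run_007 to run_018 below sweep it across the OH_NN data
+// types; an equivalent table-driven form (an illustrative sketch, not part of
+// the original change, with a hypothetical test name) would be:
+HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_datatype_sweep, TestSize.Level0)
+{
+    for (OH_NN_DataType type : {OH_NN_BOOL, OH_NN_INT16, OH_NN_INT64, OH_NN_UINT8,
+                                OH_NN_UINT16, OH_NN_UINT32, OH_NN_UINT64, OH_NN_FLOAT16,
+                                OH_NN_FLOAT32, OH_NN_FLOAT64, OH_NN_UNKNOWN, OH_NN_INT32}) {
+        std::vector<IOTensor> inputs;
+        IOTensor inputTensor;
+        inputTensor.dataType = type;
+        inputs.emplace_back(std::move(inputTensor));
+        EXPECT_EQ(OH_NN_INVALID_PARAMETER, RunFail(inputs));
+    }
+}
+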
+/**
+ * @tc.name: hidpreparedmodel_run_007
+ * @tc.desc: Verify the Run function returns invalid parameter in case of an OH_NN_BOOL input tensor without a data buffer.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_007, TestSize.Level0)
+{
+    LOGE("Run hidpreparedmodel_run_007");
+    std::vector<IOTensor> inputs;
+    IOTensor inputTensor;
+    inputTensor.dataType = OH_NN_BOOL;
+    inputs.emplace_back(std::move(inputTensor));
+
+    OH_NN_ReturnCode result = RunFail(inputs);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, result);
+}
+
+/**
+ * @tc.name: hidpreparedmodel_run_008
+ * @tc.desc: Verify the Run function returns invalid parameter in case of an OH_NN_INT16 input tensor without a data buffer.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_008, TestSize.Level0)
+{
+    LOGE("Run hidpreparedmodel_run_008");
+    std::vector<IOTensor> inputs;
+    IOTensor inputTensor;
+    inputTensor.dataType = OH_NN_INT16;
+    inputs.emplace_back(std::move(inputTensor));
+
+    OH_NN_ReturnCode result = RunFail(inputs);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, result);
+}
+
+/**
+ * @tc.name: hidpreparedmodel_run_009
+ * @tc.desc: Verify the Run function returns invalid parameter in case of an OH_NN_INT64 input tensor without a data buffer.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_009, TestSize.Level0)
+{
+    LOGE("Run hidpreparedmodel_run_009");
+    std::vector<IOTensor> inputs;
+    IOTensor inputTensor;
+    inputTensor.dataType = OH_NN_INT64;
+    inputs.emplace_back(std::move(inputTensor));
+
+    OH_NN_ReturnCode result = RunFail(inputs);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, result);
+}
+
+/**
+ * @tc.name: hidpreparedmodel_run_010
+ * @tc.desc: Verify the Run function returns invalid parameter in case of an OH_NN_UINT8 input tensor without a data buffer.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_010, TestSize.Level0)
+{
+    LOGE("Run hidpreparedmodel_run_010");
+    std::vector<IOTensor> inputs;
+    IOTensor inputTensor;
+    inputTensor.dataType = OH_NN_UINT8;
+    inputs.emplace_back(std::move(inputTensor));
+
+    OH_NN_ReturnCode result = RunFail(inputs);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, result);
+}
+
+/**
+ * @tc.name: hidpreparedmodel_run_011
+ * @tc.desc: Verify the Run function returns invalid parameter in case of an OH_NN_UINT16 input tensor without a data buffer.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_011, TestSize.Level0)
+{
+    LOGE("Run hidpreparedmodel_run_011");
+    std::vector<IOTensor> inputs;
+    IOTensor inputTensor;
+    inputTensor.dataType = OH_NN_UINT16;
+    inputs.emplace_back(std::move(inputTensor));
+
+    OH_NN_ReturnCode result = RunFail(inputs);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, result);
+}
+
+/**
+ * @tc.name: hidpreparedmodel_run_012
+ * @tc.desc: Verify the Run function returns invalid parameter in case of an OH_NN_UINT32 input tensor without a data buffer.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_012, TestSize.Level0)
+{
+    LOGE("Run hidpreparedmodel_run_012");
+    std::vector<IOTensor> inputs;
+    IOTensor inputTensor;
+    inputTensor.dataType = OH_NN_UINT32;
+    inputs.emplace_back(std::move(inputTensor));
+
+    OH_NN_ReturnCode result = RunFail(inputs);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, result);
+}
+
+/**
+ * @tc.name: hidpreparedmodel_run_013
+ * @tc.desc: Verify the Run function returns invalid parameter in case of an OH_NN_UINT64 input tensor without a data buffer.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_013, TestSize.Level0)
+{
+    LOGE("Run hidpreparedmodel_run_013");
+    std::vector<IOTensor> inputs;
+    IOTensor inputTensor;
+    inputTensor.dataType = OH_NN_UINT64;
+    inputs.emplace_back(std::move(inputTensor));
+
+    OH_NN_ReturnCode result = RunFail(inputs);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, result);
+}
+
+/**
+ * @tc.name: hidpreparedmodel_run_014
+ * @tc.desc: Verify the Run function returns invalid parameter in case of an OH_NN_FLOAT16 input tensor without a data buffer.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_014, TestSize.Level0)
+{
+    LOGE("Run hidpreparedmodel_run_014");
+    std::vector<IOTensor> inputs;
+    IOTensor inputTensor;
+    inputTensor.dataType = OH_NN_FLOAT16;
+    inputs.emplace_back(std::move(inputTensor));
+
+    OH_NN_ReturnCode result = RunFail(inputs);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, result);
+}
+
+/**
+ * @tc.name: hidpreparedmodel_run_015
+ * @tc.desc: Verify the Run function returns invalid parameter in case of an OH_NN_FLOAT32 input tensor without a data buffer.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_015, TestSize.Level0)
+{
+    LOGE("Run hidpreparedmodel_run_015");
+    std::vector<IOTensor> inputs;
+    IOTensor inputTensor;
+    inputTensor.dataType = OH_NN_FLOAT32;
+    inputs.emplace_back(std::move(inputTensor));
+
+    OH_NN_ReturnCode result = RunFail(inputs);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, result);
+}
+
+/**
+ * @tc.name: hidpreparedmodel_run_016
+ * @tc.desc: Verify the Run function returns invalid parameter in case of an OH_NN_FLOAT64 NHWC input tensor without a data buffer.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_016, TestSize.Level0)
+{
+    LOGE("Run hidpreparedmodel_run_016");
+    std::vector<IOTensor> inputs;
+    IOTensor inputTensor;
+    inputTensor.dataType = OH_NN_FLOAT64;
+    inputTensor.format = OH_NN_FORMAT_NHWC;
+    inputs.emplace_back(std::move(inputTensor));
+
+    OH_NN_ReturnCode result = RunFail(inputs);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, result);
+}
+
+/**
+ * @tc.name: hidpreparedmodel_run_017
+ * @tc.desc: Verify the Run function returns invalid parameter in case of an OH_NN_UNKNOWN input tensor with format OH_NN_FORMAT_NONE.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_017, TestSize.Level0)
+{
+    LOGE("Run hidpreparedmodel_run_017");
+    std::vector<IOTensor> inputs;
+    IOTensor inputTensor;
+    inputTensor.dataType = OH_NN_UNKNOWN;
+    inputTensor.format = OH_NN_FORMAT_NONE;
+    inputs.emplace_back(std::move(inputTensor));
+
+    OH_NN_ReturnCode result = RunFail(inputs);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, result);
+}
+
+/**
+ * @tc.name: hidpreparedmodel_run_018
+ * @tc.desc: Verify the Run function returns invalid parameter in case of an OH_NN_INT32 input tensor without a data buffer.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_018, TestSize.Level0)
+{
+    LOGE("Run hidpreparedmodel_run_018");
+    std::vector<IOTensor> inputs;
+    IOTensor inputTensor;
+    inputTensor.dataType = OH_NN_INT32;
+    inputs.emplace_back(std::move(inputTensor));
+
+    OH_NN_ReturnCode result = RunFail(inputs);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, result);
+}
+
+/**
+ * @tc.name: hidpreparedmodel_run_019
+ * @tc.desc: Verify the Run function returns failed in case of a nullptr input tensor.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_019, TestSize.Level0)
+{
+    LOGE("Run hidpreparedmodel_run_019");
+    std::vector<NN_Tensor*> inputs;
+    std::vector<NN_Tensor*> outputs;
+    std::vector<std::vector<int32_t>> outputsDims {};
+    std::vector<bool> isOutputBufferEnough {};
+
+    inputs.emplace_back(nullptr);
+
+    OHOS::sptr<V2_1::MockIPreparedModel> sp =
+        OHOS::sptr<V2_1::MockIPreparedModel>(new (std::nothrow) V2_1::MockIPreparedModel());
+    EXPECT_NE(sp, nullptr);
+
+    std::unique_ptr<HDIPreparedModelV2_1> preparedModel = std::make_unique<HDIPreparedModelV2_1>(sp);
+    OH_NN_ReturnCode ret = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough);
+    EXPECT_EQ(OH_NN_FAILED, ret);
+}
+
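+// run_020 below hands Run() a MockTensor with no expectations set: gmock's
+// default action returns nullptr from GetTensorDesc(), so the tensor cannot be
+// resolved and Run() fails. AllowLeak suppresses gmock's leak checker for the
+// deliberately undeleted mock.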
+/**
+ * @tc.name: hidpreparedmodel_run_020
+ * @tc.desc: Verify the Run function returns failed in case of an input tensor with no tensor descriptor.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_020, TestSize.Level0)
+{
+    LOGE("Run hidpreparedmodel_run_020");
+    std::vector<NN_Tensor*> inputs;
+    std::vector<NN_Tensor*> outputs;
+    std::vector<std::vector<int32_t>> outputsDims {};
+    std::vector<bool> isOutputBufferEnough {};
+
+    MockTensor* tensorImpl = new (std::nothrow) MockTensor();
+    NN_Tensor* tensor = reinterpret_cast<NN_Tensor*>(tensorImpl);
+    inputs.emplace_back(tensor);
+
+    OHOS::sptr<V2_1::MockIPreparedModel> sp =
+        OHOS::sptr<V2_1::MockIPreparedModel>(new (std::nothrow) V2_1::MockIPreparedModel());
+    EXPECT_NE(sp, nullptr);
+
+    std::unique_ptr<HDIPreparedModelV2_1> preparedModel = std::make_unique<HDIPreparedModelV2_1>(sp);
+    OH_NN_ReturnCode ret = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough);
+    EXPECT_EQ(OH_NN_FAILED, ret);
+
+    testing::Mock::AllowLeak(tensorImpl);
+}
+
+/**
+ * @tc.name: hidpreparedmodel_run_021
+ * @tc.desc: Verify the Run function returns failed in case of an input tensor without a data buffer.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_021, TestSize.Level0)
+{
+    LOGE("Run hidpreparedmodel_run_021");
+    std::vector<NN_Tensor*> inputs;
+    std::vector<NN_Tensor*> outputs;
+    std::vector<std::vector<int32_t>> outputsDims {};
+    std::vector<bool> isOutputBufferEnough {};
+
+    size_t deviceId = 1;
+    NNTensor2_0* tensorImpl = new (std::nothrow) NNTensor2_0(deviceId);
+    TensorDesc tensorDesc;
+
+    tensorImpl->SetTensorDesc(&tensorDesc);
+    NN_Tensor* tensor = reinterpret_cast<NN_Tensor*>(tensorImpl);
+    inputs.emplace_back(tensor);
+
+    OHOS::sptr<V2_1::MockIPreparedModel> sp =
+        OHOS::sptr<V2_1::MockIPreparedModel>(new (std::nothrow) V2_1::MockIPreparedModel());
+    EXPECT_NE(sp, nullptr);
+
+    std::unique_ptr<HDIPreparedModelV2_1> preparedModel = std::make_unique<HDIPreparedModelV2_1>(sp);
+    OH_NN_ReturnCode ret = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough);
+    EXPECT_EQ(OH_NN_FAILED, ret);
+}
+
+/**
+ * @tc.name: hidpreparedmodel_run_022
+ * @tc.desc: Verify the Run function returns unavailable device in case of a valid input tensor and a mock device.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_run_022, TestSize.Level0)
+{
+    LOGE("Run hidpreparedmodel_run_022");
+    std::vector<NN_Tensor*> inputs;
+    std::vector<NN_Tensor*> outputs;
+    std::vector<std::vector<int32_t>> outputsDims {};
+    std::vector<bool> isOutputBufferEnough {};
+
+    size_t backendId = 1;
+    NNTensor2_0* nnTensor = new (std::nothrow) NNTensor2_0(backendId);
+    EXPECT_NE(nullptr, nnTensor);
+
+    TensorDesc tensorDesc;
+    char name = 'a';
+    tensorDesc.SetName(&name);
+    tensorDesc.SetDataType(OH_NN_UINT32);
+    tensorDesc.SetFormat(OH_NN_FORMAT_NCHW);
+    int32_t expectDim[2] = {3, 3};
+    int32_t* ptr = expectDim;
+    uint32_t dimensionCount = 2;
+    tensorDesc.SetShape(ptr, dimensionCount);
+
+    OH_NN_ReturnCode retSetTensorDesc = nnTensor->SetTensorDesc(&tensorDesc);
+    EXPECT_EQ(OH_NN_SUCCESS, retSetTensorDesc);
+
+    nnTensor->SetSize(200);
+    nnTensor->SetOffset(0);
+    float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
+    void* buffer = dataArry;
+    nnTensor->SetData(buffer);
+
+    NN_Tensor* tensor = reinterpret_cast<NN_Tensor*>(nnTensor);
+    inputs.emplace_back(tensor);
+
+    OHOS::sptr<V2_1::MockIPreparedModel> sp =
+        OHOS::sptr<V2_1::MockIPreparedModel>(new (std::nothrow) V2_1::MockIPreparedModel());
+    EXPECT_NE(sp, nullptr);
+
+    std::unique_ptr<HDIPreparedModelV2_1> preparedModel = std::make_unique<HDIPreparedModelV2_1>(sp);
+    OH_NN_ReturnCode ret = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough);
+    EXPECT_EQ(OH_NN_UNAVAILABLE_DEVICE, ret);
+}
+
+/**
+ * @tc.name: hidpreparedmodel_getmodelid_001
+ * @tc.desc: Verify the GetModelID function returns success in case of a valid prepared model.
+ * @tc.type: FUNC
+ */
+HWTEST_F(HDIPreparedModelTest, hidpreparedmodel_getmodelid_001, TestSize.Level0)
+{
+    LOGE("GetModelID hidpreparedmodel_getmodelid_001");
+    OHOS::sptr<V2_1::MockIPreparedModel> sp =
+        OHOS::sptr<V2_1::MockIPreparedModel>(new (std::nothrow) V2_1::MockIPreparedModel());
+    EXPECT_NE(sp, nullptr);
+
+    uint32_t index = 0;
+    std::unique_ptr<HDIPreparedModelV2_1> preparedModel = std::make_unique<HDIPreparedModelV2_1>(sp);
+    OH_NN_ReturnCode ret = preparedModel->GetModelID(index);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
 } // namespace UnitTest
 } // namespace NeuralNetworkRuntime
 } // namespace OHOS
diff --git a/test/unittest/inner_kits/neural_network_runtime_inner_test.cpp b/test/unittest/inner_kits/neural_network_runtime_inner_test.cpp
index 8e220c77acbffb072343ee7ce3cbd0a2d3c37fe2..d76511c255619a1f40b39cc3f11ccd1dd0ab9cf9 100644
--- a/test/unittest/inner_kits/neural_network_runtime_inner_test.cpp
+++ b/test/unittest/inner_kits/neural_network_runtime_inner_test.cpp
@@ -131,6 +131,239 @@ HWTEST_F(NeuralNetworkRuntimeInnerTest, build_from_lite_graph_004, testing::ext:
     liteGraph = nullptr;
     EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
 }
+
+/*
+ * @tc.name: build_from_lite_graph_005
+ * @tc.desc: Verify the OH_NNModel_BuildFromLiteGraph function returns invalid parameter in case of invalid extensions.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeInnerTest, build_from_lite_graph_005, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNModel_BuildFromLiteGraph build_from_lite_graph_005");
+    OHOS::NeuralNetworkRuntime::InnerModel* innerModel = new (std::nothrow) OHOS::NeuralNetworkRuntime::InnerModel();
+    EXPECT_NE(nullptr, innerModel);
+    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(innerModel);
+    mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph;
+    char a = 'a';
+    OH_NN_Extension extension1 = {"QuantBuffer", &a, 8};
+    OH_NN_Extension extension2 = {"ModelName", &a, 8};
+    OH_NN_Extension extension3 = {"Profiling", &a, 8};
+    OH_NN_Extension extension7 = {"isProfiling", &a, 8};
+    OH_NN_Extension extension4 = {"opLayout", &a, 8};
+    OH_NN_Extension extension5 = {"InputDims", &a, 8};
+    OH_NN_Extension extension6 = {"DynamicDims", &a, 8};
+    OH_NN_Extension extension[7] = {extension1, extension2, extension7, extension4, extension5, extension6, extension3};
+    size_t extensionSize = 7;
+    EXPECT_NE(nullptr, liteGraph);
+    liteGraph->name_ = "testGraph";
+    liteGraph->input_indices_ = {0};
+    liteGraph->output_indices_ = {1};
+    mindspore::lite::DataType data_type = mindspore::lite::DataType::DATA_TYPE_INT32;
+    int32_t dim = 1;
+    int32_t *dims = &dim;
+    uint32_t dims_size = 1;
+    mindspore::lite::Format format = mindspore::lite::Format::FORMAT_HWCK;
+    uint8_t datas = 0;
+    uint8_t *data = &datas;
+    uint32_t data_size = 2;
+    mindspore::lite::QuantParam quant_params;
+    uint32_t quant_params_size = 0;
+    mindspore::lite::TensorPtr ptr2 = mindspore::lite::MindIR_Tensor_Create(&a, data_type, dims, dims_size,
+                                                                            format, data, data_size,
+                                                                            &quant_params, quant_params_size);
+
+    for (size_t indexInput = 0; indexInput < liteGraph->input_indices_.size(); ++indexInput) {
+        liteGraph->all_tensors_.emplace_back(ptr2);
+    }
+    for (size_t indexOutput = 0; indexOutput < liteGraph->output_indices_.size(); ++indexOutput) {
+        liteGraph->all_tensors_.emplace_back(ptr2);
+    }
+    OH_NN_ReturnCode ret = OH_NNModel_BuildFromLiteGraph(model, liteGraph, extension, extensionSize);
+    delete innerModel;
+    innerModel = nullptr;
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: oh_nnmodel_buildfrommetagraph_001
+ * @tc.desc: Verify the OH_NNModel_BuildFromMetaGraph function returns operation forbidden in case of an unsupported meta graph.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeInnerTest, oh_nnmodel_buildfrommetagraph_001, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNModel_BuildFromMetaGraph oh_nnmodel_buildfrommetagraph_001");
+    OHOS::NeuralNetworkRuntime::InnerModel* innerModel = new (std::nothrow) OHOS::NeuralNetworkRuntime::InnerModel();
+    EXPECT_NE(nullptr, innerModel);
+    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(innerModel);
+    mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph;
+    char a = 'a';
+    OH_NN_Extension extension1 = {"QuantBuffer", &a, 1};
+    OH_NN_Extension extension2 = {"ModelName", &a, 1};
+    OH_NN_Extension extension3 = {"Profiling", &a, 1};
+    OH_NN_Extension extension4 = {"opLayout", &a, 1};
+    OH_NN_Extension extension[4] = {extension1, extension2, extension3, extension4};
+
+    size_t extensionSize = 4;
+    EXPECT_NE(nullptr, liteGraph);
+    liteGraph->name_ = "testGraph";
+    liteGraph->input_indices_ = {0};
+    liteGraph->output_indices_ = {1};
+    const std::vector<mindspore::lite::QuantParam> quant_params {};
+    for (size_t indexInput = 0; indexInput < liteGraph->input_indices_.size(); ++indexInput) {
+        liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create());
+    }
+    for (size_t indexOutput = 0; indexOutput < liteGraph->output_indices_.size(); ++indexOutput) {
+        liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create());
+    }
+    OH_NN_ReturnCode ret = OH_NNModel_BuildFromMetaGraph(model, liteGraph, extension, extensionSize);
+    delete innerModel;
+    innerModel = nullptr;
+    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
+}
+
+/*
+ * @tc.name: oh_nnmodel_buildfrommetagraph_002
+ * @tc.desc: Verify the OH_NNModel_BuildFromMetaGraph function returns invalid parameter in case of model nullptr.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeInnerTest, oh_nnmodel_buildfrommetagraph_002, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNModel_BuildFromMetaGraph oh_nnmodel_buildfrommetagraph_002");
+    mindspore::lite::LiteGraph* liteGraph = new (std::nothrow) mindspore::lite::LiteGraph;
+    OH_NN_Extension* extensions = nullptr;
+
+    size_t extensionSize = 0;
+    EXPECT_NE(nullptr, liteGraph);
+    liteGraph->name_ = "testGraph";
+    liteGraph->input_indices_ = {0};
+    liteGraph->output_indices_ = {1};
+    const std::vector<mindspore::lite::QuantParam> quant_params {};
+    for (size_t indexInput = 0; indexInput < liteGraph->input_indices_.size(); ++indexInput) {
+        liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create());
+    }
+    for (size_t indexOutput = 0; indexOutput < liteGraph->output_indices_.size(); ++indexOutput) {
+        liteGraph->all_tensors_.emplace_back(mindspore::lite::MindIR_Tensor_Create());
+    }
+    OH_NN_ReturnCode ret = OH_NNModel_BuildFromMetaGraph(nullptr, liteGraph, extensions, extensionSize);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: oh_nnmodel_buildfrommetagraph_003
+ * @tc.desc: Verify the OH_NNModel_BuildFromMetaGraph function returns invalid parameter in case of metaGraph nullptr.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeInnerTest, oh_nnmodel_buildfrommetagraph_003, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNModel_BuildFromMetaGraph oh_nnmodel_buildfrommetagraph_003");
+    OHOS::NeuralNetworkRuntime::InnerModel* innerModel = new (std::nothrow) OHOS::NeuralNetworkRuntime::InnerModel();
+    EXPECT_NE(nullptr, innerModel);
+    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(innerModel);
+    OH_NN_Extension* extensions = nullptr;
+    size_t extensionSize = 0;
+    OH_NN_ReturnCode ret = OH_NNModel_BuildFromMetaGraph(model, nullptr, extensions, extensionSize);
+    delete innerModel;
+    innerModel = nullptr;
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: oh_nnmodel_setinputsandoutputsinfo_001
+ * @tc.desc: Verify the OH_NNModel_SetInputsAndOutputsInfo function returns success in case of valid inputs and outputs info.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeInnerTest, oh_nnmodel_setinputsandoutputsinfo_001, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNModel_SetInputsAndOutputsInfo oh_nnmodel_setinputsandoutputsinfo_001");
+    OHOS::NeuralNetworkRuntime::InnerModel* innerModel = new (std::nothrow) OHOS::NeuralNetworkRuntime::InnerModel();
+    EXPECT_NE(nullptr, innerModel);
+    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(innerModel);
+
+    OH_NN_TensorInfo inputsInfo;
+    size_t inputSize = 1;
+    OH_NN_TensorInfo outputsInfo;
+    size_t outputSize = 1;
+    OH_NN_ReturnCode ret = OH_NNModel_SetInputsAndOutputsInfo(model, &inputsInfo, inputSize, &outputsInfo, outputSize);
+    delete innerModel;
+    innerModel = nullptr;
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/*
+ * @tc.name: oh_nnmodel_setinputsandoutputsinfo_002
+ * @tc.desc: Verify the OH_NNModel_SetInputsAndOutputsInfo function returns invalid parameter in case of model nullptr.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeInnerTest, oh_nnmodel_setinputsandoutputsinfo_002, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNModel_SetInputsAndOutputsInfo oh_nnmodel_setinputsandoutputsinfo_002");
+    OH_NN_TensorInfo inputsInfo;
+    size_t inputSize = 1;
+    OH_NN_TensorInfo outputsInfo;
+    size_t outputSize = 1;
+    OH_NN_ReturnCode ret = OH_NNModel_SetInputsAndOutputsInfo(nullptr,
+        &inputsInfo, inputSize, &outputsInfo, outputSize);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/*
+ * @tc.name: oh_nnmodel_setinputsandoutputsinfo_003
+ * @tc.desc: Verify the OH_NNModel_SetInputsAndOutputsInfo function returns invalid parameter in case of inputSize 0.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeInnerTest, oh_nnmodel_setinputsandoutputsinfo_003, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNModel_SetInputsAndOutputsInfo oh_nnmodel_setinputsandoutputsinfo_003");
+    OHOS::NeuralNetworkRuntime::InnerModel* innerModel = new (std::nothrow) OHOS::NeuralNetworkRuntime::InnerModel();
+    EXPECT_NE(nullptr, innerModel);
+    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(innerModel);
+
+    OH_NN_TensorInfo inputsInfo;
+    size_t inputSize = 0;
+    OH_NN_TensorInfo outputsInfo;
+    size_t outputSize = 1;
+    OH_NN_ReturnCode ret = OH_NNModel_SetInputsAndOutputsInfo(model, &inputsInfo, inputSize, &outputsInfo, outputSize);
+    delete innerModel;
+    innerModel = nullptr;
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
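+// Note on the OH_NN_TensorInfo arguments used in these cases: they are plain
+// stack structs, left uninitialized as in the cases above. Zero-initializing
+// them (for example, OH_NN_TensorInfo inputsInfo = {};) would be a safe
+// hardening here; this is an observation, not part of the original change.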
+/*
+ * @tc.name: oh_nnmodel_setinputsandoutputsinfo_004
+ * @tc.desc: Verify the OH_NNModel_SetInputsAndOutputsInfo function returns invalid parameter in case of outputSize 0.
+ * @tc.type: FUNC
+ */
+HWTEST_F(NeuralNetworkRuntimeInnerTest, oh_nnmodel_setinputsandoutputsinfo_004, testing::ext::TestSize.Level0)
+{
+    LOGE("OH_NNModel_SetInputsAndOutputsInfo oh_nnmodel_setinputsandoutputsinfo_004");
+    OHOS::NeuralNetworkRuntime::InnerModel* innerModel = new (std::nothrow) OHOS::NeuralNetworkRuntime::InnerModel();
+    EXPECT_NE(nullptr, innerModel);
+    OH_NNModel* model = reinterpret_cast<OH_NNModel*>(innerModel);
+
+    OH_NN_TensorInfo inputsInfo;
+    size_t inputSize = 1;
+    OH_NN_TensorInfo outputsInfo;
+    size_t outputSize = 0;
+    OH_NN_ReturnCode ret = OH_NNModel_SetInputsAndOutputsInfo(model, &inputsInfo, inputSize, &outputsInfo, outputSize);
+    delete innerModel;
+    innerModel = nullptr;
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
 } // namespace Unittest
 } // namespace NeuralNetworkRuntime
 } // namespace OHOS