From 49f0f69293afcab3feb2ee91d41842eb9d15d357 Mon Sep 17 00:00:00 2001 From: w30052974 Date: Thu, 17 Apr 2025 10:29:05 +0800 Subject: [PATCH 1/2] =?UTF-8?q?nnrt=20=E7=94=A8=E4=BE=8B=E5=BD=92=E6=A1=A3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: w30052974 --- test/nnrt_xts_acts/BUILD.gn | 21 + .../neural_network_runtime/v1_0/BUILD.gn | 20 + .../v1_0/common/const.h | 44 + .../v1_0/common/mock_idevice.cpp | 336 +++++ .../v1_0/common/mock_idevice.h | 123 ++ .../v1_0/common/model.h | 111 ++ .../v1_0/common/nnrt_utils.cpp | 471 ++++++ .../v1_0/common/nnrt_utils.h | 99 ++ .../v1_0/interface/BUILD.gn | 53 + .../v1_0/interface/src/CompileTest.cpp | 875 +++++++++++ .../v1_0/interface/src/DeviceTest.cpp | 207 +++ .../v1_0/interface/src/ExecutorTest.cpp | 1214 +++++++++++++++ .../v1_0/interface/src/MemoryTest.cpp | 950 ++++++++++++ .../v1_0/interface/src/ModelTest.cpp | 1024 +++++++++++++ .../v1_0/stability/BUILD.gn | 49 + .../v1_0/stability/src/MultiThreadTest.cpp | 94 ++ .../neural_network_runtime/v2_0/BUILD.gn | 20 + .../v2_0/common/const.h | 44 + .../v2_0/common/mock_idevice.cpp | 369 +++++ .../v2_0/common/mock_idevice.h | 128 ++ .../v2_0/common/model.h | 111 ++ .../v2_0/common/nnrt_utils.cpp | 471 ++++++ .../v2_0/common/nnrt_utils.h | 99 ++ .../v2_0/interface/BUILD.gn | 53 + .../v2_0/interface/src/CompileTest.cpp | 875 +++++++++++ .../v2_0/interface/src/DeviceTest.cpp | 207 +++ .../v2_0/interface/src/ExecutorTest.cpp | 1318 +++++++++++++++++ .../v2_0/interface/src/MemoryTest.cpp | 949 ++++++++++++ .../v2_0/interface/src/ModelTest.cpp | 1024 +++++++++++++ .../v2_0/stability/BUILD.gn | 49 + .../v2_0/stability/src/MultiThreadTest.cpp | 94 ++ test/nnrt_xts_acts/nncore/BUILD.gn | 21 + .../nncore/common/mock_idevice.cpp | 345 +++++ .../nncore/common/mock_idevice.h | 126 ++ .../nncore/common/nncore_const.h | 51 + .../nncore/common/nncore_utils.cpp | 704 +++++++++ .../nncore/common/nncore_utils.h | 190 +++ test/nnrt_xts_acts/nncore/e2etest/BUILD.gn | 47 + .../nncore/e2etest/src/EndToEndTest.cpp | 447 ++++++ test/nnrt_xts_acts/nncore/nncoretest/BUILD.gn | 51 + .../nncoretest/src/HdiCompilationTest.cpp | 362 +++++ .../nncore/nncoretest/src/HdiExecutorTest.cpp | 1032 +++++++++++++ .../nncore/nncoretest/src/HdiModelTest.cpp | 420 ++++++ .../nncoretest/src/HdiTensorDescTest.cpp | 427 ++++++ .../nncore/nncoretest/src/HdiTensorTest.cpp | 547 +++++++ test/nnrt_xts_acts/nncore/opstest/BUILD.gn | 100 ++ .../nncore/opstest/include/mock_idevice.h | 128 ++ .../nncore/opstest/include/nncore_const.h | 31 + .../nncore/opstest/include/nncore_utils.h | 83 ++ .../nncore/opstest/src/abs_test.cpp | 757 ++++++++++ .../nncore/opstest/src/all_test.cpp | 817 ++++++++++ .../nncore/opstest/src/assert_test.cpp | 769 ++++++++++ .../nncore/opstest/src/broadcast_to_test.cpp | 819 ++++++++++ .../nncore/opstest/src/ceil_test.cpp | 757 ++++++++++ .../nncore/opstest/src/clip_test.cpp | 911 ++++++++++++ .../opstest/src/constant_of_shape_test.cpp | 791 ++++++++++ .../nncore/opstest/src/cos_test.cpp | 755 ++++++++++ .../nncore/opstest/src/crop_test.cpp | 838 +++++++++++ .../opstest/src/depth_to_space_test.cpp | 780 ++++++++++ .../src/detection_post_process_test.cpp | 803 ++++++++++ .../nncore/opstest/src/equal_test.cpp | 850 +++++++++++ .../nncore/opstest/src/erf_test.cpp | 799 ++++++++++ .../nncore/opstest/src/exp_test.cpp | 864 +++++++++++ .../nncore/opstest/src/flatten_test.cpp | 844 +++++++++++ .../nncore/opstest/src/floor_test.cpp | 757 ++++++++++ 
.../nncore/opstest/src/gather_nd_test.cpp | 765 ++++++++++ .../nncore/opstest/src/greater_equal_test.cpp | 863 +++++++++++ .../nncore/opstest/src/greater_test.cpp | 850 +++++++++++ .../nncore/opstest/src/hard_sigmoid_test.cpp | 764 ++++++++++ .../nncore/opstest/src/instance_norm_test.cpp | 791 ++++++++++ .../nncore/opstest/src/l2_normalize_test.cpp | 826 +++++++++++ .../nncore/opstest/src/leaky_relu_test.cpp | 866 +++++++++++ .../nncore/opstest/src/less_test.cpp | 851 +++++++++++ .../nncore/opstest/src/log_softmax_test.cpp | 816 ++++++++++ .../nncore/opstest/src/log_test.cpp | 755 ++++++++++ .../nncore/opstest/src/logical_and_test.cpp | 904 +++++++++++ .../nncore/opstest/src/logical_not_test.cpp | 892 +++++++++++ .../nncore/opstest/src/logical_or_test.cpp | 767 ++++++++++ .../nncore/opstest/src/lrn_test.cpp | 730 +++++++++ .../nncore/opstest/src/lstm_test.cpp | 751 ++++++++++ .../nncore/opstest/src/minimum_test.cpp | 805 ++++++++++ .../nncore/opstest/src/mock_idevice.cpp | 371 +++++ .../nncore/opstest/src/mod_test.cpp | 804 ++++++++++ .../nncore/opstest/src/neg_test.cpp | 755 ++++++++++ .../nncore/opstest/src/nncore_utils.cpp | 239 +++ .../nncore/opstest/src/not_equal_test.cpp | 858 +++++++++++ .../nncore/opstest/src/pow_test.cpp | 822 ++++++++++ .../opstest/src/quant_dtype_cast_test.cpp | 841 +++++++++++ .../nncore/opstest/src/range_test.cpp | 919 ++++++++++++ .../nncore/opstest/src/rank_test.cpp | 756 ++++++++++ .../nncore/opstest/src/reciprocal_test.cpp | 806 ++++++++++ .../nncore/opstest/src/reducel2_test.cpp | 948 ++++++++++++ .../nncore/opstest/src/reducemax_test.cpp | 928 ++++++++++++ .../nncore/opstest/src/reducemin_test.cpp | 928 ++++++++++++ .../nncore/opstest/src/reducesum_test.cpp | 938 ++++++++++++ .../nncore/opstest/src/round_test.cpp | 757 ++++++++++ .../nncore/opstest/src/scatter_nd_test.cpp | 880 +++++++++++ .../nncore/opstest/src/select_test.cpp | 907 ++++++++++++ .../nncore/opstest/src/sin_test.cpp | 756 ++++++++++ .../opstest/src/space_to_depth_test.cpp | 869 +++++++++++ .../opstest/src/sparse_to_dense_test.cpp | 986 ++++++++++++ .../nncore/opstest/src/square_test.cpp | 756 ++++++++++ .../nncore/opstest/src/swish_test.cpp | 799 ++++++++++ .../nncore/opstest/src/unstack_test.cpp | 880 +++++++++++ .../nncore/opstest/src/where_test.cpp | 812 ++++++++++ 105 files changed, 61836 insertions(+) create mode 100644 test/nnrt_xts_acts/BUILD.gn create mode 100644 test/nnrt_xts_acts/neural_network_runtime/v1_0/BUILD.gn create mode 100644 test/nnrt_xts_acts/neural_network_runtime/v1_0/common/const.h create mode 100644 test/nnrt_xts_acts/neural_network_runtime/v1_0/common/mock_idevice.cpp create mode 100644 test/nnrt_xts_acts/neural_network_runtime/v1_0/common/mock_idevice.h create mode 100644 test/nnrt_xts_acts/neural_network_runtime/v1_0/common/model.h create mode 100644 test/nnrt_xts_acts/neural_network_runtime/v1_0/common/nnrt_utils.cpp create mode 100644 test/nnrt_xts_acts/neural_network_runtime/v1_0/common/nnrt_utils.h create mode 100644 test/nnrt_xts_acts/neural_network_runtime/v1_0/interface/BUILD.gn create mode 100644 test/nnrt_xts_acts/neural_network_runtime/v1_0/interface/src/CompileTest.cpp create mode 100644 test/nnrt_xts_acts/neural_network_runtime/v1_0/interface/src/DeviceTest.cpp create mode 100644 test/nnrt_xts_acts/neural_network_runtime/v1_0/interface/src/ExecutorTest.cpp create mode 100644 test/nnrt_xts_acts/neural_network_runtime/v1_0/interface/src/MemoryTest.cpp create mode 100644 test/nnrt_xts_acts/neural_network_runtime/v1_0/interface/src/ModelTest.cpp create 
mode 100644 test/nnrt_xts_acts/neural_network_runtime/v1_0/stability/BUILD.gn create mode 100644 test/nnrt_xts_acts/neural_network_runtime/v1_0/stability/src/MultiThreadTest.cpp create mode 100644 test/nnrt_xts_acts/neural_network_runtime/v2_0/BUILD.gn create mode 100644 test/nnrt_xts_acts/neural_network_runtime/v2_0/common/const.h create mode 100644 test/nnrt_xts_acts/neural_network_runtime/v2_0/common/mock_idevice.cpp create mode 100644 test/nnrt_xts_acts/neural_network_runtime/v2_0/common/mock_idevice.h create mode 100644 test/nnrt_xts_acts/neural_network_runtime/v2_0/common/model.h create mode 100644 test/nnrt_xts_acts/neural_network_runtime/v2_0/common/nnrt_utils.cpp create mode 100644 test/nnrt_xts_acts/neural_network_runtime/v2_0/common/nnrt_utils.h create mode 100644 test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/BUILD.gn create mode 100644 test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/src/CompileTest.cpp create mode 100644 test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/src/DeviceTest.cpp create mode 100644 test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/src/ExecutorTest.cpp create mode 100644 test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/src/MemoryTest.cpp create mode 100644 test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/src/ModelTest.cpp create mode 100644 test/nnrt_xts_acts/neural_network_runtime/v2_0/stability/BUILD.gn create mode 100644 test/nnrt_xts_acts/neural_network_runtime/v2_0/stability/src/MultiThreadTest.cpp create mode 100644 test/nnrt_xts_acts/nncore/BUILD.gn create mode 100644 test/nnrt_xts_acts/nncore/common/mock_idevice.cpp create mode 100644 test/nnrt_xts_acts/nncore/common/mock_idevice.h create mode 100644 test/nnrt_xts_acts/nncore/common/nncore_const.h create mode 100644 test/nnrt_xts_acts/nncore/common/nncore_utils.cpp create mode 100644 test/nnrt_xts_acts/nncore/common/nncore_utils.h create mode 100644 test/nnrt_xts_acts/nncore/e2etest/BUILD.gn create mode 100644 test/nnrt_xts_acts/nncore/e2etest/src/EndToEndTest.cpp create mode 100644 test/nnrt_xts_acts/nncore/nncoretest/BUILD.gn create mode 100644 test/nnrt_xts_acts/nncore/nncoretest/src/HdiCompilationTest.cpp create mode 100644 test/nnrt_xts_acts/nncore/nncoretest/src/HdiExecutorTest.cpp create mode 100644 test/nnrt_xts_acts/nncore/nncoretest/src/HdiModelTest.cpp create mode 100644 test/nnrt_xts_acts/nncore/nncoretest/src/HdiTensorDescTest.cpp create mode 100644 test/nnrt_xts_acts/nncore/nncoretest/src/HdiTensorTest.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/BUILD.gn create mode 100644 test/nnrt_xts_acts/nncore/opstest/include/mock_idevice.h create mode 100644 test/nnrt_xts_acts/nncore/opstest/include/nncore_const.h create mode 100644 test/nnrt_xts_acts/nncore/opstest/include/nncore_utils.h create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/abs_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/all_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/assert_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/broadcast_to_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/ceil_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/clip_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/constant_of_shape_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/cos_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/crop_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/depth_to_space_test.cpp create mode 
100644 test/nnrt_xts_acts/nncore/opstest/src/detection_post_process_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/equal_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/erf_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/exp_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/flatten_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/floor_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/gather_nd_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/greater_equal_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/greater_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/hard_sigmoid_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/instance_norm_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/l2_normalize_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/leaky_relu_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/less_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/log_softmax_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/log_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/logical_and_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/logical_not_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/logical_or_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/lrn_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/lstm_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/minimum_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/mock_idevice.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/mod_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/neg_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/nncore_utils.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/not_equal_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/pow_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/quant_dtype_cast_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/range_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/rank_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/reciprocal_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/reducel2_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/reducemax_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/reducemin_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/reducesum_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/round_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/scatter_nd_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/select_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/sin_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/space_to_depth_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/sparse_to_dense_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/square_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/swish_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/unstack_test.cpp create mode 100644 test/nnrt_xts_acts/nncore/opstest/src/where_test.cpp diff --git a/test/nnrt_xts_acts/BUILD.gn b/test/nnrt_xts_acts/BUILD.gn new file mode 100644 index 0000000..9027bf7 --- 
/dev/null +++ b/test/nnrt_xts_acts/BUILD.gn @@ -0,0 +1,21 @@ +# Copyright (c) 2023 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +group("NNRtXtsActs") { + testonly = true + deps = [ + "neural_network_runtime/v1_0:neural_network_runtime", + "neural_network_runtime/v2_0:neural_network_runtime", + "nncore:ActsHdfNncoreTest", + ] +} diff --git a/test/nnrt_xts_acts/neural_network_runtime/v1_0/BUILD.gn b/test/nnrt_xts_acts/neural_network_runtime/v1_0/BUILD.gn new file mode 100644 index 0000000..9db9ce7 --- /dev/null +++ b/test/nnrt_xts_acts/neural_network_runtime/v1_0/BUILD.gn @@ -0,0 +1,20 @@ +# Copyright (c) 2021-2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +group("neural_network_runtime") { + testonly = true + deps = [ + "interface:ActsAiNnrtFunctionV1_0Test", + "stability:ActsAiNnrtStabilityV1_0Test", + ] +} diff --git a/test/nnrt_xts_acts/neural_network_runtime/v1_0/common/const.h b/test/nnrt_xts_acts/neural_network_runtime/v1_0/common/const.h new file mode 100644 index 0000000..8a240bd --- /dev/null +++ b/test/nnrt_xts_acts/neural_network_runtime/v1_0/common/const.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef CONST_H +#define CONST_H + +#include +#include + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Test { + +const uint32_t ADD_DATA_LENGTH = 4 * sizeof(float); +const uint32_t AVG_INPUT_LENGTH = 9 * sizeof(float); +const std::vector TENSOR_SHAPE = {1, 2, 2, 1}; +const std::vector PARAM_INDEX = {2}; +const std::vector INPUT_INDEX = {0, 1}; +const std::vector OUTPUT_INDEX = {3}; +const int32_t ELEMENT_COUNT = 4; + +const std::string CACHE_DIR = "./cache"; +const std::string CACHE_PATH = CACHE_DIR + "/0.nncache"; +const std::string CACHE_INFO_PATH = CACHE_DIR + "/cache_info.nncache"; +const uint32_t NO_DEVICE_COUNT = 0; +const int STRESS_COUNT = 100000; +const int PRINT_FREQ = 500; + +} // namespace Test +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // CONST_H \ No newline at end of file diff --git a/test/nnrt_xts_acts/neural_network_runtime/v1_0/common/mock_idevice.cpp b/test/nnrt_xts_acts/neural_network_runtime/v1_0/common/mock_idevice.cpp new file mode 100644 index 0000000..f7bcaa0 --- /dev/null +++ b/test/nnrt_xts_acts/neural_network_runtime/v1_0/common/mock_idevice.cpp @@ -0,0 +1,336 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include + +#include "const.h" +#include "mock_idevice.h" +#include "hdi_device_v1_0.h" +#include "log.h" +#include "utils.h" +#include "nnbackend.h" +#include "backend_registrar.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +std::shared_ptr HDIDeviceV1_0Creator() +{ + std::string deviceName; + std::string vendorName; + std::string version; + + // only one device from HDI now. + OHOS::sptr iDevice = V1_0::INnrtDevice::Get(); + if (iDevice == nullptr) { + LOGW("Get HDI device failed."); + return nullptr; + } + + auto hdiRet = iDevice->GetDeviceName(deviceName); + if (hdiRet != HDF_SUCCESS) { + LOGW("Get device name failed. ErrorCode=%d", hdiRet); + return nullptr; + } + + hdiRet = iDevice->GetVendorName(vendorName); + if (hdiRet != HDF_SUCCESS) { + LOGW("Get vendor name failed. ErrorCode=%d", hdiRet); + return nullptr; + } + + std::pair hdiVersion; + hdiRet = iDevice->GetVersion(hdiVersion.first, hdiVersion.second); + if (hdiRet != HDF_SUCCESS) { + LOGW("Get version failed. 
ErrorCode=%d", hdiRet); + return nullptr; + } + version = 'v' + std::to_string(hdiVersion.first) + '_' + std::to_string(hdiVersion.second); + const std::string& backendName = GenUniqueName(deviceName, vendorName, version); + + std::shared_ptr device = CreateSharedPtr(iDevice); + if (device == nullptr) { + LOGW("Failed to register device, because fail to create device instance."); + return nullptr; + } + + std::shared_ptr backend = std::make_shared(device, std::hash{}(backendName)); + if (backend == nullptr) { + LOGW("Failed to register backend, because fail to create backend."); + } + return backend; +} + +REGISTER_BACKEND(HDIDeviceV1_0, HDIDeviceV1_0Creator) +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { + +sptr INnrtDevice::Get(bool isStub) +{ + return INnrtDevice::Get("mock_device_service", isStub); +} + +sptr INnrtDevice::Get(const std::string &serviceName, bool isStub) +{ + if (isStub) { + return nullptr; + } + sptr mockIDevice = sptr(MockIDevice::GetInstance()); + return mockIDevice; +} + +MockIDevice::MockIDevice() +{ + m_bufferFd = 0; +} + +MockIDevice::~MockIDevice() +{ + for (auto fd : m_fds) { + close(fd); + } +} + +MockIPreparedModel::~MockIPreparedModel() +{ + for (auto fd : m_fds) { + close(fd); + } +} + +MockIDevice *MockIDevice::GetInstance() +{ + static MockIDevice iDevice; + return &iDevice; +} + +void MockIDevice::SetFP16Supported(bool isSupported) +{ + m_fp16 = isSupported; +} + +void MockIDevice::SetPerformanceSupported(bool isSupported) +{ + m_performance = isSupported; +} + +void MockIDevice::SetPrioritySupported(bool isSupported) +{ + m_priority = isSupported; +} + +void MockIDevice::SetModelCacheSupported(bool isSupported) +{ + m_cache = isSupported; +} + +void MockIDevice::SetOperationsSupported(std::vector isSupported) +{ + m_operations = isSupported; +} + +void MockIDevice::SetDynamicInputSupported(bool isSupported) +{ + m_dynamic = isSupported; +} + +int32_t MockIDevice::GetDeviceName(std::string& name) +{ + name = "Device-CPU"; + return HDF_SUCCESS; +} + +int32_t MockIDevice::GetVendorName(std::string& name) +{ + name = "TestVendor"; + return HDF_SUCCESS; +} + +int32_t MockIDevice::GetDeviceType(DeviceType& deviceType) +{ + deviceType = DeviceType::CPU; + return HDF_SUCCESS; +} + +int32_t MockIDevice::GetDeviceStatus(DeviceStatus& status) +{ + status = DeviceStatus::AVAILABLE; + return HDF_SUCCESS; +} + +int32_t MockIDevice::GetVersion(uint32_t &majorVersion, uint32_t &minorVersion) +{ + majorVersion = 1; + minorVersion = 0; + return HDF_SUCCESS; +} + +int32_t MockIDevice::GetSupportedOperation(const Model& model, std::vector& ops) +{ + ops = m_operations; + return HDF_SUCCESS; +} + +int32_t MockIDevice::IsFloat16PrecisionSupported(bool& isSupported) +{ + isSupported = m_fp16; + return HDF_SUCCESS; +} + +int32_t MockIDevice::IsPerformanceModeSupported(bool& isSupported) +{ + isSupported = m_performance; + return HDF_SUCCESS; +} + +int32_t MockIDevice::IsPrioritySupported(bool& isSupported) +{ + isSupported = m_priority; + return HDF_SUCCESS; +} + +int32_t MockIDevice::IsDynamicInputSupported(bool& isSupported) +{ + isSupported = m_dynamic; + return HDF_SUCCESS; +} + +int32_t MockIDevice::IsModelCacheSupported(bool& isSupported) +{ + isSupported = m_cache; + return HDF_SUCCESS; +} + +int32_t MockIDevice::AllocateBuffer(uint32_t length, SharedBuffer &buffer) +{ + std::lock_guard lock(m_mtx); + buffer.fd = AshmemCreate("allocateBuffer", length); + buffer.bufferSize = 
AshmemGetSize(buffer.fd); + buffer.offset = 0; + buffer.dataSize = length; + + AshmemSetProt(buffer.fd, PROT_READ | PROT_WRITE); + m_fds.emplace(buffer.fd); + m_bufferFd = buffer.fd; + return HDF_SUCCESS; +} + +int32_t MockIDevice::ReleaseBuffer(const SharedBuffer &buffer) +{ + if (m_fds.find(buffer.fd) == m_fds.end()) { + LOGE("ReleaseBuffer:buffer fd is invalid. fd = %d", buffer.fd); + return HDF_FAILURE; + } + if (close(buffer.fd) != 0) { + LOGE("ReleaseBuffer:Close buffer fd failed. fd = %d", buffer.fd); + return HDF_FAILURE; + } + return HDF_SUCCESS; +} + +int32_t MockIDevice::MemoryCopy(float* data, uint32_t length) +{ + std::lock_guard lock(m_mtx); + void* mapData = mmap(nullptr, length, PROT_READ | PROT_WRITE, MAP_SHARED, m_bufferFd, 0); + if (mapData == MAP_FAILED) { + LOGE("[Mock_Device]::ExportModelCache failed, Map fd to address failed: %{public}s.", strerror(errno)); + return HDF_FAILURE; + } + + auto memRet = memcpy_s(mapData, length, data, length); + auto unmapResult = munmap(mapData, length); + if (unmapResult != 0) { + LOGE("[Mock_Device]ExportModelCache failed . Please try again."); + return HDF_FAILURE; + } + if (memRet != EOK) { + LOGE("[Mock_Device]ExportModelCache failed, failed to memcpy_s data type."); + return HDF_FAILURE; + } + return HDF_SUCCESS; +} + +int32_t MockIDevice::PrepareModel(const Model& model, const ModelConfig& config, sptr& preparedModel) +{ + preparedModel = new (std::nothrow) V1_0::MockIPreparedModel(); + return HDF_SUCCESS; +} + +int32_t MockIDevice::PrepareModelFromModelCache(const std::vector& modelCache, const ModelConfig& config, + sptr& preparedModel) +{ + preparedModel = new (std::nothrow) V1_0::MockIPreparedModel(); + return HDF_SUCCESS; +} + +int32_t MockIPreparedModel::ExportModelCache(std::vector& modelCache) +{ + if (!modelCache.empty()) { + LOGE("[NNRtTest] The parameters of ExportModelCache should be an empty vector."); + return HDF_ERR_INVALID_PARAM; + } + uint8_t bufferData[4] = {0, 1, 2, 3}; + uint32_t size = sizeof(bufferData); + SharedBuffer buffer; + buffer.fd = AshmemCreate("cache", size); + buffer.bufferSize = AshmemGetSize(buffer.fd); + buffer.offset = 0; + buffer.dataSize = size; + AshmemSetProt(buffer.fd, PROT_READ | PROT_WRITE); + + void* data = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, buffer.fd, 0); + if (data == MAP_FAILED) { + LOGE("[Mock_Device]::ExportModelCache failed, Map fd to address failed: %{public}s.", strerror(errno)); + return HDF_FAILURE; + } + + auto memRet = memcpy_s(data, size, bufferData, size); + auto unmapResult = munmap(data, size); + if (unmapResult != 0) { + LOGE("[Mock_Device]ExportModelCache failed . 
Please try again."); + return HDF_FAILURE; + } + if (memRet != EOK) { + LOGE("[Mock_Device]ExportModelCache failed, failed to memcpy_s data type."); + return HDF_FAILURE; + } + m_fds.emplace(buffer.fd); + modelCache.emplace_back(buffer); + return HDF_SUCCESS; +} + +int32_t MockIPreparedModel::GetVersion(uint32_t &majorVersion, uint32_t &minorVersion) +{ + majorVersion = 1; + minorVersion = 0; + return HDF_SUCCESS; +} + +int32_t MockIPreparedModel::Run(const std::vector& inputs, const std::vector& outputs, + std::vector>& outputsDims, std::vector& isOutputBufferEnough) +{ + outputsDims = {{1, 2, 2, 1}}; + isOutputBufferEnough = {true}; + return HDF_SUCCESS; +} + +} // namespace V1_0 +} // namespace Nnrt +} // namespace HDI +} // namespace OHOS diff --git a/test/nnrt_xts_acts/neural_network_runtime/v1_0/common/mock_idevice.h b/test/nnrt_xts_acts/neural_network_runtime/v1_0/common/mock_idevice.h new file mode 100644 index 0000000..6bb4c79 --- /dev/null +++ b/test/nnrt_xts_acts/neural_network_runtime/v1_0/common/mock_idevice.h @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MOCK_IDEVICE_H +#define MOCK_IDEVICE_H + +#include +#include +#include +#include +#include + +#include +#include +#include "mindir_lite_graph.h" +#include "mindir.h" + +#include "securec.h" +#include "refbase.h" +#include "log.h" +#include "ashmem.h" + +#include +#include +#include + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V1_0 { + +class MockIDevice : public INnrtDevice { +public: + int32_t GetSupportedOperation(const Model& model, std::vector& ops) override; + + int32_t IsFloat16PrecisionSupported(bool& isSupported) override; + + int32_t IsPerformanceModeSupported(bool& isSupported) override; + + int32_t IsPrioritySupported(bool& isSupported) override; + + int32_t IsDynamicInputSupported(bool& isSupported) override; + + int32_t IsModelCacheSupported(bool& isSupported) override; + + int32_t AllocateBuffer(uint32_t length, SharedBuffer &buffer) override; + + int32_t ReleaseBuffer(const SharedBuffer &buffer) override; + + int32_t GetDeviceName(std::string& name) override; + + int32_t GetVendorName(std::string& name) override; + + int32_t GetDeviceType(DeviceType& deviceType) override; + + int32_t GetDeviceStatus(DeviceStatus& status) override; + + int32_t GetVersion(uint32_t &majorVersion, uint32_t &minorVersion) override; + + int32_t PrepareModel(const Model& model, const ModelConfig& config, sptr& preparedModel) override; + + int32_t PrepareModelFromModelCache(const std::vector& modelCache, const ModelConfig& config, + sptr& preparedModel) override; + + int32_t MemoryCopy(float* data, uint32_t length); + + void SetFP16Supported(bool isSupported); + + void SetPerformanceSupported(bool isSupported); + + void SetPrioritySupported(bool isSupported); + + void SetModelCacheSupported(bool isSupported); + + void SetOperationsSupported(std::vector isSupported); + + void SetDynamicInputSupported(bool isSupported); + + 
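+    // Mock-specific helpers (not overrides of INnrtDevice): the Set*Supported() setters above let
+    // individual test cases toggle the capability flags that the Is*Supported() queries of this
+    // mock device report back to the Neural Network Runtime.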
static MockIDevice *GetInstance(); + + MockIDevice(); + virtual ~MockIDevice(); + +private: + std::unordered_set m_fds; + int m_bufferFd; + bool m_fp16 = true; + bool m_performance = true; + bool m_priority = true; + bool m_cache = true; + bool m_dynamic = true; + std::vector m_operations{true}; + std::mutex m_mtx; +}; + +class MockIPreparedModel : public IPreparedModel { +public: + int32_t ExportModelCache(std::vector& modelCache) override; + int32_t Run(const std::vector& inputs, const std::vector& outputs, + std::vector>& outputsDims, std::vector& isOutputBufferEnough) override; + int32_t GetVersion(uint32_t &majorVersion, uint32_t &minorVersion) override; + MockIPreparedModel() = default; + virtual ~MockIPreparedModel(); +private: + std::unordered_set m_fds; +}; + +} // namespace V1_0 +} // namespace Nnrt +} // namespace HDI +} // namespace OHOS +#endif // MOCK_IDEVICE_H diff --git a/test/nnrt_xts_acts/neural_network_runtime/v1_0/common/model.h b/test/nnrt_xts_acts/neural_network_runtime/v1_0/common/model.h new file mode 100644 index 0000000..f575697 --- /dev/null +++ b/test/nnrt_xts_acts/neural_network_runtime/v1_0/common/model.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MODEL_H +#define MODEL_H + +#include "const.h" +#include "nnrt_utils.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Test { + +struct AddModel { + // ADD MODEL + float inputValue0[4] = {0, 1, 2, 3}; + float inputValue1[4] = {0, 1, 2, 3}; + int8_t activationValue = OH_NN_FUSED_NONE; + float outputValue[4] = {0}; + float expectValue[4] = {0, 2, 4, 6}; + + OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, TENSOR_SHAPE, inputValue0, ADD_DATA_LENGTH}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, TENSOR_SHAPE, inputValue1, ADD_DATA_LENGTH}; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, TENSOR_SHAPE, outputValue, ADD_DATA_LENGTH}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_ADD, + .operands = {input0, input1, activation, output}, + .paramIndices = {2}, + .inputIndices = {0, 1}, + .outputIndices = {3}}; +}; + +struct AvgPoolDynamicModel { + // AVG POOL MODEL + float inputValue[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + uint64_t kernelValue[2] = {2, 2}; + uint64_t strideValue[2] = {1, 1}; + int8_t padValue = 1; + int8_t activationValue = OH_NN_FUSED_NONE; + float outputValue[4] = {0}; + float expectValue[4] = {2, 3, 5, 6}; + + OHNNOperandTest dynamicInput = {OH_NN_FLOAT32, OH_NN_TENSOR, {-1, -1, -1, -1}, inputValue, AVG_INPUT_LENGTH}; + OHNNOperandTest kernel = {OH_NN_INT64, OH_NN_AVG_POOL_KERNEL_SIZE, {2}, kernelValue, sizeof(kernelValue)}; + OHNNOperandTest strides = {OH_NN_INT64, OH_NN_AVG_POOL_STRIDE, {2}, strideValue, sizeof(strideValue)}; + OHNNOperandTest padMode = {OH_NN_INT8, OH_NN_AVG_POOL_PAD_MODE, {}, &padValue, sizeof(padValue)}; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_AVG_POOL_ACTIVATION_TYPE, {}, &activationValue, sizeof(int8_t)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, {-1, -1, -1, -1}, outputValue, sizeof(outputValue)}; + + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_AVG_POOL, + .operands = {dynamicInput, kernel, strides, padMode, activation, output}, + .paramIndices = {1, 2, 3, 4}, + .inputIndices = {0}, + .outputIndices = {5}}; +}; + +struct TopKModel { + // TopK Model + float valueX[6] = {0, 1, 2, 3, 4, 5}; + int8_t valueK = 2; + bool valueSorted = true; + float valueOutput1[2]; + int32_t valueOutput2[2]; + + OHNNOperandTest x = {OH_NN_FLOAT32, OH_NN_TENSOR, {1, 6}, valueX, 6 * sizeof(float)}; + OHNNOperandTest k = {OH_NN_INT8, OH_NN_TENSOR, {}, &valueK, sizeof(int8_t)}; + OHNNOperandTest sorted = {OH_NN_BOOL, OH_NN_TOP_K_SORTED, {}, &valueSorted, sizeof(bool)}; + OHNNOperandTest output1 = {OH_NN_FLOAT32, OH_NN_TENSOR, {1, 2}, valueOutput1, 2 * sizeof(float)}; + OHNNOperandTest output2 = {OH_NN_INT32, OH_NN_TENSOR, {1, 2}, valueOutput2, 2 * sizeof(int32_t)}; + + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_TOP_K, + .operands = {x, k, sorted, output1, output2}, + .paramIndices = {2}, + .inputIndices = {0, 1}, + .outputIndices = {3, 4}}; +}; + +class AddTopKModel { + // Build two ops Model +private: + AddModel addModel; + TopKModel topKModel; + +public: + OHNNGraphArgsMulti graphArgs = { + .operationTypes = {OH_NN_OPS_ADD, OH_NN_OPS_TOP_K}, + .operands = {{addModel.input0, addModel.input1, addModel.activation, addModel.output}, + {topKModel.k, topKModel.sorted, topKModel.output1, topKModel.output2}}, + .paramIndices = {{2}, {5}}, + .inputIndices = {{0, 1}, {3, 4}}, + .outputIndices = {{3}, {6, 7}}, + .graphInput = {0, 1, 4}, + .graphOutput = {6, 7}}; +}; + +} // 
namespace Test +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // MODEL_H \ No newline at end of file diff --git a/test/nnrt_xts_acts/neural_network_runtime/v1_0/common/nnrt_utils.cpp b/test/nnrt_xts_acts/neural_network_runtime/v1_0/common/nnrt_utils.cpp new file mode 100644 index 0000000..0194c2c --- /dev/null +++ b/test/nnrt_xts_acts/neural_network_runtime/v1_0/common/nnrt_utils.cpp @@ -0,0 +1,471 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "nnrt_utils.h" +#include "const.h" +#include + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Test { + +OH_NN_UInt32Array TransformUInt32Array(const std::vector& vector) +{ + uint32_t* data = (vector.empty()) ? nullptr : const_cast(vector.data()); + return {data, vector.size()}; +} + +int BuildMultiOpGraph(OH_NNModel *model, const OHNNGraphArgsMulti &graphArgs) +{ + int ret = 0; + int opCnt = 0; + for (int j = 0; j < graphArgs.operationTypes.size(); j++) { + for (int i = 0; i < graphArgs.operands[j].size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[j][i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t) operandTem.shape.size(), + operandTem.shape.data(), quantParam, operandTem.type}; + ret = OH_NNModel_AddTensor(model, &operand); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_AddTensor failed! ret=%d\n", ret); + return ret; + } + if (std::find(graphArgs.paramIndices[j].begin(), graphArgs.paramIndices[j].end(), opCnt) != + graphArgs.paramIndices[j].end()) { + ret = OH_NNModel_SetTensorData(model, opCnt, operandTem.data, operandTem.length); + } + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_SetTensorData failed! ret=%d\n", ret); + return ret; + } + opCnt += 1; + } + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices[j]); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices[j]); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices[j]); + + ret = OH_NNModel_AddOperation(model, graphArgs.operationTypes[j], ¶mIndices, &inputIndices, + &outputIndices); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_AddOperation failed! ret=%d\n", ret); + return ret; + } + } + auto graphInputs = TransformUInt32Array(graphArgs.graphInput); + auto graphOutputs = TransformUInt32Array(graphArgs.graphOutput); + ret = OH_NNModel_SpecifyInputsAndOutputs(model, &graphInputs, &graphOutputs); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_SpecifyInputsAndOutputs failed! ret=%d\n", ret); + return ret; + } + ret = OH_NNModel_Finish(model); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_Finish failed! 
ret=%d\n", ret); + return ret; + } + return ret; +} + +int BuildSingleOpGraph(OH_NNModel *model, const OHNNGraphArgs &graphArgs) +{ + int ret = 0; + for (int i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t) operandTem.shape.size(), + operandTem.shape.data(), quantParam, operandTem.type}; + ret = OH_NNModel_AddTensor(model, &operand); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_AddTensor failed! ret=%d\n", ret); + return ret; + } + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + ret = OH_NNModel_SetTensorData(model, i, operandTem.data, operandTem.length); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_SetTensorData failed! ret=%d\n", ret); + return ret; + } + } + } + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + if (graphArgs.addOperation) { + ret = OH_NNModel_AddOperation(model, graphArgs.operationType, ¶mIndices, &inputIndices, + &outputIndices); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_AddOperation failed! ret=%d\n", ret); + return ret; + } + } + if (graphArgs.specifyIO) { + ret = OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_SpecifyInputsAndOutputs failed! ret=%d\n", ret); + return ret; + } + } + if (graphArgs.build) { + ret = OH_NNModel_Finish(model); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_Finish failed! ret=%d\n", ret); + return ret; + } + } + return ret; +} + +OH_NN_ReturnCode SetDevice(OH_NNCompilation *compilation) +{ + OH_NN_ReturnCode ret = OH_NN_FAILED; + const size_t *devicesID{nullptr}; + uint32_t devicesCount{0}; + ret = OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNDevice_GetAllDevicesID failed! ret=%d\n", ret); + return ret; + } + if (devicesCount <= NO_DEVICE_COUNT) { + LOGE("[NNRtTest] devicesCount <= 0 devicesCount=%d\n", devicesCount); + return OH_NN_FAILED; + } + + const char *name = nullptr; + std::string m_deviceName{"Device-CPU_TestVendor_v1_0"}; + for (uint32_t i = 0; i < devicesCount; i++) { + name = nullptr; + ret = OH_NNDevice_GetName(devicesID[i], &name); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNDevice_GetName failed! ret=%d\n", ret); + return ret; + } + + std::string sName(name); + if (m_deviceName == sName) { + ret = OH_NNCompilation_SetDevice(compilation, devicesID[i]); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNCompilation_SetDevice failed! ret=%d\n", ret); + return ret; + } + return OH_NN_SUCCESS; + } + } + return OH_NN_FAILED; +} + +int CompileGraphMock(OH_NNCompilation *compilation, const OHNNCompileParam &compileParam) +{ + int ret = 0; + ret = SetDevice(compilation); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNCompilation_SetDevice failed! ret=%d\n", ret); + return ret; + } + // set cache + if (!compileParam.cacheDir.empty()) { + ret = OH_NNCompilation_SetCache(compilation, compileParam.cacheDir.c_str(), + compileParam.cacheVersion); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNCompilation_SetCache failed! 
ret=%d\n", ret); + return ret; + } + } + // set performance + if (compileParam.performanceMode != OH_NN_PERFORMANCE_NONE) { + ret = OH_NNCompilation_SetPerformanceMode(compilation, compileParam.performanceMode); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNCompilation_SetPerformanceMode failed! ret=%d\n", ret); + return ret; + } + } + // set priority + if (compileParam.priority != OH_NN_PRIORITY_NONE) { + ret = OH_NNCompilation_SetPriority(compilation, compileParam.priority); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNCompilation_SetPriority failed! ret=%d\n", ret); + return ret; + } + } + // enable fp16 + if (compileParam.enableFp16) { + ret = OH_NNCompilation_EnableFloat16(compilation, compileParam.enableFp16); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNCompilation_EnableFloat16 failed! ret=%d\n", ret); + return ret; + } + } + // build + ret = OH_NNCompilation_Build(compilation); + return ret; +} + + +int ExecuteGraphMock(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, + float* expect) +{ + OHOS::sptr device = V1_0::MockIDevice::GetInstance(); + int ret = 0; + uint32_t inputIndex = 0; + uint32_t outputIndex = 0; + for (auto i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t) operandTem.shape.size(), + operandTem.shape.data(), + quantParam, operandTem.type}; + if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) != + graphArgs.inputIndices.end()) { + ret = OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, + operandTem.length); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNExecutor_SetInput failed! ret=%d\n", ret); + return ret; + } + inputIndex += 1; + } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) != + graphArgs.outputIndices.end()) { + ret = OH_NNExecutor_SetOutput(executor, outputIndex, operandTem.data, operandTem.length); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNExecutor_SetOutput failed! ret=%d\n", ret); + return ret; + } + if (expect != nullptr) { + ret = device->MemoryCopy(expect, operandTem.length); + } + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] device set expect output failed! ret=%d\n", ret); + return ret; + } + outputIndex += 1; + } + } + ret = OH_NNExecutor_Run(executor); + return ret; +} + +int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, OH_NN_Memory *OHNNMemory[], + float* expect) +{ + OHOS::sptr device = V1_0::MockIDevice::GetInstance(); + int ret = 0; + uint32_t inputIndex = 0; + uint32_t outputIndex = 0; + for (auto i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t) operandTem.shape.size(), + operandTem.shape.data(), + quantParam, operandTem.type}; + if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) != + graphArgs.inputIndices.end()) { + OH_NN_Memory *inputMemory = OH_NNExecutor_AllocateInputMemory(executor, inputIndex, + operandTem.length); + ret = OH_NNExecutor_SetInputWithMemory(executor, inputIndex, &operand, inputMemory); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNExecutor_SetInputWithMemory failed! 
ret=%d\n", ret); + return ret; + } + memcpy_s(inputMemory->data, operandTem.length, static_cast(operandTem.data), operandTem.length); + OHNNMemory[inputIndex] = inputMemory; + inputIndex += 1; + } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) != + graphArgs.outputIndices.end()) { + OH_NN_Memory *outputMemory = OH_NNExecutor_AllocateOutputMemory(executor, outputIndex, + operandTem.length); + ret = OH_NNExecutor_SetOutputWithMemory(executor, outputIndex, outputMemory); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNExecutor_SetOutputWithMemory failed! ret=%d\n", ret); + return ret; + } + ret = device->MemoryCopy(expect, operandTem.length); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] device set expect output failed! ret=%d\n", ret); + return ret; + } + OHNNMemory[inputIndex + outputIndex] = outputMemory; + outputIndex += 1; + } + } + ret = OH_NNExecutor_Run(executor); + return ret; +} + + +void Free(OH_NNModel *model, OH_NNCompilation *compilation, OH_NNExecutor *executor) +{ + if (model != nullptr) { + OH_NNModel_Destroy(&model); + ASSERT_EQ(nullptr, model); + } + if (compilation != nullptr) { + OH_NNCompilation_Destroy(&compilation); + ASSERT_EQ(nullptr, compilation); + } + if (executor != nullptr) { + OH_NNExecutor_Destroy(&executor); + ASSERT_EQ(nullptr, executor); + } +} + +PathType CheckPath(const std::string &path) +{ + if (path.empty()) { + LOGI("CheckPath: path is null"); + return PathType::NOT_FOUND; + } + struct stat buf{}; + if (stat(path.c_str(), &buf) == 0) { + if (buf.st_mode & S_IFDIR) { + return PathType::DIR; + } else if (buf.st_mode & S_IFREG) { + return PathType::FILE; + } else { + return PathType::UNKNOWN; + } + } + LOGI("%s not found", path.c_str()); + return PathType::NOT_FOUND; +} + +bool DeleteFile(const std::string &path) +{ + if (path.empty()) { + LOGI("DeleteFile: path is null"); + return false; + } + if (CheckPath(path) == PathType::NOT_FOUND) { + LOGI("not found: %s", path.c_str()); + return true; + } + if (remove(path.c_str()) == 0) { + LOGI("deleted: %s", path.c_str()); + return true; + } + LOGI("delete failed: %s", path.c_str()); + return false; +} + +void CopyFile(const std::string &srcPath, const std::string &dstPath) +{ + std::ifstream src(srcPath, std::ios::binary); + std::ofstream dst(dstPath, std::ios::binary); + + dst << src.rdbuf(); +} + +std::string ConcatPath(const std::string &str1, const std::string &str2) +{ + // boundary + if (str2.empty()) { + return str1; + } + if (str1.empty()) { + return str2; + } + // concat + char end = str1[str1.size() - 1]; + if (end == '\\' or end == '/') { + return str1 + str2; + } else { + return str1 + '/' + str2; + } +} + +void DeleteFolder(const std::string &path) +{ + if (path.empty()) { + LOGI("DeletePath: path is null"); + return; + } + + DIR *dir = opendir(path.c_str()); + // check is dir ? + if (dir == nullptr) { + LOGE("[NNRtTest] Can not open dir. Check path or permission! path: %s", path.c_str()); + return; + } + struct dirent *file; + // read all the files in dir + std::vector pathList; + while ((file = readdir(dir)) != nullptr) { + // skip "." and ".." 
+        if (strcmp(file->d_name, ".") == 0 || strcmp(file->d_name, "..") == 0) {
+            continue;
+        }
+        if (file->d_type == DT_DIR) {
+            std::string filePath = path + "/" + file->d_name;
+            DeleteFolder(filePath); // recurse into subdirectories
+        } else {
+            pathList.emplace_back(ConcatPath(path, file->d_name));
+        }
+    }
+    closedir(dir);
+    pathList.emplace_back(path);
+    LOGI("[Common] Delete folder %s", path.c_str());
+    for (auto &i : pathList) {
+        DeleteFile(i);
+    }
+}
+
+bool CreateFolder(const std::string &path)
+{
+    if (path.empty()) {
+        LOGI("CreateFolder: path is empty");
+        return false;
+    }
+    LOGI("CreateFolder:%s", path.c_str());
+    mode_t mode = 0700;
+    for (int i = 1; i < path.size() - 1; i++) {
+        if (path[i] != '/') {
+            continue;
+        }
+        PathType ret = CheckPath(path.substr(0, i));
+        switch (ret) {
+            case PathType::DIR:
+                continue;
+            case PathType::NOT_FOUND:
+                LOGI("mkdir: %s", path.substr(0, i).c_str());
+                mkdir(path.substr(0, i).c_str(), mode);
+                break;
+            default:
+                LOGI("error: %s", path.substr(0, i).c_str());
+                return false;
+        }
+    }
+    mkdir(path.c_str(), mode);
+    return CheckPath(path) == PathType::DIR;
+}
+
+bool CheckOutput(const float* output, const float* expect)
+{
+    if (output == nullptr || expect == nullptr) {
+        LOGE("[NNRtTest] output or expect is nullptr\n");
+        return false;
+    }
+    for (int i = 0; i < ELEMENT_COUNT; i++) {
+        if (std::abs(float(output[i]) - float(expect[i])) > 1e-8) {
+            for (int j = 0; j < ELEMENT_COUNT; j++) {
+                LOGE("[NNRtTest] output %d not match: expect:%f, actual:%f\n", j, float(expect[j]), float(output[j]));
+            }
+            return false;
+        }
+    }
+    return true;
+}
+
+} // namespace Test
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
diff --git a/test/nnrt_xts_acts/neural_network_runtime/v1_0/common/nnrt_utils.h b/test/nnrt_xts_acts/neural_network_runtime/v1_0/common/nnrt_utils.h
new file mode 100644
index 0000000..a8d1011
--- /dev/null
+++ b/test/nnrt_xts_acts/neural_network_runtime/v1_0/common/nnrt_utils.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +#ifndef NNRT_UTILS_H +#define NNRT_UTILS_H + +#include +#include +#include + +#include "neural_network_runtime/neural_network_runtime.h" +#include "log.h" +#include "mock_idevice.h" +#include "const.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Test { +namespace V1_0 = OHOS::HDI::Nnrt::V1_0; +struct OHNNOperandTest { + OH_NN_DataType dataType; + OH_NN_TensorType type; + std::vector shape; + void *data{nullptr}; + int32_t length{0}; + const OH_NN_QuantParam *quantParam = nullptr; +}; + +struct OHNNGraphArgs { + OH_NN_OperationType operationType; + std::vector operands; + std::vector paramIndices; + std::vector inputIndices; + std::vector outputIndices; + bool build = true; + bool specifyIO = true; + bool addOperation = true; +}; + +struct OHNNGraphArgsMulti { + std::vector operationTypes; + std::vector> operands; + std::vector> paramIndices; + std::vector> inputIndices; + std::vector> outputIndices; + std::vector graphInput; + std::vector graphOutput; +}; + +struct OHNNCompileParam { + int32_t deviceId = 0; + std::string cacheDir; + uint32_t cacheVersion = 0; + OH_NN_PerformanceMode performanceMode = OH_NN_PERFORMANCE_NONE; + OH_NN_Priority priority = OH_NN_PRIORITY_NONE; + bool enableFp16 = false; +}; + +int BuildSingleOpGraph(OH_NNModel *model, const OHNNGraphArgs &graphArgs); + +int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, OH_NN_Memory *OHNNMemory[], + float* expect); + +void Free(OH_NNModel *model = nullptr, OH_NNCompilation *compilation = nullptr, OH_NNExecutor *executor = nullptr); + +int CompileGraphMock(OH_NNCompilation *compilation, const OHNNCompileParam &compileParam); + +int ExecuteGraphMock(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, float* expect); + +OH_NN_ReturnCode SetDevice(OH_NNCompilation *compilation); +int BuildMultiOpGraph(OH_NNModel *model, const OHNNGraphArgsMulti &graphArgs); +OH_NN_UInt32Array GetUInt32Array(std::vector indices); + +bool CheckOutput(const float* output, const float* expect); + +enum class PathType { FILE, DIR, UNKNOWN, NOT_FOUND }; +PathType CheckPath(const std::string &path); +bool DeleteFile(const std::string &path); +void CopyFile(const std::string &srcPath, const std::string &dstPath); +std::string ConcatPath(const std::string &str1, const std::string &str2); +void DeleteFolder(const std::string &path); +bool CreateFolder(const std::string &path); + +} // namespace Test +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NNRT_UTILS_H \ No newline at end of file diff --git a/test/nnrt_xts_acts/neural_network_runtime/v1_0/interface/BUILD.gn b/test/nnrt_xts_acts/neural_network_runtime/v1_0/interface/BUILD.gn new file mode 100644 index 0000000..21bdd47 --- /dev/null +++ b/test/nnrt_xts_acts/neural_network_runtime/v1_0/interface/BUILD.gn @@ -0,0 +1,53 @@ +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import("//build/test.gni") + +config("nnrt_config") { + include_dirs = [ "../common" ] + cflags = [ "-Wno-error" ] + cflags_cc = [ "-fexceptions" ] +} + +ohos_systemtest("ActsAiNnrtFunctionV1_0Test") { + module_out_path = "neural_network_runtime/neural_network_runtime" + sources = [ + "../common/mock_idevice.cpp", + "../common/nnrt_utils.cpp", + "src/CompileTest.cpp", + "src/DeviceTest.cpp", + "src/ExecutorTest.cpp", + "src/MemoryTest.cpp", + "src/ModelTest.cpp", + ] + + configs = [ ":nnrt_config" ] + + external_deps = [ + "c_utils:utils", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "googletest:gmock_main", + "googletest:gtest_main", + "hdf_core:libhdf_utils", + "hdf_core:libhdi", + "hilog:libhilog", + "hitrace:libhitracechain", + "ipc:ipc_single", + "mindspore:mindir_lib", + "neural_network_runtime:libneural_network_core", + "neural_network_runtime:libneural_network_runtime", + ] + + subsystem_name = "ai" + part_name = "neural_network_runtime" +} diff --git a/test/nnrt_xts_acts/neural_network_runtime/v1_0/interface/src/CompileTest.cpp b/test/nnrt_xts_acts/neural_network_runtime/v1_0/interface/src/CompileTest.cpp new file mode 100644 index 0000000..e91c950 --- /dev/null +++ b/test/nnrt_xts_acts/neural_network_runtime/v1_0/interface/src/CompileTest.cpp @@ -0,0 +1,875 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include +#include +#include + +#include "nnrt_utils.h" +#include "model.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +using namespace OHOS::NeuralNetworkRuntime::Test; +using namespace OHOS::HDI::Nnrt::V1_0; + +namespace { + +class CompileTest : public testing::Test { +public: + void SetUp() + { + CreateFolder(CACHE_DIR); + } + void TearDown() + { + DeleteFolder(CACHE_DIR); + } + void GenCacheFile() + { + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + OHNNCompileParam compileParam{ + .cacheDir = "./cache", + .cacheVersion = 10, + }; + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + Free(model, compilation); + ASSERT_TRUE(CheckPath(CACHE_PATH) == PathType::FILE); + ASSERT_TRUE(CheckPath(CACHE_INFO_PATH) == PathType::FILE); + } + void DestroyCache() + { + std::ifstream ifs(CACHE_PATH.c_str(), std::ios::in | std::ios::binary); + char* ptr{nullptr}; + int cacheSize = ifs.tellg(); + int invalidCacheSize = cacheSize * 0.9; + ifs.read(ptr, cacheSize); + ifs.close(); + std::ofstream ofs(CACHE_PATH.c_str(), std::ios::out | std::ios::binary); + ofs.write(ptr, invalidCacheSize); + ofs.close(); + } + +protected: + OHNNCompileParam m_compileParam; + AddModel addModel; + OHNNGraphArgs graphArgs = addModel.graphArgs; +}; + +void CompileModel(OH_NNCompilation *compilation, const OHNNCompileParam &compileParam) +{ + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); +} + +} // namespace + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_Create_0100 + * @tc.name : 创建编译实例,model为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_Create_0100, Function | MediumTest | Level3) +{ + OH_NNCompilation *compilation = OH_NNCompilation_Construct(nullptr); + ASSERT_EQ(nullptr, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_Create_0200 + * @tc.name : 创建编译实例,model未完成构图 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_Create_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation)); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNCompilation_Build(compilation)); + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_Create_0300 + * @tc.name : 创建编译实例,model已完成构图,存在算子不支持 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_Create_0300, Function | MediumTest | Level2) +{ + OHOS::sptr device = V1_0::MockIDevice::GetInstance(); + std::vector isSupported = {true, false}; + device->SetOperationsSupported(isSupported); + + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + + AddTopKModel addTopKModel; + OHNNGraphArgsMulti graphArgsMulti = addTopKModel.graphArgs; + ASSERT_EQ(OH_NN_SUCCESS, BuildMultiOpGraph(model, graphArgsMulti)); + + const size_t *devicesID{nullptr}; + const bool *realSupported{nullptr}; + uint32_t opCount; + uint32_t devicesCount; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount)); + size_t targetDevice = devicesID[0]; + + 
OH_NN_ReturnCode ret = OH_NNModel_GetAvailableOperations(model, targetDevice, &realSupported, &opCount); + ASSERT_EQ(OH_NN_SUCCESS, ret); + for (int i = 0; i < opCount; i++) { + EXPECT_EQ(realSupported[i], isSupported[i]); + } + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation)); + ASSERT_EQ(OH_NN_FAILED, OH_NNCompilation_Build(compilation)); + Free(model, compilation); + device->SetOperationsSupported({true}); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetDevice_0100 + * @tc.name : 设置device,compilation为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetDevice_0100, Function | MediumTest | Level3) +{ + const size_t *devicesID{nullptr}; + uint32_t devicesCount{0}; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount)); + + size_t targetDevice = devicesID[0]; // Use the first device in system test. + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNCompilation_SetDevice(nullptr, targetDevice)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetDevice_0200 + * @tc.name : 设置device,deviceID不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetDevice_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(compilation, 100000)); + ASSERT_EQ(OH_NN_FAILED, OH_NNCompilation_Build(compilation)); + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetDevice_0300 + * @tc.name : 设置device,deviceID存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetDevice_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + OHNNCompileParam compileParam; + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetCache_0100 + * @tc.name : 设置cache路径及版本,compilation为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetCache_0100, Function | MediumTest | Level3) +{ + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNCompilation_SetCache(nullptr, "./", 0)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetCache_0200 + * @tc.name : 设置cache路径及版本,cacheDir为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetCache_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNCompilation_SetCache(compilation, nullptr, 0)); + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetCache_0300 + * @tc.name : device不支持,设置cache路径及版本 + * @tc.desc : [C- SOFTWARE 
-0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetCache_0300, Function | MediumTest | Level3)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    // set model cache unavailable
+    OHOS::sptr<V1_0::MockIDevice> device = V1_0::MockIDevice::GetInstance();
+    device->SetModelCacheSupported(false);
+
+    ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation));
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetCache(compilation, "./cache", 10));
+    ASSERT_EQ(OH_NN_OPERATION_FORBIDDEN, OH_NNCompilation_Build(compilation));
+    Free(model, compilation);
+    device->SetModelCacheSupported(true);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetCache_0400
+ * @tc.name : 设置不存在cache路径
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetCache_0400, Function | MediumTest | Level3)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    OHNNCompileParam compileParam{.cacheDir = "./test"};
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, CompileGraphMock(compilation, compileParam));
+    Free(model, compilation);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetCache_0500
+ * @tc.name : 设置cache路径,cache破坏,重新生成cache
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetCache_0500, Function | MediumTest | Level2)
+{
+    // generate cache file in cache directory
+    GenCacheFile();
+    // destroy cache file to invalid size
+    DestroyCache();
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    OHNNCompileParam compileParam{
+        .cacheDir = "./cache",
+        .cacheVersion = 10,
+    };
+    ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam));
+    Free(model, compilation);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetCache_0600
+ * @tc.name : 设置version,小于cache版本号
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetCache_0600, Function | MediumTest | Level2)
+{
+    GenCacheFile();
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    OHNNCompileParam compileParam{
+        .cacheDir = "./cache",
+        .cacheVersion = 9,
+    };
+    ASSERT_EQ(OH_NN_OPERATION_FORBIDDEN, CompileGraphMock(compilation, compileParam));
+    Free(model, compilation);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetCache_0700
+ * @tc.name : 设置version,等于cache版本号
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetCache_0700, Function | MediumTest | Level2)
+{
+    GenCacheFile();
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    OHNNCompileParam compileParam{
+        .cacheDir =
"./cache", + .cacheVersion = 10, + }; + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetCache_0800 + * @tc.name : 设置version,大于cache版本号 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetCache_0800, Function | MediumTest | Level2) +{ + GenCacheFile(); + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + OHNNCompileParam compileParam{ + .cacheDir = "./cache", + .cacheVersion = 11, + }; + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0100 + * @tc.name : 设置priority,compilation为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0100, Function | MediumTest | Level3) +{ + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNCompilation_SetPerformanceMode(nullptr, OH_NN_PERFORMANCE_MEDIUM)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0200 + * @tc.name : device不支持,设置performance + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_Mock_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_LOW, + }; + OHOS::sptr device = V1_0::MockIDevice::GetInstance(); + device->SetPerformanceSupported(false); + ASSERT_EQ(OH_NN_OPERATION_FORBIDDEN, CompileGraphMock(compilation, compileParam)); + Free(model, compilation); + device->SetPerformanceSupported(true); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0300 + * @tc.name : 设置performanceMode为NONE + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0300, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetPerformanceMode(compilation, OH_NN_PERFORMANCE_NONE)); + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0400 + * @tc.name : 设置performanceMode为LOW + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0400, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetPerformanceMode(compilation, OH_NN_PERFORMANCE_LOW)); + Free(model, compilation); +} + +/** + * 
@tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0500
+ * @tc.name : 设置performanceMode为MEDIUM
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0500, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation));
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetPerformanceMode(compilation, OH_NN_PERFORMANCE_MEDIUM));
+    Free(model, compilation);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0600
+ * @tc.name : 设置performanceMode为HIGH
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0600, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation));
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetPerformanceMode(compilation, OH_NN_PERFORMANCE_HIGH));
+    Free(model, compilation);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0700
+ * @tc.name : 设置performanceMode为EXTREME
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0700, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation));
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetPerformanceMode(compilation, OH_NN_PERFORMANCE_EXTREME));
+    Free(model, compilation);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0800
+ * @tc.name : 设置performanceMode为NONE-1
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0800, Function | MediumTest | Level3)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation));
+    ASSERT_EQ(OH_NN_SUCCESS,
+              OH_NNCompilation_SetPerformanceMode(compilation,
+                                                  static_cast<OH_NN_PerformanceMode>(OH_NN_PERFORMANCE_NONE - 1)));
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNCompilation_Build(compilation));
+    Free(model, compilation);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0900
+ * @tc.name : 设置performanceMode为EXTREME+1
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0900, Function | MediumTest | Level3)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation));
+    ASSERT_EQ(OH_NN_SUCCESS,
+              OH_NNCompilation_SetPerformanceMode(compilation,
+                                                  static_cast<OH_NN_PerformanceMode>(OH_NN_PERFORMANCE_EXTREME + 1)));
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNCompilation_Build(compilation));
+    Free(model, compilation);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPriority_0100
+ * @tc.name : 设置priority,compilation为nullptr
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPriority_0100, Function | MediumTest | Level3)
+{
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNCompilation_SetPriority(nullptr, OH_NN_PRIORITY_MEDIUM));
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPriority_0200
+ * @tc.name : device不支持,设置priority
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPriority_0200, Function | MediumTest | Level3)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    // set device not supported
+    OHOS::sptr<V1_0::MockIDevice> device = V1_0::MockIDevice::GetInstance();
+    device->SetPrioritySupported(false);
+    ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation));
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetPriority(compilation, OH_NN_PRIORITY_LOW));
+    ASSERT_EQ(OH_NN_OPERATION_FORBIDDEN, OH_NNCompilation_Build(compilation));
+    Free(model, compilation);
+    device->SetPrioritySupported(true);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPriority_0300
+ * @tc.name : 设置priority为NONE
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPriority_0300, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation));
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetPriority(compilation, OH_NN_PRIORITY_NONE));
+    Free(model, compilation);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPriority_0400
+ * @tc.name : 设置priority为LOW
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPriority_0400, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation));
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetPriority(compilation, OH_NN_PRIORITY_LOW));
+    Free(model, compilation);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPriority_0500
+ * @tc.name : 设置priority为MEDIUM
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPriority_0500, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation));
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetPriority(compilation, OH_NN_PRIORITY_MEDIUM));
+    Free(model, compilation);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPriority_0600
+ * @tc.name : 设置priority为HIGH
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPriority_0600, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation));
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetPriority(compilation, OH_NN_PRIORITY_HIGH));
+    Free(model, compilation);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPriority_0700
+ * @tc.name : 设置priority为NONE-1
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPriority_0700, Function | MediumTest | Level3)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation));
+    ASSERT_EQ(OH_NN_SUCCESS,
+              OH_NNCompilation_SetPriority(compilation, static_cast<OH_NN_Priority>(OH_NN_PRIORITY_NONE - 1)));
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNCompilation_Build(compilation));
+    Free(model, compilation);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPriority_0800
+ * @tc.name : 设置priority为HIGH+1
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPriority_0800, Function | MediumTest | Level3)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation));
+    ASSERT_EQ(OH_NN_SUCCESS,
+              OH_NNCompilation_SetPriority(compilation, static_cast<OH_NN_Priority>(OH_NN_PRIORITY_HIGH + 1)));
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNCompilation_Build(compilation));
+    Free(model, compilation);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_EnableFloat16_0100
+ * @tc.name : 设置enableFloat16,compilation为nullptr
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_EnableFloat16_0100, Function | MediumTest | Level3)
+{
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNCompilation_EnableFloat16(nullptr, OH_NN_PERFORMANCE_MEDIUM));
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_EnableFloat16_0200
+ * @tc.name : device支持,设置fp16推理为false
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_EnableFloat16_0200, Function | MediumTest | Level3)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation));
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_EnableFloat16(compilation, false));
+    Free(model, compilation);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_EnableFloat16_0300
+ * @tc.name : device不支持,设置fp16推理为false
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_EnableFloat16_0300, Function | MediumTest | Level2)
+{
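+    // The mock device is marked below as not supporting FP16 inference, so building the compilation
+    // after enabling float16 is expected to return OH_NN_OPERATION_FORBIDDEN.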
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    // set fp16 unavailable
+    OHOS::sptr<V1_0::MockIDevice> device = V1_0::MockIDevice::GetInstance();
+    device->SetFP16Supported(false);
+
+    ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation));
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_EnableFloat16(compilation, true));
+    ASSERT_EQ(OH_NN_OPERATION_FORBIDDEN, OH_NNCompilation_Build(compilation));
+    Free(model, compilation);
+    device->SetFP16Supported(true);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_EnableFloat16_0400
+ * @tc.name : device不支持,设置fp16推理为true
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_EnableFloat16_0400, Function | MediumTest | Level3)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    // set fp16 unavailable
+    OHOS::sptr<V1_0::MockIDevice> device = V1_0::MockIDevice::GetInstance();
+    device->SetFP16Supported(false);
+
+    ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation));
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_EnableFloat16(compilation, true));
+    ASSERT_EQ(OH_NN_OPERATION_FORBIDDEN, OH_NNCompilation_Build(compilation));
+    Free(model, compilation);
+    device->SetFP16Supported(true);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_Build_0100
+ * @tc.name : 编译模型,compilation为空指针
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_Build_0100, Function | MediumTest | Level3)
+{
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNCompilation_Build(nullptr));
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_Build_0200
+ * @tc.name : 编译模型,未设置device,默认设备,返回成功
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_Build_0200, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(compilation));
+    Free(model, compilation);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_Build_0300
+ * @tc.name : 编译模型,仅设置device,默认配置测试
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_Build_0300, Function | MediumTest | Level1)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    OHNNCompileParam compileParam;
+    ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam));
+    Free(model, compilation);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_Build_0400
+ * @tc.name : 设置缓存路径及版本,编译模型导出缓存
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_Build_0400, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+    OH_NNCompilation *compilation =
OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + OHNNCompileParam compileParam{ + .cacheDir = "./cache", + .cacheVersion = 10, + }; + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_Destroy_0100 + * @tc.name : 释放编译实例,compilation为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_Destroy_0100, Function | MediumTest | Level3) +{ + OH_NNCompilation *compilation = nullptr; + OH_NNCompilation_Destroy(&compilation); + ASSERT_EQ(nullptr, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_Destroy_0200 + * @tc.name : 释放编译实例,未调用模型编译 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_Destroy_0200, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation)); + OH_NNCompilation_Destroy(&compilation); + ASSERT_EQ(nullptr, compilation); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_Destroy_0300 + * @tc.name : 模型已编译,释放编译实例 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_Destroy_0300, Function | MediumTest | Level0) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + OHNNCompileParam compileParam; + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNR_Func_North_Compilation_Combine_0100 + * @tc.name : 多线程并发模型编译,编译成功 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNR_Func_North_Compilation_Combine_0100, Function | MediumTest | Level2) +{ + OH_NNModel *model1 = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model1); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model1, graphArgs)); + + OH_NNModel *model2 = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model2); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model2, graphArgs)); + + OH_NNCompilation *compilation1 = OH_NNCompilation_Construct(model1); + ASSERT_NE(nullptr, compilation1); + OH_NNCompilation *compilation2 = OH_NNCompilation_Construct(model2); + ASSERT_NE(nullptr, compilation2); + + std::thread th1(CompileModel, compilation1, m_compileParam); + std::thread th2(CompileModel, compilation2, m_compileParam); + th1.join(); + th2.join(); + Free(model1, compilation1); + Free(model2, compilation2); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_Combine_0200 + * @tc.name : 已编译模型,重复编译 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_Combine_0200, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + OHNNCompileParam compileParam; + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, 
OH_NNCompilation_Build(compilation));
+    Free(model, compilation);
+}
\ No newline at end of file
diff --git a/test/nnrt_xts_acts/neural_network_runtime/v1_0/interface/src/DeviceTest.cpp b/test/nnrt_xts_acts/neural_network_runtime/v1_0/interface/src/DeviceTest.cpp
new file mode 100644
index 0000000..d2148cc
--- /dev/null
+++ b/test/nnrt_xts_acts/neural_network_runtime/v1_0/interface/src/DeviceTest.cpp
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <cstdio>
+#include <string>
+#include <vector>
+
+#include "nnrt_utils.h"
+
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Test;
+
+class DeviceTest : public testing::Test {};
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceID_0100
+ * @tc.name : 获取设备ID,*allDevicesID为nullptr
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceID_0100, Function | MediumTest | Level3)
+{
+    uint32_t count{0};
+
+    OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(nullptr, &count);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceID_0200
+ * @tc.name : 获取设备ID,**allDevicesID非nullptr
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceID_0200, Function | MediumTest | Level3)
+{
+    const size_t allDeviceIds = 0;
+    const size_t *pAllDeviceIds = &allDeviceIds;
+    uint32_t count{0};
+
+    OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(&pAllDeviceIds, &count);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceID_0300
+ * @tc.name : 获取设备ID,deviceCount为nullptr
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceID_0300, Function | MediumTest | Level3)
+{
+    const size_t *allDeviceIds = nullptr;
+
+    OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(&allDeviceIds, nullptr);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceID_0400
+ * @tc.name : 获取设备ID,设备数量校验
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceID_0400, Function | MediumTest | Level2)
+{
+    const size_t *allDeviceIds = nullptr;
+    uint32_t count{0};
+
+    OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(&allDeviceIds, &count);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+
+    uint32_t expectCount = 1;
+    EXPECT_LE(expectCount, count);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceName_0100
+ * @tc.name : 获取硬件名称,deviceID不存在
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceName_0100, Function | MediumTest | Level3)
+{
+    const size_t deviceID{100000};
+    const char *name = nullptr;
+
+    OH_NN_ReturnCode ret = OH_NNDevice_GetName(deviceID, &name);
+    EXPECT_EQ(OH_NN_FAILED, ret);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceName_0200
+ * @tc.name : 获取硬件名称,*name为nullptr
+ * @tc.desc : [C- SOFTWARE
-0200] + */ +HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceName_0200, Function | MediumTest | Level3) +{ + const size_t *devicesID{nullptr}; + uint32_t devicesCount{0}; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount)); + size_t targetDevice = devicesID[0]; + + OH_NN_ReturnCode ret = OH_NNDevice_GetName(targetDevice, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceName_0300 + * @tc.name : 获取硬件名称,**name非nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceName_0300, Function | MediumTest | Level3) +{ + const size_t *devicesID{nullptr}; + uint32_t devicesCount{0}; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount)); + size_t targetDevice = devicesID[0]; + + const char *name = "name"; + + OH_NN_ReturnCode ret = OH_NNDevice_GetName(targetDevice, &name); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceName_0400 + * @tc.name : 获取硬件名称, 结果校验 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceName_0400, Function | MediumTest | Level1) +{ + const size_t *devicesID{nullptr}; + uint32_t devicesCount{0}; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount)); + uint32_t number = 1; + EXPECT_GE(devicesCount, number); + + const char *name = nullptr; + std::string m_deviceName{"Device-CPU_TestVendor_v1_0"}; + OH_NN_ReturnCode ret = OH_NN_FAILED; + bool isHaveName = false; + for (uint32_t i = 0; i < devicesCount; i++) { + name = nullptr; + ret = OH_NNDevice_GetName(devicesID[i], &name); + EXPECT_EQ(OH_NN_SUCCESS, ret); + std::string sName(name); + if (m_deviceName == sName) { + isHaveName = true; + } + } + EXPECT_EQ(isHaveName, true); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceType_0100 + * @tc.name : 获取硬件类别,deviceType为nullprt + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceType_0100, Function | MediumTest | Level3) +{ + const size_t *devicesID{nullptr}; + uint32_t devicesCount{0}; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount)); + size_t targetDevice = devicesID[0]; + + OH_NN_ReturnCode ret = OH_NNDevice_GetType(targetDevice, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceType_0200 + * @tc.name : 获取硬件类别,deviceID不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceType_0200, Function | MediumTest | Level3) +{ + const size_t deviceID{100000}; + OH_NN_DeviceType type{OH_NN_OTHERS}; + + OH_NN_ReturnCode ret = OH_NNDevice_GetType(deviceID, &type); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceType_0300 + * @tc.name :获取硬件类别,结果校验 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceType_0300, Function | MediumTest | Level1) +{ + const size_t *devicesID{nullptr}; + uint32_t devicesCount{0}; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount)); + size_t targetDevice = devicesID[0]; + + OH_NN_DeviceType type{OH_NN_OTHERS}; + + OH_NN_ReturnCode ret = OH_NNDevice_GetType(targetDevice, &type); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} diff --git a/test/nnrt_xts_acts/neural_network_runtime/v1_0/interface/src/ExecutorTest.cpp 
b/test/nnrt_xts_acts/neural_network_runtime/v1_0/interface/src/ExecutorTest.cpp
new file mode 100644
index 0000000..763fe31
--- /dev/null
+++ b/test/nnrt_xts_acts/neural_network_runtime/v1_0/interface/src/ExecutorTest.cpp
@@ -0,0 +1,1214 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <algorithm>
+#include <cstdio>
+#include <string>
+#include <vector>
+
+#include "nnrt_utils.h"
+#include "model.h"
+
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime;
+using namespace OHOS::NeuralNetworkRuntime::Test;
+using namespace OHOS::HDI::Nnrt::V1_0;
+
+namespace {
+
+class ExecutorTest : public testing::Test {
+protected:
+    OHOS::sptr<V1_0::MockIDevice> device;
+    AddModel addModel;
+    OHNNGraphArgs graphArgs = addModel.graphArgs;
+    OHNNCompileParam compileParam;
+};
+
+void ExecuteModel(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, float* expect)
+{
+    ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, expect));
+}
+
+} // namespace
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Executor_Create_0100
+ * @tc.name : 创建执行实例,compilation为nullptr
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Create_0100, Function | MediumTest | Level3)
+{
+    OH_NNExecutor *executor = OH_NNExecutor_Construct(nullptr);
+    ASSERT_EQ(nullptr, executor);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Executor_Create_0200
+ * @tc.name : 创建执行实例,compilation未完成编译
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Create_0200, Function | MediumTest | Level3)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+
+    const size_t *devicesID{nullptr};
+    uint32_t devicesCount{0};
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount));
+    size_t targetDevice = devicesID[0];
+
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(compilation, targetDevice));
+    OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation);
+    ASSERT_EQ(nullptr, executor);
+
+    Free(model, compilation, executor);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Executor_SetInput_0100
+ * @tc.name : 设置输入,executor为nullptr
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_SetInput_0100, Function | MediumTest | Level3)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam));
+    uint32_t inputIndex = 0;
+    const OHNNOperandTest &operandTem = graphArgs.operands[0];
+    auto quantParam = operandTem.quantParam;
+    OH_NN_Tensor operand = {operandTem.dataType,
(uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNExecutor_SetInput(nullptr, inputIndex, &operand, operandTem.data, operandTem.length)); + + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_SetInput_0200 + * @tc.name : 设置输入,inputIndex不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_SetInput_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 100000; + const OHNNOperandTest &operandTem = graphArgs.operands[0]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, operandTem.length)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_SetInput_0300 + * @tc.name : 设置输入,operand参数不一致 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_SetInput_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + const OHNNOperandTest &operandTem = graphArgs.operands[0]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, OH_NN_ADD_ACTIVATIONTYPE}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, operandTem.length)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_SetInput_0400 + * @tc.name : 设置输入,operand形状改变 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_SetInput_0400, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + const OHNNOperandTest &operandTem = graphArgs.operands[0]; + auto quantParam = operandTem.quantParam; + int32_t dimensions[3]{3, 3, 3}; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), dimensions, quantParam, + operandTem.type}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, 
operandTem.data, operandTem.length)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_SetInput_0500 + * @tc.name : 设置输入,buffer为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_SetInput_0500, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + const OHNNOperandTest &operandTem = graphArgs.operands[0]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, nullptr, operandTem.length)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_SetInput_0600 + * @tc.name : 设置输入,length小于输入长度 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_SetInput_0600, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + const OHNNOperandTest &operandTem = graphArgs.operands[0]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, 0)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_SetInput_0700 + * @tc.name : 设置输入,重复设置同一inputIndex + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_SetInput_0700, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + const OHNNOperandTest &operandTem = graphArgs.operands[0]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, operandTem.length)); + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, operandTem.length)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : 
SUB_AI_NNRt_Func_North_Executor_SetOutput_0100 + * @tc.name : 设置输出,executor为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_SetOutput_0100, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + uint32_t outputIndex = 0; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) != + graphArgs.inputIndices.end()) { + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, operandTem.length)); + inputIndex += 1; + } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) != + graphArgs.outputIndices.end()) { + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNExecutor_SetOutput(nullptr, outputIndex, operandTem.data, operandTem.length)); + outputIndex += 1; + } + } + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_SetOutput_0200 + * @tc.name : 设置输出,outputIndex不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_SetOutput_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + uint32_t outputIndex = 10000; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) != + graphArgs.inputIndices.end()) { + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, operandTem.length)); + inputIndex += 1; + } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) != + graphArgs.outputIndices.end()) { + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNExecutor_SetOutput(executor, outputIndex, operandTem.data, operandTem.length)); + outputIndex += 1; + } + } + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_SetOutput_0300 + * @tc.name : 设置输出,buffer为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_SetOutput_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + 
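+    // BuildSingleOpGraph assembles the single-operator Add graph defined in model.h that these executor tests reuse.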
ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + uint32_t outputIndex = 0; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) != + graphArgs.inputIndices.end()) { + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, operandTem.length)); + inputIndex += 1; + } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) != + graphArgs.outputIndices.end()) { + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNExecutor_SetOutput(executor, outputIndex, nullptr, operandTem.length)); + outputIndex += 1; + } + } + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_SetOutput_0400 + * @tc.name : 设置输出,length小于输出长度 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_SetOutput_0400, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + uint32_t outputIndex = 0; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) != + graphArgs.inputIndices.end()) { + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, operandTem.length)); + inputIndex += 1; + } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) != + graphArgs.outputIndices.end()) { + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetOutput(executor, outputIndex, operandTem.data, 0)); + outputIndex += 1; + } + } + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_SetOutput_0500 + * @tc.name : 设置输出,重复设置同一outputIndex + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_SetOutput_0500, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t 
inputIndex = 0; + uint32_t outputIndex = 0; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) != + graphArgs.inputIndices.end()) { + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, operandTem.length)); + inputIndex += 1; + } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) != + graphArgs.outputIndices.end()) { + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetOutput(executor, outputIndex, operandTem.data, operandTem.length)); + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetOutput(executor, outputIndex, operandTem.data, operandTem.length)); + outputIndex += 1; + } + } + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Run_0100 + * @tc.name : 模型推理,executor为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Run_0100, Function | MediumTest | Level3) +{ + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_Run(nullptr)); +} +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Run_0200 + * @tc.name : 模型推理,executor未设置输入 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Run_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t outputIndex = 0; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) != + graphArgs.outputIndices.end()) { + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetOutput(executor, outputIndex, operandTem.data, operandTem.length)); + } + } + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_Run(executor)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Run_0300 + * @tc.name : 模型推理,executor未设置输出 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Run_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + if (std::find(graphArgs.inputIndices.begin(), 
graphArgs.inputIndices.end(), i) != + graphArgs.inputIndices.end()) { + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, operandTem.length)); + inputIndex += 1; + } + } + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_Run(executor)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Run_0400 + * @tc.name : 模型推理,executor设置输入个数不足 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Run_0400, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + uint32_t outputIndex = 0; + + const OHNNOperandTest &operandTem = graphArgs.operands[0]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, operandTem.length)); + inputIndex += 1; + const OHNNOperandTest &operandOut = graphArgs.operands[3]; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_SetOutput(executor, outputIndex, operandOut.data, operandOut.length)); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_Run(executor)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Run_0500 + * @tc.name : 模型推理,executor设置输出个数不足 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Run_0500, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + TopKModel topKModel; + graphArgs = topKModel.graphArgs; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + + graphArgs.outputIndices = {3}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, ExecuteGraphMock(executor, graphArgs, addModel.expectValue)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Run_0600 + * @tc.name : 定长模型推理测试 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Run_0600, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + + ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, addModel.expectValue)); + + EXPECT_TRUE(CheckOutput(addModel.outputValue, addModel.expectValue)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Run_0700 + * @tc.name : 
变长模型推理测试 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Run_0700, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + AvgPoolDynamicModel avgModel; + graphArgs = avgModel.graphArgs; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + avgModel.dynamicInput.shape = {1, 3, 3, 1}; + avgModel.output.shape = {1, 2, 2, 1}; + graphArgs.operands = {avgModel.dynamicInput, avgModel.kernel, avgModel.strides, + avgModel.padMode, avgModel.activation, avgModel.output}; + ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, avgModel.expectValue)); + // check result + EXPECT_TRUE(CheckOutput(avgModel.outputValue, avgModel.expectValue)); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0100 + * @tc.name : 获取输出维度,executor为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0100, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + + ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, addModel.expectValue)); + + EXPECT_TRUE(CheckOutput(addModel.outputValue, addModel.expectValue)); + int32_t *outputDimensions = nullptr; + uint32_t outputDimensionCount{0}; + uint32_t addOutputIndex = {0}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNExecutor_GetOutputShape(nullptr, addOutputIndex, &outputDimensions, &outputDimensionCount)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0200 + * @tc.name : 获取输出维度,outputIndex不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + + ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, addModel.expectValue)); + + EXPECT_TRUE(CheckOutput(addModel.outputValue, addModel.expectValue)); + int32_t *outputDimensions = nullptr; + uint32_t outputDimensionCount{0}; + uint32_t addOutputIndex = {10000}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNExecutor_GetOutputShape(executor, addOutputIndex, &outputDimensions, &outputDimensionCount)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0300 + * @tc.name : 获取输出维度,*dimensions为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ 
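+    // OH_NNExecutor_GetOutputShape fills caller-provided pointers with a shape buffer and its
+    // element count; the buffer is never freed in these tests (assumed to be owned by the executor).
+    // Typical call shape (sketch):
+    //     int32_t *shape = nullptr;
+    //     uint32_t shapeNum = 0;
+    //     auto ret = OH_NNExecutor_GetOutputShape(executor, 0, &shape, &shapeNum);
+    // The GetOutputDimensions cases around here probe each parameter-validation branch of that call.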
+HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + + ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, addModel.expectValue)); + + EXPECT_TRUE(CheckOutput(addModel.outputValue, addModel.expectValue)); + uint32_t outputDimensionCount{0}; + uint32_t addOutputIndex = {0}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNExecutor_GetOutputShape(executor, addOutputIndex, nullptr, &outputDimensionCount)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0400 + * @tc.name : 获取输出维度,**dimensions非nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0400, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + + ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, addModel.expectValue)); + + EXPECT_TRUE(CheckOutput(addModel.outputValue, addModel.expectValue)); + int32_t outputDimensions{2}; + int32_t *pOutputDimensions = &outputDimensions; + uint32_t outputDimensionCount{0}; + uint32_t addOutputIndex = {0}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNExecutor_GetOutputShape(executor, addOutputIndex, &pOutputDimensions, &outputDimensionCount)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0500 + * @tc.name : 获取输出维度,*dimensionCount为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0500, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + + ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, addModel.expectValue)); + + EXPECT_TRUE(CheckOutput(addModel.outputValue, addModel.expectValue)); + int32_t *outputDimensions = nullptr; + uint32_t addOutputIndex = {0}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNExecutor_GetOutputShape(executor, addOutputIndex, &outputDimensions, nullptr)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0600 + * @tc.name : 未调用推理接口,获取输出维度 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0600, Function | MediumTest | Level3) +{ + OH_NNModel *model = 
OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + uint32_t outputIndex = 0; + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) != + graphArgs.inputIndices.end()) { + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, operandTem.length)); + inputIndex += 1; + } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) != + graphArgs.outputIndices.end()) { + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetOutput(executor, outputIndex, operandTem.data, operandTem.length)); + outputIndex += 1; + } + } + int32_t *outputDimensions = nullptr; + uint32_t outputDimensionCount{0}; + uint32_t addOutputIndex = {0}; + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_GetOutputShape(executor, addOutputIndex, &outputDimensions, &outputDimensionCount)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0700 + * @tc.name : 模型推理成功,获取输出维度 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0700, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + + ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, addModel.expectValue)); + + EXPECT_TRUE(CheckOutput(addModel.outputValue, addModel.expectValue)); + int32_t *outputDimensions = nullptr; + uint32_t outputDimensionCount{0}; + uint32_t addOutputIndex = {0}; + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_GetOutputShape(executor, addOutputIndex, &outputDimensions, &outputDimensionCount)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0800 + * @tc.name : 变长模型推理成功,获取输出维度 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0800, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + AvgPoolDynamicModel avgModel; + graphArgs = avgModel.graphArgs; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + avgModel.dynamicInput.shape = {1, 3, 3, 1}; + avgModel.output.shape = {1, 2, 2, 1}; + 
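+    // GetOutputDimensions_0800: pin the dynamic input/output operands to concrete shapes before
+    // execution so the mock run produces a fixed-size result whose dimensions can then be read
+    // back through OH_NNExecutor_GetOutputShape below.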
graphArgs.operands = {avgModel.dynamicInput, avgModel.kernel, avgModel.strides, + avgModel.padMode, avgModel.activation, avgModel.output}; + ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, avgModel.expectValue)); + + // check result + EXPECT_TRUE(CheckOutput(avgModel.outputValue, avgModel.expectValue)); + int32_t *outputDimensions = nullptr; + uint32_t outputDimensionCount{0}; + uint32_t addOutputIndex = {0}; + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_GetOutputShape(executor, addOutputIndex, &outputDimensions, &outputDimensionCount)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Destroy_0100 + * @tc.name : 销毁执行器实例,*executor为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Destroy_0100, Function | MediumTest | Level3) +{ + OH_NNExecutor *executor = nullptr; + ASSERT_NO_THROW(OH_NNExecutor_Destroy(&executor)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Destroy_0200 + * @tc.name : 销毁执行器实例,executor释放 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Destroy_0200, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + + ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, addModel.expectValue)); + + EXPECT_TRUE(CheckOutput(addModel.outputValue, addModel.expectValue)); + + OH_NNExecutor_Destroy(&executor); + ASSERT_EQ(nullptr, executor); + + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNR_Func_North_Executor_Combine_0100 + * @tc.name : 并发模型推理,推理成功 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNR_Func_North_Executor_Combine_0100, Function | MediumTest | Level2) +{ + OH_NNModel *model1 = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model1); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model1, graphArgs)); + + OH_NNModel *model2 = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model2); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model2, graphArgs)); + + OH_NNCompilation *compilation1 = OH_NNCompilation_Construct(model1); + ASSERT_NE(nullptr, compilation1); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation1, compileParam)); + + OH_NNCompilation *compilation2 = OH_NNCompilation_Construct(model2); + ASSERT_NE(nullptr, compilation2); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation2, compileParam)); + + OH_NNExecutor *executor1 = OH_NNExecutor_Construct(compilation1); + ASSERT_NE(nullptr, executor1); + + OH_NNExecutor *executor2 = OH_NNExecutor_Construct(compilation2); + ASSERT_NE(nullptr, executor2); + + std::thread th1(ExecuteModel, executor1, graphArgs, addModel.expectValue); + std::thread th2(ExecuteModel, executor2, graphArgs, addModel.expectValue); + th1.join(); + th2.join(); + Free(model1, compilation1, executor1); + Free(model2, compilation2, executor2); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Combine_0200 + * @tc.name : 多次设置输入,仅首次成功,模型推理 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Combine_0200, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + 
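+    // Combine_0200: after the first OH_NNExecutor_SetInput succeeds, a second SetInput on the
+    // out-of-range index 3 must be rejected with OH_NN_INVALID_PARAMETER, and inference still
+    // completes with the data bound by the first call.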
ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + + float valueX2[4] = {3, 2, 1, 0}; + uint32_t inputIndex = 0; + uint32_t outputIndex = 0; + for (auto i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) != + graphArgs.inputIndices.end()) { + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, operandTem.length)); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNExecutor_SetInput(executor, 3, &operand, valueX2, operandTem.length)); + inputIndex += 1; + } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) != + graphArgs.outputIndices.end()) { + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetOutput(executor, outputIndex, operandTem.data, operandTem.length)); + OHOS::sptr device = V1_0::MockIDevice::GetInstance(); + ASSERT_EQ(OH_NN_SUCCESS, device->MemoryCopy(addModel.expectValue, operandTem.length)); + outputIndex += 1; + } + } + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(executor)); + // check result + EXPECT_TRUE(CheckOutput(addModel.outputValue, addModel.expectValue)); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Combine_0300 + * @tc.name : 多次设置输出,仅首次生效,模型推理 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Combine_0300, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + + uint32_t inputIndex = 0; + uint32_t outputIndex = 0; + + for (auto i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) != + graphArgs.inputIndices.end()) { + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, operandTem.length)); + inputIndex += 1; + } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) != + graphArgs.outputIndices.end()) { + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetOutput(executor, outputIndex, operandTem.data, operandTem.length)); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNExecutor_SetOutput(executor, outputIndex+10, operandTem.data, operandTem.length)); + OHOS::sptr device = V1_0::MockIDevice::GetInstance(); + ASSERT_EQ(OH_NN_SUCCESS, device->MemoryCopy(addModel.expectValue, operandTem.length)); + outputIndex 
+= 1; + } + } + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(executor)); + // check result + EXPECT_TRUE(CheckOutput(addModel.outputValue, addModel.expectValue)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(executor)); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Combine_0400 + * @tc.name : 模型推理,共享输入非共享输出 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Combine_0400, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory[graphArgs.inputIndices.size()]; + uint32_t inputIndex = 0; + uint32_t outputIndex = 0; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) != + graphArgs.inputIndices.end()) { + + OH_NN_Memory *inputMemory = OH_NNExecutor_AllocateInputMemory(executor, inputIndex, operandTem.length); + ASSERT_NE(nullptr, inputMemory); + + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_SetInputWithMemory(executor, inputIndex, &operand, inputMemory)); + + ASSERT_EQ(EOK, memcpy_s(inputMemory->data, + operandTem.length, static_cast<void*>(operandTem.data), operandTem.length)); + OHNNMemory[inputIndex] = inputMemory; + inputIndex += 1; + } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) != + graphArgs.outputIndices.end()) { + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetOutput(executor, outputIndex, operandTem.data, operandTem.length)); + OHOS::sptr<V1_0::MockIDevice> device = V1_0::MockIDevice::GetInstance(); + ASSERT_EQ(OH_NN_SUCCESS, device->MemoryCopy(addModel.expectValue, operandTem.length)); + outputIndex += 1; + } + } + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(executor)); + // check result + EXPECT_TRUE(CheckOutput(addModel.outputValue, addModel.expectValue)); + + for (size_t i = 0; i < graphArgs.inputIndices.size(); i++) { + OH_NNExecutor_DestroyInputMemory(executor, i, &OHNNMemory[i]); + ASSERT_EQ(OHNNMemory[i], nullptr); + } + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Combine_0500 + * @tc.name : 模型推理,非共享输入共享输出 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Combine_0500, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + uint32_t outputIndex = 0; + OH_NN_Memory *outputMemory = nullptr; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + auto quantParam =
operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) != + graphArgs.inputIndices.end()) { + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, operandTem.length)); + + inputIndex += 1; + } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) != + graphArgs.outputIndices.end()) { + + outputMemory = OH_NNExecutor_AllocateOutputMemory(executor, outputIndex, operandTem.length); + ASSERT_NE(nullptr, outputMemory); + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetOutputWithMemory(executor, outputIndex, outputMemory)); + OHOS::sptr<V1_0::MockIDevice> device = V1_0::MockIDevice::GetInstance(); + ASSERT_EQ(OH_NN_SUCCESS, device->MemoryCopy(addModel.expectValue, operandTem.length)); + outputIndex += 1; + } + } + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(executor)); + // check result + EXPECT_TRUE(CheckOutput(static_cast<float*>(const_cast<void*>(outputMemory->data)), + static_cast<float*>(addModel.expectValue))); + + OH_NNExecutor_DestroyOutputMemory(executor, 0, &outputMemory); + ASSERT_EQ(outputMemory, nullptr); + Free(model, compilation, executor); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/neural_network_runtime/v1_0/interface/src/MemoryTest.cpp b/test/nnrt_xts_acts/neural_network_runtime/v1_0/interface/src/MemoryTest.cpp new file mode 100644 index 0000000..5af9bda --- /dev/null +++ b/test/nnrt_xts_acts/neural_network_runtime/v1_0/interface/src/MemoryTest.cpp @@ -0,0 +1,950 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +#include +#include +#include +#include +#include + +#include "nnrt_utils.h" +#include "model.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +using namespace OHOS::NeuralNetworkRuntime::Test; +using namespace OHOS::HDI::Nnrt::V1_0; + +namespace { + +class MemoryTest : public testing::Test { +protected: + AddModel addModel; + OHNNGraphArgs graphArgs = addModel.graphArgs; + OHNNCompileParam compileParam; +}; + +void CheckCreateInputMemory(OH_NNExecutor *executor, uint32_t inputIndex, size_t length) +{ + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateInputMemory(executor, inputIndex, length); + ASSERT_NE(nullptr, OHNNMemory); + OH_NNExecutor_DestroyInputMemory(executor, inputIndex, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); +} + +void CheckCreateOutputMemory(OH_NNExecutor *executor, uint32_t outputIndex, size_t length) +{ + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateOutputMemory(executor, outputIndex, length); + ASSERT_NE(nullptr, OHNNMemory); + OH_NNExecutor_DestroyOutputMemory(executor, outputIndex, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); +} + +} // namespace + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_CreateInputMemory_0100 + * @tc.name : 创建输入共享内存,executor为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_CreateInputMemory_0100, Function | MediumTest | Level3) +{ + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateInputMemory(nullptr, 0, 4); + ASSERT_EQ(nullptr, OHNNMemory); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_CreateInputMemory_0200 + * @tc.name : 创建输入共享内存,inputIndex不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_CreateInputMemory_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateInputMemory(executor, 2, graphArgs.operands[0].length); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_CreateInputMemory_0300 + * @tc.name : 创建输入共享内存,length为0 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_CreateInputMemory_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateInputMemory(executor, 0, 0); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_CreateInputMemory_0400 + * @tc.name :创建输入共享内存,length为最大限制2G + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_CreateInputMemory_0400, Function | MediumTest | Level3) +{ + 
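+    // CreateInputMemory_0400: request a length above the allowed upper bound for one shared-memory
+    // block; OH_NNExecutor_AllocateInputMemory is expected to reject it and return nullptr.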
OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateInputMemory(executor, 0, 1024 * 1024 * 1024 + 1); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_CreateInputMemory_0500 + * @tc.name : 创建输入共享内存,inputIndex重复创建 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_CreateInputMemory_0500, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateInputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + OH_NN_Memory *OHNNMemory2 = OH_NNExecutor_AllocateInputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory2); + OH_NNExecutor_DestroyInputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + OH_NNExecutor_DestroyInputMemory(executor, 0, &OHNNMemory2); + ASSERT_EQ(nullptr, OHNNMemory2); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_CreateInputMemory_0600 + * @tc.name : 多线程创建不同index输入的共享内存 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_CreateInputMemory_0600, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + std::thread th1(CheckCreateInputMemory, executor, 0, graphArgs.operands[0].length); + std::thread th2(CheckCreateInputMemory, executor, 1, graphArgs.operands[1].length); + th1.join(); + th2.join(); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_CreateOutputMemory_0100 + * @tc.name : 创建输出共享内存,executor为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_CreateOutputMemory_0100, Function | MediumTest | Level3) +{ + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateOutputMemory(nullptr, 0, 4); + ASSERT_EQ(nullptr, OHNNMemory); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_CreateOutputMemory_0200 + * @tc.name : 创建输出共享内存,inputIndex不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_CreateOutputMemory_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, 
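+    // CreateOutputMemory_0200: the add model exposes a single output, so allocating output memory
+    // at index 2 below must fail and return a null OH_NN_Memory pointer (the @tc.name says
+    // "inputIndex", but it is the output index that is exercised here).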
BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateOutputMemory(executor, 2, graphArgs.operands[0].length); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_CreateOutputMemory_0300 + * @tc.name : 创建输出共享内存,length为0 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_CreateOutputMemory_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateOutputMemory(executor, 0, 0); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_CreateOutputMemory_0400 + * @tc.name :创建输出共享内存,length为最大限制2G + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_CreateOutputMemory_0400, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateOutputMemory(executor, 0, 1024 * 1024 * 1024 + 1); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_CreateOutputMemory_0500 + * @tc.name : 创建输出共享内存,outputIndex重复创建 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_CreateOutputMemory_0500, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateOutputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + OH_NN_Memory *OHNNMemory2 = OH_NNExecutor_AllocateOutputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory2); + OH_NNExecutor_DestroyOutputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + OH_NNExecutor_DestroyOutputMemory(executor, 0, &OHNNMemory2); + ASSERT_EQ(nullptr, OHNNMemory2); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_CreateOutputMemory_0600 + * @tc.name 
: 多线程创建不同index输出的共享内存 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_CreateOutputMemory_0600, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + TopKModel topKModel; + graphArgs = topKModel.graphArgs; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OHNNCompileParam compileParam; + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + std::thread th1(CheckCreateOutputMemory, executor, 0, graphArgs.operands[3].length); + std::thread th2(CheckCreateOutputMemory, executor, 1, graphArgs.operands[4].length); + th1.join(); + th2.join(); + Free(model, compilation, executor); +} +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_DestroyInputMemory_0100 + * @tc.name : 销毁输入共享内存,executor为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_DestroyInputMemory_0100, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateInputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + OH_NNExecutor_DestroyInputMemory(nullptr, 0, &OHNNMemory); + ASSERT_NE(nullptr, OHNNMemory); + OH_NNExecutor_DestroyInputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_DestroyInputMemory_0200 + * @tc.name : 销毁输入共享内存,inputIndex不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_DestroyInputMemory_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateInputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + OH_NNExecutor_DestroyInputMemory(executor, 1, &OHNNMemory); + ASSERT_NE(nullptr, OHNNMemory); + OH_NNExecutor_DestroyInputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_DestroyInputMemory_0300 + * @tc.name : 销毁输出共享内存,*memory为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_DestroyInputMemory_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = 
OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = nullptr; + ASSERT_NO_THROW(OH_NNExecutor_DestroyInputMemory(executor, 0, &OHNNMemory)); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_DestroyInputMemory_0400 + * @tc.name : 销毁输出共享内存,inputIndex不同memory重复销毁 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_DestroyInputMemory_0400, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateInputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + OH_NN_Memory *OHNNMemory2 = OH_NNExecutor_AllocateInputMemory(executor, 1, graphArgs.operands[1].length); + ASSERT_NE(nullptr, OHNNMemory2); + OH_NNExecutor_DestroyInputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + OH_NNExecutor_DestroyInputMemory(executor, 1, &OHNNMemory2); + ASSERT_EQ(nullptr, OHNNMemory2); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_DestroyInputMemory_0500 + * @tc.name : 多线销毁不同index输入的共享内存 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_DestroyInputMemory_0500, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateInputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + OH_NN_Memory *OHNNMemory2 = OH_NNExecutor_AllocateInputMemory(executor, 1, graphArgs.operands[1].length); + ASSERT_NE(nullptr, OHNNMemory2); + std::thread th1(OH_NNExecutor_DestroyInputMemory, executor, 0, &OHNNMemory); + std::thread th2(OH_NNExecutor_DestroyInputMemory, executor, 1, &OHNNMemory2); + th1.join(); + th2.join(); + ASSERT_EQ(nullptr, OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory2); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_DestroyOutputMemory_0100 + * @tc.name : 销毁输出共享内存,executor为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_DestroyOutputMemory_0100, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = 
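+    // DestroyOutputMemory_0100: destroying through a null executor must leave the OH_NN_Memory
+    // handle untouched; it is then released through the valid executor and reset to nullptr.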
OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateOutputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + OH_NNExecutor_DestroyOutputMemory(nullptr, 0, &OHNNMemory); + ASSERT_NE(nullptr, OHNNMemory); + OH_NNExecutor_DestroyOutputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_DestroyOutputMemory_0200 + * @tc.name : 销毁输出共享内存,inputIndex不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_DestroyOutputMemory_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateOutputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + OH_NNExecutor_DestroyOutputMemory(executor, 1, &OHNNMemory); + ASSERT_NE(nullptr, OHNNMemory); + OH_NNExecutor_DestroyOutputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_DestroyOutputMemory_0300 + * @tc.name : 销毁输出共享内存,*memory为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_DestroyOutputMemory_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + ASSERT_NO_THROW(OH_NNExecutor_DestroyOutputMemory(executor, 0, nullptr)); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_DestroyOutputMemory_0400 + * @tc.name : 销毁输出共享内存,inputIndex不同memory重复销毁 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_DestroyOutputMemory_0400, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateOutputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + OH_NN_Memory *OHNNMemory2 = OH_NNExecutor_AllocateOutputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory2); + OH_NNExecutor_DestroyOutputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + OH_NNExecutor_DestroyOutputMemory(executor, 0, &OHNNMemory2); + ASSERT_EQ(nullptr, OHNNMemory2); + 
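+    // Both allocations above target the same outputIndex; each handle is destroyed independently
+    // and must end up reset to nullptr before the executor is freed.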
Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_DestroyOutputMemory_0500 + * @tc.name : 多线销毁不同index输出的共享内存 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_DestroyOutputMemory_0500, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + TopKModel topKModel; + graphArgs = topKModel.graphArgs; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateOutputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + OH_NN_Memory *OHNNMemory2 = OH_NNExecutor_AllocateOutputMemory(executor, 1, graphArgs.operands[1].length); + ASSERT_NE(nullptr, OHNNMemory2); + std::thread th1(OH_NNExecutor_DestroyOutputMemory, executor, 0, &OHNNMemory); + std::thread th2(OH_NNExecutor_DestroyOutputMemory, executor, 1, &OHNNMemory2); + th1.join(); + th2.join(); + ASSERT_EQ(nullptr, OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory2); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_SetInputFromMemory_0100 + * @tc.name : 设置输入共享内存,executor为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_SetInputFromMemory_0100, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateInputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + const OHNNOperandTest &operandTem = graphArgs.operands[0]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetInputWithMemory(nullptr, 0, &operand, OHNNMemory)); + OH_NNExecutor_DestroyInputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_SetInputFromMemory_0200 + * @tc.name : 设置输入共享内存,inputIndex不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_SetInputFromMemory_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateInputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, 
OHNNMemory); + const OHNNOperandTest &operandTem = graphArgs.operands[0]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetInputWithMemory(executor, 2, &operand, OHNNMemory)); + OH_NNExecutor_DestroyInputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_SetInputFromMemory_0300 + * @tc.name : 设置输入共享内存,operand为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_SetInputFromMemory_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateInputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetInputWithMemory(executor, 0, nullptr, OHNNMemory)); + OH_NNExecutor_DestroyInputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_SetInputFromMemory_0400 + * @tc.name : 设置输入共享内存,operand与输入不匹配 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_SetInputFromMemory_0400, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory1 = OH_NNExecutor_AllocateInputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory1); + const OHNNOperandTest &operandTem = graphArgs.operands[2]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetInputWithMemory(executor, 0, &operand, OHNNMemory1)); + OH_NNExecutor_DestroyInputMemory(executor, 0, &OHNNMemory1); + ASSERT_EQ(nullptr, OHNNMemory1); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_SetInputFromMemory_0500 + * @tc.name : 设置输入共享内存,memory为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_SetInputFromMemory_0500, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor 
= OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateInputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + const OHNNOperandTest &operandTem = graphArgs.operands[0]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetInputWithMemory(executor, 0, &operand, nullptr)); + OH_NNExecutor_DestroyInputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_SetInputFromMemory_0600 + * @tc.name : 设置输入共享内存,重复设置相同inputIndex + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_SetInputFromMemory_0600, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateInputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + const OHNNOperandTest &operandTem = graphArgs.operands[0]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_SetInputWithMemory(executor, 0, &operand, OHNNMemory)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_SetInputWithMemory(executor, 0, &operand, OHNNMemory)); + OH_NNExecutor_DestroyInputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_SetOutputFromMemory_0100 + * @tc.name : 设置输出共享内存,executor为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_SetOutputFromMemory_0100, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateOutputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetOutputWithMemory(nullptr, 0, OHNNMemory)); + OH_NNExecutor_DestroyOutputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_SetOutputFromMemory_0200 + * @tc.name : 设置输出共享内存,outputIndex不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_SetOutputFromMemory_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + 
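+    // SetOutputFromMemory_0200: memory is allocated for output 0, but binding it to the
+    // non-existent outputIndex 1 must return OH_NN_INVALID_PARAMETER.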
ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateOutputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetOutputWithMemory(executor, 1, OHNNMemory)); + OH_NNExecutor_DestroyOutputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_SetOutputFromMemory_0300 + * @tc.name : 设置输出共享内存,memory为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_SetOutputFromMemory_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateOutputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetOutputWithMemory(executor, 0, nullptr)); + OH_NNExecutor_DestroyOutputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_SetOutputFromMemory_0400 + * @tc.name : 设置输出共享内存,重复设置相同outputIndex + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_SetOutputFromMemory_0400, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateOutputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_SetOutputWithMemory(executor, 0, OHNNMemory)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_SetOutputWithMemory(executor, 0, OHNNMemory)); + OH_NNExecutor_DestroyOutputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_Run_0100 + * @tc.name : 共享内存模型推理,executor设置输入个数不足 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_Run_0100, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, 
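+        // Memory_Run_0100: only the first input is bound through shared memory (and that memory is
+        // destroyed again), so the OH_NNExecutor_Run call at the end must report
+        // OH_NN_INVALID_PARAMETER for the missing inputs.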
compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + uint32_t outputIndex = 0; + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + if (i == 0) { + OH_NN_Memory *inputMemory = OH_NNExecutor_AllocateInputMemory(executor, inputIndex, operandTem.length); + ASSERT_NE(nullptr, inputMemory); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_SetInputWithMemory(executor, inputIndex, &operand, inputMemory)); + + ASSERT_EQ(EOK, memcpy_s(inputMemory->data, operandTem.length, static_cast(operandTem.data), operandTem.length)); + + OH_NNExecutor_DestroyInputMemory(executor, inputIndex, &inputMemory); + ASSERT_EQ(nullptr, inputMemory); + } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) != + graphArgs.outputIndices.end()) { + OH_NN_Memory *outputMemory = OH_NNExecutor_AllocateOutputMemory(executor, outputIndex, operandTem.length); + ASSERT_NE(nullptr, outputMemory); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_SetOutputWithMemory(executor, outputIndex, outputMemory)); + OH_NNExecutor_DestroyOutputMemory(executor, outputIndex, &outputMemory); + ASSERT_EQ(nullptr, outputMemory); + outputIndex += 1; + } + } + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_Run(executor)); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_Run_0200 + * @tc.name : 共享内存模型推理,executor设置输出个数不足 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_Run_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) != + graphArgs.inputIndices.end()) { + OH_NN_Memory *inputMemory = OH_NNExecutor_AllocateInputMemory(executor, inputIndex, operandTem.length); + ASSERT_NE(nullptr, inputMemory); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_SetInputWithMemory(executor, inputIndex, &operand, inputMemory)); + ASSERT_EQ(EOK, memcpy_s(inputMemory->data, + operandTem.length, static_cast(operandTem.data), operandTem.length)); + OH_NNExecutor_DestroyInputMemory(executor, inputIndex, &inputMemory); + ASSERT_EQ(nullptr, inputMemory); + } + } + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_Run(executor)); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_Run_0300 + * @tc.name : 共享内存,定长模型推理测试 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_Run_0300, Function | MediumTest | Level1) +{ + OH_NNModel 
*model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + size_t ioSize = graphArgs.inputIndices.size() + graphArgs.outputIndices.size(); + OH_NN_Memory *OHNNMemory[ioSize]; + ASSERT_EQ(OH_NN_SUCCESS, ExecutorWithMemory(executor, graphArgs, OHNNMemory, addModel.expectValue)); + for (size_t i = 0; i < graphArgs.inputIndices.size(); i++) { + OH_NNExecutor_DestroyInputMemory(executor, i, &OHNNMemory[i]); + ASSERT_EQ(OHNNMemory[i], nullptr); + } + for (size_t j = 0; j < graphArgs.outputIndices.size(); j++) { + auto outputIndex = graphArgs.inputIndices.size() + j; + // check memory output + EXPECT_TRUE(CheckOutput(static_cast(const_cast(OHNNMemory[outputIndex]->data)), + static_cast(addModel.expectValue))); + OH_NNExecutor_DestroyOutputMemory(executor, j, &OHNNMemory[outputIndex]); + ASSERT_EQ(OHNNMemory[outputIndex], nullptr); + } + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_Run_0400 + * @tc.name : 共享内存,变长模型推理测试 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_Run_0400, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + AvgPoolDynamicModel avgModel; + graphArgs = avgModel.graphArgs; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + avgModel.dynamicInput.shape = {1, 3, 3, 1}; + avgModel.output.shape = {1, 2, 2, 1}; + graphArgs.operands = {avgModel.dynamicInput, avgModel.kernel, avgModel.strides, + avgModel.padMode, avgModel.activation, avgModel.output}; + size_t ioSize = graphArgs.inputIndices.size() + graphArgs.outputIndices.size(); + OH_NN_Memory *OHNNMemory[ioSize]; + + ASSERT_EQ(OH_NN_SUCCESS, ExecutorWithMemory(executor, graphArgs, OHNNMemory, avgModel.expectValue)); + + for (size_t i = 0; i < graphArgs.inputIndices.size(); i++) { + OH_NNExecutor_DestroyInputMemory(executor, i, &OHNNMemory[i]); + ASSERT_EQ(OHNNMemory[i], nullptr); + } + for (size_t j = 0; j < graphArgs.outputIndices.size(); j++) { + auto outputIndex = graphArgs.inputIndices.size() + j; + // check memory output + EXPECT_TRUE(CheckOutput(static_cast(const_cast(OHNNMemory[outputIndex]->data)), + static_cast(avgModel.expectValue))); + OH_NNExecutor_DestroyOutputMemory(executor, j, &OHNNMemory[outputIndex]); + ASSERT_EQ(OHNNMemory[outputIndex], nullptr); + } + Free(model, compilation, executor); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/neural_network_runtime/v1_0/interface/src/ModelTest.cpp b/test/nnrt_xts_acts/neural_network_runtime/v1_0/interface/src/ModelTest.cpp new file mode 100644 index 0000000..50a1185 --- /dev/null +++ b/test/nnrt_xts_acts/neural_network_runtime/v1_0/interface/src/ModelTest.cpp @@ -0,0 +1,1024 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include +#include + +#include "nnrt_utils.h" +#include "model.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +using namespace OHOS::HDI::Nnrt::V1_0; + +namespace { + +class ModelTest : public testing::Test { +protected: + AddModel addModel; + OHNNGraphArgs graphArgs = addModel.graphArgs; + OHNNCompileParam compileParam; +}; + +void BuildAddTopKGraph(OH_NNModel *model) +{ + AddTopKModel addTopKModel; + OHNNGraphArgsMulti graphArgsMulti = addTopKModel.graphArgs; + ASSERT_EQ(OH_NN_SUCCESS, BuildMultiOpGraph(model, graphArgsMulti)); +} + +void BuildModel(OH_NNModel *model, const OHNNGraphArgs &graphArgs) +{ + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); +} + +} // namespace + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_CreateModel_0100 + * @tc.name : 创建模型实例,指针校验 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_CreateModel_0100, Function | MediumTest | Level0) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_CreateModel_0200 + * @tc.name : 创建多个模型实例,指针校验 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_CreateModel_0200, Function | MediumTest | Level2) +{ + OH_NNModel *model_first = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model_first); + + OH_NNModel *model_second = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model_second); + + OH_NNModel *model_third = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model_third); + + ASSERT_NE(model_first, model_second); + ASSERT_NE(model_first, model_third); + ASSERT_NE(model_second, model_third); + Free(model_first); + Free(model_second); + Free(model_third); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperand_0100 + * @tc.name : 添加操作数值,model为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperand_0100, Function | MediumTest | Level3) +{ + int32_t dimensions[3]{3, 2, 2}; + OH_NN_Tensor operand{OH_NN_FLOAT32, 3, dimensions, nullptr, OH_NN_TENSOR}; + OH_NN_ReturnCode ret = OH_NNModel_AddTensor(nullptr, &operand); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperand_0200 + * @tc.name : 添加操作数,operand为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperand_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + + OH_NN_ReturnCode ret = OH_NNModel_AddTensor(model, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperand_0300 + * @tc.name : 添加操作数,operand中dataType为100000 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperand_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = 
OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + + int32_t dimensions[3]{3, 2, 2}; + + OH_NN_Tensor operand{static_cast(100000), 3, dimensions, nullptr, OH_NN_TENSOR}; + OH_NN_ReturnCode ret = OH_NNModel_AddTensor(model, &operand); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperand_0400 + * @tc.name : 添加操作数,operand中type为100000 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperand_0400, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + + int32_t dimensions[3]{3, 2, 2}; + + OH_NN_Tensor operand{OH_NN_FLOAT32, 3, dimensions, nullptr, static_cast(100000)}; + OH_NN_ReturnCode ret = OH_NNModel_AddTensor(model, &operand); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SetOperandValue_0100 + * @tc.name : 设置操作数值,model为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SetOperandValue_0100, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + + int8_t activationValue{0}; + int32_t dimensions[3]{3, 2, 2}; + OH_NN_Tensor operand{OH_NN_FLOAT32, 3, dimensions, nullptr, OH_NN_TENSOR}; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensor(model, &operand)); + + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNModel_SetTensorData(nullptr, 1, (void *)&activationValue, sizeof(int8_t))); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SetOperandValue_0200 + * @tc.name : 设置操作数值,操作数不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SetOperandValue_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + + int8_t activationValue{0}; + int32_t dimensions[3]{3, 2, 2}; + OH_NN_Tensor operand{OH_NN_FLOAT32, 3, dimensions, nullptr, OH_NN_TENSOR}; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensor(model, &operand)); + + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNModel_SetTensorData(model, 1000, (void *)&activationValue, sizeof(int8_t))); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SetOperandValue_0300 + * @tc.name : 设置操作数值,buffer为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SetOperandValue_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + + int32_t dimensions[3]{3, 2, 2}; + OH_NN_Tensor operand{OH_NN_FLOAT32, 3, dimensions, nullptr, OH_NN_TENSOR}; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensor(model, &operand)); + + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1, nullptr, sizeof(int8_t))); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SetOperandValue_0400 + * @tc.name : 设置操作数值,length为0 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SetOperandValue_0400, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + + int8_t activationValue{0}; + int32_t dimensions[3]{3, 2, 2}; + OH_NN_Tensor operand{OH_NN_FLOAT32, 3, dimensions, nullptr, OH_NN_TENSOR}; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensor(model, &operand)); + + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1, (void *)&activationValue, 0)); + Free(model); +} + +/** + * @tc.number : 
SUB_AI_NNRt_Func_North_Model_AddOperation_0100 + * @tc.name : 添加算子,model为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperation_0100, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + graphArgs.addOperation = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NN_UInt32Array paramIndices{const_cast(graphArgs.paramIndices.data()), + graphArgs.paramIndices.size()}; + OH_NN_UInt32Array inputIndices{const_cast(graphArgs.inputIndices.data()), + graphArgs.inputIndices.size()}; + OH_NN_UInt32Array outputIndices{const_cast(graphArgs.outputIndices.data()), + graphArgs.outputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNModel_AddOperation(nullptr, graphArgs.operationType, ¶mIndices, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperation_0200 + * @tc.name : 添加算子,paramIndices为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperation_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + graphArgs.addOperation = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NN_UInt32Array inputIndices{const_cast(graphArgs.inputIndices.data()), + graphArgs.inputIndices.size()}; + OH_NN_UInt32Array outputIndices{const_cast(graphArgs.outputIndices.data()), + graphArgs.outputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNModel_AddOperation(model, graphArgs.operationType, nullptr, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperation_0300 + * @tc.name : 添加算子,paramIndices中data为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperation_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + graphArgs.addOperation = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NN_UInt32Array paramIndices{nullptr, graphArgs.paramIndices.size()}; + OH_NN_UInt32Array inputIndices{const_cast(graphArgs.inputIndices.data()), + graphArgs.inputIndices.size()}; + OH_NN_UInt32Array outputIndices{const_cast(graphArgs.outputIndices.data()), + graphArgs.outputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNModel_AddOperation(model, graphArgs.operationType, ¶mIndices, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperation_0400 + * @tc.name : 添加算子,paramIndices中data对应序号不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperation_0400, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + graphArgs.addOperation = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + uint32_t paramIndicesValue{10}; + OH_NN_UInt32Array paramIndices{¶mIndicesValue, graphArgs.paramIndices.size()}; + OH_NN_UInt32Array inputIndices{const_cast(graphArgs.inputIndices.data()), + graphArgs.inputIndices.size()}; + OH_NN_UInt32Array 
outputIndices{const_cast(graphArgs.outputIndices.data()), + graphArgs.outputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNModel_AddOperation(model, graphArgs.operationType, ¶mIndices, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperation_0500 + * @tc.name : 添加算子,paramIndices中size为0 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperation_0500, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + graphArgs.addOperation = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NN_UInt32Array paramIndices{const_cast(graphArgs.paramIndices.data()), 0}; + OH_NN_UInt32Array inputIndices{const_cast(graphArgs.inputIndices.data()), + graphArgs.inputIndices.size()}; + OH_NN_UInt32Array outputIndices{const_cast(graphArgs.outputIndices.data()), + graphArgs.outputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNModel_AddOperation(model, graphArgs.operationType, ¶mIndices, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperation_0600 + * @tc.name : 添加算子,inputIndices为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperation_0600, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + graphArgs.addOperation = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NN_UInt32Array paramIndices{const_cast(graphArgs.paramIndices.data()), + graphArgs.paramIndices.size()}; + OH_NN_UInt32Array outputIndices{const_cast(graphArgs.outputIndices.data()), + graphArgs.outputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNModel_AddOperation(model, graphArgs.operationType, ¶mIndices, nullptr, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperation_0700 + * @tc.name : 添加算子,inputIndices中data为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperation_0700, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + graphArgs.addOperation = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NN_UInt32Array paramIndices{const_cast(graphArgs.paramIndices.data()), + graphArgs.paramIndices.size()}; + OH_NN_UInt32Array inputIndices{nullptr, graphArgs.inputIndices.size()}; + OH_NN_UInt32Array outputIndices{const_cast(graphArgs.outputIndices.data()), + graphArgs.outputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNModel_AddOperation(model, graphArgs.operationType, ¶mIndices, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperation_0800 + * @tc.name : 添加算子,inputIndices中data对应序号不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperation_0800, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + graphArgs.addOperation = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NN_UInt32Array 
paramIndices{const_cast(graphArgs.paramIndices.data()), + graphArgs.paramIndices.size()}; + uint32_t inputIndicesValue{10}; + OH_NN_UInt32Array inputIndices{&inputIndicesValue, graphArgs.inputIndices.size()}; + OH_NN_UInt32Array outputIndices{const_cast(graphArgs.outputIndices.data()), + graphArgs.outputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNModel_AddOperation(model, graphArgs.operationType, ¶mIndices, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperation_0900 + * @tc.name : 添加算子,inputIndices中size为0 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperation_0900, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + graphArgs.addOperation = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NN_UInt32Array paramIndices{const_cast(graphArgs.paramIndices.data()), + graphArgs.paramIndices.size()}; + OH_NN_UInt32Array inputIndices{const_cast(graphArgs.inputIndices.data()), 0}; + OH_NN_UInt32Array outputIndices{const_cast(graphArgs.outputIndices.data()), + graphArgs.outputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNModel_AddOperation(model, graphArgs.operationType, ¶mIndices, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperation_1000 + * @tc.name : 添加算子,outputIndices为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperation_1000, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + graphArgs.addOperation = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NN_UInt32Array paramIndices{const_cast(graphArgs.paramIndices.data()), + graphArgs.paramIndices.size()}; + OH_NN_UInt32Array inputIndices{const_cast(graphArgs.inputIndices.data()), 0}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNModel_AddOperation(model, graphArgs.operationType, ¶mIndices, &inputIndices, nullptr)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperation_1100 + * @tc.name : 添加算子,outputIndices中data为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperation_1100, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + graphArgs.addOperation = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NN_UInt32Array paramIndices{const_cast(graphArgs.paramIndices.data()), + graphArgs.paramIndices.size()}; + OH_NN_UInt32Array inputIndices{const_cast(graphArgs.inputIndices.data()), + graphArgs.inputIndices.size()}; + OH_NN_UInt32Array outputIndices{nullptr, graphArgs.outputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNModel_AddOperation(model, graphArgs.operationType, ¶mIndices, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperation_1200 + * @tc.name : 添加算子,outputIndices中data对应序号不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperation_1200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + 
graphArgs.specifyIO = false; + graphArgs.build = false; + graphArgs.addOperation = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NN_UInt32Array paramIndices{const_cast(graphArgs.paramIndices.data()), + graphArgs.paramIndices.size()}; + OH_NN_UInt32Array inputIndices{const_cast(graphArgs.inputIndices.data()), + graphArgs.inputIndices.size()}; + uint32_t outputIndicesValue{10}; + OH_NN_UInt32Array outputIndices{&outputIndicesValue, graphArgs.outputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNModel_AddOperation(model, graphArgs.operationType, ¶mIndices, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperation_1300 + * @tc.name : 添加算子,outputIndices中size为0 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperation_1300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + graphArgs.addOperation = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NN_UInt32Array paramIndices{const_cast(graphArgs.paramIndices.data()), + graphArgs.paramIndices.size()}; + OH_NN_UInt32Array inputIndices{const_cast(graphArgs.inputIndices.data()), + graphArgs.inputIndices.size()}; + OH_NN_UInt32Array outputIndices{const_cast(graphArgs.outputIndices.data()), 0}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNModel_AddOperation(model, graphArgs.operationType, ¶mIndices, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0100 + * @tc.name : 设置输入输出,model为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0100, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NN_UInt32Array inputIndices{const_cast(graphArgs.inputIndices.data()), + graphArgs.inputIndices.size()}; + OH_NN_UInt32Array outputIndices{const_cast(graphArgs.outputIndices.data()), + graphArgs.outputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(nullptr, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0200 + * @tc.name : 设置输入输出,inputIndices为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NN_UInt32Array outputIndices{const_cast(graphArgs.outputIndices.data()), + graphArgs.outputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0300 + * @tc.name : 设置输入输出,inputIndices中data为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + 
graphArgs.build = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NN_UInt32Array inputIndices{nullptr, 2}; + OH_NN_UInt32Array outputIndices{const_cast(graphArgs.outputIndices.data()), + graphArgs.outputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0400 + * @tc.name : 设置输入输出,inputIndices中data对应序号不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0400, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + uint32_t modelInputIndicesValue{5}; + OH_NN_UInt32Array inputIndices{&modelInputIndicesValue, 1}; + + OH_NN_UInt32Array outputIndices{const_cast(graphArgs.outputIndices.data()), + graphArgs.outputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0500 + * @tc.name : 设置输入输出,inputIndices中size为0 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0500, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NN_UInt32Array inputIndices{const_cast(graphArgs.inputIndices.data()), 0}; + OH_NN_UInt32Array outputIndices{const_cast(graphArgs.outputIndices.data()), + graphArgs.outputIndices.size()}; + + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0600 + * @tc.name : 设置输入输出,outputIndices为空指针 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0600, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NN_UInt32Array inputIndices{const_cast(graphArgs.inputIndices.data()), + graphArgs.inputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0700 + * @tc.name : 设置输入输出,outputIndices中data为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0700, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NN_UInt32Array inputIndices{const_cast(graphArgs.inputIndices.data()), + graphArgs.inputIndices.size()}; + OH_NN_UInt32Array outputIndices{nullptr, 1}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : 
SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0800 + * @tc.name : 设置输入输出,outputIndices中data对应序号不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0800, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NN_UInt32Array inputIndices{const_cast(graphArgs.inputIndices.data()), + graphArgs.inputIndices.size()}; + uint32_t modelOutputIndicesValue{5}; + OH_NN_UInt32Array outputIndices{&modelOutputIndicesValue, 1}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0900 + * @tc.name : 设置输入输出,outputIndices中size为0 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0900, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NN_UInt32Array inputIndices{const_cast(graphArgs.inputIndices.data()), + graphArgs.inputIndices.size()}; + OH_NN_UInt32Array outputIndices{const_cast(graphArgs.outputIndices.data()), 0}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_Finish_0100 + * @tc.name : 模型构图,model为空指针 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_Finish_0100, Function | MediumTest | Level3) +{ + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_Finish(nullptr)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_Finish_0200 + * @tc.name : 模型构图,未添加操作数 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_Finish_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_OPERATION_FORBIDDEN, OH_NNModel_Finish(model)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_Finish_0300 + * @tc.name : 模型构图,未设置输入输出 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_Finish_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + ASSERT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_Finish_0400 + * @tc.name : 模型构图,设置输入输出,构图成功 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_Finish_0400, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_Destroy_0100 + * @tc.name : 释放模型,model为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_Destroy_0100, Function | MediumTest | Level3) +{ + OH_NNModel *model = nullptr; + ASSERT_NO_THROW(OH_NNModel_Destroy(&model)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_Destroy_0200 + * @tc.name : 释放模型,model未构图 + * @tc.desc 
: [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_Destroy_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.build = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNModel_Destroy(&model); + ASSERT_EQ(nullptr, model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0100 + * @tc.name : 查询算子支持,model为空指针 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0100, Function | MediumTest | Level3) +{ + const size_t *devicesID{nullptr}; + const bool *isSupported{nullptr}; + uint32_t opCount{0}; + uint32_t devicesCount{0}; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount)); + size_t targetDevice = devicesID[0]; + OH_NN_ReturnCode ret = OH_NNModel_GetAvailableOperations(nullptr, targetDevice, &isSupported, &opCount); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0200 + * @tc.name : 查询算子支持,deviceID不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + size_t targetDevice{100000}; + const bool *isSupported{nullptr}; + uint32_t opCount{0}; + ASSERT_EQ(OH_NN_FAILED, OH_NNModel_GetAvailableOperations(model, targetDevice, &isSupported, &opCount)); + + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0300 + * @tc.name : 查询算子支持,*isSupported为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + const size_t *devicesID{nullptr}; + uint32_t opCount{0}; + uint32_t devicesCount{0}; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount)); + size_t targetDevice = devicesID[0]; + + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_GetAvailableOperations(model, targetDevice, nullptr, &opCount)); + + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0400 + * @tc.name : 查询算子支持,**isSupported非nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0400, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + const size_t *devicesID{nullptr}; + const bool isSupported = true; + const bool *realSupported = &isSupported; + uint32_t opCount{0}; + uint32_t devicesCount{0}; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount)); + size_t targetDevice = devicesID[0]; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_GetAvailableOperations(model, targetDevice, + &realSupported, &opCount)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0500 + * @tc.name : 查询算子支持,*opCount为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0500, Function | MediumTest | Level3) +{ + OH_NNModel *model = 
OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    const size_t *devicesID{nullptr};
+    const bool *isSupported{nullptr};
+    uint32_t devicesCount{0};
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount));
+    size_t targetDevice = devicesID[0];
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_GetAvailableOperations(model, targetDevice,
+        &isSupported, nullptr));
+    Free(model);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0600
+ * @tc.name : query operator support, model graph not finished
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0600, Function | MediumTest | Level3)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    graphArgs.build = false;
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    const size_t *devicesID{nullptr};
+    const bool *isSupported{nullptr};
+    uint32_t opCount{0};
+    uint32_t devicesCount{0};
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount));
+    size_t targetDevice = devicesID[0];
+    ASSERT_EQ(OH_NN_OPERATION_FORBIDDEN, OH_NNModel_GetAvailableOperations(model, targetDevice,
+        &isSupported, &opCount));
+    Free(model);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0700
+ * @tc.name : query operator support, all operators supported
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0700, Function | MediumTest | Level1)
+{
+    OHOS::sptr<V1_0::MockIDevice> device = V1_0::MockIDevice::GetInstance();
+    std::vector<bool> isSupported{true, true};
+    device->SetOperationsSupported(isSupported);
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    BuildAddTopKGraph(model);
+
+    const size_t *devicesID{nullptr};
+    const bool *realSupported{nullptr};
+    uint32_t opCount;
+    uint32_t devicesCount;
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount));
+
+    uint32_t number = 1;
+    EXPECT_GE(devicesCount, number);
+    const char *name = nullptr;
+    std::string m_deviceName{"Device-CPU_TestVendor_v1_0"};
+    OH_NN_ReturnCode ret = OH_NN_FAILED;
+    bool isHaveDevice = false;
+    uint32_t deviceId = 0;
+    for (uint32_t i = 0; i < devicesCount; i++) {
+        name = nullptr;
+        ret = OH_NNDevice_GetName(devicesID[i], &name);
+        EXPECT_EQ(OH_NN_SUCCESS, ret);
+        std::string sName(name);
+        if (m_deviceName == sName) {
+            isHaveDevice = true;
+            deviceId = i;
+        }
+    }
+    ASSERT_EQ(isHaveDevice, true);
+    size_t targetDevice = devicesID[deviceId];
+
+    ret = OH_NNModel_GetAvailableOperations(model, targetDevice, &realSupported, &opCount);
+    ASSERT_EQ(OH_NN_SUCCESS, ret);
+    for (uint32_t i = 0; i < opCount; i++) {
+        EXPECT_EQ(realSupported[i], isSupported[i]);
+    }
+    Free(model);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0800
+ * @tc.name : query operator support, operators partially supported
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0800, Function | MediumTest | Level2)
+{
+    OHOS::sptr<V1_0::MockIDevice> device = V1_0::MockIDevice::GetInstance();
+    std::vector<bool> isSupported{true, false};
+    device->SetOperationsSupported(isSupported);
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    BuildAddTopKGraph(model);
+
+    const size_t *devicesID{nullptr};
+    const bool *realSupported{nullptr};
+    uint32_t opCount;
+    uint32_t devicesCount;
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount));
+    size_t 
targetDevice = devicesID[0]; + + OH_NN_ReturnCode ret = OH_NNModel_GetAvailableOperations(model, targetDevice, &realSupported, &opCount); + ASSERT_EQ(OH_NN_SUCCESS, ret); + for (uint32_t i = 0; i < opCount; i++) { + EXPECT_EQ(realSupported[i], isSupported[i]); + } + Free(model); + device->SetOperationsSupported({true}); +} + +/** + * @tc.number : SUB_AI_NNR_Func_North_Model_Combine_0100 + * @tc.name : 不同model,多线程并发在线构图,构图成功 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNR_Func_North_Model_Combine_0100, Function | MediumTest | Level2) +{ + OH_NNModel *model1 = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model1); + OH_NNModel *model2 = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model2); + std::thread th1(BuildModel, model1, graphArgs); + std::thread th2(BuildModel, model2, graphArgs); + th1.join(); + th2.join(); + Free(model1); + Free(model2); +} + +/** + * @tc.number : SUB_AI_NNR_Func_North_Model_Combine_0200 + * @tc.name : 多模型构图,模型构图过程中释放其他模型 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNR_Func_North_Model_Combine_0200, Function | MediumTest | Level1) +{ + OH_NNModel *model1 = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model1); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model1, graphArgs)); + + OH_NNModel *model2 = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model2); + + std::thread th1(BuildModel, model2, graphArgs); + std::thread th2(OH_NNModel_Destroy, &model1); + th1.join(); + th2.join(); + ASSERT_EQ(nullptr, model1); + Free(model2); +} diff --git a/test/nnrt_xts_acts/neural_network_runtime/v1_0/stability/BUILD.gn b/test/nnrt_xts_acts/neural_network_runtime/v1_0/stability/BUILD.gn new file mode 100644 index 0000000..9cbf96f --- /dev/null +++ b/test/nnrt_xts_acts/neural_network_runtime/v1_0/stability/BUILD.gn @@ -0,0 +1,49 @@ +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
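For orientation, every ModelTest case above funnels through the BuildSingleOpGraph helper from nnrt_utils.cpp, which is not part of this hunk. The sketch below shows the equivalent public-API call order for a single Add operation; the shapes, tensor indices, helper name and the activation value are illustrative assumptions, not the helper's exact code, and intermediate return codes are left unchecked for brevity.

#include "neural_network_runtime/neural_network_runtime.h"

// Sketch only: build one Add operation with two inputs, one fused-activation
// parameter and one output, then finish the graph.
OH_NN_ReturnCode BuildAddGraphSketch(OH_NNModel *model)
{
    int32_t dims[4] = {1, 2, 2, 1};
    OH_NN_Tensor input0 = {OH_NN_FLOAT32, 4, dims, nullptr, OH_NN_TENSOR};
    OH_NN_Tensor input1 = {OH_NN_FLOAT32, 4, dims, nullptr, OH_NN_TENSOR};
    OH_NN_Tensor activation = {OH_NN_INT8, 0, nullptr, nullptr, OH_NN_ADD_ACTIVATIONTYPE};
    OH_NN_Tensor output = {OH_NN_FLOAT32, 4, dims, nullptr, OH_NN_TENSOR};

    // Tensors get indices in insertion order: 0, 1 (inputs), 2 (parameter), 3 (output).
    OH_NNModel_AddTensor(model, &input0);
    OH_NNModel_AddTensor(model, &input1);
    OH_NNModel_AddTensor(model, &activation);
    OH_NNModel_AddTensor(model, &output);

    int8_t activationValue = 0;  // 0 means no fused activation
    OH_NNModel_SetTensorData(model, 2, &activationValue, sizeof(int8_t));

    uint32_t inputIdx[2] = {0, 1};
    uint32_t paramIdx[1] = {2};
    uint32_t outputIdx[1] = {3};
    OH_NN_UInt32Array inputIndices = {inputIdx, 2};
    OH_NN_UInt32Array paramIndices = {paramIdx, 1};
    OH_NN_UInt32Array outputIndices = {outputIdx, 1};
    OH_NNModel_AddOperation(model, OH_NN_OPS_ADD, &paramIndices, &inputIndices, &outputIndices);
    OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices);
    return OH_NNModel_Finish(model);
}

The negative ModelTest cases break one step of this sequence at a time (null model, out-of-range index, empty index array, skipped SpecifyInputsAndOutputs) and assert the documented error code.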
+ +import("//build/test.gni") + +config("nnrt_config") { + include_dirs = [ "../common" ] + cflags = [ "-Wno-error" ] + cflags_cc = [ "-fexceptions" ] +} + +ohos_systemtest("ActsAiNnrtStabilityV1_0Test") { + module_out_path = "neural_network_runtime/neural_network_runtime" + sources = [ + "../common/mock_idevice.cpp", + "../common/nnrt_utils.cpp", + "src/MultiThreadTest.cpp", + ] + + configs = [ ":nnrt_config" ] + + external_deps = [ + "c_utils:utils", + "drivers_interface_nnrt:libnnrt_proxy_1.0", + "googletest:gmock_main", + "googletest:gtest_main", + "hdf_core:libhdf_utils", + "hdf_core:libhdi", + "hilog:libhilog", + "hitrace:libhitracechain", + "ipc:ipc_single", + "mindspore:mindir_lib", + "neural_network_runtime:libneural_network_core", + "neural_network_runtime:libneural_network_runtime", + ] + + subsystem_name = "ai" + part_name = "neural_network_runtime" +} diff --git a/test/nnrt_xts_acts/neural_network_runtime/v1_0/stability/src/MultiThreadTest.cpp b/test/nnrt_xts_acts/neural_network_runtime/v1_0/stability/src/MultiThreadTest.cpp new file mode 100644 index 0000000..810f5e0 --- /dev/null +++ b/test/nnrt_xts_acts/neural_network_runtime/v1_0/stability/src/MultiThreadTest.cpp @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include +#include + +#include "neural_network_runtime/neural_network_runtime.h" +#include "nnrt_utils.h" +#include "model.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +using namespace OHOS::NeuralNetworkRuntime::Test; +using namespace OHOS::HDI::Nnrt::V1_0; + +class MultiThreadTest : public testing::Test { +public: + void SetUp() + { + } + void TearDown() + { + } + +protected: + OHNNCompileParam compileParam; + AddModel addModel; + OHNNGraphArgs graphArgs = addModel.graphArgs; +}; + + +/** + * @tc.number : SUB_AI_NNR_Reliability_North_Stress_0100 + * @tc.name : 模型编译并发长稳测试 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MultiThreadTest, SUB_AI_NNR_Reliability_North_Stress_0100, Reliability | MediumTest | Level2) +{ + for (int i = 0; i < STRESS_COUNT; i++) { + OH_NNModel *model1 = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model1); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model1, graphArgs)); + + OH_NNCompilation *compilation1 = OH_NNCompilation_Construct(model1); + ASSERT_NE(nullptr, compilation1); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation1, compileParam)); + + Free(model1, compilation1); + if (i % PRINT_FREQ == 0) { + printf("[NnrtTest] SUB_AI_NNR_Reliability_North_Stress_0100 times: %d/%d\n", i, STRESS_COUNT); + } + } +} + +/** + * @tc.number : SUB_AI_NNR_Reliability_North_Stress_0200 + * @tc.name : 模型推理并发长稳测试 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MultiThreadTest, SUB_AI_NNR_Reliability_North_Stress_0200, Reliability | MediumTest | Level2) +{ + OH_NNModel *model1 = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model1); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model1, graphArgs)); + + OH_NNCompilation *compilation1 = OH_NNCompilation_Construct(model1); + ASSERT_NE(nullptr, compilation1); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation1, compileParam)); + + for (int i = 0; i < STRESS_COUNT; i++) { + OH_NNExecutor *executor1 = OH_NNExecutor_Construct(compilation1); + ASSERT_NE(nullptr, executor1); + ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor1, graphArgs, nullptr)); + OH_NNExecutor_Destroy(&executor1); + ASSERT_EQ(nullptr, executor1); + if (i % PRINT_FREQ == 0) { + printf("[NnrtTest] SUB_AI_NNR_Reliability_North_Stress_0200 times: %d/%d\n", i, STRESS_COUNT); + } + } + Free(model1, compilation1); +} diff --git a/test/nnrt_xts_acts/neural_network_runtime/v2_0/BUILD.gn b/test/nnrt_xts_acts/neural_network_runtime/v2_0/BUILD.gn new file mode 100644 index 0000000..d2f3e72 --- /dev/null +++ b/test/nnrt_xts_acts/neural_network_runtime/v2_0/BUILD.gn @@ -0,0 +1,20 @@ +# Copyright (c) 2021-2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
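The stress cases above drive compilation and execution through the CompileGraphMock and ExecuteGraphMock helpers. The shared-memory variant of that execution path, exercised API by API in the MemoryTest cases earlier in this patch, boils down to the pattern sketched below; the single input and output at index 0, the function name and the buffer lengths are assumptions for illustration, and error checks are reduced to a minimum.

#include "neural_network_runtime/neural_network_runtime.h"
#include "securec.h"

// Sketch only: run one inference through executor-owned shared memory.
OH_NN_ReturnCode RunWithSharedMemorySketch(OH_NNExecutor *executor, const OH_NN_Tensor *inputTensor,
    const float *inputData, size_t inputLength, size_t outputLength)
{
    OH_NN_Memory *inputMemory = OH_NNExecutor_AllocateInputMemory(executor, 0, inputLength);
    OH_NN_Memory *outputMemory = OH_NNExecutor_AllocateOutputMemory(executor, 0, outputLength);
    if (inputMemory == nullptr || outputMemory == nullptr) {
        return OH_NN_FAILED;
    }

    // Bind the shared buffers to the executor, copy the input data in, then run.
    OH_NNExecutor_SetInputWithMemory(executor, 0, inputTensor, inputMemory);
    OH_NNExecutor_SetOutputWithMemory(executor, 0, outputMemory);
    memcpy_s(inputMemory->data, inputLength, inputData, inputLength);
    OH_NN_ReturnCode ret = OH_NNExecutor_Run(executor);

    // Results are read from outputMemory->data before the buffers are destroyed.
    OH_NNExecutor_DestroyInputMemory(executor, 0, &inputMemory);
    OH_NNExecutor_DestroyOutputMemory(executor, 0, &outputMemory);
    return ret;
}

Destroying an OH_NN_Memory object also nulls the caller's pointer, which is exactly what the ASSERT_EQ(nullptr, ...) checks after each Destroy call in MemoryTest rely on.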
+ +group("neural_network_runtime") { + testonly = true + deps = [ + "interface:ActsAiNnrtFunctionV2_0Test", + "stability:ActsAiNnrtStabilityV2_0Test", + ] +} diff --git a/test/nnrt_xts_acts/neural_network_runtime/v2_0/common/const.h b/test/nnrt_xts_acts/neural_network_runtime/v2_0/common/const.h new file mode 100644 index 0000000..8a240bd --- /dev/null +++ b/test/nnrt_xts_acts/neural_network_runtime/v2_0/common/const.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef CONST_H +#define CONST_H + +#include +#include + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Test { + +const uint32_t ADD_DATA_LENGTH = 4 * sizeof(float); +const uint32_t AVG_INPUT_LENGTH = 9 * sizeof(float); +const std::vector TENSOR_SHAPE = {1, 2, 2, 1}; +const std::vector PARAM_INDEX = {2}; +const std::vector INPUT_INDEX = {0, 1}; +const std::vector OUTPUT_INDEX = {3}; +const int32_t ELEMENT_COUNT = 4; + +const std::string CACHE_DIR = "./cache"; +const std::string CACHE_PATH = CACHE_DIR + "/0.nncache"; +const std::string CACHE_INFO_PATH = CACHE_DIR + "/cache_info.nncache"; +const uint32_t NO_DEVICE_COUNT = 0; +const int STRESS_COUNT = 100000; +const int PRINT_FREQ = 500; + +} // namespace Test +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // CONST_H \ No newline at end of file diff --git a/test/nnrt_xts_acts/neural_network_runtime/v2_0/common/mock_idevice.cpp b/test/nnrt_xts_acts/neural_network_runtime/v2_0/common/mock_idevice.cpp new file mode 100644 index 0000000..536c6ef --- /dev/null +++ b/test/nnrt_xts_acts/neural_network_runtime/v2_0/common/mock_idevice.cpp @@ -0,0 +1,369 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include + +#include "const.h" +#include "mock_idevice.h" +#include "hdi_device_v2_0.h" +#include "hdi_returncode_utils.h" +#include "log.h" +#include "utils.h" +#include "nnbackend.h" +#include "backend_registrar.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +void PrintRetLog(int32_t ret, int32_t nnrtSuccess, const std::string& makeName) +{ + if (ret < nnrtSuccess) { + LOGW("%s failed. An error occurred in HDI, errorcode is %{public}d.", makeName.c_str(), ret); + } else { + OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode nnrtRet = static_cast(ret); + LOGW("%s failed. 
Errorcode is %{public}s.", makeName.c_str(), ConverterRetToString(nnrtRet).c_str());
+    }
+}
+
+std::shared_ptr<Backend> HDIDeviceV2_0Creator()
+{
+    std::string deviceName;
+    std::string vendorName;
+    std::string version;
+
+    // only one device from HDI now.
+    OHOS::sptr<V2_0::INnrtDevice> iDevice = V2_0::INnrtDevice::Get();
+    if (iDevice == nullptr) {
+        LOGW("Get HDI device failed.");
+        return nullptr;
+    }
+
+    auto ret = iDevice->GetDeviceName(deviceName);
+    int32_t nnrtSuccess = static_cast<int32_t>(V2_0::NNRT_ReturnCode::NNRT_SUCCESS);
+    if (ret != nnrtSuccess) {
+        std::string makeName = "Get device name";
+        PrintRetLog(ret, nnrtSuccess, makeName);
+        return nullptr;
+    }
+
+    ret = iDevice->GetVendorName(vendorName);
+    if (ret != nnrtSuccess) {
+        std::string makeName = "Get vendor name";
+        PrintRetLog(ret, nnrtSuccess, makeName);
+        return nullptr;
+    }
+
+    std::pair<uint32_t, uint32_t> hdiVersion;
+    ret = iDevice->GetVersion(hdiVersion.first, hdiVersion.second);
+    if (ret != nnrtSuccess) {
+        std::string makeName = "Get version";
+        PrintRetLog(ret, nnrtSuccess, makeName);
+        return nullptr;
+    }
+    version = 'v' + std::to_string(hdiVersion.first) + '_' + std::to_string(hdiVersion.second);
+    const std::string& backendName = GenUniqueName(deviceName, vendorName, version);
+
+    std::shared_ptr<HDIDeviceV2_0> device = CreateSharedPtr<HDIDeviceV2_0>(iDevice);
+    if (device == nullptr) {
+        LOGW("Failed to create device, because fail to create device instance.");
+        return nullptr;
+    }
+
+    std::shared_ptr<NNBackend> backend =
+        std::make_shared<NNBackend>(device, std::hash<std::string>{}(backendName));
+    if (backend == nullptr) {
+        LOGW("Failed to register backend, because fail to create backend.");
+    }
+    return backend;
+}
+
+REGISTER_BACKEND(HDIDeviceV2_0, HDIDeviceV2_0Creator)
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
+
+namespace OHOS {
+namespace HDI {
+namespace Nnrt {
+namespace V2_0 {
+
+const uint32_t MAJOR_VERSION = 2;
+const uint32_t MINOR_VERSION = 0;
+
+sptr<INnrtDevice> INnrtDevice::Get(bool isStub)
+{
+    return INnrtDevice::Get("mock_device_service", isStub);
+}
+
+sptr<INnrtDevice> INnrtDevice::Get(const std::string &serviceName, bool isStub)
+{
+    if (isStub) {
+        return nullptr;
+    }
+    sptr<INnrtDevice> mockIDevice = sptr<INnrtDevice>(MockIDevice::GetInstance());
+    return mockIDevice;
+}
+
+MockIDevice::~MockIDevice()
+{
+    for (auto fd : m_fds) {
+        close(fd);
+    }
+}
+
+MockIDevice::MockIDevice()
+{
+    m_bufferFd = 0;
+}
+
+MockIPreparedModel::~MockIPreparedModel()
+{
+    for (auto fd : m_fds) {
+        close(fd);
+    }
+}
+
+MockIDevice *MockIDevice::GetInstance()
+{
+    static MockIDevice iDevice;
+    return &iDevice;
+}
+
+void MockIDevice::SetFP16Supported(bool isSupported)
+{
+    m_fp16 = isSupported;
+}
+
+void MockIDevice::SetPerformanceSupported(bool isSupported)
+{
+    m_performance = isSupported;
+}
+
+void MockIDevice::SetPrioritySupported(bool isSupported)
+{
+    m_priority = isSupported;
+}
+
+void MockIDevice::SetModelCacheSupported(bool isSupported)
+{
+    m_cache = isSupported;
+}
+
+void MockIDevice::SetOperationsSupported(std::vector<bool> isSupported)
+{
+    m_operations = isSupported;
+}
+
+void MockIDevice::SetDynamicInputSupported(bool isSupported)
+{
+    m_dynamic = isSupported;
+}
+
+int32_t MockIDevice::GetDeviceName(std::string& name)
+{
+    name = "Device-CPU";
+    return HDF_SUCCESS;
+}
+
+int32_t MockIDevice::GetVendorName(std::string& name)
+{
+    name = "TestVendor";
+    return HDF_SUCCESS;
+}
+
+int32_t MockIDevice::GetDeviceType(DeviceType& deviceType)
+{
+    deviceType = DeviceType::CPU;
+    return HDF_SUCCESS;
+}
+
+int32_t MockIDevice::GetDeviceStatus(DeviceStatus& status)
+{
+    status = DeviceStatus::AVAILABLE;
+    return HDF_SUCCESS;
+}
+
+int32_t MockIDevice::GetVersion(uint32_t &majorVersion, uint32_t &minorVersion)
+{
+    majorVersion = MAJOR_VERSION;
+    minorVersion = MINOR_VERSION;
+    return HDF_SUCCESS;
+}
+
+int32_t MockIDevice::GetSupportedOperation(const Model& model, std::vector<bool>& ops)
+{
+    ops = m_operations;
+    return HDF_SUCCESS;
+}
+
+int32_t MockIDevice::IsFloat16PrecisionSupported(bool& isSupported)
+{
+    isSupported = m_fp16;
+    return HDF_SUCCESS;
+}
+
+int32_t MockIDevice::IsPerformanceModeSupported(bool& isSupported)
+{
+    isSupported = m_performance;
+    return HDF_SUCCESS;
+}
+
+int32_t MockIDevice::IsPrioritySupported(bool& isSupported)
+{
+    isSupported = m_priority;
+    return HDF_SUCCESS;
+}
+
+int32_t MockIDevice::IsDynamicInputSupported(bool& isSupported)
+{
+    isSupported = m_dynamic;
+    return HDF_SUCCESS;
+}
+
+int32_t MockIDevice::IsModelCacheSupported(bool& isSupported)
+{
+    isSupported = m_cache;
+    return HDF_SUCCESS;
+}
+
+int32_t MockIDevice::AllocateBuffer(uint32_t length, SharedBuffer &buffer)
+{
+    std::lock_guard<std::mutex> lock(m_mtx);
+    buffer.fd = AshmemCreate("allocateBuffer", length);
+    buffer.bufferSize = AshmemGetSize(buffer.fd);
+    buffer.offset = 0;
+    buffer.dataSize = length;
+
+    AshmemSetProt(buffer.fd, PROT_READ | PROT_WRITE);
+    m_fds.emplace(buffer.fd);
+    m_bufferFd = buffer.fd;
+    return HDF_SUCCESS;
+}
+
+int32_t MockIDevice::ReleaseBuffer(const SharedBuffer &buffer)
+{
+    if (m_fds.find(buffer.fd) == m_fds.end()) {
+        LOGE("ReleaseBuffer:buffer fd is invalid. fd = %d", buffer.fd);
+        return HDF_FAILURE;
+    }
+    if (close(buffer.fd) != 0) {
+        LOGE("ReleaseBuffer:Close buffer fd failed. fd = %d", buffer.fd);
+        return HDF_FAILURE;
+    }
+    return HDF_SUCCESS;
+}
+
+int32_t MockIDevice::MemoryCopy(float* data, uint32_t length)
+{
+    std::lock_guard<std::mutex> lock(m_mtx);
+    void* mapData = mmap(nullptr, length, PROT_READ | PROT_WRITE, MAP_SHARED, m_bufferFd, 0);
+    if (mapData == MAP_FAILED) {
+        LOGE("[Mock_Device]::MemoryCopy failed, Map fd to address failed: %{public}s.", strerror(errno));
+        return HDF_FAILURE;
+    }
+
+    auto memRet = memcpy_s(mapData, length, data, length);
+    auto unmapResult = munmap(mapData, length);
+    if (unmapResult != 0) {
Please try again."); + return HDF_FAILURE; + } + if (memRet != EOK) { + LOGE("[Mock_Device]ExportModelCache failed, failed to memcpy_s data type."); + return HDF_FAILURE; + } + return HDF_SUCCESS; +} + +int32_t MockIDevice::PrepareModel(const Model& model, const ModelConfig& config, sptr& preparedModel) +{ + preparedModel = new (std::nothrow) V2_0::MockIPreparedModel(); + return HDF_SUCCESS; +} + +int32_t MockIDevice::PrepareOfflineModel(const std::vector& offlineModels, const ModelConfig& config, + sptr& preparedModel) +{ + preparedModel = new (std::nothrow) V2_0::MockIPreparedModel(); + return V2_0::NNRT_ReturnCode::NNRT_SUCCESS; +} + +int32_t MockIDevice::PrepareModelFromModelCache(const std::vector& modelCache, const ModelConfig& config, + sptr& preparedModel) +{ + preparedModel = new (std::nothrow) V2_0::MockIPreparedModel(); + return HDF_SUCCESS; +} + +int32_t MockIPreparedModel::ExportModelCache(std::vector& modelCache) +{ + if (!modelCache.empty()) { + LOGE("[NNRtTest] The parameters of ExportModelCache should be an empty vector."); + return HDF_ERR_INVALID_PARAM; + } + uint8_t bufferData[4] = {0, 1, 2, 3}; + uint32_t size = sizeof(bufferData); + SharedBuffer buffer; + buffer.fd = AshmemCreate("cache", size); + buffer.bufferSize = AshmemGetSize(buffer.fd); + buffer.offset = 0; + buffer.dataSize = size; + AshmemSetProt(buffer.fd, PROT_READ | PROT_WRITE); + + void* data = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, buffer.fd, 0); + if (data == MAP_FAILED) { + LOGE("[Mock_Device]::ExportModelCache failed, Map fd to address failed: %{public}s.", strerror(errno)); + return HDF_FAILURE; + } + + auto memRet = memcpy_s(data, size, bufferData, size); + auto unmapResult = munmap(data, size); + if (unmapResult != 0) { + LOGE("[Mock_Device]ExportModelCache failed . Please try again."); + return HDF_FAILURE; + } + if (memRet != EOK) { + LOGE("[Mock_Device]ExportModelCache failed, failed to memcpy_s data type."); + return HDF_FAILURE; + } + m_fds.emplace(buffer.fd); + modelCache.emplace_back(buffer); + return HDF_SUCCESS; +} + +int32_t MockIPreparedModel::GetVersion(uint32_t &majorVersion, uint32_t &minorVersion) +{ + majorVersion = 1; + minorVersion = 0; + return HDF_SUCCESS; +} + +int32_t MockIPreparedModel::Run(const std::vector& inputs, const std::vector& outputs, + std::vector>& outputsDims) +{ + outputsDims = {{1, 2, 2, 1}}; + return HDF_SUCCESS; +} + +int32_t MockIPreparedModel::GetInputDimRanges( + std::vector>& minInputDims, std::vector>& maxInputDims) +{ + minInputDims = {{1, 1, 1, 1}, {1, 1, 1, 1}}; + maxInputDims = {{1, 100, 100, 10}, {1, 100, 100, 10}}; + + return HDF_SUCCESS; +} + +} // namespace V2_0 +} // namespace Nnrt +} // namespace HDI +} // namespace OHOS diff --git a/test/nnrt_xts_acts/neural_network_runtime/v2_0/common/mock_idevice.h b/test/nnrt_xts_acts/neural_network_runtime/v2_0/common/mock_idevice.h new file mode 100644 index 0000000..c0a51f5 --- /dev/null +++ b/test/nnrt_xts_acts/neural_network_runtime/v2_0/common/mock_idevice.h @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MOCK_IDEVICE_H +#define MOCK_IDEVICE_H + +#include +#include +#include +#include +#include + +#include +#include +#include "mindir_lite_graph.h" +#include "mindir.h" + +#include "securec.h" +#include "refbase.h" +#include "log.h" +#include "ashmem.h" + +#include +#include +#include + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V2_0 { + +class MockIDevice : public INnrtDevice { +public: + int32_t GetSupportedOperation(const Model& model, std::vector& ops) override; + + int32_t IsFloat16PrecisionSupported(bool& isSupported) override; + + int32_t IsPerformanceModeSupported(bool& isSupported) override; + + int32_t IsPrioritySupported(bool& isSupported) override; + + int32_t IsDynamicInputSupported(bool& isSupported) override; + + int32_t IsModelCacheSupported(bool& isSupported) override; + + int32_t AllocateBuffer(uint32_t length, SharedBuffer &buffer) override; + + int32_t ReleaseBuffer(const SharedBuffer &buffer) override; + + int32_t GetDeviceName(std::string& name) override; + + int32_t GetVendorName(std::string& name) override; + + int32_t GetDeviceType(DeviceType& deviceType) override; + + int32_t GetDeviceStatus(DeviceStatus& status) override; + + int32_t GetVersion(uint32_t &majorVersion, uint32_t &minorVersion) override; + + int32_t PrepareModel(const Model& model, const ModelConfig& config, sptr& preparedModel) override; + + int32_t PrepareOfflineModel(const std::vector& offlineModels, const ModelConfig& config, + sptr& preparedModel) override; + + int32_t PrepareModelFromModelCache(const std::vector& modelCache, const ModelConfig& config, + sptr& preparedModel) override; + + int32_t MemoryCopy(float* data, uint32_t length); + + void SetFP16Supported(bool isSupported); + + void SetPerformanceSupported(bool isSupported); + + void SetPrioritySupported(bool isSupported); + + void SetModelCacheSupported(bool isSupported); + + void SetOperationsSupported(std::vector isSupported); + + void SetDynamicInputSupported(bool isSupported); + + static MockIDevice *GetInstance(); + + MockIDevice(); + virtual ~MockIDevice(); + +private: + std::unordered_set m_fds; + int m_bufferFd; + bool m_fp16 = true; + bool m_performance = true; + bool m_priority = true; + bool m_cache = true; + bool m_dynamic = true; + std::vector m_operations{true}; + std::mutex m_mtx; +}; + +class MockIPreparedModel : public IPreparedModel { +public: + int32_t ExportModelCache(std::vector& modelCache) override; + int32_t Run(const std::vector& inputs, const std::vector& outputs, + std::vector>& outputsDims) override; + int32_t GetInputDimRanges( + std::vector>& minInputDims, std::vector>& maxInputDims) override; + int32_t GetVersion(uint32_t &majorVersion, uint32_t &minorVersion) override; + MockIPreparedModel() = default; + virtual ~MockIPreparedModel(); +private: + std::unordered_set m_fds; +}; + +} // namespace V2_0 +} // namespace Nnrt +} // namespace HDI +} // namespace OHOS +#endif // MOCK_IDEVICE_H diff --git a/test/nnrt_xts_acts/neural_network_runtime/v2_0/common/model.h b/test/nnrt_xts_acts/neural_network_runtime/v2_0/common/model.h new file mode 100644 index 0000000..f575697 --- /dev/null +++ b/test/nnrt_xts_acts/neural_network_runtime/v2_0/common/model.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MODEL_H +#define MODEL_H + +#include "const.h" +#include "nnrt_utils.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Test { + +struct AddModel { + // ADD MODEL + float inputValue0[4] = {0, 1, 2, 3}; + float inputValue1[4] = {0, 1, 2, 3}; + int8_t activationValue = OH_NN_FUSED_NONE; + float outputValue[4] = {0}; + float expectValue[4] = {0, 2, 4, 6}; + + OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, TENSOR_SHAPE, inputValue0, ADD_DATA_LENGTH}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, TENSOR_SHAPE, inputValue1, ADD_DATA_LENGTH}; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, TENSOR_SHAPE, outputValue, ADD_DATA_LENGTH}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_ADD, + .operands = {input0, input1, activation, output}, + .paramIndices = {2}, + .inputIndices = {0, 1}, + .outputIndices = {3}}; +}; + +struct AvgPoolDynamicModel { + // AVG POOL MODEL + float inputValue[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + uint64_t kernelValue[2] = {2, 2}; + uint64_t strideValue[2] = {1, 1}; + int8_t padValue = 1; + int8_t activationValue = OH_NN_FUSED_NONE; + float outputValue[4] = {0}; + float expectValue[4] = {2, 3, 5, 6}; + + OHNNOperandTest dynamicInput = {OH_NN_FLOAT32, OH_NN_TENSOR, {-1, -1, -1, -1}, inputValue, AVG_INPUT_LENGTH}; + OHNNOperandTest kernel = {OH_NN_INT64, OH_NN_AVG_POOL_KERNEL_SIZE, {2}, kernelValue, sizeof(kernelValue)}; + OHNNOperandTest strides = {OH_NN_INT64, OH_NN_AVG_POOL_STRIDE, {2}, strideValue, sizeof(strideValue)}; + OHNNOperandTest padMode = {OH_NN_INT8, OH_NN_AVG_POOL_PAD_MODE, {}, &padValue, sizeof(padValue)}; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_AVG_POOL_ACTIVATION_TYPE, {}, &activationValue, sizeof(int8_t)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, {-1, -1, -1, -1}, outputValue, sizeof(outputValue)}; + + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_AVG_POOL, + .operands = {dynamicInput, kernel, strides, padMode, activation, output}, + .paramIndices = {1, 2, 3, 4}, + .inputIndices = {0}, + .outputIndices = {5}}; +}; + +struct TopKModel { + // TopK Model + float valueX[6] = {0, 1, 2, 3, 4, 5}; + int8_t valueK = 2; + bool valueSorted = true; + float valueOutput1[2]; + int32_t valueOutput2[2]; + + OHNNOperandTest x = {OH_NN_FLOAT32, OH_NN_TENSOR, {1, 6}, valueX, 6 * sizeof(float)}; + OHNNOperandTest k = {OH_NN_INT8, OH_NN_TENSOR, {}, &valueK, sizeof(int8_t)}; + OHNNOperandTest sorted = {OH_NN_BOOL, OH_NN_TOP_K_SORTED, {}, &valueSorted, sizeof(bool)}; + OHNNOperandTest output1 = {OH_NN_FLOAT32, OH_NN_TENSOR, {1, 2}, valueOutput1, 2 * sizeof(float)}; + OHNNOperandTest output2 = {OH_NN_INT32, OH_NN_TENSOR, {1, 2}, valueOutput2, 2 * sizeof(int32_t)}; + + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_TOP_K, + .operands = {x, k, sorted, output1, output2}, + .paramIndices = {2}, + .inputIndices = {0, 1}, + .outputIndices = {3, 4}}; +}; + +class AddTopKModel { + // Build two ops Model +private: + AddModel addModel; + TopKModel topKModel; + 
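+    // BuildMultiOpGraph() numbers operands consecutively across both operations: the Add op
+    // contributes indices 0-3 (input0, input1, activation, output) and TopK contributes 4-7
+    // (k, sorted, output1, output2). Add's output (index 3) doubles as TopK's data input,
+    // which is why the second op's inputIndices are {3, 4}, the graph inputs are {0, 1, 4}
+    // and the graph outputs are {6, 7} in graphArgs below.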
+public: + OHNNGraphArgsMulti graphArgs = { + .operationTypes = {OH_NN_OPS_ADD, OH_NN_OPS_TOP_K}, + .operands = {{addModel.input0, addModel.input1, addModel.activation, addModel.output}, + {topKModel.k, topKModel.sorted, topKModel.output1, topKModel.output2}}, + .paramIndices = {{2}, {5}}, + .inputIndices = {{0, 1}, {3, 4}}, + .outputIndices = {{3}, {6, 7}}, + .graphInput = {0, 1, 4}, + .graphOutput = {6, 7}}; +}; + +} // namespace Test +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // MODEL_H \ No newline at end of file diff --git a/test/nnrt_xts_acts/neural_network_runtime/v2_0/common/nnrt_utils.cpp b/test/nnrt_xts_acts/neural_network_runtime/v2_0/common/nnrt_utils.cpp new file mode 100644 index 0000000..4762ef4 --- /dev/null +++ b/test/nnrt_xts_acts/neural_network_runtime/v2_0/common/nnrt_utils.cpp @@ -0,0 +1,471 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "nnrt_utils.h" +#include "const.h" +#include + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Test { + +OH_NN_UInt32Array TransformUInt32Array(const std::vector& vector) +{ + uint32_t* data = (vector.empty()) ? nullptr : const_cast(vector.data()); + return {data, vector.size()}; +} + +int BuildMultiOpGraph(OH_NNModel *model, const OHNNGraphArgsMulti &graphArgs) +{ + int ret = 0; + int opCnt = 0; + for (int j = 0; j < graphArgs.operationTypes.size(); j++) { + for (int i = 0; i < graphArgs.operands[j].size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[j][i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t) operandTem.shape.size(), + operandTem.shape.data(), quantParam, operandTem.type}; + ret = OH_NNModel_AddTensor(model, &operand); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_AddTensor failed! ret=%d\n", ret); + return ret; + } + if (std::find(graphArgs.paramIndices[j].begin(), graphArgs.paramIndices[j].end(), opCnt) != + graphArgs.paramIndices[j].end()) { + ret = OH_NNModel_SetTensorData(model, opCnt, operandTem.data, operandTem.length); + } + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_SetTensorData failed! ret=%d\n", ret); + return ret; + } + opCnt += 1; + } + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices[j]); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices[j]); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices[j]); + + ret = OH_NNModel_AddOperation(model, graphArgs.operationTypes[j], ¶mIndices, &inputIndices, + &outputIndices); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_AddOperation failed! ret=%d\n", ret); + return ret; + } + } + auto graphInputs = TransformUInt32Array(graphArgs.graphInput); + auto graphOutputs = TransformUInt32Array(graphArgs.graphOutput); + ret = OH_NNModel_SpecifyInputsAndOutputs(model, &graphInputs, &graphOutputs); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_SpecifyInputsAndOutputs failed! 
ret=%d\n", ret); + return ret; + } + ret = OH_NNModel_Finish(model); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_Finish failed! ret=%d\n", ret); + return ret; + } + return ret; +} + +int BuildSingleOpGraph(OH_NNModel *model, const OHNNGraphArgs &graphArgs) +{ + int ret = 0; + for (int i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t) operandTem.shape.size(), + operandTem.shape.data(), quantParam, operandTem.type}; + ret = OH_NNModel_AddTensor(model, &operand); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_AddTensor failed! ret=%d\n", ret); + return ret; + } + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + ret = OH_NNModel_SetTensorData(model, i, operandTem.data, operandTem.length); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_SetTensorData failed! ret=%d\n", ret); + return ret; + } + } + } + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + if (graphArgs.addOperation) { + ret = OH_NNModel_AddOperation(model, graphArgs.operationType, ¶mIndices, &inputIndices, + &outputIndices); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_AddOperation failed! ret=%d\n", ret); + return ret; + } + } + if (graphArgs.specifyIO) { + ret = OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_SpecifyInputsAndOutputs failed! ret=%d\n", ret); + return ret; + } + } + if (graphArgs.build) { + ret = OH_NNModel_Finish(model); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_Finish failed! ret=%d\n", ret); + return ret; + } + } + return ret; +} + +OH_NN_ReturnCode SetDevice(OH_NNCompilation *compilation) +{ + OH_NN_ReturnCode ret = OH_NN_FAILED; + const size_t *devicesID{nullptr}; + uint32_t devicesCount{0}; + ret = OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNDevice_GetAllDevicesID failed! ret=%d\n", ret); + return ret; + } + if (devicesCount <= NO_DEVICE_COUNT) { + LOGE("[NNRtTest] devicesCount <= 0 devicesCount=%d\n", devicesCount); + return OH_NN_FAILED; + } + + const char *name = nullptr; + std::string m_deviceName{"Device-CPU_TestVendor_v2_0"}; + for (uint32_t i = 0; i < devicesCount; i++) { + name = nullptr; + ret = OH_NNDevice_GetName(devicesID[i], &name); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNDevice_GetName failed! ret=%d\n", ret); + return ret; + } + + std::string sName(name); + if (m_deviceName == sName) { + ret = OH_NNCompilation_SetDevice(compilation, devicesID[i]); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNCompilation_SetDevice failed! ret=%d\n", ret); + return ret; + } + return OH_NN_SUCCESS; + } + } + return OH_NN_FAILED; +} + +int CompileGraphMock(OH_NNCompilation *compilation, const OHNNCompileParam &compileParam) +{ + int ret = 0; + ret = SetDevice(compilation); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNCompilation_SetDevice failed! 
ret=%d\n", ret); + return ret; + } + // set cache + if (!compileParam.cacheDir.empty()) { + ret = OH_NNCompilation_SetCache(compilation, compileParam.cacheDir.c_str(), + compileParam.cacheVersion); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNCompilation_SetCache failed! ret=%d\n", ret); + return ret; + } + } + // set performance + if (compileParam.performanceMode != OH_NN_PERFORMANCE_NONE) { + ret = OH_NNCompilation_SetPerformanceMode(compilation, compileParam.performanceMode); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNCompilation_SetPerformanceMode failed! ret=%d\n", ret); + return ret; + } + } + // set priority + if (compileParam.priority != OH_NN_PRIORITY_NONE) { + ret = OH_NNCompilation_SetPriority(compilation, compileParam.priority); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNCompilation_SetPriority failed! ret=%d\n", ret); + return ret; + } + } + // enable fp16 + if (compileParam.enableFp16) { + ret = OH_NNCompilation_EnableFloat16(compilation, compileParam.enableFp16); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNCompilation_EnableFloat16 failed! ret=%d\n", ret); + return ret; + } + } + // build + ret = OH_NNCompilation_Build(compilation); + return ret; +} + + +int ExecuteGraphMock(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, + float* expect) +{ + OHOS::sptr device = V2_0::MockIDevice::GetInstance(); + int ret = 0; + uint32_t inputIndex = 0; + uint32_t outputIndex = 0; + for (auto i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t) operandTem.shape.size(), + operandTem.shape.data(), + quantParam, operandTem.type}; + if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) != + graphArgs.inputIndices.end()) { + ret = OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, + operandTem.length); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNExecutor_SetInput failed! ret=%d\n", ret); + return ret; + } + inputIndex += 1; + } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) != + graphArgs.outputIndices.end()) { + ret = OH_NNExecutor_SetOutput(executor, outputIndex, operandTem.data, operandTem.length); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNExecutor_SetOutput failed! ret=%d\n", ret); + return ret; + } + if (expect != nullptr) { + ret = device->MemoryCopy(expect, operandTem.length); + } + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] device set expect output failed! 
ret=%d\n", ret); + return ret; + } + outputIndex += 1; + } + } + ret = OH_NNExecutor_Run(executor); + return ret; +} + +int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, OH_NN_Memory *OHNNMemory[], + float* expect) +{ + OHOS::sptr device = V2_0::MockIDevice::GetInstance(); + int ret = 0; + uint32_t inputIndex = 0; + uint32_t outputIndex = 0; + for (auto i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t) operandTem.shape.size(), + operandTem.shape.data(), + quantParam, operandTem.type}; + if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) != + graphArgs.inputIndices.end()) { + OH_NN_Memory *inputMemory = OH_NNExecutor_AllocateInputMemory(executor, inputIndex, + operandTem.length); + ret = OH_NNExecutor_SetInputWithMemory(executor, inputIndex, &operand, inputMemory); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNExecutor_SetInputWithMemory failed! ret=%d\n", ret); + return ret; + } + memcpy_s(inputMemory->data, operandTem.length, static_cast(operandTem.data), operandTem.length); + OHNNMemory[inputIndex] = inputMemory; + inputIndex += 1; + } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) != + graphArgs.outputIndices.end()) { + OH_NN_Memory *outputMemory = OH_NNExecutor_AllocateOutputMemory(executor, outputIndex, + operandTem.length); + ret = OH_NNExecutor_SetOutputWithMemory(executor, outputIndex, outputMemory); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNExecutor_SetOutputWithMemory failed! ret=%d\n", ret); + return ret; + } + ret = device->MemoryCopy(expect, operandTem.length); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] device set expect output failed! 
ret=%d\n", ret); + return ret; + } + OHNNMemory[inputIndex + outputIndex] = outputMemory; + outputIndex += 1; + } + } + ret = OH_NNExecutor_Run(executor); + return ret; +} + + +void Free(OH_NNModel *model, OH_NNCompilation *compilation, OH_NNExecutor *executor) +{ + if (model != nullptr) { + OH_NNModel_Destroy(&model); + ASSERT_EQ(nullptr, model); + } + if (compilation != nullptr) { + OH_NNCompilation_Destroy(&compilation); + ASSERT_EQ(nullptr, compilation); + } + if (executor != nullptr) { + OH_NNExecutor_Destroy(&executor); + ASSERT_EQ(nullptr, executor); + } +} + +PathType CheckPath(const std::string &path) +{ + if (path.empty()) { + LOGI("CheckPath: path is null"); + return PathType::NOT_FOUND; + } + struct stat buf{}; + if (stat(path.c_str(), &buf) == 0) { + if (buf.st_mode & S_IFDIR) { + return PathType::DIR; + } else if (buf.st_mode & S_IFREG) { + return PathType::FILE; + } else { + return PathType::UNKNOWN; + } + } + LOGI("%s not found", path.c_str()); + return PathType::NOT_FOUND; +} + +bool DeleteFile(const std::string &path) +{ + if (path.empty()) { + LOGI("DeleteFile: path is null"); + return false; + } + if (CheckPath(path) == PathType::NOT_FOUND) { + LOGI("not found: %s", path.c_str()); + return true; + } + if (remove(path.c_str()) == 0) { + LOGI("deleted: %s", path.c_str()); + return true; + } + LOGI("delete failed: %s", path.c_str()); + return false; +} + +void CopyFile(const std::string &srcPath, const std::string &dstPath) +{ + std::ifstream src(srcPath, std::ios::binary); + std::ofstream dst(dstPath, std::ios::binary); + + dst << src.rdbuf(); +} + +std::string ConcatPath(const std::string &str1, const std::string &str2) +{ + // boundary + if (str2.empty()) { + return str1; + } + if (str1.empty()) { + return str2; + } + // concat + char end = str1[str1.size() - 1]; + if (end == '\\' or end == '/') { + return str1 + str2; + } else { + return str1 + '/' + str2; + } +} + +void DeleteFolder(const std::string &path) +{ + if (path.empty()) { + LOGI("DeletePath: path is null"); + return; + } + + DIR *dir = opendir(path.c_str()); + // check is dir ? + if (dir == nullptr) { + LOGE("[NNRtTest] Can not open dir. Check path or permission! path: %s", path.c_str()); + return; + } + struct dirent *file; + // read all the files in dir + std::vector pathList; + while ((file = readdir(dir)) != nullptr) { + // skip "." and ".." 
+ if (strcmp(file->d_name, ".") == 0 || strcmp(file->d_name, "..") == 0) { + continue; + } + if (file->d_type == DT_DIR) { + std::string filePath = path + "/" + file->d_name; + DeleteFolder(filePath); // 递归执行 + } else { + pathList.emplace_back(ConcatPath(path, file->d_name)); + } + } + closedir(dir); + pathList.emplace_back(path); + LOGI("[Common] Delete folder %s", path.c_str()); + for (auto &i : pathList) { + DeleteFile(i); + } +} + +bool CreateFolder(const std::string &path) +{ + if (path.empty()) { + LOGI("CreateFolder: path is empty"); + return false; + } + LOGI("CreateFolder:%s", path.c_str()); + mode_t mode = 0700; + for (int i = 1; i < path.size() - 1; i++) { + if (path[i] != '/') { + continue; + } + PathType ret = CheckPath(path.substr(0, i)); + switch (ret) { + case PathType::DIR: + continue; + case PathType::NOT_FOUND: + LOGI("mkdir: %s", path.substr(0, i).c_str()); + mkdir(path.substr(0, i).c_str(), mode); + break; + default: + LOGI("error: %s", path.substr(0, i).c_str()); + return false; + } + } + mkdir(path.c_str(), mode); + return CheckPath(path) == PathType::DIR; +} + +bool CheckOutput(const float* output, const float* expect) +{ + if (output == nullptr || expect == nullptr) { + LOGE("[NNRtTest] output or expect is nullptr\n"); + return false; + } + for (int i = 0; i < ELEMENT_COUNT; i++) { + if (std::abs(float(output[i]) - float(expect[i])) > 1e-8) { + for (int j = 0; j < ELEMENT_COUNT; j++) { + LOGE("[NNRtTest] output %d not match: expect:%f, actual:%f\n", j, float(expect[j]), float(output[j])); + } + return false; + } + } + return true; +} + +} // namespace Test +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/test/nnrt_xts_acts/neural_network_runtime/v2_0/common/nnrt_utils.h b/test/nnrt_xts_acts/neural_network_runtime/v2_0/common/nnrt_utils.h new file mode 100644 index 0000000..0e2ffa6 --- /dev/null +++ b/test/nnrt_xts_acts/neural_network_runtime/v2_0/common/nnrt_utils.h @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef NNRT_UTILS_H +#define NNRT_UTILS_H + +#include +#include +#include + +#include "neural_network_runtime/neural_network_runtime.h" +#include "log.h" +#include "mock_idevice.h" +#include "const.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Test { +namespace V2_0 = OHOS::HDI::Nnrt::V2_0; +struct OHNNOperandTest { + OH_NN_DataType dataType; + OH_NN_TensorType type; + std::vector shape; + void *data{nullptr}; + int32_t length{0}; + const OH_NN_QuantParam *quantParam = nullptr; +}; + +struct OHNNGraphArgs { + OH_NN_OperationType operationType; + std::vector operands; + std::vector paramIndices; + std::vector inputIndices; + std::vector outputIndices; + bool build = true; + bool specifyIO = true; + bool addOperation = true; +}; + +struct OHNNGraphArgsMulti { + std::vector operationTypes; + std::vector> operands; + std::vector> paramIndices; + std::vector> inputIndices; + std::vector> outputIndices; + std::vector graphInput; + std::vector graphOutput; +}; + +struct OHNNCompileParam { + int32_t deviceId = 0; + std::string cacheDir; + uint32_t cacheVersion = 0; + OH_NN_PerformanceMode performanceMode = OH_NN_PERFORMANCE_NONE; + OH_NN_Priority priority = OH_NN_PRIORITY_NONE; + bool enableFp16 = false; +}; + +int BuildSingleOpGraph(OH_NNModel *model, const OHNNGraphArgs &graphArgs); + +int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, OH_NN_Memory *OHNNMemory[], + float* expect); + +void Free(OH_NNModel *model = nullptr, OH_NNCompilation *compilation = nullptr, OH_NNExecutor *executor = nullptr); + +int CompileGraphMock(OH_NNCompilation *compilation, const OHNNCompileParam &compileParam); + +int ExecuteGraphMock(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, float* expect); + +OH_NN_ReturnCode SetDevice(OH_NNCompilation *compilation); +int BuildMultiOpGraph(OH_NNModel *model, const OHNNGraphArgsMulti &graphArgs); +OH_NN_UInt32Array GetUInt32Array(std::vector indices); + +bool CheckOutput(const float* output, const float* expect); + +enum class PathType { FILE, DIR, UNKNOWN, NOT_FOUND }; +PathType CheckPath(const std::string &path); +bool DeleteFile(const std::string &path); +void CopyFile(const std::string &srcPath, const std::string &dstPath); +std::string ConcatPath(const std::string &str1, const std::string &str2); +void DeleteFolder(const std::string &path); +bool CreateFolder(const std::string &path); + +} // namespace Test +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NNRT_UTILS_H \ No newline at end of file diff --git a/test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/BUILD.gn b/test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/BUILD.gn new file mode 100644 index 0000000..60875e6 --- /dev/null +++ b/test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/BUILD.gn @@ -0,0 +1,53 @@ +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
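+
+# NOTE: ../common/mock_idevice.cpp (listed in sources below) supplies the INnrtDevice::Get()
+# implementation used by these tests, returning MockIDevice::GetInstance(), so the suite
+# exercises the NNRt north-bound API against a mock HDI device and does not require real
+# accelerator hardware.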
+ +import("//build/test.gni") + +config("nnrt_config") { + include_dirs = [ "../common" ] + cflags = [ "-Wno-error" ] + cflags_cc = [ "-fexceptions" ] +} + +ohos_systemtest("ActsAiNnrtFunctionV2_0Test") { + module_out_path = "neural_network_runtime/neural_network_runtime" + sources = [ + "../common/mock_idevice.cpp", + "../common/nnrt_utils.cpp", + "src/CompileTest.cpp", + "src/DeviceTest.cpp", + "src/ExecutorTest.cpp", + "src/MemoryTest.cpp", + "src/ModelTest.cpp", + ] + + configs = [ ":nnrt_config" ] + + external_deps = [ + "c_utils:utils", + "drivers_interface_nnrt:libnnrt_proxy_2.0", + "googletest:gmock_main", + "googletest:gtest_main", + "hdf_core:libhdf_utils", + "hdf_core:libhdi", + "hilog:libhilog", + "hitrace:libhitracechain", + "ipc:ipc_single", + "mindspore:mindir_lib", + "neural_network_runtime:libneural_network_core", + "neural_network_runtime:libneural_network_runtime", + ] + + subsystem_name = "ai" + part_name = "neural_network_runtime" +} diff --git a/test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/src/CompileTest.cpp b/test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/src/CompileTest.cpp new file mode 100644 index 0000000..04dee0b --- /dev/null +++ b/test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/src/CompileTest.cpp @@ -0,0 +1,875 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include +#include +#include + +#include "nnrt_utils.h" +#include "model.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +using namespace OHOS::NeuralNetworkRuntime::Test; +using namespace OHOS::HDI::Nnrt::V2_0; + +namespace { + +class CompileTest : public testing::Test { +public: + void SetUp() + { + CreateFolder(CACHE_DIR); + } + void TearDown() + { + DeleteFolder(CACHE_DIR); + } + void GenCacheFile() + { + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + OHNNCompileParam compileParam{ + .cacheDir = "./cache", + .cacheVersion = 10, + }; + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + Free(model, compilation); + ASSERT_TRUE(CheckPath(CACHE_PATH) == PathType::FILE); + ASSERT_TRUE(CheckPath(CACHE_INFO_PATH) == PathType::FILE); + } + void DestroyCache() + { + std::ifstream ifs(CACHE_PATH.c_str(), std::ios::in | std::ios::binary); + char* ptr{nullptr}; + int cacheSize = ifs.tellg(); + int invalidCacheSize = cacheSize * 0.9; + ifs.read(ptr, cacheSize); + ifs.close(); + std::ofstream ofs(CACHE_PATH.c_str(), std::ios::out | std::ios::binary); + ofs.write(ptr, invalidCacheSize); + ofs.close(); + } + +protected: + OHNNCompileParam m_compileParam; + AddModel addModel; + OHNNGraphArgs graphArgs = addModel.graphArgs; +}; + +void CompileModel(OH_NNCompilation *compilation, const OHNNCompileParam &compileParam) +{ + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); +} + +} // namespace + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_Create_0100 + * @tc.name : 创建编译实例,model为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_Create_0100, Function | MediumTest | Level3) +{ + OH_NNCompilation *compilation = OH_NNCompilation_Construct(nullptr); + ASSERT_EQ(nullptr, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_Create_0200 + * @tc.name : 创建编译实例,model未完成构图 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_Create_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation)); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNCompilation_Build(compilation)); + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_Create_0300 + * @tc.name : 创建编译实例,model已完成构图,存在算子不支持 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_Create_0300, Function | MediumTest | Level2) +{ + OHOS::sptr device = V2_0::MockIDevice::GetInstance(); + std::vector isSupported = {true, false}; + device->SetOperationsSupported(isSupported); + + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + + AddTopKModel addTopKModel; + OHNNGraphArgsMulti graphArgsMulti = addTopKModel.graphArgs; + ASSERT_EQ(OH_NN_SUCCESS, BuildMultiOpGraph(model, graphArgsMulti)); + + const size_t *devicesID{nullptr}; + const bool *realSupported{nullptr}; + uint32_t opCount; + uint32_t devicesCount; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount)); + size_t targetDevice = devicesID[0]; + + 
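+    // OH_NNModel_GetAvailableOperations() reports per-operation support from the target device;
+    // the mock was configured above with SetOperationsSupported({true, false}), so the second
+    // operation (TopK) is flagged unsupported and OH_NNCompilation_Build() below is expected
+    // to fail with OH_NN_FAILED.
+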
OH_NN_ReturnCode ret = OH_NNModel_GetAvailableOperations(model, targetDevice, &realSupported, &opCount); + ASSERT_EQ(OH_NN_SUCCESS, ret); + for (int i = 0; i < opCount; i++) { + EXPECT_EQ(realSupported[i], isSupported[i]); + } + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation)); + ASSERT_EQ(OH_NN_FAILED, OH_NNCompilation_Build(compilation)); + Free(model, compilation); + device->SetOperationsSupported({true}); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetDevice_0100 + * @tc.name : 设置device,compilation为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetDevice_0100, Function | MediumTest | Level3) +{ + const size_t *devicesID{nullptr}; + uint32_t devicesCount{0}; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount)); + + size_t targetDevice = devicesID[0]; // Use the first device in system test. + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNCompilation_SetDevice(nullptr, targetDevice)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetDevice_0200 + * @tc.name : 设置device,deviceID不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetDevice_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(compilation, 100000)); + ASSERT_EQ(OH_NN_FAILED, OH_NNCompilation_Build(compilation)); + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetDevice_0300 + * @tc.name : 设置device,deviceID存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetDevice_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + OHNNCompileParam compileParam; + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetCache_0100 + * @tc.name : 设置cache路径及版本,compilation为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetCache_0100, Function | MediumTest | Level3) +{ + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNCompilation_SetCache(nullptr, "./", 0)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetCache_0200 + * @tc.name : 设置cache路径及版本,cacheDir为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetCache_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNCompilation_SetCache(compilation, nullptr, 0)); + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetCache_0300 + * @tc.name : device不支持,设置cache路径及版本 + * @tc.desc : [C- SOFTWARE 
-0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetCache_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + // set model cache unavailabel + OHOS::sptr device = V2_0::MockIDevice::GetInstance(); + device->SetModelCacheSupported(false); + + ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetCache(compilation, "./cache", 10)); + ASSERT_EQ(OH_NN_OPERATION_FORBIDDEN, OH_NNCompilation_Build(compilation)); + Free(model, compilation); + device->SetModelCacheSupported(true); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetCache_0400 + * @tc.name : 设置不存在cache路径 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetCache_0400, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + OHNNCompileParam compileParam{.cacheDir = "./test"}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, CompileGraphMock(compilation, compileParam)); + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetCache_0500 + * @tc.name : 设置cache路径,cache破坏,重新生成cache + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetCache_0500, Function | MediumTest | Level2) +{ + // generate cache file in cache diretory + GenCacheFile(); + // destroy cache file to invalid size + DestroyCache(); + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + OHNNCompileParam compileParam{ + .cacheDir = "./cache", + .cacheVersion = 10, + }; + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetCache_0600 + * @tc.name : 设置version,小于cache版本号 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetCache_0600, Function | MediumTest | Level2) +{ + GenCacheFile(); + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + OHNNCompileParam compileParam{ + .cacheDir = "./cache", + .cacheVersion = 9, + }; + ASSERT_EQ(OH_NN_OPERATION_FORBIDDEN, CompileGraphMock(compilation, compileParam)); + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetCache_0700 + * @tc.name : 设置version,等于cache版本号 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetCache_0700, Function | MediumTest | Level2) +{ + GenCacheFile(); + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + OHNNCompileParam compileParam{ + .cacheDir = 
"./cache", + .cacheVersion = 10, + }; + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetCache_0800 + * @tc.name : 设置version,大于cache版本号 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetCache_0800, Function | MediumTest | Level2) +{ + GenCacheFile(); + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + OHNNCompileParam compileParam{ + .cacheDir = "./cache", + .cacheVersion = 11, + }; + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0100 + * @tc.name : 设置priority,compilation为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0100, Function | MediumTest | Level3) +{ + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNCompilation_SetPerformanceMode(nullptr, OH_NN_PERFORMANCE_MEDIUM)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0200 + * @tc.name : device不支持,设置performance + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_Mock_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_LOW, + }; + OHOS::sptr device = V2_0::MockIDevice::GetInstance(); + device->SetPerformanceSupported(false); + ASSERT_EQ(OH_NN_OPERATION_FORBIDDEN, CompileGraphMock(compilation, compileParam)); + Free(model, compilation); + device->SetPerformanceSupported(true); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0300 + * @tc.name : 设置performanceMode为NONE + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0300, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetPerformanceMode(compilation, OH_NN_PERFORMANCE_NONE)); + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0400 + * @tc.name : 设置performanceMode为LOW + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0400, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetPerformanceMode(compilation, OH_NN_PERFORMANCE_LOW)); + Free(model, compilation); +} + +/** + * 
@tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0500 + * @tc.name : 设置performanceMode为MEDIUM + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0500, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetPerformanceMode(compilation, OH_NN_PERFORMANCE_MEDIUM)); + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0600 + * @tc.name : 设置performanceMode为HIGH + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0600, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetPerformanceMode(compilation, OH_NN_PERFORMANCE_HIGH)); + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0700 + * @tc.name : 设置performanceMode为EXTREME + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0700, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetPerformanceMode(compilation, OH_NN_PERFORMANCE_EXTREME)); + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0800 + * @tc.name : 设置performanceMode为NONE-1 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0800, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation)); + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNCompilation_SetPerformanceMode(compilation, + static_cast(OH_NN_PERFORMANCE_NONE - 1))); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNCompilation_Build(compilation)); + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0900 + * @tc.name : 设置performanceMode为EXTREME+1 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPerformanceMode_0900, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation)); + ASSERT_EQ(OH_NN_SUCCESS, + 
OH_NNCompilation_SetPerformanceMode(compilation, + static_cast(OH_NN_PERFORMANCE_EXTREME + 1))); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNCompilation_Build(compilation)); + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPriority_0100 + * @tc.name : 设置priority,compilation为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPriority_0100, Function | MediumTest | Level3) +{ + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNCompilation_SetPriority(nullptr, OH_NN_PRIORITY_MEDIUM)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPriority_0200 + * @tc.name : device不支持,设置priority + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPriority_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + // set device not supported + OHOS::sptr device = V2_0::MockIDevice::GetInstance(); + device->SetPrioritySupported(false); + ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetPriority(compilation, OH_NN_PRIORITY_LOW)); + ASSERT_EQ(OH_NN_OPERATION_FORBIDDEN, OH_NNCompilation_Build(compilation)); + Free(model, compilation); + device->SetPrioritySupported(true); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPriority_0300 + * @tc.name : 设置priority为NONE + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPriority_0300, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetPriority(compilation, OH_NN_PRIORITY_NONE)); + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPriority_0400 + * @tc.name : 设置priority为LOW + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPriority_0400, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetPriority(compilation, OH_NN_PRIORITY_LOW)); + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPriority_0500 + * @tc.name : 设置priority为MEDIUM + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPriority_0500, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetPriority(compilation, OH_NN_PRIORITY_MEDIUM)); + Free(model, compilation); +} + +/** + * 
@tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPriority_0600 + * @tc.name : 设置priority为LOW + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPriority_0600, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetPriority(compilation, OH_NN_PRIORITY_HIGH)); + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPriority_0700 + * @tc.name : 设置priority为NONE-1 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPriority_0700, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation)); + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNCompilation_SetPriority(compilation, static_cast(OH_NN_PRIORITY_NONE - 1))); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNCompilation_Build(compilation)); + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_SetPriority_0800 + * @tc.name : 设置priority为HIGH+1 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_SetPriority_0800, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation)); + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNCompilation_SetPriority(compilation, static_cast(OH_NN_PRIORITY_HIGH + 1))); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNCompilation_Build(compilation)); + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_EnableFloat16_0100 + * @tc.name : 设置enableFloat16,compilation为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_EnableFloat16_0100, Function | MediumTest | Level3) +{ + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNCompilation_EnableFloat16(nullptr, OH_NN_PERFORMANCE_MEDIUM)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_EnableFloat16_0200 + * @tc.name : device支持,设置fp16推理为false + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_EnableFloat16_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_EnableFloat16(compilation, false)); + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Compilation_EnableFloat16_0300 + * @tc.name : device不支持,设置fp16推理为false + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_EnableFloat16_0300, Function | MediumTest | Level2) +{ 
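+    // Same capability-toggle pattern as the cache and priority tests above: build a valid Add
+    // model, mark float16 inference as unsupported on the mock device, and expect
+    // OH_NNCompilation_Build() to return OH_NN_OPERATION_FORBIDDEN once EnableFloat16 is requested.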
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    // set fp16 unavailable
+    OHOS::sptr<V2_0::MockIDevice> device = V2_0::MockIDevice::GetInstance();
+    device->SetFP16Supported(false);
+
+    ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation));
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_EnableFloat16(compilation, true));
+    ASSERT_EQ(OH_NN_OPERATION_FORBIDDEN, OH_NNCompilation_Build(compilation));
+    Free(model, compilation);
+    device->SetFP16Supported(true);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_EnableFloat16_0400
+ * @tc.name : Device does not support fp16; set fp16 inference to true
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_EnableFloat16_0400, Function | MediumTest | Level3)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    // set fp16 unavailable
+    OHOS::sptr<V2_0::MockIDevice> device = V2_0::MockIDevice::GetInstance();
+    device->SetFP16Supported(false);
+
+    ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation));
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_EnableFloat16(compilation, true));
+    ASSERT_EQ(OH_NN_OPERATION_FORBIDDEN, OH_NNCompilation_Build(compilation));
+    Free(model, compilation);
+    device->SetFP16Supported(true);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_Build_0100
+ * @tc.name : Build model with compilation as a null pointer
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_Build_0100, Function | MediumTest | Level3)
+{
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNCompilation_Build(nullptr));
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_Build_0200
+ * @tc.name : Build model without setting a device; the default device is used and build succeeds
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_Build_0200, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(compilation));
+    Free(model, compilation);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_Build_0300
+ * @tc.name : Build model with only the device set; test the default configuration
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_Build_0300, Function | MediumTest | Level1)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    OHNNCompileParam compileParam;
+    ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam));
+    Free(model, compilation);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_Build_0400
+ * @tc.name : Set cache path and version; build the model and export the cache
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_Build_0400, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    OHNNCompileParam compileParam{
+        .cacheDir = "./cache",
+        .cacheVersion = 10,
+    };
+    ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam));
+    Free(model, compilation);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_Destroy_0100
+ * @tc.name : Destroy compilation instance with compilation as nullptr
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_Destroy_0100, Function | MediumTest | Level3)
+{
+    OH_NNCompilation *compilation = nullptr;
+    OH_NNCompilation_Destroy(&compilation);
+    ASSERT_EQ(nullptr, compilation);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_Destroy_0200
+ * @tc.name : Destroy compilation instance without building the model
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_Destroy_0200, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation));
+    OH_NNCompilation_Destroy(&compilation);
+    ASSERT_EQ(nullptr, compilation);
+    Free(model);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_Destroy_0300
+ * @tc.name : Destroy compilation instance after the model has been built
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_Destroy_0300, Function | MediumTest | Level0)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    OHNNCompileParam compileParam;
+    ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam));
+    Free(model, compilation);
+}
+
+/**
+ * @tc.number : SUB_AI_NNR_Func_North_Compilation_Combine_0100
+ * @tc.name : Build models concurrently in multiple threads; build succeeds
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNR_Func_North_Compilation_Combine_0100, Function | MediumTest | Level2)
+{
+    OH_NNModel *model1 = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model1);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model1, graphArgs));
+
+    OH_NNModel *model2 = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model2);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model2, graphArgs));
+
+    OH_NNCompilation *compilation1 = OH_NNCompilation_Construct(model1);
+    ASSERT_NE(nullptr, compilation1);
+    OH_NNCompilation *compilation2 = OH_NNCompilation_Construct(model2);
+    ASSERT_NE(nullptr, compilation2);
+
+    std::thread th1(CompileModel, compilation1, m_compileParam);
+    std::thread th2(CompileModel, compilation2, m_compileParam);
+    th1.join();
+    th2.join();
+    Free(model1, compilation1);
+    Free(model2, compilation2);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Compilation_Combine_0200
+ * @tc.name : Build an already built model again
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(CompileTest, SUB_AI_NNRt_Func_North_Compilation_Combine_0200, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    OHNNCompileParam compileParam;
+    ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam));
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNCompilation_Build(compilation));
+    Free(model, compilation);
+}
\ No newline at end of file
diff --git a/test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/src/DeviceTest.cpp b/test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/src/DeviceTest.cpp
new file mode 100644
index 0000000..e011f53
--- /dev/null
+++ b/test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/src/DeviceTest.cpp
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <cstdint>
+#include <string>
+#include <gtest/gtest.h>
+
+#include "nnrt_utils.h"
+
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Test;
+
+class DeviceTest : public testing::Test {};
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceID_0100
+ * @tc.name : Get device IDs with *allDevicesID as nullptr
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceID_0100, Function | MediumTest | Level3)
+{
+    uint32_t count{0};
+
+    OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(nullptr, &count);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceID_0200
+ * @tc.name : Get device IDs with **allDevicesID not nullptr
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceID_0200, Function | MediumTest | Level3)
+{
+    const size_t allDeviceIds = 0;
+    const size_t *pAllDeviceIds = &allDeviceIds;
+    uint32_t count{0};
+
+    OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(&pAllDeviceIds, &count);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceID_0300
+ * @tc.name : Get device IDs with deviceCount as nullptr
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceID_0300, Function | MediumTest | Level3)
+{
+    const size_t *allDeviceIds = nullptr;
+
+    OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(&allDeviceIds, nullptr);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceID_0400
+ * @tc.name : Get device IDs and verify the device count
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceID_0400, Function | MediumTest | Level2)
+{
+    const size_t *allDeviceIds = nullptr;
+    uint32_t count{0};
+
+    OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(&allDeviceIds, &count);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+
+    uint32_t expectCount = 1;
+    EXPECT_LE(expectCount, count);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceName_0100
+ * @tc.name : Get device name with a non-existent deviceID
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceName_0100, Function | MediumTest | Level3)
+{
+    const size_t deviceID{100000};
+    const char *name = nullptr;
+
+    OH_NN_ReturnCode ret = OH_NNDevice_GetName(deviceID, &name);
+    EXPECT_EQ(OH_NN_FAILED, ret);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceName_0200
+ * @tc.name : Get device name with *name as nullptr
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceName_0200, Function | MediumTest | Level3)
+{
+    const size_t *devicesID{nullptr};
+    uint32_t devicesCount{0};
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount));
+    size_t targetDevice = devicesID[0];
+
+    OH_NN_ReturnCode ret = OH_NNDevice_GetName(targetDevice, nullptr);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceName_0300
+ * @tc.name : Get device name with **name not nullptr
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceName_0300, Function | MediumTest | Level3)
+{
+    const size_t *devicesID{nullptr};
+    uint32_t devicesCount{0};
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount));
+    size_t targetDevice = devicesID[0];
+
+    const char *name = "name";
+
+    OH_NN_ReturnCode ret = OH_NNDevice_GetName(targetDevice, &name);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceName_0400
+ * @tc.name : Get device name and verify the result
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceName_0400, Function | MediumTest | Level1)
+{
+    const size_t *devicesID{nullptr};
+    uint32_t devicesCount{0};
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount));
+    uint32_t number = 1;
+    EXPECT_GE(devicesCount, number);
+
+    const char *name = nullptr;
+    std::string m_deviceName{"Device-CPU_TestVendor_v2_0"};
+    OH_NN_ReturnCode ret = OH_NN_FAILED;
+    bool isHaveName = false;
+    for (uint32_t i = 0; i < devicesCount; i++) {
+        name = nullptr;
+        ret = OH_NNDevice_GetName(devicesID[i], &name);
+        EXPECT_EQ(OH_NN_SUCCESS, ret);
+        std::string sName(name);
+        if (m_deviceName == sName) {
+            isHaveName = true;
+        }
+    }
+    EXPECT_EQ(isHaveName, true);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceType_0100
+ * @tc.name : Get device type with deviceType as nullptr
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceType_0100, Function | MediumTest | Level3)
+{
+    const size_t *devicesID{nullptr};
+    uint32_t devicesCount{0};
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount));
+    size_t targetDevice = devicesID[0];
+
+    OH_NN_ReturnCode ret = OH_NNDevice_GetType(targetDevice, nullptr);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceType_0200
+ * @tc.name : Get device type with a non-existent deviceID
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceType_0200, Function | MediumTest | Level3)
+{
+    const size_t deviceID{100000};
+    OH_NN_DeviceType type{OH_NN_OTHERS};
+
+    OH_NN_ReturnCode ret = OH_NNDevice_GetType(deviceID, &type);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceType_0300
+ * @tc.name : Get device type and verify the result
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceType_0300, Function | MediumTest | Level1)
+{
+    const size_t *devicesID{nullptr};
+    uint32_t devicesCount{0};
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount));
+    size_t targetDevice = devicesID[0];
+
+    OH_NN_DeviceType type{OH_NN_OTHERS};
+
+    OH_NN_ReturnCode ret = OH_NNDevice_GetType(targetDevice, &type);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
diff --git a/test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/src/ExecutorTest.cpp b/test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/src/ExecutorTest.cpp
new file mode 100644
index 0000000..8766aec
--- /dev/null
+++ b/test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/src/ExecutorTest.cpp
@@ -0,0 +1,1318 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <cstdlib>
+#include <algorithm>
+#include <thread>
+#include <gtest/gtest.h>
+
+#include "nnrt_utils.h"
+#include "model.h"
+
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime;
+using namespace OHOS::NeuralNetworkRuntime::Test;
+using namespace OHOS::HDI::Nnrt::V2_0;
+
+namespace {
+
+class ExecutorTest : public testing::Test {
+protected:
+    OHOS::sptr<V2_0::MockIDevice> device;
+    AddModel addModel;
+    OHNNGraphArgs graphArgs = addModel.graphArgs;
+    OHNNCompileParam compileParam;
+};
+
+void ExecuteModel(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, float* expect)
+{
+    ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, expect));
+}
+
+} // namespace
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Executor_Create_0100
+ * @tc.name : Create executor with compilation as nullptr
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Create_0100, Function | MediumTest | Level3)
+{
+    OH_NNExecutor *executor = OH_NNExecutor_Construct(nullptr);
+    ASSERT_EQ(nullptr, executor);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Executor_Create_0200
+ * @tc.name : Create executor with a compilation that has not been built
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Create_0200, Function | MediumTest | Level3)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+
+    const size_t *devicesID{nullptr};
+    uint32_t devicesCount{0};
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount));
+    size_t targetDevice = devicesID[0];
+
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(compilation, targetDevice));
+    OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation);
+    ASSERT_EQ(nullptr, executor);
+
+    Free(model, compilation, executor);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Executor_SetInput_0100
+ * @tc.name : Set input with executor as nullptr
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_SetInput_0100, Function | MediumTest | Level3)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+    ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam));
+    uint32_t inputIndex = 0;
+    const OHNNOperandTest &operandTem = graphArgs.operands[0];
+    auto quantParam = operandTem.quantParam;
+    OH_NN_Tensor operand = {operandTem.dataType,
(uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNExecutor_SetInput(nullptr, inputIndex, &operand, operandTem.data, operandTem.length)); + + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_SetInput_0200 + * @tc.name : 设置输入,inputIndex不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_SetInput_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 100000; + const OHNNOperandTest &operandTem = graphArgs.operands[0]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, operandTem.length)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_SetInput_0300 + * @tc.name : 设置输入,operand参数不一致 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_SetInput_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + const OHNNOperandTest &operandTem = graphArgs.operands[0]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, OH_NN_ADD_ACTIVATIONTYPE}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, operandTem.length)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_SetInput_0400 + * @tc.name : 设置输入,operand形状改变 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_SetInput_0400, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + const OHNNOperandTest &operandTem = graphArgs.operands[0]; + auto quantParam = operandTem.quantParam; + int32_t dimensions[3]{3, 3, 3}; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), dimensions, quantParam, + operandTem.type}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, 
operandTem.data, operandTem.length)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_SetInput_0500 + * @tc.name : 设置输入,buffer为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_SetInput_0500, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + const OHNNOperandTest &operandTem = graphArgs.operands[0]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, nullptr, operandTem.length)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_SetInput_0600 + * @tc.name : 设置输入,length小于输入长度 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_SetInput_0600, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + const OHNNOperandTest &operandTem = graphArgs.operands[0]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, 0)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_SetInput_0700 + * @tc.name : 设置输入,重复设置同一inputIndex + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_SetInput_0700, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + const OHNNOperandTest &operandTem = graphArgs.operands[0]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, operandTem.length)); + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, operandTem.length)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : 
SUB_AI_NNRt_Func_North_Executor_SetInput_0800 + * @tc.name : 变长模型,设置超出动态范围的输入 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_SetInput_0800, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + AvgPoolDynamicModel avgModel; + graphArgs = avgModel.graphArgs; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + const OHNNOperandTest &operandTem = graphArgs.operands[0]; + auto quantParam = operandTem.quantParam; + int32_t shape[4] = {1, 100, 100, 11}; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), shape, + quantParam, operandTem.type}; + + size_t length = shape[0] * shape[1] * shape[2] * shape[3] * sizeof(float); + void* data = malloc(length); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, data, length)); + + free(data); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_SetInput_0900 + * @tc.name : 变长模型,设置等于动态范围的最小输入 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_SetInput_0900, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + AvgPoolDynamicModel avgModel; + graphArgs = avgModel.graphArgs; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + const OHNNOperandTest &operandTem = graphArgs.operands[0]; + auto quantParam = operandTem.quantParam; + int32_t shape[4] = {1, 1, 1, 1}; //minInputDims + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), shape, + quantParam, operandTem.type}; + + size_t length = shape[0] * shape[1] * shape[2] * shape[3] * sizeof(float); + void* data = malloc(length); + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, data, length)); + + free(data); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_SetInput_1000 + * @tc.name : 变长模型,设置等于动态范围的最大输入 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_SetInput_1000, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + AvgPoolDynamicModel avgModel; + graphArgs = avgModel.graphArgs; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + const OHNNOperandTest &operandTem = graphArgs.operands[0]; + auto quantParam = operandTem.quantParam; + int32_t shape[4] = {1, 100, 100, 10}; //maxInputDims + OH_NN_Tensor operand = {operandTem.dataType, 
(uint32_t)operandTem.shape.size(), shape, + quantParam, operandTem.type}; + + size_t length = shape[0] * shape[1] * shape[2] * shape[3] * sizeof(float); + void* data = malloc(length); + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, data, length)); + + free(data); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_SetOutput_0100 + * @tc.name : 设置输出,executor为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_SetOutput_0100, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + uint32_t outputIndex = 0; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) != + graphArgs.inputIndices.end()) { + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, operandTem.length)); + inputIndex += 1; + } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) != + graphArgs.outputIndices.end()) { + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNExecutor_SetOutput(nullptr, outputIndex, operandTem.data, operandTem.length)); + outputIndex += 1; + } + } + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_SetOutput_0200 + * @tc.name : 设置输出,outputIndex不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_SetOutput_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + uint32_t outputIndex = 10000; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) != + graphArgs.inputIndices.end()) { + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, operandTem.length)); + inputIndex += 1; + } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) != + graphArgs.outputIndices.end()) { + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNExecutor_SetOutput(executor, outputIndex, operandTem.data, operandTem.length)); + outputIndex += 1; + } + } + Free(model, 
compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_SetOutput_0300 + * @tc.name : 设置输出,buffer为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_SetOutput_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + uint32_t outputIndex = 0; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) != + graphArgs.inputIndices.end()) { + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, operandTem.length)); + inputIndex += 1; + } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) != + graphArgs.outputIndices.end()) { + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNExecutor_SetOutput(executor, outputIndex, nullptr, operandTem.length)); + outputIndex += 1; + } + } + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_SetOutput_0400 + * @tc.name : 设置输出,length小于输出长度 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_SetOutput_0400, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + uint32_t outputIndex = 0; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) != + graphArgs.inputIndices.end()) { + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, operandTem.length)); + inputIndex += 1; + } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) != + graphArgs.outputIndices.end()) { + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetOutput(executor, outputIndex, operandTem.data, 0)); + outputIndex += 1; + } + } + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_SetOutput_0500 + * @tc.name : 设置输出,重复设置同一outputIndex + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_SetOutput_0500, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); 
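+    // Editorial note: a minimal sketch (hypothetical shape, unused below) of how the byte length
+    // passed to OH_NNExecutor_SetOutput relates to an operand shape in these tests; passing a
+    // smaller length is what the SetOutput_0400 case above expects to fail with OH_NN_INVALID_PARAMETER.
+    {
+        const int32_t sketchShape[4] = {1, 2, 2, 1};
+        size_t elementCount = 1;
+        for (int32_t dim : sketchShape) {
+            elementCount *= static_cast<size_t>(dim);
+        }
+        size_t expectedLength = elementCount * sizeof(float);
+        (void)expectedLength;  // illustration only
+    }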
+ ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + uint32_t outputIndex = 0; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) != + graphArgs.inputIndices.end()) { + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, operandTem.length)); + inputIndex += 1; + } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) != + graphArgs.outputIndices.end()) { + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetOutput(executor, outputIndex, operandTem.data, operandTem.length)); + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetOutput(executor, outputIndex, operandTem.data, operandTem.length)); + outputIndex += 1; + } + } + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Run_0100 + * @tc.name : 模型推理,executor为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Run_0100, Function | MediumTest | Level3) +{ + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_Run(nullptr)); +} +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Run_0200 + * @tc.name : 模型推理,executor未设置输入 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Run_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t outputIndex = 0; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) != + graphArgs.outputIndices.end()) { + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetOutput(executor, outputIndex, operandTem.data, operandTem.length)); + } + } + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_Run(executor)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Run_0300 + * @tc.name : 模型推理,executor未设置输出 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Run_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + + 
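+    // Editorial note: OH_NNExecutor_Run is only expected to succeed after every input index and
+    // every output index of the graph has been set; Run_0200 to Run_0500 each leave part of that
+    // precondition unmet. A hedged sketch of the full sequence (the buffer names below are
+    // placeholders, not variables defined in this file):
+    //     set every input  -> OH_NNExecutor_SetInput(executor, i, &tensor[i], inData[i], inLen[i]);
+    //     set every output -> OH_NNExecutor_SetOutput(executor, j, outData[j], outLen[j]);
+    //     run              -> OH_NNExecutor_Run(executor);  // otherwise OH_NN_INVALID_PARAMETER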
for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) != + graphArgs.inputIndices.end()) { + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, operandTem.length)); + inputIndex += 1; + } + } + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_Run(executor)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Run_0400 + * @tc.name : 模型推理,executor设置输入个数不足 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Run_0400, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + uint32_t outputIndex = 0; + + const OHNNOperandTest &operandTem = graphArgs.operands[0]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, operandTem.length)); + inputIndex += 1; + const OHNNOperandTest &operandOut = graphArgs.operands[3]; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_SetOutput(executor, outputIndex, operandOut.data, operandOut.length)); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_Run(executor)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Run_0500 + * @tc.name : 模型推理,executor设置输出个数不足 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Run_0500, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + TopKModel topKModel; + graphArgs = topKModel.graphArgs; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + + graphArgs.outputIndices = {3}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, ExecuteGraphMock(executor, graphArgs, addModel.expectValue)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Run_0600 + * @tc.name : 定长模型推理测试 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Run_0600, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = 
OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + + ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, addModel.expectValue)); + + EXPECT_TRUE(CheckOutput(addModel.outputValue, addModel.expectValue)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Run_0700 + * @tc.name : 变长模型推理测试 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Run_0700, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + AvgPoolDynamicModel avgModel; + graphArgs = avgModel.graphArgs; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + avgModel.dynamicInput.shape = {1, 3, 3, 1}; + avgModel.output.shape = {1, 2, 2, 1}; + graphArgs.operands = {avgModel.dynamicInput, avgModel.kernel, avgModel.strides, + avgModel.padMode, avgModel.activation, avgModel.output}; + ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, avgModel.expectValue)); + // check result + EXPECT_TRUE(CheckOutput(avgModel.outputValue, avgModel.expectValue)); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0100 + * @tc.name : 获取输出维度,executor为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0100, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + + ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, addModel.expectValue)); + + EXPECT_TRUE(CheckOutput(addModel.outputValue, addModel.expectValue)); + int32_t *outputDimensions = nullptr; + uint32_t outputDimensionCount{0}; + uint32_t addOutputIndex = {0}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNExecutor_GetOutputShape(nullptr, addOutputIndex, &outputDimensions, &outputDimensionCount)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0200 + * @tc.name : 获取输出维度,outputIndex不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + + ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, addModel.expectValue)); + + EXPECT_TRUE(CheckOutput(addModel.outputValue, addModel.expectValue)); + int32_t *outputDimensions = nullptr; + uint32_t outputDimensionCount{0}; + uint32_t addOutputIndex 
= {10000}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNExecutor_GetOutputShape(executor, addOutputIndex, &outputDimensions, &outputDimensionCount)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0300 + * @tc.name : 获取输出维度,*dimensions为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + + ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, addModel.expectValue)); + + EXPECT_TRUE(CheckOutput(addModel.outputValue, addModel.expectValue)); + uint32_t outputDimensionCount{0}; + uint32_t addOutputIndex = {0}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNExecutor_GetOutputShape(executor, addOutputIndex, nullptr, &outputDimensionCount)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0400 + * @tc.name : 获取输出维度,**dimensions非nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0400, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + + ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, addModel.expectValue)); + + EXPECT_TRUE(CheckOutput(addModel.outputValue, addModel.expectValue)); + int32_t outputDimensions{2}; + int32_t *pOutputDimensions = &outputDimensions; + uint32_t outputDimensionCount{0}; + uint32_t addOutputIndex = {0}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNExecutor_GetOutputShape(executor, addOutputIndex, &pOutputDimensions, &outputDimensionCount)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0500 + * @tc.name : 获取输出维度,*dimensionCount为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0500, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + + ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, addModel.expectValue)); + + EXPECT_TRUE(CheckOutput(addModel.outputValue, addModel.expectValue)); + int32_t *outputDimensions = nullptr; + uint32_t addOutputIndex = {0}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNExecutor_GetOutputShape(executor, addOutputIndex, &outputDimensions, 
nullptr)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0600 + * @tc.name : 未调用推理接口,获取输出维度 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0600, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + uint32_t outputIndex = 0; + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) != + graphArgs.inputIndices.end()) { + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, operandTem.length)); + inputIndex += 1; + } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) != + graphArgs.outputIndices.end()) { + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetOutput(executor, outputIndex, operandTem.data, operandTem.length)); + outputIndex += 1; + } + } + int32_t *outputDimensions = nullptr; + uint32_t outputDimensionCount{0}; + uint32_t addOutputIndex = {0}; + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_GetOutputShape(executor, addOutputIndex, &outputDimensions, &outputDimensionCount)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0700 + * @tc.name : 模型推理成功,获取输出维度 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0700, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + + ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, addModel.expectValue)); + + EXPECT_TRUE(CheckOutput(addModel.outputValue, addModel.expectValue)); + int32_t *outputDimensions = nullptr; + uint32_t outputDimensionCount{0}; + uint32_t addOutputIndex = {0}; + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_GetOutputShape(executor, addOutputIndex, &outputDimensions, &outputDimensionCount)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0800 + * @tc.name : 变长模型推理成功,获取输出维度 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_GetOutputDimensions_0800, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + AvgPoolDynamicModel avgModel; + graphArgs = avgModel.graphArgs; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = 
OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + avgModel.dynamicInput.shape = {1, 3, 3, 1}; + avgModel.output.shape = {1, 2, 2, 1}; + graphArgs.operands = {avgModel.dynamicInput, avgModel.kernel, avgModel.strides, + avgModel.padMode, avgModel.activation, avgModel.output}; + ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, avgModel.expectValue)); + + // check result + EXPECT_TRUE(CheckOutput(avgModel.outputValue, avgModel.expectValue)); + int32_t *outputDimensions = nullptr; + uint32_t outputDimensionCount{0}; + uint32_t addOutputIndex = {0}; + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_GetOutputShape(executor, addOutputIndex, &outputDimensions, &outputDimensionCount)); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Destroy_0100 + * @tc.name : 销毁执行器实例,*executor为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Destroy_0100, Function | MediumTest | Level3) +{ + OH_NNExecutor *executor = nullptr; + ASSERT_NO_THROW(OH_NNExecutor_Destroy(&executor)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Destroy_0200 + * @tc.name : 销毁执行器实例,executor释放 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Destroy_0200, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + + ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, addModel.expectValue)); + + EXPECT_TRUE(CheckOutput(addModel.outputValue, addModel.expectValue)); + + OH_NNExecutor_Destroy(&executor); + ASSERT_EQ(nullptr, executor); + + Free(model, compilation); +} + +/** + * @tc.number : SUB_AI_NNR_Func_North_Executor_Combine_0100 + * @tc.name : 并发模型推理,推理成功 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNR_Func_North_Executor_Combine_0100, Function | MediumTest | Level2) +{ + OH_NNModel *model1 = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model1); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model1, graphArgs)); + + OH_NNModel *model2 = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model2); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model2, graphArgs)); + + OH_NNCompilation *compilation1 = OH_NNCompilation_Construct(model1); + ASSERT_NE(nullptr, compilation1); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation1, compileParam)); + + OH_NNCompilation *compilation2 = OH_NNCompilation_Construct(model2); + ASSERT_NE(nullptr, compilation2); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation2, compileParam)); + + OH_NNExecutor *executor1 = OH_NNExecutor_Construct(compilation1); + ASSERT_NE(nullptr, executor1); + + OH_NNExecutor *executor2 = OH_NNExecutor_Construct(compilation2); + ASSERT_NE(nullptr, executor2); + + std::thread th1(ExecuteModel, executor1, graphArgs, addModel.expectValue); + std::thread th2(ExecuteModel, executor2, graphArgs, addModel.expectValue); + th1.join(); + th2.join(); + Free(model1, compilation1, executor1); + 
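+    // Editorial note: the two threads above each drive their own model, compilation and executor;
+    // nothing in this suite relies on a single OH_NNExecutor being shared across threads. A sketch
+    // of the same pattern with a lambda (illustrative only, using the ExecuteModel helper defined
+    // at the top of this file):
+    //     std::thread worker([&]() { ExecuteModel(executor1, graphArgs, addModel.expectValue); });
+    //     ExecuteModel(executor2, graphArgs, addModel.expectValue);
+    //     worker.join();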
Free(model2, compilation2, executor2); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Combine_0200 + * @tc.name : 多次设置输入,仅首次成功,模型推理 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Combine_0200, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + + float valueX2[4] = {3, 2, 1, 0}; + uint32_t inputIndex = 0; + uint32_t outputIndex = 0; + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) != + graphArgs.inputIndices.end()) { + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, operandTem.length)); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNExecutor_SetInput(executor, 3, &operand, valueX2, operandTem.length)); + inputIndex += 1; + } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) != + graphArgs.outputIndices.end()) { + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetOutput(executor, outputIndex, operandTem.data, operandTem.length)); + OHOS::sptr device = V2_0::MockIDevice::GetInstance(); + ASSERT_EQ(OH_NN_SUCCESS, device->MemoryCopy(addModel.expectValue, operandTem.length)); + outputIndex += 1; + } + } + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(executor)); + // check result + EXPECT_TRUE(CheckOutput(addModel.outputValue, addModel.expectValue)); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Combine_0300 + * @tc.name : 多次设置输出,仅首次生效,模型推理 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Combine_0300, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + + uint32_t inputIndex = 0; + uint32_t outputIndex = 0; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) != + graphArgs.inputIndices.end()) { + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, operandTem.length)); + inputIndex += 1; + } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) != + graphArgs.outputIndices.end()) { + ASSERT_EQ(OH_NN_SUCCESS, + 
OH_NNExecutor_SetOutput(executor, outputIndex, operandTem.data, operandTem.length));
+ ASSERT_EQ(OH_NN_INVALID_PARAMETER,
+ OH_NNExecutor_SetOutput(executor, outputIndex+10, operandTem.data, operandTem.length));
+ OHOS::sptr<V2_0::MockIDevice> device = V2_0::MockIDevice::GetInstance();
+ ASSERT_EQ(OH_NN_SUCCESS, device->MemoryCopy(addModel.expectValue, operandTem.length));
+ outputIndex += 1;
+ }
+ }
+ ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(executor));
+ // check result
+ EXPECT_TRUE(CheckOutput(addModel.outputValue, addModel.expectValue));
+ ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(executor));
+ Free(model, compilation, executor);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Executor_Combine_0400
+ * @tc.name : Model inference with shared-memory inputs and non-shared outputs
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Combine_0400, Function | MediumTest | Level2)
+{
+ OH_NNModel *model = OH_NNModel_Construct();
+ ASSERT_NE(nullptr, model);
+ ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+ OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+ ASSERT_NE(nullptr, compilation);
+ ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam));
+
+ OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation);
+ ASSERT_NE(nullptr, executor);
+ OH_NN_Memory *OHNNMemory[graphArgs.inputIndices.size()];
+ uint32_t inputIndex = 0;
+ uint32_t outputIndex = 0;
+
+ for (size_t i = 0; i < graphArgs.operands.size(); i++) {
+ const OHNNOperandTest &operandTem = graphArgs.operands[i];
+ auto quantParam = operandTem.quantParam;
+ OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(),
+ quantParam, operandTem.type};
+ if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) !=
+ graphArgs.inputIndices.end()) {
+
+ OH_NN_Memory *inputMemory = OH_NNExecutor_AllocateInputMemory(executor, inputIndex, operandTem.length);
+ ASSERT_NE(nullptr, inputMemory);
+
+ ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_SetInputWithMemory(executor, inputIndex, &operand, inputMemory));
+
+ ASSERT_EQ(EOK, memcpy_s(inputMemory->data, operandTem.length, static_cast<const void *>(operandTem.data), operandTem.length));
+ OHNNMemory[inputIndex] = inputMemory;
+ inputIndex += 1;
+ } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) !=
+ graphArgs.outputIndices.end()) {
+ ASSERT_EQ(OH_NN_SUCCESS,
+ OH_NNExecutor_SetOutput(executor, outputIndex, operandTem.data, operandTem.length));
+ OHOS::sptr<V2_0::MockIDevice> device = V2_0::MockIDevice::GetInstance();
+ ASSERT_EQ(OH_NN_SUCCESS, device->MemoryCopy(addModel.expectValue, operandTem.length));
+ outputIndex += 1;
+ }
+ }
+ ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(executor));
+ // check result
+ EXPECT_TRUE(CheckOutput(addModel.outputValue, addModel.expectValue));
+
+ for (size_t i = 0; i < graphArgs.inputIndices.size(); i++) {
+ OH_NNExecutor_DestroyInputMemory(executor, i, &OHNNMemory[i]);
+ ASSERT_EQ(OHNNMemory[i], nullptr);
+ }
+ Free(model, compilation, executor);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Executor_Combine_0500
+ * @tc.name : Model inference with non-shared inputs and shared-memory outputs
+ * @tc.desc : [C- SOFTWARE -0200]
+ */
+HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Combine_0500, Function | MediumTest | Level2)
+{
+ OH_NNModel *model = OH_NNModel_Construct();
+ ASSERT_NE(nullptr, model);
+ ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+ OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+ ASSERT_NE(nullptr, compilation);
+ ASSERT_EQ(OH_NN_SUCCESS,
CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + uint32_t outputIndex = 0; + OH_NN_Memory *outputMemory; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) != + graphArgs.inputIndices.end()) { + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data, operandTem.length)); + + inputIndex += 1; + } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) != + graphArgs.outputIndices.end()) { + + outputMemory = OH_NNExecutor_AllocateOutputMemory(executor, outputIndex, operandTem.length); + ASSERT_NE(nullptr, outputMemory); + ASSERT_EQ(OH_NN_SUCCESS, + OH_NNExecutor_SetOutputWithMemory(executor, outputIndex, outputMemory)); + OHOS::sptr device = V2_0::MockIDevice::GetInstance(); + ASSERT_EQ(OH_NN_SUCCESS, device->MemoryCopy(addModel.expectValue, operandTem.length)); + outputIndex += 1; + } + } + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(executor)); + // check result + EXPECT_TRUE(CheckOutput(static_cast(const_cast(outputMemory->data)), + static_cast(addModel.expectValue))); + + OH_NNExecutor_DestroyOutputMemory(executor, 0, &outputMemory); + ASSERT_EQ(outputMemory, nullptr); + Free(model, compilation, executor); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/src/MemoryTest.cpp b/test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/src/MemoryTest.cpp new file mode 100644 index 0000000..dfdfb97 --- /dev/null +++ b/test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/src/MemoryTest.cpp @@ -0,0 +1,949 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include +#include +#include + +#include "nnrt_utils.h" +#include "model.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +using namespace OHOS::NeuralNetworkRuntime::Test; +using namespace OHOS::HDI::Nnrt::V2_0; + +namespace { + +class MemoryTest : public testing::Test { +protected: + AddModel addModel; + OHNNGraphArgs graphArgs = addModel.graphArgs; + OHNNCompileParam compileParam; +}; + +void CheckCreateInputMemory(OH_NNExecutor *executor, uint32_t inputIndex, size_t length) +{ + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateInputMemory(executor, inputIndex, length); + ASSERT_NE(nullptr, OHNNMemory); + OH_NNExecutor_DestroyInputMemory(executor, inputIndex, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); +} + +void CheckCreateOutputMemory(OH_NNExecutor *executor, uint32_t outputIndex, size_t length) +{ + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateOutputMemory(executor, outputIndex, length); + ASSERT_NE(nullptr, OHNNMemory); + OH_NNExecutor_DestroyOutputMemory(executor, outputIndex, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); +} + +} // namespace + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_CreateInputMemory_0100 + * @tc.name : 创建输入共享内存,executor为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_CreateInputMemory_0100, Function | MediumTest | Level3) +{ + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateInputMemory(nullptr, 0, 4); + ASSERT_EQ(nullptr, OHNNMemory); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_CreateInputMemory_0200 + * @tc.name : 创建输入共享内存,inputIndex不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_CreateInputMemory_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateInputMemory(executor, 2, graphArgs.operands[0].length); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_CreateInputMemory_0300 + * @tc.name : 创建输入共享内存,length为0 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_CreateInputMemory_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateInputMemory(executor, 0, 0); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_CreateInputMemory_0400 + * @tc.name :创建输入共享内存,length为最大限制2G + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_CreateInputMemory_0400, Function | MediumTest | Level3) +{ + 
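+ // The allocation request below asks for one byte more than 1 GiB of input memory; the case expects the allocator to reject an over-limit size and return nullptr.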
OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateInputMemory(executor, 0, 1024 * 1024 * 1024 + 1); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_CreateInputMemory_0500 + * @tc.name : 创建输入共享内存,inputIndex重复创建 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_CreateInputMemory_0500, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateInputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + OH_NN_Memory *OHNNMemory2 = OH_NNExecutor_AllocateInputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory2); + OH_NNExecutor_DestroyInputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + OH_NNExecutor_DestroyInputMemory(executor, 0, &OHNNMemory2); + ASSERT_EQ(nullptr, OHNNMemory2); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_CreateInputMemory_0600 + * @tc.name : 多线程创建不同index输入的共享内存 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_CreateInputMemory_0600, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + std::thread th1(CheckCreateInputMemory, executor, 0, graphArgs.operands[0].length); + std::thread th2(CheckCreateInputMemory, executor, 1, graphArgs.operands[1].length); + th1.join(); + th2.join(); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_CreateOutputMemory_0100 + * @tc.name : 创建输出共享内存,executor为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_CreateOutputMemory_0100, Function | MediumTest | Level3) +{ + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateOutputMemory(nullptr, 0, 4); + ASSERT_EQ(nullptr, OHNNMemory); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_CreateOutputMemory_0200 + * @tc.name : 创建输出共享内存,inputIndex不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_CreateOutputMemory_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, 
BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateOutputMemory(executor, 2, graphArgs.operands[0].length); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_CreateOutputMemory_0300 + * @tc.name : 创建输出共享内存,length为0 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_CreateOutputMemory_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateOutputMemory(executor, 0, 0); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_CreateOutputMemory_0400 + * @tc.name :创建输出共享内存,length为最大限制2G + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_CreateOutputMemory_0400, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateOutputMemory(executor, 0, 1024 * 1024 * 1024 + 1); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_CreateOutputMemory_0500 + * @tc.name : 创建输出共享内存,outputIndex重复创建 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_CreateOutputMemory_0500, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateOutputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + OH_NN_Memory *OHNNMemory2 = OH_NNExecutor_AllocateOutputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory2); + OH_NNExecutor_DestroyOutputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + OH_NNExecutor_DestroyOutputMemory(executor, 0, &OHNNMemory2); + ASSERT_EQ(nullptr, OHNNMemory2); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_CreateOutputMemory_0600 + * @tc.name 
: 多线程创建不同index输出的共享内存 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_CreateOutputMemory_0600, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + TopKModel topKModel; + graphArgs = topKModel.graphArgs; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OHNNCompileParam compileParam; + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + std::thread th1(CheckCreateOutputMemory, executor, 0, graphArgs.operands[3].length); + std::thread th2(CheckCreateOutputMemory, executor, 1, graphArgs.operands[4].length); + th1.join(); + th2.join(); + Free(model, compilation, executor); +} +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_DestroyInputMemory_0100 + * @tc.name : 销毁输入共享内存,executor为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_DestroyInputMemory_0100, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateInputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + OH_NNExecutor_DestroyInputMemory(nullptr, 0, &OHNNMemory); + ASSERT_NE(nullptr, OHNNMemory); + OH_NNExecutor_DestroyInputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_DestroyInputMemory_0200 + * @tc.name : 销毁输入共享内存,inputIndex不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_DestroyInputMemory_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateInputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + OH_NNExecutor_DestroyInputMemory(executor, 1, &OHNNMemory); + ASSERT_NE(nullptr, OHNNMemory); + OH_NNExecutor_DestroyInputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_DestroyInputMemory_0300 + * @tc.name : 销毁输出共享内存,*memory为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_DestroyInputMemory_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = 
OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = nullptr; + ASSERT_NO_THROW(OH_NNExecutor_DestroyInputMemory(executor, 0, &OHNNMemory)); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_DestroyInputMemory_0400 + * @tc.name : 销毁输出共享内存,inputIndex不同memory重复销毁 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_DestroyInputMemory_0400, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateInputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + OH_NN_Memory *OHNNMemory2 = OH_NNExecutor_AllocateInputMemory(executor, 1, graphArgs.operands[1].length); + ASSERT_NE(nullptr, OHNNMemory2); + OH_NNExecutor_DestroyInputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + OH_NNExecutor_DestroyInputMemory(executor, 1, &OHNNMemory2); + ASSERT_EQ(nullptr, OHNNMemory2); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_DestroyInputMemory_0500 + * @tc.name : 多线销毁不同index输入的共享内存 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_DestroyInputMemory_0500, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateInputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + OH_NN_Memory *OHNNMemory2 = OH_NNExecutor_AllocateInputMemory(executor, 1, graphArgs.operands[1].length); + ASSERT_NE(nullptr, OHNNMemory2); + std::thread th1(OH_NNExecutor_DestroyInputMemory, executor, 0, &OHNNMemory); + std::thread th2(OH_NNExecutor_DestroyInputMemory, executor, 1, &OHNNMemory2); + th1.join(); + th2.join(); + ASSERT_EQ(nullptr, OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory2); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_DestroyOutputMemory_0100 + * @tc.name : 销毁输出共享内存,executor为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_DestroyOutputMemory_0100, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = 
OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateOutputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + OH_NNExecutor_DestroyOutputMemory(nullptr, 0, &OHNNMemory); + ASSERT_NE(nullptr, OHNNMemory); + OH_NNExecutor_DestroyOutputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_DestroyOutputMemory_0200 + * @tc.name : 销毁输出共享内存,inputIndex不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_DestroyOutputMemory_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateOutputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + OH_NNExecutor_DestroyOutputMemory(executor, 1, &OHNNMemory); + ASSERT_NE(nullptr, OHNNMemory); + OH_NNExecutor_DestroyOutputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_DestroyOutputMemory_0300 + * @tc.name : 销毁输出共享内存,*memory为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_DestroyOutputMemory_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + ASSERT_NO_THROW(OH_NNExecutor_DestroyOutputMemory(executor, 0, nullptr)); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_DestroyOutputMemory_0400 + * @tc.name : 销毁输出共享内存,inputIndex不同memory重复销毁 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_DestroyOutputMemory_0400, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateOutputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + OH_NN_Memory *OHNNMemory2 = OH_NNExecutor_AllocateOutputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory2); + OH_NNExecutor_DestroyOutputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + OH_NNExecutor_DestroyOutputMemory(executor, 0, &OHNNMemory2); + ASSERT_EQ(nullptr, OHNNMemory2); + 
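+ // Both blocks allocated for output index 0 have now been destroyed and their handles reset to nullptr.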
Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_DestroyOutputMemory_0500 + * @tc.name : 多线销毁不同index输出的共享内存 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_DestroyOutputMemory_0500, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + TopKModel topKModel; + graphArgs = topKModel.graphArgs; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateOutputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + OH_NN_Memory *OHNNMemory2 = OH_NNExecutor_AllocateOutputMemory(executor, 1, graphArgs.operands[1].length); + ASSERT_NE(nullptr, OHNNMemory2); + std::thread th1(OH_NNExecutor_DestroyOutputMemory, executor, 0, &OHNNMemory); + std::thread th2(OH_NNExecutor_DestroyOutputMemory, executor, 1, &OHNNMemory2); + th1.join(); + th2.join(); + ASSERT_EQ(nullptr, OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory2); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_SetInputFromMemory_0100 + * @tc.name : 设置输入共享内存,executor为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_SetInputFromMemory_0100, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateInputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + const OHNNOperandTest &operandTem = graphArgs.operands[0]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetInputWithMemory(nullptr, 0, &operand, OHNNMemory)); + OH_NNExecutor_DestroyInputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_SetInputFromMemory_0200 + * @tc.name : 设置输入共享内存,inputIndex不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_SetInputFromMemory_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateInputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, 
OHNNMemory); + const OHNNOperandTest &operandTem = graphArgs.operands[0]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetInputWithMemory(executor, 2, &operand, OHNNMemory)); + OH_NNExecutor_DestroyInputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_SetInputFromMemory_0300 + * @tc.name : 设置输入共享内存,operand为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_SetInputFromMemory_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateInputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetInputWithMemory(executor, 0, nullptr, OHNNMemory)); + OH_NNExecutor_DestroyInputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_SetInputFromMemory_0400 + * @tc.name : 设置输入共享内存,operand与输入不匹配 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_SetInputFromMemory_0400, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory1 = OH_NNExecutor_AllocateInputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory1); + const OHNNOperandTest &operandTem = graphArgs.operands[2]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetInputWithMemory(executor, 0, &operand, OHNNMemory1)); + OH_NNExecutor_DestroyInputMemory(executor, 0, &OHNNMemory1); + ASSERT_EQ(nullptr, OHNNMemory1); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_SetInputFromMemory_0500 + * @tc.name : 设置输入共享内存,memory为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_SetInputFromMemory_0500, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor 
= OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateInputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + const OHNNOperandTest &operandTem = graphArgs.operands[0]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetInputWithMemory(executor, 0, &operand, nullptr)); + OH_NNExecutor_DestroyInputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_SetInputFromMemory_0600 + * @tc.name : 设置输入共享内存,重复设置相同inputIndex + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_SetInputFromMemory_0600, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateInputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + const OHNNOperandTest &operandTem = graphArgs.operands[0]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_SetInputWithMemory(executor, 0, &operand, OHNNMemory)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_SetInputWithMemory(executor, 0, &operand, OHNNMemory)); + OH_NNExecutor_DestroyInputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_SetOutputFromMemory_0100 + * @tc.name : 设置输出共享内存,executor为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_SetOutputFromMemory_0100, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateOutputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetOutputWithMemory(nullptr, 0, OHNNMemory)); + OH_NNExecutor_DestroyOutputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_SetOutputFromMemory_0200 + * @tc.name : 设置输出共享内存,outputIndex不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_SetOutputFromMemory_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + 
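+ // This case allocates shared memory for output 0, then tries to bind it to output index 1, which the single-output add graph does not have, expecting OH_NN_INVALID_PARAMETER.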
ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateOutputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetOutputWithMemory(executor, 1, OHNNMemory)); + OH_NNExecutor_DestroyOutputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_SetOutputFromMemory_0300 + * @tc.name : 设置输出共享内存,memory为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_SetOutputFromMemory_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateOutputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetOutputWithMemory(executor, 0, nullptr)); + OH_NNExecutor_DestroyOutputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_SetOutputFromMemory_0400 + * @tc.name : 设置输出共享内存,重复设置相同outputIndex + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_SetOutputFromMemory_0400, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + OH_NN_Memory *OHNNMemory = OH_NNExecutor_AllocateOutputMemory(executor, 0, graphArgs.operands[0].length); + ASSERT_NE(nullptr, OHNNMemory); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_SetOutputWithMemory(executor, 0, OHNNMemory)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_SetOutputWithMemory(executor, 0, OHNNMemory)); + OH_NNExecutor_DestroyOutputMemory(executor, 0, &OHNNMemory); + ASSERT_EQ(nullptr, OHNNMemory); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_Run_0100 + * @tc.name : 共享内存模型推理,executor设置输入个数不足 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_Run_0100, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, 
compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + uint32_t outputIndex = 0; + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + if (i == 0) { + OH_NN_Memory *inputMemory = OH_NNExecutor_AllocateInputMemory(executor, inputIndex, operandTem.length); + ASSERT_NE(nullptr, inputMemory); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_SetInputWithMemory(executor, inputIndex, &operand, inputMemory)); + + ASSERT_EQ(EOK, memcpy_s(inputMemory->data, operandTem.length, static_cast(operandTem.data), operandTem.length)); + + OH_NNExecutor_DestroyInputMemory(executor, inputIndex, &inputMemory); + ASSERT_EQ(nullptr, inputMemory); + } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) != + graphArgs.outputIndices.end()) { + OH_NN_Memory *outputMemory = OH_NNExecutor_AllocateOutputMemory(executor, outputIndex, operandTem.length); + ASSERT_NE(nullptr, outputMemory); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_SetOutputWithMemory(executor, outputIndex, outputMemory)); + OH_NNExecutor_DestroyOutputMemory(executor, outputIndex, &outputMemory); + ASSERT_EQ(nullptr, outputMemory); + outputIndex += 1; + } + } + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_Run(executor)); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_Run_0200 + * @tc.name : 共享内存模型推理,executor设置输出个数不足 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_Run_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + uint32_t inputIndex = 0; + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + auto quantParam = operandTem.quantParam; + OH_NN_Tensor operand = {operandTem.dataType, (uint32_t)operandTem.shape.size(), operandTem.shape.data(), + quantParam, operandTem.type}; + if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) != + graphArgs.inputIndices.end()) { + OH_NN_Memory *inputMemory = OH_NNExecutor_AllocateInputMemory(executor, inputIndex, operandTem.length); + ASSERT_NE(nullptr, inputMemory); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_SetInputWithMemory(executor, inputIndex, &operand, inputMemory)); + ASSERT_EQ(EOK, memcpy_s(inputMemory->data, operandTem.length, static_cast(operandTem.data), operandTem.length)); + OH_NNExecutor_DestroyInputMemory(executor, inputIndex, &inputMemory); + ASSERT_EQ(nullptr, inputMemory); + } + } + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_Run(executor)); + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_Run_0300 + * @tc.name : 共享内存,定长模型推理测试 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_Run_0300, Function | MediumTest | Level1) +{ + OH_NNModel 
*model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + size_t ioSize = graphArgs.inputIndices.size() + graphArgs.outputIndices.size(); + OH_NN_Memory *OHNNMemory[ioSize]; + ASSERT_EQ(OH_NN_SUCCESS, ExecutorWithMemory(executor, graphArgs, OHNNMemory, addModel.expectValue)); + for (size_t i = 0; i < graphArgs.inputIndices.size(); i++) { + OH_NNExecutor_DestroyInputMemory(executor, i, &OHNNMemory[i]); + ASSERT_EQ(OHNNMemory[i], nullptr); + } + for (size_t j = 0; j < graphArgs.outputIndices.size(); j++) { + auto outputIndex = graphArgs.inputIndices.size() + j; + // check memory output + EXPECT_TRUE(CheckOutput(static_cast(const_cast(OHNNMemory[outputIndex]->data)), + static_cast(addModel.expectValue))); + OH_NNExecutor_DestroyOutputMemory(executor, j, &OHNNMemory[outputIndex]); + ASSERT_EQ(OHNNMemory[outputIndex], nullptr); + } + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Executor_Memory_Run_0400 + * @tc.name : 共享内存,变长模型推理测试 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_Run_0400, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + AvgPoolDynamicModel avgModel; + graphArgs = avgModel.graphArgs; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + ASSERT_NE(nullptr, compilation); + + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + ASSERT_NE(nullptr, executor); + avgModel.dynamicInput.shape = {1, 3, 3, 1}; + avgModel.output.shape = {1, 2, 2, 1}; + graphArgs.operands = {avgModel.dynamicInput, avgModel.kernel, avgModel.strides, + avgModel.padMode, avgModel.activation, avgModel.output}; + size_t ioSize = graphArgs.inputIndices.size() + graphArgs.outputIndices.size(); + OH_NN_Memory *OHNNMemory[ioSize]; + + ASSERT_EQ(OH_NN_SUCCESS, ExecutorWithMemory(executor, graphArgs, OHNNMemory, avgModel.expectValue)); + + for (size_t i = 0; i < graphArgs.inputIndices.size(); i++) { + OH_NNExecutor_DestroyInputMemory(executor, i, &OHNNMemory[i]); + ASSERT_EQ(OHNNMemory[i], nullptr); + } + for (size_t j = 0; j < graphArgs.outputIndices.size(); j++) { + auto outputIndex = graphArgs.inputIndices.size() + j; + // check memory output + EXPECT_TRUE(CheckOutput(static_cast(const_cast(OHNNMemory[outputIndex]->data)), + static_cast(avgModel.expectValue))); + OH_NNExecutor_DestroyOutputMemory(executor, j, &OHNNMemory[outputIndex]); + ASSERT_EQ(OHNNMemory[outputIndex], nullptr); + } + Free(model, compilation, executor); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/src/ModelTest.cpp b/test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/src/ModelTest.cpp new file mode 100644 index 0000000..a792df2 --- /dev/null +++ b/test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/src/ModelTest.cpp @@ -0,0 +1,1024 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include +#include + +#include "nnrt_utils.h" +#include "model.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +using namespace OHOS::HDI::Nnrt::V2_0; + +namespace { + +class ModelTest : public testing::Test { +protected: + AddModel addModel; + OHNNGraphArgs graphArgs = addModel.graphArgs; + OHNNCompileParam compileParam; +}; + +void BuildAddTopKGraph(OH_NNModel *model) +{ + AddTopKModel addTopKModel; + OHNNGraphArgsMulti graphArgsMulti = addTopKModel.graphArgs; + ASSERT_EQ(OH_NN_SUCCESS, BuildMultiOpGraph(model, graphArgsMulti)); +} + +void BuildModel(OH_NNModel *model, const OHNNGraphArgs &graphArgs) +{ + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); +} + +} // namespace + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_CreateModel_0100 + * @tc.name : 创建模型实例,指针校验 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_CreateModel_0100, Function | MediumTest | Level0) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_CreateModel_0200 + * @tc.name : 创建多个模型实例,指针校验 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_CreateModel_0200, Function | MediumTest | Level2) +{ + OH_NNModel *model_first = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model_first); + + OH_NNModel *model_second = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model_second); + + OH_NNModel *model_third = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model_third); + + ASSERT_NE(model_first, model_second); + ASSERT_NE(model_first, model_third); + ASSERT_NE(model_second, model_third); + Free(model_first); + Free(model_second); + Free(model_third); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperand_0100 + * @tc.name : 添加操作数值,model为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperand_0100, Function | MediumTest | Level3) +{ + int32_t dimensions[3]{3, 2, 2}; + OH_NN_Tensor operand{OH_NN_FLOAT32, 3, dimensions, nullptr, OH_NN_TENSOR}; + OH_NN_ReturnCode ret = OH_NNModel_AddTensor(nullptr, &operand); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperand_0200 + * @tc.name : 添加操作数,operand为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperand_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + + OH_NN_ReturnCode ret = OH_NNModel_AddTensor(model, nullptr); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperand_0300 + * @tc.name : 添加操作数,operand中dataType为100000 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperand_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = 
OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + + int32_t dimensions[3]{3, 2, 2}; + + OH_NN_Tensor operand{static_cast(100000), 3, dimensions, nullptr, OH_NN_TENSOR}; + OH_NN_ReturnCode ret = OH_NNModel_AddTensor(model, &operand); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperand_0400 + * @tc.name : 添加操作数,operand中type为100000 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperand_0400, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + + int32_t dimensions[3]{3, 2, 2}; + + OH_NN_Tensor operand{OH_NN_FLOAT32, 3, dimensions, nullptr, static_cast(100000)}; + OH_NN_ReturnCode ret = OH_NNModel_AddTensor(model, &operand); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SetOperandValue_0100 + * @tc.name : 设置操作数值,model为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SetOperandValue_0100, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + + int8_t activationValue{0}; + int32_t dimensions[3]{3, 2, 2}; + OH_NN_Tensor operand{OH_NN_FLOAT32, 3, dimensions, nullptr, OH_NN_TENSOR}; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensor(model, &operand)); + + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNModel_SetTensorData(nullptr, 1, (void *)&activationValue, sizeof(int8_t))); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SetOperandValue_0200 + * @tc.name : 设置操作数值,操作数不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SetOperandValue_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + + int8_t activationValue{0}; + int32_t dimensions[3]{3, 2, 2}; + OH_NN_Tensor operand{OH_NN_FLOAT32, 3, dimensions, nullptr, OH_NN_TENSOR}; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensor(model, &operand)); + + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNModel_SetTensorData(model, 1000, (void *)&activationValue, sizeof(int8_t))); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SetOperandValue_0300 + * @tc.name : 设置操作数值,buffer为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SetOperandValue_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + + int32_t dimensions[3]{3, 2, 2}; + OH_NN_Tensor operand{OH_NN_FLOAT32, 3, dimensions, nullptr, OH_NN_TENSOR}; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensor(model, &operand)); + + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1, nullptr, sizeof(int8_t))); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SetOperandValue_0400 + * @tc.name : 设置操作数值,length为0 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SetOperandValue_0400, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + + int8_t activationValue{0}; + int32_t dimensions[3]{3, 2, 2}; + OH_NN_Tensor operand{OH_NN_FLOAT32, 3, dimensions, nullptr, OH_NN_TENSOR}; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensor(model, &operand)); + + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1, (void *)&activationValue, 0)); + Free(model); +} + +/** + * @tc.number : 
SUB_AI_NNRt_Func_North_Model_AddOperation_0100 + * @tc.name : 添加算子,model为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperation_0100, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + graphArgs.addOperation = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NN_UInt32Array paramIndices{const_cast(graphArgs.paramIndices.data()), + graphArgs.paramIndices.size()}; + OH_NN_UInt32Array inputIndices{const_cast(graphArgs.inputIndices.data()), + graphArgs.inputIndices.size()}; + OH_NN_UInt32Array outputIndices{const_cast(graphArgs.outputIndices.data()), + graphArgs.outputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNModel_AddOperation(nullptr, graphArgs.operationType, ¶mIndices, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperation_0200 + * @tc.name : 添加算子,paramIndices为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperation_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + graphArgs.addOperation = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NN_UInt32Array inputIndices{const_cast(graphArgs.inputIndices.data()), + graphArgs.inputIndices.size()}; + OH_NN_UInt32Array outputIndices{const_cast(graphArgs.outputIndices.data()), + graphArgs.outputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNModel_AddOperation(model, graphArgs.operationType, nullptr, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperation_0300 + * @tc.name : 添加算子,paramIndices中data为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperation_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + graphArgs.addOperation = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NN_UInt32Array paramIndices{nullptr, graphArgs.paramIndices.size()}; + OH_NN_UInt32Array inputIndices{const_cast(graphArgs.inputIndices.data()), + graphArgs.inputIndices.size()}; + OH_NN_UInt32Array outputIndices{const_cast(graphArgs.outputIndices.data()), + graphArgs.outputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNModel_AddOperation(model, graphArgs.operationType, ¶mIndices, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperation_0400 + * @tc.name : 添加算子,paramIndices中data对应序号不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperation_0400, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + graphArgs.addOperation = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + uint32_t paramIndicesValue{10}; + OH_NN_UInt32Array paramIndices{¶mIndicesValue, graphArgs.paramIndices.size()}; + OH_NN_UInt32Array inputIndices{const_cast(graphArgs.inputIndices.data()), + graphArgs.inputIndices.size()}; + OH_NN_UInt32Array 
outputIndices{const_cast<uint32_t*>(graphArgs.outputIndices.data()), + graphArgs.outputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNModel_AddOperation(model, graphArgs.operationType, &paramIndices, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperation_0500 + * @tc.name : 添加算子,paramIndices中size为0 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperation_0500, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + graphArgs.addOperation = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NN_UInt32Array paramIndices{const_cast<uint32_t*>(graphArgs.paramIndices.data()), 0}; + OH_NN_UInt32Array inputIndices{const_cast<uint32_t*>(graphArgs.inputIndices.data()), + graphArgs.inputIndices.size()}; + OH_NN_UInt32Array outputIndices{const_cast<uint32_t*>(graphArgs.outputIndices.data()), + graphArgs.outputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNModel_AddOperation(model, graphArgs.operationType, &paramIndices, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperation_0600 + * @tc.name : 添加算子,inputIndices为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperation_0600, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + graphArgs.addOperation = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NN_UInt32Array paramIndices{const_cast<uint32_t*>(graphArgs.paramIndices.data()), + graphArgs.paramIndices.size()}; + OH_NN_UInt32Array outputIndices{const_cast<uint32_t*>(graphArgs.outputIndices.data()), + graphArgs.outputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNModel_AddOperation(model, graphArgs.operationType, &paramIndices, nullptr, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperation_0700 + * @tc.name : 添加算子,inputIndices中data为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperation_0700, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + graphArgs.addOperation = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NN_UInt32Array paramIndices{const_cast<uint32_t*>(graphArgs.paramIndices.data()), + graphArgs.paramIndices.size()}; + OH_NN_UInt32Array inputIndices{nullptr, graphArgs.inputIndices.size()}; + OH_NN_UInt32Array outputIndices{const_cast<uint32_t*>(graphArgs.outputIndices.data()), + graphArgs.outputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNModel_AddOperation(model, graphArgs.operationType, &paramIndices, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperation_0800 + * @tc.name : 添加算子,inputIndices中data对应序号不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperation_0800, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + graphArgs.addOperation = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NN_UInt32Array 
paramIndices{const_cast<uint32_t*>(graphArgs.paramIndices.data()), + graphArgs.paramIndices.size()}; + uint32_t inputIndicesValue{10}; + OH_NN_UInt32Array inputIndices{&inputIndicesValue, graphArgs.inputIndices.size()}; + OH_NN_UInt32Array outputIndices{const_cast<uint32_t*>(graphArgs.outputIndices.data()), + graphArgs.outputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNModel_AddOperation(model, graphArgs.operationType, &paramIndices, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperation_0900 + * @tc.name : 添加算子,inputIndices中size为0 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperation_0900, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + graphArgs.addOperation = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NN_UInt32Array paramIndices{const_cast<uint32_t*>(graphArgs.paramIndices.data()), + graphArgs.paramIndices.size()}; + OH_NN_UInt32Array inputIndices{const_cast<uint32_t*>(graphArgs.inputIndices.data()), 0}; + OH_NN_UInt32Array outputIndices{const_cast<uint32_t*>(graphArgs.outputIndices.data()), + graphArgs.outputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNModel_AddOperation(model, graphArgs.operationType, &paramIndices, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperation_1000 + * @tc.name : 添加算子,outputIndices为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperation_1000, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + graphArgs.addOperation = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NN_UInt32Array paramIndices{const_cast<uint32_t*>(graphArgs.paramIndices.data()), + graphArgs.paramIndices.size()}; + OH_NN_UInt32Array inputIndices{const_cast<uint32_t*>(graphArgs.inputIndices.data()), 0}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNModel_AddOperation(model, graphArgs.operationType, &paramIndices, &inputIndices, nullptr)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperation_1100 + * @tc.name : 添加算子,outputIndices中data为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperation_1100, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + graphArgs.addOperation = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NN_UInt32Array paramIndices{const_cast<uint32_t*>(graphArgs.paramIndices.data()), + graphArgs.paramIndices.size()}; + OH_NN_UInt32Array inputIndices{const_cast<uint32_t*>(graphArgs.inputIndices.data()), + graphArgs.inputIndices.size()}; + OH_NN_UInt32Array outputIndices{nullptr, graphArgs.outputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNModel_AddOperation(model, graphArgs.operationType, &paramIndices, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperation_1200 + * @tc.name : 添加算子,outputIndices中data对应序号不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperation_1200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + 
graphArgs.specifyIO = false; + graphArgs.build = false; + graphArgs.addOperation = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NN_UInt32Array paramIndices{const_cast<uint32_t*>(graphArgs.paramIndices.data()), + graphArgs.paramIndices.size()}; + OH_NN_UInt32Array inputIndices{const_cast<uint32_t*>(graphArgs.inputIndices.data()), + graphArgs.inputIndices.size()}; + uint32_t outputIndicesValue{10}; + OH_NN_UInt32Array outputIndices{&outputIndicesValue, graphArgs.outputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNModel_AddOperation(model, graphArgs.operationType, &paramIndices, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddOperation_1300 + * @tc.name : 添加算子,outputIndices中size为0 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddOperation_1300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + graphArgs.addOperation = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NN_UInt32Array paramIndices{const_cast<uint32_t*>(graphArgs.paramIndices.data()), + graphArgs.paramIndices.size()}; + OH_NN_UInt32Array inputIndices{const_cast<uint32_t*>(graphArgs.inputIndices.data()), + graphArgs.inputIndices.size()}; + OH_NN_UInt32Array outputIndices{const_cast<uint32_t*>(graphArgs.outputIndices.data()), 0}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, + OH_NNModel_AddOperation(model, graphArgs.operationType, &paramIndices, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0100 + * @tc.name : 设置输入输出,model为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0100, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NN_UInt32Array inputIndices{const_cast<uint32_t*>(graphArgs.inputIndices.data()), + graphArgs.inputIndices.size()}; + OH_NN_UInt32Array outputIndices{const_cast<uint32_t*>(graphArgs.outputIndices.data()), + graphArgs.outputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(nullptr, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0200 + * @tc.name : 设置输入输出,inputIndices为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NN_UInt32Array outputIndices{const_cast<uint32_t*>(graphArgs.outputIndices.data()), + graphArgs.outputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0300 + * @tc.name : 设置输入输出,inputIndices中data为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + 
graphArgs.build = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NN_UInt32Array inputIndices{nullptr, 2}; + OH_NN_UInt32Array outputIndices{const_cast<uint32_t*>(graphArgs.outputIndices.data()), + graphArgs.outputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0400 + * @tc.name : 设置输入输出,inputIndices中data对应序号不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0400, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + uint32_t modelInputIndicesValue{5}; + OH_NN_UInt32Array inputIndices{&modelInputIndicesValue, 1}; + + OH_NN_UInt32Array outputIndices{const_cast<uint32_t*>(graphArgs.outputIndices.data()), + graphArgs.outputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0500 + * @tc.name : 设置输入输出,inputIndices中size为0 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0500, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NN_UInt32Array inputIndices{const_cast<uint32_t*>(graphArgs.inputIndices.data()), 0}; + OH_NN_UInt32Array outputIndices{const_cast<uint32_t*>(graphArgs.outputIndices.data()), + graphArgs.outputIndices.size()}; + + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0600 + * @tc.name : 设置输入输出,outputIndices为空指针 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0600, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NN_UInt32Array inputIndices{const_cast<uint32_t*>(graphArgs.inputIndices.data()), + graphArgs.inputIndices.size()}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0700 + * @tc.name : 设置输入输出,outputIndices中data为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0700, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NN_UInt32Array inputIndices{const_cast<uint32_t*>(graphArgs.inputIndices.data()), + graphArgs.inputIndices.size()}; + OH_NN_UInt32Array outputIndices{nullptr, 1}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : 
SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0800 + * @tc.name : 设置输入输出,outputIndices中data对应序号不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0800, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NN_UInt32Array inputIndices{const_cast<uint32_t*>(graphArgs.inputIndices.data()), + graphArgs.inputIndices.size()}; + uint32_t modelOutputIndicesValue{5}; + OH_NN_UInt32Array outputIndices{&modelOutputIndicesValue, 1}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0900 + * @tc.name : 设置输入输出,outputIndices中size为0 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SpecifyInputsAndOutputs_0900, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + graphArgs.build = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NN_UInt32Array inputIndices{const_cast<uint32_t*>(graphArgs.inputIndices.data()), + graphArgs.inputIndices.size()}; + OH_NN_UInt32Array outputIndices{const_cast<uint32_t*>(graphArgs.outputIndices.data()), 0}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_Finish_0100 + * @tc.name : 模型构图,model为空指针 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_Finish_0100, Function | MediumTest | Level3) +{ + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_Finish(nullptr)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_Finish_0200 + * @tc.name : 模型构图,未添加操作数 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_Finish_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_OPERATION_FORBIDDEN, OH_NNModel_Finish(model)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_Finish_0300 + * @tc.name : 模型构图,未设置输入输出 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_Finish_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.specifyIO = false; + ASSERT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_Finish_0400 + * @tc.name : 模型构图,设置输入输出,构图成功 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_Finish_0400, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_Destroy_0100 + * @tc.name : 释放模型,model为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_Destroy_0100, Function | MediumTest | Level3) +{ + OH_NNModel *model = nullptr; + ASSERT_NO_THROW(OH_NNModel_Destroy(&model)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_Destroy_0200 + * @tc.name : 释放模型,model未构图 + * @tc.desc 
: [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_Destroy_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.build = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + OH_NNModel_Destroy(&model); + ASSERT_EQ(nullptr, model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0100 + * @tc.name : 查询算子支持,model为空指针 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0100, Function | MediumTest | Level3) +{ + const size_t *devicesID{nullptr}; + const bool *isSupported{nullptr}; + uint32_t opCount{0}; + uint32_t devicesCount{0}; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount)); + size_t targetDevice = devicesID[0]; + OH_NN_ReturnCode ret = OH_NNModel_GetAvailableOperations(nullptr, targetDevice, &isSupported, &opCount); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0200 + * @tc.name : 查询算子支持,deviceID不存在 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0200, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + size_t targetDevice{100000}; + const bool *isSupported{nullptr}; + uint32_t opCount{0}; + ASSERT_EQ(OH_NN_FAILED, OH_NNModel_GetAvailableOperations(model, targetDevice, &isSupported, &opCount)); + + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0300 + * @tc.name : 查询算子支持,*isSupported为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0300, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + const size_t *devicesID{nullptr}; + uint32_t opCount{0}; + uint32_t devicesCount{0}; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount)); + size_t targetDevice = devicesID[0]; + + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_GetAvailableOperations(model, targetDevice, nullptr, &opCount)); + + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0400 + * @tc.name : 查询算子支持,**isSupported非nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0400, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + const size_t *devicesID{nullptr}; + const bool isSupported = true; + const bool *realSupported = &isSupported; + uint32_t opCount{0}; + uint32_t devicesCount{0}; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount)); + size_t targetDevice = devicesID[0]; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_GetAvailableOperations(model, targetDevice, + &realSupported, &opCount)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0500 + * @tc.name : 查询算子支持,*opCount为nullptr + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0500, Function | MediumTest | Level3) +{ + OH_NNModel *model = 
OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + const size_t *devicesID{nullptr}; + const bool *isSupported{nullptr}; + uint32_t devicesCount{0}; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount)); + size_t targetDevice = devicesID[0]; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_GetAvailableOperations(model, targetDevice, + &isSupported, nullptr)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0600 + * @tc.name : 查询算子支持,model未完成构图 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0600, Function | MediumTest | Level3) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + graphArgs.build = false; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + const size_t *devicesID{nullptr}; + const bool *isSupported{nullptr}; + uint32_t opCount{0}; + uint32_t devicesCount{0}; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount)); + size_t targetDevice = devicesID[0]; + ASSERT_EQ(OH_NN_OPERATION_FORBIDDEN, OH_NNModel_GetAvailableOperations(model, targetDevice, + &isSupported, &opCount)); + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0700 + * @tc.name : 查询算子支持,算子均支持 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0700, Function | MediumTest | Level1) +{ + OHOS::sptr device = V2_0::MockIDevice::GetInstance(); + std::vector isSupported{true, true}; + device->SetOperationsSupported(isSupported); + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + BuildAddTopKGraph(model); + + const size_t *devicesID{nullptr}; + const bool *realSupported{nullptr}; + uint32_t opCount; + uint32_t devicesCount; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount)); + + uint32_t count = 1; + EXPECT_LE(count, devicesCount); + const char *name = nullptr; + std::string m_deviceName{"Device-CPU_TestVendor_v2_0"}; + OH_NN_ReturnCode ret = OH_NN_FAILED; + bool isHaveDevice = false; + uint32_t deviceId = 0; + for (uint32_t i = 0; i < devicesCount; i++) { + name = nullptr; + ret = OH_NNDevice_GetName(devicesID[i], &name); + EXPECT_EQ(OH_NN_SUCCESS, ret); + std::string sName(name); + if (m_deviceName == sName) { + isHaveDevice = true; + deviceId = i; + } + } + ASSERT_EQ(isHaveDevice, true); + size_t targetDevice = devicesID[deviceId]; + + ret = OH_NNModel_GetAvailableOperations(model, targetDevice, &realSupported, &opCount); + ASSERT_EQ(OH_NN_SUCCESS, ret); + for (uint32_t i = 0; i < opCount; i++) { + EXPECT_EQ(realSupported[i], isSupported[i]); + } + Free(model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0800 + * @tc.name : 查询算子支持,算子部分支持 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_GetSupportedOperation_0800, Function | MediumTest | Level2) +{ + OHOS::sptr device = V2_0::MockIDevice::GetInstance(); + std::vector isSupported{true, false}; + device->SetOperationsSupported(isSupported); + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + BuildAddTopKGraph(model); + + const size_t *devicesID{nullptr}; + const bool *realSupported{nullptr}; + uint32_t opCount; + uint32_t devicesCount; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount)); + size_t targetDevice 
= devicesID[0]; + + OH_NN_ReturnCode ret = OH_NNModel_GetAvailableOperations(model, targetDevice, &realSupported, &opCount); + ASSERT_EQ(OH_NN_SUCCESS, ret); + for (uint32_t i = 0; i < opCount; i++) { + EXPECT_EQ(realSupported[i], isSupported[i]); + } + Free(model); + device->SetOperationsSupported({true}); +} + +/** + * @tc.number : SUB_AI_NNR_Func_North_Model_Combine_0100 + * @tc.name : 不同model,多线程并发在线构图,构图成功 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNR_Func_North_Model_Combine_0100, Function | MediumTest | Level2) +{ + OH_NNModel *model1 = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model1); + OH_NNModel *model2 = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model2); + std::thread th1(BuildModel, model1, graphArgs); + std::thread th2(BuildModel, model2, graphArgs); + th1.join(); + th2.join(); + Free(model1); + Free(model2); +} + +/** + * @tc.number : SUB_AI_NNR_Func_North_Model_Combine_0200 + * @tc.name : 多模型构图,模型构图过程中释放其他模型 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(ModelTest, SUB_AI_NNR_Func_North_Model_Combine_0200, Function | MediumTest | Level1) +{ + OH_NNModel *model1 = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model1); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model1, graphArgs)); + + OH_NNModel *model2 = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model2); + + std::thread th1(BuildModel, model2, graphArgs); + std::thread th2(OH_NNModel_Destroy, &model1); + th1.join(); + th2.join(); + ASSERT_EQ(nullptr, model1); + Free(model2); +} diff --git a/test/nnrt_xts_acts/neural_network_runtime/v2_0/stability/BUILD.gn b/test/nnrt_xts_acts/neural_network_runtime/v2_0/stability/BUILD.gn new file mode 100644 index 0000000..b3545d4 --- /dev/null +++ b/test/nnrt_xts_acts/neural_network_runtime/v2_0/stability/BUILD.gn @@ -0,0 +1,49 @@ +# Copyright (c) 2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
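+# Builds the v2_0 stability (long-run stress) test binary: it compiles the
+# shared mock device and utility sources from ../common together with
+# src/MultiThreadTest.cpp and links the neural_network_runtime libraries
+# listed in external_deps below.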
+ +import("//build/test.gni") + +config("nnrt_config") { + include_dirs = [ "../common" ] + cflags = [ "-Wno-error" ] + cflags_cc = [ "-fexceptions" ] +} + +ohos_systemtest("ActsAiNnrtStabilityV2_0Test") { + module_out_path = "neural_network_runtime/neural_network_runtime" + sources = [ + "../common/mock_idevice.cpp", + "../common/nnrt_utils.cpp", + "src/MultiThreadTest.cpp", + ] + + configs = [ ":nnrt_config" ] + + external_deps = [ + "c_utils:utils", + "drivers_interface_nnrt:libnnrt_proxy_2.0", + "googletest:gmock_main", + "googletest:gtest_main", + "hdf_core:libhdf_utils", + "hdf_core:libhdi", + "hilog:libhilog", + "hitrace:libhitracechain", + "ipc:ipc_single", + "mindspore:mindir_lib", + "neural_network_runtime:libneural_network_core", + "neural_network_runtime:libneural_network_runtime", + ] + + subsystem_name = "ai" + part_name = "neural_network_runtime" +} diff --git a/test/nnrt_xts_acts/neural_network_runtime/v2_0/stability/src/MultiThreadTest.cpp b/test/nnrt_xts_acts/neural_network_runtime/v2_0/stability/src/MultiThreadTest.cpp new file mode 100644 index 0000000..9f3b9b0 --- /dev/null +++ b/test/nnrt_xts_acts/neural_network_runtime/v2_0/stability/src/MultiThreadTest.cpp @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include +#include + +#include "neural_network_runtime/neural_network_runtime.h" +#include "nnrt_utils.h" +#include "model.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime; +using namespace OHOS::NeuralNetworkRuntime::Test; +using namespace OHOS::HDI::Nnrt::V2_0; + +class MultiThreadTest : public testing::Test { +public: + void SetUp() + { + } + void TearDown() + { + } + +protected: + OHNNCompileParam compileParam; + AddModel addModel; + OHNNGraphArgs graphArgs = addModel.graphArgs; +}; + + +/** + * @tc.number : SUB_AI_NNR_Reliability_North_Stress_0100 + * @tc.name : 模型编译并发长稳测试 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MultiThreadTest, SUB_AI_NNR_Reliability_North_Stress_0100, Reliability | MediumTest | Level2) +{ + for (int i = 0; i < STRESS_COUNT; i++) { + OH_NNModel *model1 = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model1); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model1, graphArgs)); + + OH_NNCompilation *compilation1 = OH_NNCompilation_Construct(model1); + ASSERT_NE(nullptr, compilation1); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation1, compileParam)); + + Free(model1, compilation1); + if (i % PRINT_FREQ == 0) { + printf("[NnrtTest] SUB_AI_NNR_Reliability_North_Stress_0100 times: %d/%d\n", i, STRESS_COUNT); + } + } +} + +/** + * @tc.number : SUB_AI_NNR_Reliability_North_Stress_0200 + * @tc.name : 模型推理并发长稳测试 + * @tc.desc : [C- SOFTWARE -0200] + */ +HWTEST_F(MultiThreadTest, SUB_AI_NNR_Reliability_North_Stress_0200, Reliability | MediumTest | Level2) +{ + OH_NNModel *model1 = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model1); + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model1, graphArgs)); + + OH_NNCompilation *compilation1 = OH_NNCompilation_Construct(model1); + ASSERT_NE(nullptr, compilation1); + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation1, compileParam)); + + for (int i = 0; i < STRESS_COUNT; i++) { + OH_NNExecutor *executor1 = OH_NNExecutor_Construct(compilation1); + ASSERT_NE(nullptr, executor1); + ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor1, graphArgs, nullptr)); + OH_NNExecutor_Destroy(&executor1); + ASSERT_EQ(nullptr, executor1); + if (i % PRINT_FREQ == 0) { + printf("[NnrtTest] SUB_AI_NNR_Reliability_North_Stress_0200 times: %d/%d\n", i, STRESS_COUNT); + } + } + Free(model1, compilation1); +} diff --git a/test/nnrt_xts_acts/nncore/BUILD.gn b/test/nnrt_xts_acts/nncore/BUILD.gn new file mode 100644 index 0000000..918d7c0 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/BUILD.gn @@ -0,0 +1,21 @@ +# Copyright (c) 2023 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
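+# Top-level group for the nncore XTS suites: end-to-end tests (e2etest),
+# core interface tests (nncoretest) and per-operator tests (opstest), each
+# built from the corresponding subdirectory listed in deps below.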
+ +group("ActsHdfNncoreTest") { + testonly = true + deps = [ + "e2etest:ActsNnrtE2ETest", + "nncoretest:ActsNncoreTest", + "opstest:ActsNnrtOpsTest", + ] +} diff --git a/test/nnrt_xts_acts/nncore/common/mock_idevice.cpp b/test/nnrt_xts_acts/nncore/common/mock_idevice.cpp new file mode 100644 index 0000000..a0dbe95 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/common/mock_idevice.cpp @@ -0,0 +1,345 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include + +#include "nncore_const.h" +#include "mock_idevice.h" +#include "hdi_device_v2_0.h" +#include "hdi_returncode_utils.h" +#include "log.h" +#include "utils.h" +#include "nnbackend.h" +#include "backend_registrar.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +void PrintRetLog(int32_t ret, int32_t nnrtSuccess, const std::string& makeName) +{ + if (ret < nnrtSuccess) { + LOGW("%s failed. An error occurred in HDI, errorcode is %{public}d.", makeName.c_str(), ret); + } else { + OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode nnrtRet = static_cast(ret); + LOGW("%s failed. Errorcode is %{public}s.", makeName.c_str(), ConverterRetToString(nnrtRet).c_str()); + } +} + +std::shared_ptr HDIDeviceV2_0Creator() +{ + std::string deviceName; + std::string vendorName; + std::string version; + + // only one device from HDI now. 
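+    // The creator below queries that device for its name, vendor and version,
+    // derives a unique backend name from the three strings, wraps the HDI
+    // device in an NNBackend, and hands the backend to the registry through
+    // REGISTER_BACKEND at the end of this namespace.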
+ OHOS::sptr iDevice = V2_0::INnrtDevice::Get(); + if (iDevice == nullptr) { + LOGW("Get HDI device failed."); + return nullptr; + } + + auto ret = iDevice->GetDeviceName(deviceName); + int32_t nnrtSuccess = static_cast(V2_0::NNRT_ReturnCode::NNRT_SUCCESS); + if (ret != nnrtSuccess) { + std::string makeName = "Get device name"; + PrintRetLog(ret, nnrtSuccess, makeName); + return nullptr; + } + + ret = iDevice->GetVendorName(vendorName); + if (ret != nnrtSuccess) { + std::string makeName = "Get vendor name"; + PrintRetLog(ret, nnrtSuccess, makeName); + return nullptr; + } + + std::pair hdiVersion; + ret = iDevice->GetVersion(hdiVersion.first, hdiVersion.second); + if (ret != nnrtSuccess) { + std::string makeName = "Get version"; + PrintRetLog(ret, nnrtSuccess, makeName); + return nullptr; + } + version = 'v' + std::to_string(hdiVersion.first) + '_' + std::to_string(hdiVersion.second); + const std::string& backendName = GenUniqueName(deviceName, vendorName, version); + + std::shared_ptr device = CreateSharedPtr(iDevice); + if (device == nullptr) { + LOGW("Failed to create device, because fail to create device instance."); + return nullptr; + } + + std::shared_ptr backend = std::make_shared(device, std::hash{}(backendName)); + if (backend == nullptr) { + LOGW("Failed to register backend, because fail to create backend."); + } + return backend; +} + +REGISTER_BACKEND(HDIDeviceV2_0, HDIDeviceV2_0Creator) +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V2_0 { + +sptr INnrtDevice::Get(bool isStub) +{ + return INnrtDevice::Get("mock_device_service", isStub); +} + +sptr INnrtDevice::Get(const std::string &serviceName, bool isStub) +{ + if (isStub) { + return nullptr; + } + sptr mockIDevice = sptr(MockIDevice::GetInstance()); + return mockIDevice; +} + +MockIDevice::~MockIDevice() +{ + for (auto fd : m_fds) { + close(fd); + } +} + +MockIDevice::MockIDevice() +{ + m_bufferFd = 0; +} + +MockIPreparedModel::~MockIPreparedModel() +{ + for (auto fd : m_fds) { + close(fd); + } +} + +MockIDevice *MockIDevice::GetInstance() +{ + static MockIDevice iDevice; + return &iDevice; +} + +void MockIDevice::SetFP16Supported(bool isSupported) +{ + m_fp16 = isSupported; +} + +void MockIDevice::SetPerformanceSupported(bool isSupported) +{ + m_performance = isSupported; +} + +void MockIDevice::SetPrioritySupported(bool isSupported) +{ + m_priority = isSupported; +} + +void MockIDevice::SetModelCacheSupported(bool isSupported) +{ + m_cache = isSupported; +} + +void MockIDevice::SetOperationsSupported(std::vector isSupported) +{ + m_operations = isSupported; +} + +void MockIDevice::SetDynamicInputSupported(bool isSupported) +{ + m_dynamic = isSupported; +} + +int32_t MockIDevice::GetDeviceName(std::string& name) +{ + name = "Device-CPU"; + return HDF_SUCCESS; +} + +int32_t MockIDevice::GetVendorName(std::string& name) +{ + name = "TestVendor"; + return HDF_SUCCESS; +} + +int32_t MockIDevice::GetDeviceType(DeviceType& deviceType) +{ + deviceType = DeviceType::CPU; + return HDF_SUCCESS; +} + +int32_t MockIDevice::GetDeviceStatus(DeviceStatus& status) +{ + status = DeviceStatus::AVAILABLE; + return HDF_SUCCESS; +} + +int32_t MockIDevice::GetVersion(uint32_t &majorVersion, uint32_t &minorVersion) +{ + uint32_t twoNum = 2; + majorVersion = twoNum; + minorVersion = 0; + return HDF_SUCCESS; +} + +int32_t MockIDevice::GetSupportedOperation(const Model& model, std::vector& ops) +{ + ops = m_operations; + return HDF_SUCCESS; +} + +int32_t 
MockIDevice::IsFloat16PrecisionSupported(bool& isSupported) +{ + isSupported = m_fp16; + return HDF_SUCCESS; +} + +int32_t MockIDevice::IsPerformanceModeSupported(bool& isSupported) +{ + isSupported = m_performance; + return HDF_SUCCESS; +} + +int32_t MockIDevice::IsPrioritySupported(bool& isSupported) +{ + isSupported = m_priority; + return HDF_SUCCESS; +} + +int32_t MockIDevice::IsDynamicInputSupported(bool& isSupported) +{ + isSupported = m_dynamic; + return HDF_SUCCESS; +} + +int32_t MockIDevice::IsModelCacheSupported(bool& isSupported) +{ + isSupported = m_cache; + return HDF_SUCCESS; +} + +int32_t MockIDevice::AllocateBuffer(uint32_t length, SharedBuffer &buffer) +{ + std::lock_guard lock(m_mtx); + buffer.fd = AshmemCreate("allocateBuffer", length); + buffer.bufferSize = AshmemGetSize(buffer.fd); + buffer.offset = 0; + buffer.dataSize = length; + + AshmemSetProt(buffer.fd, PROT_READ | PROT_WRITE); + m_fds.emplace(buffer.fd); + m_bufferFd = buffer.fd; + return HDF_SUCCESS; +} + +int32_t MockIDevice::ReleaseBuffer(const SharedBuffer &buffer) +{ + if (m_fds.find(buffer.fd) == m_fds.end()) { + LOGE("ReleaseBuffer:buffer fd is invalid. fd = %d", buffer.fd); + return HDF_FAILURE; + } + if (close(buffer.fd) != 0) { + LOGE("ReleaseBuffer:Close buffer fd failed. fd = %d", buffer.fd); + return HDF_FAILURE; + } + return HDF_SUCCESS; +} + +int32_t MockIDevice::PrepareModel(const Model& model, const ModelConfig& config, sptr& preparedModel) +{ + preparedModel = new (std::nothrow) V2_0::MockIPreparedModel(); + return HDF_SUCCESS; +} + +int32_t MockIDevice::PrepareOfflineModel(const std::vector& offlineModels, const ModelConfig& config, + sptr& preparedModel) +{ + preparedModel = new (std::nothrow) V2_0::MockIPreparedModel(); + return V2_0::NNRT_ReturnCode::NNRT_SUCCESS; +} + +int32_t MockIDevice::PrepareModelFromModelCache(const std::vector& modelCache, const ModelConfig& config, + sptr& preparedModel) +{ + preparedModel = new (std::nothrow) V2_0::MockIPreparedModel(); + return HDF_SUCCESS; +} + +int32_t MockIPreparedModel::ExportModelCache(std::vector& modelCache) +{ + if (!modelCache.empty()) { + LOGE("[NNRtTest] The parameters of ExportModelCache should be an empty vector."); + return HDF_ERR_INVALID_PARAM; + } + uint8_t bufferData[4] = {0, 1, 2, 3}; + uint32_t size = sizeof(bufferData); + SharedBuffer buffer; + buffer.fd = AshmemCreate("cache", size); + buffer.bufferSize = AshmemGetSize(buffer.fd); + buffer.offset = 0; + buffer.dataSize = size; + AshmemSetProt(buffer.fd, PROT_READ | PROT_WRITE); + + void* data = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, buffer.fd, 0); + if (data == MAP_FAILED) { + LOGE("[Mock_Device]::ExportModelCache failed, Map fd to address failed: %{public}s.", strerror(errno)); + return HDF_FAILURE; + } + + auto memRet = memcpy_s(data, size, bufferData, size); + auto unmapResult = munmap(data, size); + if (unmapResult != 0) { + LOGE("[Mock_Device]ExportModelCache failed . 
Please try again."); + return HDF_FAILURE; + } + if (memRet != EOK) { + LOGE("[Mock_Device]ExportModelCache failed, failed to memcpy_s data type."); + return HDF_FAILURE; + } + m_fds.emplace(buffer.fd); + modelCache.emplace_back(buffer); + return HDF_SUCCESS; +} + +int32_t MockIPreparedModel::GetVersion(uint32_t &majorVersion, uint32_t &minorVersion) +{ + majorVersion = 1; + minorVersion = 0; + return HDF_SUCCESS; +} + +int32_t MockIPreparedModel::Run(const std::vector& inputs, const std::vector& outputs, + std::vector>& outputsDims) +{ + outputsDims = {{2, 2, 2, 2}}; + return HDF_SUCCESS; +} + +int32_t MockIPreparedModel::GetInputDimRanges(std::vector>& minInputDims, + std::vector>& maxInputDims) +{ + minInputDims = {{2, 2, 2, 2}, {2, 2, 2, 2}}; + maxInputDims = {{2, 100, 100, 10}, {2, 100, 100, 10}}; + + return HDF_SUCCESS; +} + +} // namespace V2_0 +} // namespace Nnrt +} // namespace HDI +} // namespace OHOS \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/common/mock_idevice.h b/test/nnrt_xts_acts/nncore/common/mock_idevice.h new file mode 100644 index 0000000..195f8b7 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/common/mock_idevice.h @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MOCK_IDEVICE_H +#define MOCK_IDEVICE_H + +#include +#include +#include +#include +#include + +#include +#include +#include "mindir_lite_graph.h" +#include "mindir.h" + +#include "securec.h" +#include "refbase.h" +#include "log.h" +#include "ashmem.h" + +#include +#include +#include + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V2_0 { + +class MockIDevice : public INnrtDevice { +public: + int32_t GetSupportedOperation(const Model& model, std::vector& ops) override; + + int32_t IsFloat16PrecisionSupported(bool& isSupported) override; + + int32_t IsPerformanceModeSupported(bool& isSupported) override; + + int32_t IsPrioritySupported(bool& isSupported) override; + + int32_t IsDynamicInputSupported(bool& isSupported) override; + + int32_t IsModelCacheSupported(bool& isSupported) override; + + int32_t AllocateBuffer(uint32_t length, SharedBuffer &buffer) override; + + int32_t ReleaseBuffer(const SharedBuffer &buffer) override; + + int32_t GetDeviceName(std::string& name) override; + + int32_t GetVendorName(std::string& name) override; + + int32_t GetDeviceType(DeviceType& deviceType) override; + + int32_t GetDeviceStatus(DeviceStatus& status) override; + + int32_t GetVersion(uint32_t &majorVersion, uint32_t &minorVersion) override; + + int32_t PrepareModel(const Model& model, const ModelConfig& config, sptr& preparedModel) override; + + int32_t PrepareOfflineModel(const std::vector& offlineModels, const ModelConfig& config, + sptr& preparedModel) override; + + int32_t PrepareModelFromModelCache(const std::vector& modelCache, const ModelConfig& config, + sptr& preparedModel) override; + + void SetFP16Supported(bool isSupported); + + void SetPerformanceSupported(bool isSupported); + + void SetPrioritySupported(bool isSupported); + + void SetModelCacheSupported(bool isSupported); + + void SetOperationsSupported(std::vector isSupported); + + void SetDynamicInputSupported(bool isSupported); + + static MockIDevice *GetInstance(); + + MockIDevice(); + virtual ~MockIDevice(); + +private: + std::unordered_set m_fds; + int m_bufferFd; + bool m_fp16 = true; + bool m_performance = true; + bool m_priority = true; + bool m_cache = true; + bool m_dynamic = true; + std::vector m_operations{true}; + std::mutex m_mtx; +}; + +class MockIPreparedModel : public IPreparedModel { +public: + int32_t ExportModelCache(std::vector& modelCache) override; + int32_t Run(const std::vector& inputs, const std::vector& outputs, + std::vector>& outputsDims) override; + int32_t GetInputDimRanges(std::vector>& minInputDims, + std::vector>& maxInputDims) override; + int32_t GetVersion(uint32_t &majorVersion, uint32_t &minorVersion) override; + MockIPreparedModel() = default; + virtual ~MockIPreparedModel(); +private: + std::unordered_set m_fds; +}; + +} // namespace V2_0 +} // namespace Nnrt +} // namespace HDI +} // namespace OHOS +#endif // MOCK_IDEVICE_H diff --git a/test/nnrt_xts_acts/nncore/common/nncore_const.h b/test/nnrt_xts_acts/nncore/common/nncore_const.h new file mode 100644 index 0000000..d0fd3be --- /dev/null +++ b/test/nnrt_xts_acts/nncore/common/nncore_const.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef CONST_H +#define CONST_H + +#include +#include + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Test { + +const uint32_t ADD_DATA_LENGTH = 4 * sizeof(float); +const uint32_t AVG_INPUT_LENGTH = 9 * sizeof(float); +const std::vector TENSOR_SHAPE = {2, 2, 2, 2}; +const std::vector PARAM_INDEX = {2}; +const std::vector INPUT_INDEX = {0, 1}; +const std::vector OUTPUT_INDEX = {3}; +const int32_t ELEMENT_COUNT = 4; + +const std::string CACHE_DIR = "./cache"; +const std::string CACHE_PATH = CACHE_DIR + "/0.nncache"; +const std::string CACHE_INFO_PATH = CACHE_DIR + "/cache_info.nncache"; +const uint32_t NO_DEVICE_COUNT = 0; +const int STRESS_COUNT = 10000; +const int PRINT_FREQ = 500; + +const size_t MODEL_SIZE = 100; +const size_t ZERO = 0; +const uint32_t CACHEVERSION = 1; +const std::string SUPPORTMODELPATH = "modelPath"; +const unsigned short TEST_BUFFER[14] = { + 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad +}; +} // namespace Test +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // CONST_H \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/common/nncore_utils.cpp b/test/nnrt_xts_acts/nncore/common/nncore_utils.cpp new file mode 100644 index 0000000..a87b0d4 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/common/nncore_utils.cpp @@ -0,0 +1,704 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include + +#include "nncore_utils.h" +#include "nncore_const.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Test { +OH_NN_UInt32Array TransformUInt32Array(const std::vector& vector) +{ + uint32_t* data = (vector.empty()) ? 
nullptr : const_cast(vector.data()); + return {data, vector.size()}; +} + +NN_TensorDesc* createTensorDesc(const int32_t* shape, size_t shapeNum, OH_NN_DataType dataType, OH_NN_Format format) +{ + NN_TensorDesc* tensorDescTmp = OH_NNTensorDesc_Create(); + if (tensorDescTmp == nullptr) { + LOGE("[NNRtTest]OH_NNTensorDesc_Create failed!"); + return nullptr; + } + + OH_NN_ReturnCode ret = OH_NNTensorDesc_SetDataType(tensorDescTmp, dataType); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest]OH_NNTensorDesc_SetDataType failed!ret = %d\n", ret); + return nullptr; + } + + if (shape != nullptr) { + ret = OH_NNTensorDesc_SetShape(tensorDescTmp, shape, shapeNum); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest]OH_NNTensorDesc_SetShape failed!ret = %d\n", ret); + return nullptr; + } + } + + ret = OH_NNTensorDesc_SetFormat(tensorDescTmp, format); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest]OH_NNTensorDesc_SetShape failed!ret = %d\n", ret); + return nullptr; + } + + return tensorDescTmp; +} + +int MultiModelBuildEndStep(OH_NNModel *model, const OHNNGraphArgsMulti &graphArgs) +{ + int ret = 0; + auto graphInputs = TransformUInt32Array(graphArgs.graphInput); + auto graphOutputs = TransformUInt32Array(graphArgs.graphOutput); + ret = OH_NNModel_SpecifyInputsAndOutputs(model, &graphInputs, &graphOutputs); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_SpecifyInputsAndOutputs failed! ret=%d\n", ret); + return ret; + } + ret = OH_NNModel_Finish(model); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_Finish failed! ret=%d\n", ret); + return ret; + } + return ret; +} + +int BuildMultiOpGraph(OH_NNModel *model, const OHNNGraphArgsMulti &graphArgs) +{ + int ret = 0; + int opCnt = 0; + for (size_t j = 0; j < graphArgs.operationTypes.size(); j++) { + for (size_t i = 0; i < graphArgs.operands[j].size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[j][i]; + NN_TensorDesc* tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + ret = OH_NNModel_AddTensorToModel(model, tensorDesc); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_AddTensor failed! ret=%d\n", ret); + return ret; + } + ret = OH_NNModel_SetTensorType(model, i, operandTem.type); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNBackend_SetModelTensorType failed! ret=%d\n", ret); + return ret; + } + if (std::find(graphArgs.paramIndices[j].begin(), graphArgs.paramIndices[j].end(), opCnt) != + graphArgs.paramIndices[j].end()) { + ret = OH_NNModel_SetTensorData(model, opCnt, operandTem.data, operandTem.length); + opCnt += 1; + } + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_SetTensorData failed! ret=%d\n", ret); + return ret; + } + OH_NNTensorDesc_Destroy(&tensorDesc); + } + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices[j]); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices[j]); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices[j]); + ret = OH_NNModel_AddOperation(model, graphArgs.operationTypes[j], ¶mIndices, &inputIndices, + &outputIndices); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_AddOperation failed! 
ret=%d\n", ret); + return ret; + } + } + ret = MultiModelBuildEndStep(model, graphArgs); + return ret; +} + +int SingleModelBuildEndStep(OH_NNModel *model, const OHNNGraphArgs &graphArgs) +{ + int ret = 0; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + if (graphArgs.addOperation) { + ret = OH_NNModel_AddOperation(model, graphArgs.operationType, ¶mIndices, &inputIndices, + &outputIndices); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_AddOperation failed! ret=%d\n", ret); + return ret; + } + } + if (graphArgs.specifyIO) { + ret = OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_SpecifyInputsAndOutputs failed! ret=%d\n", ret); + return ret; + } + } + if (graphArgs.build) { + ret = OH_NNModel_Finish(model); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_Finish failed! ret=%d\n", ret); + return ret; + } + } + return ret; +} + +int BuildSingleOpGraph(OH_NNModel *model, const OHNNGraphArgs &graphArgs) +{ + int ret = 0; + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + NN_TensorDesc* tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + ret = OH_NNModel_AddTensorToModel(model, tensorDesc); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_AddTensor failed! ret=%d\n", ret); + return ret; + } + ret = OH_NNModel_SetTensorType(model, i, operandTem.type); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNBackend_SetModelTensorType failed! ret=%d\n", ret); + return ret; + } + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + ret = OH_NNModel_SetTensorData(model, i, operandTem.data, operandTem.length); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_SetTensorData failed! ret=%d\n", ret); + return ret; + } + } + OH_NNTensorDesc_Destroy(&tensorDesc); + } + ret = SingleModelBuildEndStep(model, graphArgs); + return ret; +} + +int BuildSingleOpGraphWithQuantParams(OH_NNModel *model, const OHNNGraphArgs &graphArgs) +{ + int ret = 0; + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + NN_TensorDesc* tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + ret = OH_NNModel_AddTensorToModel(model, tensorDesc); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_AddTensor failed! ret=%d\n", ret); + return ret; + } + + NN_QuantParam* quantParam = OH_NNQuantParam_Create(); + double scales = 0.2; + int32_t zeroPoints = 0; + uint32_t numBits = 8; + ret = OH_NNQuantParam_SetScales(quantParam, &scales, 1); + ret = OH_NNQuantParam_SetZeroPoints(quantParam, &zeroPoints, 1); + ret = OH_NNQuantParam_SetNumBits(quantParam, &numBits, 1); + ret = OH_NNModel_SetTensorQuantParams(model, i, quantParam); + ret = OH_NNQuantParam_Destroy(&quantParam); + ret = OH_NNModel_SetTensorType(model, i, operandTem.type); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNBackend_SetModelTensorType failed! 
ret=%d\n", ret); + return ret; + } + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + ret = OH_NNModel_SetTensorData(model, i, operandTem.data, operandTem.length); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_SetTensorData failed! ret=%d\n", ret); + return ret; + } + } + OH_NNTensorDesc_Destroy(&tensorDesc); + } + ret = SingleModelBuildEndStep(model, graphArgs); + return ret; +} + +OH_NN_ReturnCode GetDeviceID(size_t *deviceId) +{ + OH_NN_ReturnCode ret = OH_NN_FAILED; + const size_t *devicesID{nullptr}; + uint32_t devicesCount{0}; + ret = OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNDevice_GetAllDevicesID failed! ret=%d\n", ret); + return ret; + } + if (devicesCount <= NO_DEVICE_COUNT) { + LOGE("[NNRtTest] devicesCount <= 0 devicesCount=%d\n", devicesCount); + return OH_NN_FAILED; + } + + const char *name = nullptr; + std::string deviceName{"Device-CPU_TestVendor_v2_0"}; + for (uint32_t i = 0; i < devicesCount; i++) { + name = nullptr; + ret = OH_NNDevice_GetName(devicesID[i], &name); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNDevice_GetName failed! ret=%d\n", ret); + return ret; + } + + std::string sName(name); + if (deviceName == sName) { + *deviceId = devicesID[i]; + return OH_NN_SUCCESS; + } + } + return OH_NN_FAILED; +} + +OH_NN_ReturnCode SetDevice(OH_NNCompilation *compilation) +{ + OH_NN_ReturnCode ret = OH_NN_FAILED; + const size_t *devicesID{nullptr}; + uint32_t devicesCount{0}; + ret = OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNDevice_GetAllDevicesID failed! ret=%d\n", ret); + return ret; + } + if (devicesCount <= NO_DEVICE_COUNT) { + LOGE("[NNRtTest] devicesCount <= 0 devicesCount=%d\n", devicesCount); + return OH_NN_FAILED; + } + + const char *name = nullptr; + std::string deviceName{"Device-CPU_TestVendor_v2_0"}; + for (uint32_t i = 0; i < devicesCount; i++) { + name = nullptr; + ret = OH_NNDevice_GetName(devicesID[i], &name); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNDevice_GetName failed! ret=%d\n", ret); + return ret; + } + + std::string sName(name); + if (deviceName == sName) { + ret = OH_NNCompilation_SetDevice(compilation, devicesID[i]); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNCompilation_SetDevice failed! ret=%d\n", ret); + return ret; + } + return OH_NN_SUCCESS; + } + } + return OH_NN_FAILED; +} + +int CompileGraphMock(OH_NNCompilation *compilation, const OHNNCompileParam &compileParam) +{ + int ret = 0; + ret = SetDevice(compilation); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNCompilation_SetDevice failed! ret=%d\n", ret); + return ret; + } + // set cache + if (!compileParam.cacheDir.empty()) { + ret = OH_NNCompilation_SetCache(compilation, compileParam.cacheDir.c_str(), + compileParam.cacheVersion); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNCompilation_SetCache failed! ret=%d\n", ret); + return ret; + } + } + // set performance + if (compileParam.performanceMode != OH_NN_PERFORMANCE_NONE) { + ret = OH_NNCompilation_SetPerformanceMode(compilation, compileParam.performanceMode); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNCompilation_SetPerformanceMode failed! 
ret=%d\n", ret); + return ret; + } + } + // set priority + if (compileParam.priority != OH_NN_PRIORITY_NONE) { + ret = OH_NNCompilation_SetPriority(compilation, compileParam.priority); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNCompilation_SetPriority failed! ret=%d\n", ret); + return ret; + } + } + // enable fp16 + if (compileParam.enableFp16) { + ret = OH_NNCompilation_EnableFloat16(compilation, compileParam.enableFp16); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNCompilation_EnableFloat16 failed! ret=%d\n", ret); + return ret; + } + } + // build + ret = OH_NNCompilation_Build(compilation); + return ret; +} + +void Free(OH_NNModel *model, OH_NNCompilation *compilation, OH_NNExecutor *executor) +{ + if (model != nullptr) { + OH_NNModel_Destroy(&model); + ASSERT_EQ(nullptr, model); + } + if (compilation != nullptr) { + OH_NNCompilation_Destroy(&compilation); + ASSERT_EQ(nullptr, compilation); + } + if (executor != nullptr) { + OH_NNExecutor_Destroy(&executor); + ASSERT_EQ(nullptr, executor); + } +} + +PathType CheckPath(const std::string &path) +{ + if (path.empty()) { + LOGI("CheckPath: path is null"); + return PathType::NOT_FOUND; + } + struct stat buf{}; + if (stat(path.c_str(), &buf) == 0) { + if (buf.st_mode & S_IFDIR) { + return PathType::DIR; + } else if (buf.st_mode & S_IFREG) { + return PathType::FILE; + } else { + return PathType::UNKNOWN; + } + } + LOGI("%s not found", path.c_str()); + return PathType::NOT_FOUND; +} + +bool DeleteFile(const std::string &path) +{ + if (path.empty()) { + LOGI("DeleteFile: path is null"); + return false; + } + if (CheckPath(path) == PathType::NOT_FOUND) { + LOGI("not found: %s", path.c_str()); + return true; + } + if (remove(path.c_str()) == 0) { + LOGI("deleted: %s", path.c_str()); + return true; + } + LOGI("delete failed: %s", path.c_str()); + return false; +} + +void CopyFile(const std::string &srcPath, const std::string &dstPath) +{ + std::ifstream src(srcPath, std::ios::binary); + std::ofstream dst(dstPath, std::ios::binary); + + dst << src.rdbuf(); +} + +std::string ConcatPath(const std::string &str1, const std::string &str2) +{ + // boundary + if (str2.empty()) { + return str1; + } + if (str1.empty()) { + return str2; + } + // concat + char end = str1[str1.size() - 1]; + if (end == '\\' or end == '/') { + return str1 + str2; + } else { + return str1 + '/' + str2; + } +} + +void DeleteFolder(const std::string &path) +{ + if (path.empty()) { + LOGI("DeletePath: path is null"); + return; + } + + DIR *dir = opendir(path.c_str()); + // check is dir ? + if (dir == nullptr) { + LOGE("[NNRtTest] Can not open dir. Check path or permission! path: %s", path.c_str()); + return; + } + struct dirent *file; + // read all the files in dir + std::vector pathList; + while ((file = readdir(dir)) != nullptr) { + // skip "." and ".." 
+        if (strcmp(file->d_name, ".") == 0 || strcmp(file->d_name, "..") == 0) {
+            continue;
+        }
+        if (file->d_type == DT_DIR) {
+            std::string filePath = path + "/" + file->d_name;
+            DeleteFolder(filePath); // recurse into subdirectories
+        } else {
+            pathList.emplace_back(ConcatPath(path, file->d_name));
+        }
+    }
+    closedir(dir);
+    pathList.emplace_back(path);
+    LOGI("[Common] Delete folder %s", path.c_str());
+    for (auto &i : pathList) {
+        DeleteFile(i);
+    }
+}
+
+bool CreateFolder(const std::string &path)
+{
+    if (path.empty()) {
+        LOGI("CreateFolder: path is empty");
+        return false;
+    }
+    LOGI("CreateFolder:%s", path.c_str());
+    mode_t mode = 0700;
+    for (size_t i = 1; i < path.size() - 1; i++) {
+        if (path[i] != '/') {
+            continue;
+        }
+        PathType ret = CheckPath(path.substr(0, i));
+        switch (ret) {
+            case PathType::DIR:
+                continue;
+            case PathType::NOT_FOUND:
+                LOGI("mkdir: %s", path.substr(0, i).c_str());
+                mkdir(path.substr(0, i).c_str(), mode);
+                break;
+            default:
+                LOGI("error: %s", path.substr(0, i).c_str());
+                return false;
+        }
+    }
+    mkdir(path.c_str(), mode);
+    return CheckPath(path) == PathType::DIR;
+}
+
+bool CheckOutput(const float* output, const float* expect)
+{
+    if (output == nullptr || expect == nullptr) {
+        LOGE("[NNRtTest] output or expect is nullptr\n");
+        return false;
+    }
+    for (int i = 0; i < ELEMENT_COUNT; i++) {
+        if (std::abs(float(output[i]) - float(expect[i])) > 1e-8) {
+            for (int j = 0; j < ELEMENT_COUNT; j++) {
+                LOGE("[NNRtTest] output %d not match: expect:%f, actual:%f\n", j, float(expect[j]), float(output[j]));
+            }
+            return false;
+        }
+    }
+    return true;
+}
+
+// Construct a fixed-shape model
+void ConstructAddModel(OH_NNModel **model)
+{
+    *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    AddModel addModel;
+    OHNNGraphArgs graphArgs = addModel.graphArgs;
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(*model, graphArgs));
+}
+
+// Create a compilation from the fixed-shape model
+void ConstructCompilation(OH_NNCompilation **compilation, OH_NNModel **model)
+{
+    ConstructAddModel(model);
+    *compilation = OH_NNCompilation_Construct(*model);
+    ASSERT_NE(nullptr, *compilation);
+}
+
+// Create an executor from the fixed-shape compilation
+void CreateExecutor(OH_NNExecutor **executor)
+{
+    OH_NNCompilation *compilation = nullptr;
+    OH_NNModel *model = nullptr;
+    ConstructCompilation(&compilation, &model);
+    OHNNCompileParam compileParam{
+        .performanceMode = OH_NN_PERFORMANCE_HIGH,
+        .priority = OH_NN_PRIORITY_HIGH,
+    };
+    ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam));
+    *executor = OH_NNExecutor_Construct(compilation);
+    ASSERT_NE(nullptr, *executor);
+    OH_NNModel_Destroy(&model);
+    OH_NNCompilation_Destroy(&compilation);
+}
+
+void CreateDynamicExecutor(OH_NNExecutor **executor)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    AvgPoolDynamicModel avgModel;
+    OHNNGraphArgs graphArgs = avgModel.graphArgs;
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    ASSERT_NE(nullptr, compilation);
+
+    OHNNCompileParam compileParam{
+        .performanceMode = OH_NN_PERFORMANCE_HIGH,
+        .priority = OH_NN_PRIORITY_HIGH,
+    };
+    ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam));
+    *executor = OH_NNExecutor_Construct(compilation);
+    ASSERT_NE(nullptr, *executor);
+    OH_NNModel_Destroy(&model);
+    OH_NNCompilation_Destroy(&compilation);
+}
+
+void GetExecutorInputOutputTensorDesc(OH_NNExecutor* executor,
+    std::vector<NN_TensorDesc*>& inputTensorDescs, size_t& inputCount,
+    std::vector<NN_TensorDesc*>& outputTensorDescs, size_t& outputCount)
+{
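+    // Queries the executor for its input/output counts and creates one NN_TensorDesc per index.
+    // Callers own the returned descriptors and release them later (see DestroyTensorDesc further down).
+    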
OH_NN_ReturnCode ret = OH_NNExecutor_GetInputCount(executor, &inputCount); + ASSERT_EQ(OH_NN_SUCCESS, ret); + NN_TensorDesc* tensorDescTmp = nullptr; + for (size_t i = 0; i < inputCount; ++i) { + tensorDescTmp = OH_NNExecutor_CreateInputTensorDesc(executor, i); + ASSERT_NE(nullptr, tensorDescTmp); + inputTensorDescs.emplace_back(tensorDescTmp); + } + + ret = OH_NNExecutor_GetOutputCount(executor, &outputCount); + ASSERT_EQ(OH_NN_SUCCESS, ret); + for (size_t i = 0; i < outputCount; ++i) { + tensorDescTmp = OH_NNExecutor_CreateOutputTensorDesc(executor, i); + ASSERT_NE(nullptr, tensorDescTmp); + outputTensorDescs.emplace_back(tensorDescTmp); + } +} + +void GetExecutorInputOutputTensorByDesc(OH_NNExecutor* executor, + std::vector& inputTensors, const std::vector& inputTensorDescs, + std::vector& outputTensors, const std::vector& outputTensorDescs) +{ + size_t deviceID = 0; + if (OH_NN_SUCCESS != GetDeviceID(&deviceID)) { + LOGE("Get deviceid failed."); + return; + } + NN_Tensor* tensor = nullptr; + for (size_t i = 0; i < inputTensorDescs.size(); ++i) { + tensor = nullptr; + tensor = OH_NNTensor_Create(deviceID, inputTensorDescs[i]); + ASSERT_NE(nullptr, tensor); + inputTensors.emplace_back(tensor); + } + + for (size_t i = 0; i < outputTensorDescs.size(); ++i) { + tensor = nullptr; + tensor = OH_NNTensor_Create(deviceID, outputTensorDescs[i]); + ASSERT_NE(nullptr, tensor); + outputTensors.emplace_back(tensor); + } +} + +void GetExecutorInputOutputTensor(OH_NNExecutor* executor, std::vector& inputTensors, size_t& inputCount, + std::vector& outputTensors, size_t& outputCount) +{ + std::vector inputTensorDescs; + std::vector outputTensorDescs; + OH_NN_ReturnCode ret = OH_NNExecutor_GetInputCount(executor, &inputCount); + ASSERT_EQ(OH_NN_SUCCESS, ret); + NN_TensorDesc* tensorDescTmp = nullptr; + for (size_t i = 0; i < inputCount; ++i) { + tensorDescTmp = OH_NNExecutor_CreateInputTensorDesc(executor, i); + ASSERT_NE(nullptr, tensorDescTmp); + inputTensorDescs.emplace_back(tensorDescTmp); + } + + ret = OH_NNExecutor_GetOutputCount(executor, &outputCount); + ASSERT_EQ(OH_NN_SUCCESS, ret); + for (size_t i = 0; i < outputCount; ++i) { + tensorDescTmp = OH_NNExecutor_CreateOutputTensorDesc(executor, i); + ASSERT_NE(nullptr, tensorDescTmp); + outputTensorDescs.emplace_back(tensorDescTmp); + } + + size_t deviceID = 0; + if (OH_NN_SUCCESS != GetDeviceID(&deviceID)) { + LOGE("Get deviceid failed."); + return; + } + NN_Tensor* tensor = nullptr; + for (size_t i = 0; i < inputTensorDescs.size(); ++i) { + tensor = nullptr; + tensor = OH_NNTensor_Create(deviceID, inputTensorDescs[i]); + ASSERT_NE(nullptr, tensor); + inputTensors.emplace_back(tensor); + } + + for (size_t i = 0; i < outputTensorDescs.size(); ++i) { + tensor = nullptr; + tensor = OH_NNTensor_Create(deviceID, outputTensorDescs[i]); + ASSERT_NE(nullptr, tensor); + outputTensors.emplace_back(tensor); + } + + DestroyTensorDesc(inputTensorDescs, outputTensorDescs); +} + +OH_NN_ReturnCode DestroyTensorDesc( + std::vector& inputTensorDescs, std::vector& outputTensorDescs) +{ + // 销毁输入输出tensordesc + OH_NN_ReturnCode returnCode {OH_NN_FAILED}; + for (size_t i = 0; i < inputTensorDescs.size(); ++i) { + returnCode = OH_NNTensorDesc_Destroy(&inputTensorDescs[i]); + if (returnCode != OH_NN_SUCCESS) { + LOGE("End2EndTest::OH_NNTensorDesc_Destroy failed."); + return returnCode; + } + } + for (size_t i = 0; i < outputTensorDescs.size(); ++i) { + returnCode = OH_NNTensorDesc_Destroy(&outputTensorDescs[i]); + if (returnCode != OH_NN_SUCCESS) { + 
LOGE("End2EndTest::OH_NNTensorDesc_Destroy failed."); + return returnCode; + } + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode DestroyTensor( + std::vector& inputTensors, std::vector& outputTensors) +{ + // 清理输入输出Tensor + OH_NN_ReturnCode returnCode {OH_NN_FAILED}; + for (size_t i = 0; i < inputTensors.size(); ++i) { + returnCode = OH_NNTensor_Destroy(&inputTensors[i]); + if (returnCode != OH_NN_SUCCESS) { + LOGE("End2EndTest::OH_NNTensor_Destroy failed."); + return returnCode; + } + } + for (size_t i = 0; i < outputTensors.size(); ++i) { + returnCode = OH_NNTensor_Destroy(&outputTensors[i]); + if (returnCode != OH_NN_SUCCESS) { + LOGE("End2EndTest::OH_NNTensor_Destroy failed."); + return returnCode; + } + } + + return OH_NN_SUCCESS; +} +} // namespace Test +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/common/nncore_utils.h b/test/nnrt_xts_acts/nncore/common/nncore_utils.h new file mode 100644 index 0000000..de244e5 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/common/nncore_utils.h @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef NNRT_UTILS_H +#define NNRT_UTILS_H + +#include +#include +#include + +#include "neural_network_runtime/neural_network_runtime.h" +#include "log.h" +#include "mock_idevice.h" +#include "nncore_const.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Test { +namespace V2_0 = OHOS::HDI::Nnrt::V2_0; +struct OHNNOperandTest { + OH_NN_DataType dataType; + OH_NN_TensorType type; + std::vector shape; + void *data{nullptr}; + int32_t length{0}; + OH_NN_Format format = OH_NN_FORMAT_NONE; + const OH_NN_QuantParam *quantParam = nullptr; +}; + +struct OHNNGraphArgs { + OH_NN_OperationType operationType; + std::vector operands; + std::vector paramIndices; + std::vector inputIndices; + std::vector outputIndices; + bool build = true; + bool specifyIO = true; + bool addOperation = true; +}; + +struct OHNNGraphArgsMulti { + std::vector operationTypes; + std::vector> operands; + std::vector> paramIndices; + std::vector> inputIndices; + std::vector> outputIndices; + std::vector graphInput; + std::vector graphOutput; +}; + +struct OHNNCompileParam { + int32_t deviceId = 0; + std::string cacheDir; + uint32_t cacheVersion = 0; + OH_NN_PerformanceMode performanceMode = OH_NN_PERFORMANCE_NONE; + OH_NN_Priority priority = OH_NN_PRIORITY_NONE; + bool enableFp16 = false; +}; + +struct AddModel { + // ADD MODEL + float inputValue0[4] = {0, 1, 2, 3}; + float inputValue1[4] = {0, 1, 2, 3}; + int8_t activationValue = OH_NN_FUSED_NONE; + float outputValue[4] = {0}; + float expectValue[4] = {0, 2, 4, 6}; + + OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, TENSOR_SHAPE, inputValue0, ADD_DATA_LENGTH}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, TENSOR_SHAPE, inputValue1, ADD_DATA_LENGTH}; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; 
+ OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, TENSOR_SHAPE, outputValue, ADD_DATA_LENGTH}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_ADD, + .operands = {input0, input1, activation, output}, + .paramIndices = {2}, + .inputIndices = {0, 1}, + .outputIndices = {3}}; +}; + +struct AvgPoolDynamicModel { + // ADD MODEL + float inputValue0[4] = {0, 1, 2, 3}; + float inputValue1[4] = {0, 1, 2, 3}; + int8_t activationValue = OH_NN_FUSED_NONE; + float outputValue[4] = {0}; + float expectValue[4] = {0, 2, 4, 6}; + + OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, {-1, -1, -1, -1}, inputValue0, ADD_DATA_LENGTH}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, {-1, -1, -1, -1}, inputValue1, ADD_DATA_LENGTH}; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, {-1, -1, -1, -1}, outputValue, ADD_DATA_LENGTH}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_ADD, + .operands = {input0, input1, activation, output}, + .paramIndices = {2}, + .inputIndices = {0, 1}, + .outputIndices = {3}}; +}; + +struct TopKModel { + // TopK Model + float valueX[6] = {0, 1, 2, 3, 4, 5}; + int8_t valueK = 2; + bool valueSorted = true; + float valueOutput1[2]; + int32_t valueOutput2[2]; + + OHNNOperandTest x = {OH_NN_FLOAT32, OH_NN_TENSOR, {1, 6}, valueX, 6 * sizeof(float)}; + OHNNOperandTest k = {OH_NN_INT8, OH_NN_TENSOR, {}, &valueK, sizeof(int8_t)}; + OHNNOperandTest sorted = {OH_NN_BOOL, OH_NN_TOP_K_SORTED, {}, &valueSorted, sizeof(bool)}; + OHNNOperandTest output1 = {OH_NN_FLOAT32, OH_NN_TENSOR, {1, 2}, valueOutput1, 2 * sizeof(float)}; + OHNNOperandTest output2 = {OH_NN_INT32, OH_NN_TENSOR, {1, 2}, valueOutput2, 2 * sizeof(int32_t)}; + + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_TOP_K, + .operands = {x, k, sorted, output1, output2}, + .paramIndices = {2}, + .inputIndices = {0, 1}, + .outputIndices = {3, 4}}; +}; + +class AddTopKModel { + // Build two ops Model +private: + AddModel addModel; + TopKModel topKModel; + +public: + OHNNGraphArgsMulti graphArgs = { + .operationTypes = {OH_NN_OPS_ADD, OH_NN_OPS_TOP_K}, + .operands = {{addModel.input0, addModel.input1, addModel.activation, addModel.output}, + {topKModel.k, topKModel.sorted, topKModel.output1, topKModel.output2}}, + .paramIndices = {{2}, {5}}, + .inputIndices = {{0, 1}, {3, 4}}, + .outputIndices = {{3}, {6, 7}}, + .graphInput = {0, 1, 4}, + .graphOutput = {6, 7}}; +}; + +NN_TensorDesc* createTensorDesc(const int32_t* shape, size_t shapeNum, OH_NN_DataType dataType, OH_NN_Format format); +int MultiModelBuildEndStep(OH_NNModel *model, const OHNNGraphArgsMulti &graphArgs); +int SingleModelBuildEndStep(OH_NNModel *model, const OHNNGraphArgs &graphArgs); +int BuildSingleOpGraph(OH_NNModel *model, const OHNNGraphArgs &graphArgs); +int BuildSingleOpGraphWithQuantParams(OH_NNModel *model, const OHNNGraphArgs &graphArgs); +void Free(OH_NNModel *model = nullptr, OH_NNCompilation *compilation = nullptr, OH_NNExecutor *executor = nullptr); +int CompileGraphMock(OH_NNCompilation *compilation, const OHNNCompileParam &compileParam); +OH_NN_ReturnCode SetDevice(OH_NNCompilation *compilation); +int BuildMultiOpGraph(OH_NNModel *model, const OHNNGraphArgsMulti &graphArgs); +OH_NN_UInt32Array GetUInt32Array(std::vector indices); +bool CheckOutput(const float* output, const float* expect); +OH_NN_ReturnCode GetDeviceID(size_t *deviceId); + +//文件相关 +enum class PathType { FILE, DIR, UNKNOWN, NOT_FOUND }; +PathType 
CheckPath(const std::string &path); +bool DeleteFile(const std::string &path); +void CopyFile(const std::string &srcPath, const std::string &dstPath); +std::string ConcatPath(const std::string &str1, const std::string &str2); +void DeleteFolder(const std::string &path); +bool CreateFolder(const std::string &path); + +//nncore创建相关 +void ConstructAddModel(OH_NNModel **model); +void ConstructCompilation(OH_NNCompilation **compilation, OH_NNModel **model); +void CreateExecutor(OH_NNExecutor **executor); +void CreateDynamicExecutor(OH_NNExecutor **executor); +void GetExecutorInputOutputTensorDesc(OH_NNExecutor* executor, + std::vector& inputTensorDescs, size_t& inputCount, + std::vector& outputTensorDescs, size_t& outputCount); +void GetExecutorInputOutputTensorByDesc(OH_NNExecutor* executor, + std::vector& inputTensors, const std::vector& inputTensorDescs, + std::vector& outputTensors, const std::vector& outputTensorDescs); +void GetExecutorInputOutputTensor(OH_NNExecutor* executor, std::vector& inputTensors, size_t& inputCount, + std::vector& outputTensors, size_t& outputCount); +OH_NN_ReturnCode DestroyTensorDesc(std::vector& inputTensorDescs, + std::vector& outputTensorDescs); +OH_NN_ReturnCode DestroyTensor(std::vector& inputTensors, std::vector& outputTensors); +} // namespace Test +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NNRT_UTILS_H \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/e2etest/BUILD.gn b/test/nnrt_xts_acts/nncore/e2etest/BUILD.gn new file mode 100644 index 0000000..24a0ba8 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/e2etest/BUILD.gn @@ -0,0 +1,47 @@ +# Copyright (c) 2023 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import("//build/test.gni") +module_output_path = "neural_network_runtime/neural_network_runtime" + +config("nncore_config") { + include_dirs = [ "../common" ] + cflags = [ "-Wno-error" ] + cflags_cc = [ "-fexceptions" ] +} + +ohos_systemtest("ActsNnrtE2ETest") { + module_out_path = module_output_path + sources = [ + "../common/mock_idevice.cpp", + "../common/nncore_utils.cpp", + "src/EndToEndTest.cpp", + ] + + configs = [ ":nncore_config" ] + + external_deps = [ + "c_utils:utils", + "drivers_interface_nnrt:libnnrt_proxy_2.0", + "googletest:gmock_main", + "googletest:gtest_main", + "hilog:libhilog", + "ipc:ipc_single", + "mindspore:mindir_lib", + "neural_network_runtime:libneural_network_core", + "neural_network_runtime:libneural_network_runtime", + ] + + subsystem_name = "ai" + part_name = "neural_network_runtime" +} diff --git a/test/nnrt_xts_acts/nncore/e2etest/src/EndToEndTest.cpp b/test/nnrt_xts_acts/nncore/e2etest/src/EndToEndTest.cpp new file mode 100644 index 0000000..a62b3a3 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/e2etest/src/EndToEndTest.cpp @@ -0,0 +1,447 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include + +#include "neural_network_runtime/neural_network_runtime.h" +#include "log.h" +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; + +class EndToEndTest : public testing::Test { +public: + void SetUp() + { + CreateFolder(CACHE_DIR); + } + void TearDown() + { + DeleteFolder(CACHE_DIR); + } +}; + +void BuildModel(OH_NNModel **model) +{ + *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + AddModel addModel; + OHNNGraphArgs graphArgs = addModel.graphArgs; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(*model, graphArgs)); +} + +void BuildDynamicModel(OH_NNModel **model) +{ + *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + AvgPoolDynamicModel avgModel; + OHNNGraphArgs graphArgs = avgModel.graphArgs; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(*model, graphArgs)); +} + +void BuildModelWithQuantParams(OH_NNModel **model) +{ + *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + AddModel addModel; + OHNNGraphArgs graphArgs = addModel.graphArgs; + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraphWithQuantParams(*model, graphArgs)); +} + +OH_NNCompilation* ConstructCompilation(OH_NNModel* model, size_t deviceId, bool isUseCache = true) +{ + OH_NNCompilation* compilation = nullptr; + if (model == nullptr) { + compilation = OH_NNCompilation_ConstructForCache(); + if (compilation == nullptr) { + LOGE("End2EndTest::OH_NNCompilation_ConstructForCache failed."); + return nullptr; + } + } else { + compilation = OH_NNCompilation_Construct(model); + if (compilation == nullptr) { + LOGE("End2EndTest::OH_NNCompilation_Construct failed."); + return nullptr; + } + } + + OH_NN_ReturnCode returnCode = OH_NNCompilation_SetDevice(compilation, deviceId); + if (returnCode != OH_NN_SUCCESS) { + LOGE("End2EndTest::OH_NNCompilation_SetDevice failed."); + return nullptr; + } + + if (isUseCache) { + returnCode = OH_NNCompilation_SetCache(compilation, CACHE_DIR.c_str(), 1); + if (returnCode != OH_NN_SUCCESS) { + LOGE("End2EndTest::OH_NNCompilation_SetCache failed."); + return nullptr; + } + } + + returnCode = OH_NNCompilation_SetPerformanceMode(compilation, OH_NN_PERFORMANCE_EXTREME); + if (returnCode != OH_NN_SUCCESS) { + LOGE("End2EndTest::OH_NNCompilation_SetPerformanceMode failed."); + return nullptr; + } + + returnCode = OH_NNCompilation_SetPriority(compilation, OH_NN_PRIORITY_HIGH); + if (returnCode != OH_NN_SUCCESS) { + LOGE("End2EndTest::OH_NNCompilation_SetPriority failed."); + return nullptr; + } + + returnCode = OH_NNCompilation_EnableFloat16(compilation, false); + if (returnCode != OH_NN_SUCCESS) { + LOGE("End2EndTest::OH_NNCompilation_EnableFloat16 failed."); + return nullptr; + } + + // 执行编译 + returnCode = OH_NNCompilation_Build(compilation); + if (returnCode != OH_NN_SUCCESS) { + LOGE("End2EndTest::OH_NNCompilation_Build failed."); + return nullptr; + } + return compilation; +} + +OH_NN_ReturnCode SetInputData(NN_Tensor* inputTensor[], size_t inputSize) +{ + OH_NN_DataType dataType(OH_NN_FLOAT32); + OH_NN_ReturnCode ret{OH_NN_FAILED}; + size_t elementNum = 0; + for 
(size_t i = 0; i < inputSize; ++i) { + auto data = OH_NNTensor_GetDataBuffer(inputTensor[i]); + if (data == nullptr) { + LOGE("End2EndTest::OH_NNTensor_GetDataBuffer failed."); + return OH_NN_FAILED; + } + auto desc = OH_NNTensor_GetTensorDesc(inputTensor[i]); + if (desc == nullptr) { + LOGE("End2EndTest::OH_NNTensor_GetTensorDesc failed."); + return OH_NN_FAILED; + } + ret = OH_NNTensorDesc_GetDataType(desc, &dataType); + if (ret != OH_NN_SUCCESS) { + LOGE("End2EndTest::OH_NNTensorDesc_GetDataType failed."); + return ret; + } + ret = OH_NNTensorDesc_GetElementCount(desc, &elementNum); + if (ret != OH_NN_SUCCESS) { + LOGE("End2EndTest::OH_NNTensorDesc_GetElementCount failed."); + return ret; + } + switch (dataType) { + case OH_NN_FLOAT32: { + float* floatValue = reinterpret_cast(data); + for (size_t j = 0; j < elementNum; ++j) { + floatValue[j] = static_cast(j); + } + break; + } + case OH_NN_INT32: { + int* intValue = reinterpret_cast(data); + for (size_t j = 0; j < elementNum; ++j) { + intValue[j] = static_cast(j); + } + break; + } + default: + return OH_NN_FAILED; + } + } + return OH_NN_SUCCESS; +} +OH_NN_ReturnCode GetInputAndOutputTensorDesc(OH_NNExecutor *executor, + size_t *inputCount, std::vector& inputTensorDescs, + size_t *outputCount, std::vector& outputTensorDescs) +{ + OH_NN_ReturnCode returnCode = OH_NNExecutor_GetInputCount(executor, inputCount); + if (returnCode != OH_NN_SUCCESS) { + LOGE("End2EndTest::OH_NNExecutor_GetInputCount failed."); + return returnCode; + } + NN_TensorDesc* tensorDescTmp = nullptr; + for (size_t i = 0; i < *inputCount; ++i) { + tensorDescTmp = OH_NNExecutor_CreateInputTensorDesc(executor, i); + if (tensorDescTmp == nullptr) { + LOGE("End2EndTest::OH_NNExecutor_CreateInputTensorDesc failed."); + return OH_NN_FAILED; + } + inputTensorDescs.emplace_back(tensorDescTmp); + } + returnCode = OH_NNExecutor_GetOutputCount(executor, outputCount); + if (returnCode != OH_NN_SUCCESS) { + LOGE("End2EndTest::OH_NNExecutor_GetOutputCount failed."); + return returnCode; + } + for (size_t i = 0; i < *outputCount; ++i) { + tensorDescTmp = OH_NNExecutor_CreateOutputTensorDesc(executor, i); + if (tensorDescTmp == nullptr) { + LOGE("End2EndTest::OH_NNExecutor_CreateOutputTensorDesc failed."); + return OH_NN_FAILED; + } + outputTensorDescs.emplace_back(tensorDescTmp); + } + + return returnCode; +} + +OH_NN_ReturnCode GetInputDimAndSetShape(OH_NNExecutor *executor, std::vector& inputTensorDescs, + std::vector& outputTensorDescs, bool isDynamic) +{ + if (isDynamic) { + size_t *minInputDims = nullptr; + size_t *maxInputDims = nullptr; + size_t shapeLength = ZERO; + for (size_t i = 0; i < inputTensorDescs.size(); ++i) { + if (OH_NN_SUCCESS != OH_NNExecutor_GetInputDimRange(executor, i, &minInputDims, + &maxInputDims, &shapeLength)) { + LOGE("End2EndTest::OH_NNExecutor_GetInputDimRange failed."); + return OH_NN_FAILED; + } + std::vector minInputDimsT; + for (size_t j = 0; j < shapeLength; ++j) { + minInputDimsT.emplace_back(static_cast(minInputDims[j])); + } + if (OH_NN_SUCCESS != OH_NNTensorDesc_SetShape(inputTensorDescs[i], minInputDimsT.data(), shapeLength)) { + LOGE("End2EndTest::OH_NNTensorDesc_SetShape failed."); + return OH_NN_FAILED; + } + } + std::vector outputShape{1, 2, 2, 1}; + for (size_t i = 0; i < outputTensorDescs.size(); ++i) { + if (OH_NN_SUCCESS != OH_NNTensorDesc_SetShape(outputTensorDescs[i], + outputShape.data(), outputShape.size())) { + LOGE("End2EndTest::OH_NNTensorDesc_SetShape failed."); + return OH_NN_FAILED; + } + } + } + return OH_NN_SUCCESS; +} + 
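+// For dynamic-shape models the executor only exposes a dimension range, so GetInputDimAndSetShape()
+// above pins every input descriptor to its minimum shape and fixes the output shape to {1, 2, 2, 1}
+// before any tensor memory is allocated. Illustrative call order, as used by RunExecutor() further
+// below (a sketch for readers, not an additional test step):
+//   GetInputAndOutputTensorDesc(executor, &inputCount, inputTensorDescs, &outputCount, outputTensorDescs);
+//   GetInputDimAndSetShape(executor, inputTensorDescs, outputTensorDescs, /* isDynamic */ true);
+//   CreateTensorAndDestroyTensorDesc(inputTensors, inputCount, inputTensorDescs, deviceId);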
+OH_NN_ReturnCode CreateTensorAndDestroyTensorDesc(NN_Tensor* tensors[], size_t count, + std::vector& tensorDescs, size_t deviceId) +{ + NN_Tensor* tensor = nullptr; + for (size_t i = 0; i < count; ++i) { + tensor = nullptr; + tensor = OH_NNTensor_Create(deviceId, tensorDescs[i]); + if (tensor == nullptr) { + LOGE("End2EndTest::OH_NNTensor_Create failed."); + return OH_NN_FAILED; + } + tensors[i] = tensor; + } + for (size_t i = 0; i < count; ++i) { + if (OH_NN_SUCCESS != OH_NNTensorDesc_Destroy(&tensorDescs[i])) { + LOGE("End2EndTest::OH_NNTensorDesc_Destroy failed."); + return OH_NN_FAILED; + } + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode DestroyInputAndOutputTensor(NN_Tensor** inputTensors, size_t inputCount, + NN_Tensor** outputTensors, size_t outputCount) +{ + OH_NN_ReturnCode returnCode = OH_NN_SUCCESS; + for (size_t i = 0; i < inputCount; ++i) { + returnCode = OH_NNTensor_Destroy(&inputTensors[i]); + if (returnCode != OH_NN_SUCCESS) { + LOGE("End2EndTest::OH_NNTensor_Destroy failed."); + return OH_NN_FAILED; + } + } + for (size_t i = 0; i < outputCount; ++i) { + returnCode = OH_NNTensor_Destroy(&outputTensors[i]); + if (returnCode != OH_NN_SUCCESS) { + LOGE("End2EndTest::OH_NNTensor_Destroy failed."); + return OH_NN_FAILED; + } + } + return OH_NN_SUCCESS; +} + +OH_NNExecutor* RunExecutor(OH_NNCompilation* compilation, size_t deviceId, bool isDynamic = false) +{ + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + if (executor == nullptr) { + LOGE("End2EndTest::OH_NNExecutor_Construct failed."); + return nullptr; + } + size_t inputCount = 0; + std::vector inputTensorDescs; + size_t outputCount = 0; + std::vector outputTensorDescs; + OH_NN_ReturnCode returnCode = GetInputAndOutputTensorDesc(executor, &inputCount, inputTensorDescs, + &outputCount, outputTensorDescs); + if (returnCode != OH_NN_SUCCESS) { + LOGE("End2EndTest::GetInputAndOutputTensorDesc failed."); + return nullptr; + } + returnCode = GetInputDimAndSetShape(executor, inputTensorDescs, outputTensorDescs, isDynamic); + if (returnCode != OH_NN_SUCCESS) { + LOGE("End2EndTest::GetInputDimAndSetShape failed."); + return nullptr; + } + NN_Tensor* inputTensors[inputCount]; + OH_NN_ReturnCode returnCodeTmp = CreateTensorAndDestroyTensorDesc(inputTensors, inputCount, + inputTensorDescs, deviceId); + NN_Tensor* outputTensors[outputCount]; + returnCode = CreateTensorAndDestroyTensorDesc(outputTensors, outputCount, outputTensorDescs, deviceId); + if (returnCode != OH_NN_SUCCESS || returnCodeTmp != OH_NN_SUCCESS) { + LOGE("End2EndTest::Tensors create failed."); + return nullptr; + } + returnCode = SetInputData(inputTensors, inputCount); + if (returnCode != OH_NN_SUCCESS) { + LOGE("End2EndTest::SetInputData failed."); + return nullptr; + } + returnCode = OH_NNExecutor_RunSync(executor, inputTensors, inputCount, outputTensors, outputCount); + if (returnCode != OH_NN_SUCCESS) { + LOGE("End2EndTest::OH_NNExecutor_RunSync failed."); + return nullptr; + } + returnCode = DestroyInputAndOutputTensor(inputTensors, inputCount, outputTensors, outputCount); + if (returnCode != OH_NN_SUCCESS) { + LOGE("End2EndTest::DestroyInputAndOutputTensor failed."); + return nullptr; + } + return executor; +} + +/* +* @tc.name:sub_AI_NNRt_Core_Func_North_EndToEnd_0100 +* @tc.desc:定长模型编译端到端测试 +* @tc.type:FUNC +*/ +HWTEST_F(EndToEndTest, sub_AI_NNRt_Core_Func_North_EndToEnd_0100, Function | MediumTest | Level1) +{ + size_t deviceId = 0; + ASSERT_EQ(OH_NN_SUCCESS, GetDeviceID(&deviceId)); + + OH_NNModel* model = nullptr; + BuildModel(&model); + 
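+    // ConstructCompilation() (the local helper above) sets the device, cache, performance mode and
+    // priority and then builds; the test destroys the OH_NNModel handle right afterwards, since the
+    // executor is constructed from the compilation alone.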
+ OH_NNCompilation* compilation = ConstructCompilation(model, deviceId); + ASSERT_NE(nullptr, compilation); + + OH_NNModel_Destroy(&model); + OH_NNExecutor* executor = RunExecutor(compilation, deviceId); + ASSERT_NE(nullptr, executor); + OH_NNCompilation_Destroy(&compilation); + OH_NNExecutor_Destroy(&executor); +} + +/* +* @tc.name:sub_AI_NNRt_Core_Func_North_EndToEnd_0200 +* @tc.desc:变长模型编译端到端测试 +* @tc.type:FUNC +*/ +HWTEST_F(EndToEndTest, sub_AI_NNRt_Core_Func_North_EndToEnd_0200, Function | MediumTest | Level1) +{ + size_t deviceId = 0; + ASSERT_EQ(OH_NN_SUCCESS, GetDeviceID(&deviceId)); + OH_NNModel* model = nullptr; + BuildDynamicModel(&model); + + OH_NNCompilation* compilation = ConstructCompilation(model, deviceId); + ASSERT_NE(nullptr, compilation); + OH_NNModel_Destroy(&model); + OH_NNExecutor* executor = RunExecutor(compilation, deviceId, true); + ASSERT_NE(nullptr, executor); + OH_NNCompilation_Destroy(&compilation); + OH_NNExecutor_Destroy(&executor); +} + +/* +* @tc.name:sub_AI_NNRt_Core_Func_North_EndToEnd_0300 +* @tc.desc:定长模型编译带量化参数端到端测试 +* @tc.type:FUNC +*/ +HWTEST_F(EndToEndTest, sub_AI_NNRt_Core_Func_North_EndToEnd_0300, Function | MediumTest | Level1) +{ + size_t deviceId = 0; + ASSERT_EQ(OH_NN_SUCCESS, GetDeviceID(&deviceId)); + + OH_NNModel* model = nullptr; + BuildModelWithQuantParams(&model); + + OH_NNCompilation* compilation = ConstructCompilation(model, deviceId); + ASSERT_NE(nullptr, compilation); + OH_NNModel_Destroy(&model); + OH_NNExecutor* executor = RunExecutor(compilation, deviceId); + ASSERT_NE(nullptr, executor); + OH_NNCompilation_Destroy(&compilation); + OH_NNExecutor_Destroy(&executor); +} + +/* +* @tc.name:sub_AI_NNRt_Core_Func_North_Reliability_0100 +* @tc.desc:定长模型编译长稳测试 +* @tc.type:FUNC +*/ +HWTEST_F(EndToEndTest, sub_AI_NNRt_Core_Func_North_Reliability_0100, Reliability | MediumTest | Level2) +{ + size_t deviceId = 0; + ASSERT_EQ(OH_NN_SUCCESS, GetDeviceID(&deviceId)); + + OH_NNModel* model = nullptr; + BuildModel(&model); + for (int i = 0; i < STRESS_COUNT; i++) { + OH_NNCompilation* compilation = ConstructCompilation(model, deviceId); + ASSERT_NE(nullptr, compilation); + OH_NNExecutor* executor = RunExecutor(compilation, deviceId); + ASSERT_NE(nullptr, executor); + OH_NNCompilation_Destroy(&compilation); + OH_NNExecutor_Destroy(&executor); + if (i % PRINT_FREQ == 0) { + printf("[NnrtTest] Reliability_test_001 times: %d/%d\n", i, STRESS_COUNT); + } + } + OH_NNModel_Destroy(&model); +} + +/* +* @tc.name:sub_AI_NNRt_Core_Func_North_Reliability_0200 +* @tc.desc:变长模型编译端到端测试 +* @tc.type:FUNC +*/ +HWTEST_F(EndToEndTest, sub_AI_NNRt_Core_Func_North_Reliability_0200, Reliability | MediumTest | Level2) +{ + size_t deviceId = 0; + ASSERT_EQ(OH_NN_SUCCESS, GetDeviceID(&deviceId)); + + OH_NNModel* model = nullptr; + BuildDynamicModel(&model); + for (int i = 0; i < STRESS_COUNT; i++) { + OH_NNCompilation* compilation = ConstructCompilation(model, deviceId, false); + ASSERT_NE(nullptr, compilation); + OH_NNExecutor* executor = RunExecutor(compilation, deviceId, true); + ASSERT_NE(nullptr, executor); + OH_NNCompilation_Destroy(&compilation); + OH_NNExecutor_Destroy(&executor); + if (i % PRINT_FREQ == 0) { + printf("[NnrtTest] Reliability_test_002 times: %d/%d\n", i, STRESS_COUNT); + } + } + OH_NNModel_Destroy(&model); +} diff --git a/test/nnrt_xts_acts/nncore/nncoretest/BUILD.gn b/test/nnrt_xts_acts/nncore/nncoretest/BUILD.gn new file mode 100644 index 0000000..c395602 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/nncoretest/BUILD.gn @@ -0,0 +1,51 @@ +# 
Copyright (c) 2023 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import("//build/test.gni") +module_output_path = "neural_network_runtime/neural_network_runtime" + +config("nncore_config") { + include_dirs = [ "../common" ] + cflags = [ "-Wno-error" ] + cflags_cc = [ "-fexceptions" ] +} + +ohos_systemtest("ActsNncoreTest") { + module_out_path = module_output_path + sources = [ + "../common/mock_idevice.cpp", + "../common/nncore_utils.cpp", + "src/HdiCompilationTest.cpp", + "src/HdiExecutorTest.cpp", + "src/HdiModelTest.cpp", + "src/HdiTensorDescTest.cpp", + "src/HdiTensorTest.cpp", + ] + + configs = [ ":nncore_config" ] + + external_deps = [ + "c_utils:utils", + "drivers_interface_nnrt:libnnrt_proxy_2.0", + "googletest:gmock_main", + "googletest:gtest_main", + "hilog:libhilog", + "ipc:ipc_single", + "mindspore:mindir_lib", + "neural_network_runtime:libneural_network_core", + "neural_network_runtime:libneural_network_runtime", + ] + + subsystem_name = "ai" + part_name = "neural_network_runtime" +} diff --git a/test/nnrt_xts_acts/nncore/nncoretest/src/HdiCompilationTest.cpp b/test/nnrt_xts_acts/nncore/nncoretest/src/HdiCompilationTest.cpp new file mode 100644 index 0000000..2d9b8a9 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/nncoretest/src/HdiCompilationTest.cpp @@ -0,0 +1,362 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include + +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +namespace OHOS::NeuralNetworkCore { +class CompilationTest : public testing::Test { +public: + void SetUp() + { + CreateFolder(CACHE_DIR); + } + void TearDown() + { + DeleteFolder(CACHE_DIR); + } + void GenCacheFile() + { + OH_NNCompilation *compilation = nullptr; + OH_NNModel *model = nullptr; + ConstructCompilation(&compilation, &model); + OHNNCompileParam compileParam{ + .cacheDir = CACHE_DIR, + .cacheVersion = CACHEVERSION, + }; + ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + ASSERT_TRUE(CheckPath(CACHE_PATH) == PathType::FILE); + ASSERT_TRUE(CheckPath(CACHE_INFO_PATH) == PathType::FILE); + OH_NNModel_Destroy(&model); + OH_NNCompilation_Destroy(&compilation); + } + void SaveSupportModel() + { + OH_NNModel *model = nullptr; + ConstructAddModel(&model); + std::ofstream ofs(SUPPORTMODELPATH, std::ios::out | std::ios::binary); + if (ofs) { + ofs.write(reinterpret_cast(model), sizeof(reinterpret_cast(model))); + ofs.close(); + } + OH_NNModel_Destroy(&model); + } + +protected: + OHNNCompileParam m_compileParam; + AddModel addModel; + OHNNGraphArgs graphArgs = addModel.graphArgs; +}; + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Construct_Compilation_For_Cache_0100 + * @tc.desc: 创建compilation,检查返回值为空,设置正确的cache路径,build成功,推理成功 + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, SUB_AI_NNRt_Core_Func_North_Construct_Compilation_For_Cache_0100, + Function | MediumTest | Level1) +{ + GenCacheFile(); + OH_NNCompilation *compilation = OH_NNCompilation_ConstructForCache(); + ASSERT_NE(nullptr, compilation); + + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetCache(compilation, CACHE_DIR.c_str(), CACHEVERSION)); + ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(compilation)); + OH_NNCompilation_Destroy(&compilation); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Construct_Compilation_For_Cache_0200 + * @tc.desc: 创建compilation,检查返回值非空,不设置cache,build失败 + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, SUB_AI_NNRt_Core_Func_North_Construct_Compilation_For_Cache_0200, + Function | MediumTest | Level1) +{ + OH_NNCompilation *compilation = OH_NNCompilation_ConstructForCache(); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNCompilation_Build(compilation)); + OH_NNCompilation_Destroy(&compilation); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_AddExtension_Config_To_Compilation_0100 + * @tc.desc: 创建compilation,增加config,传入compilation为空,返回错误 + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, SUB_AI_NNRt_Core_Func_North_AddExtension_Config_To_Compilation_0100, + Function | MediumTest | Level1) +{ + const char *configName = "test"; + const void *configValue = reinterpret_cast(10); + const size_t configValueSize = 1; + OH_NN_ReturnCode ret = OH_NNCompilation_AddExtensionConfig(nullptr, configName, configValue, configValueSize); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_AddExtension_Config_To_Compilation_0200 + * @tc.desc: 创建compilation,增加config,传入configNames为空指针,返回错误 + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, SUB_AI_NNRt_Core_Func_North_AddExtension_Config_To_Compilation_0200, + Function | MediumTest | Level1) +{ + OH_NNCompilation *compilation = nullptr; + OH_NNModel *model = nullptr; + ConstructCompilation(&compilation, &model); + + const void *configValue = reinterpret_cast(10); + const size_t 
configValueSize = 1;
+    OH_NN_ReturnCode ret = OH_NNCompilation_AddExtensionConfig(compilation, nullptr, configValue, configValueSize);
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, ret);
+    OH_NNModel_Destroy(&model);
+    OH_NNCompilation_Destroy(&compilation);
+}
+
+/**
+ * @tc.name: SUB_AI_NNRt_Core_Func_North_AddExtension_Config_To_Compilation_0300
+ * @tc.desc: Create a compilation and add an extension config with an empty configName string; an error is returned
+ * @tc.type: FUNC
+ */
+HWTEST_F(CompilationTest, SUB_AI_NNRt_Core_Func_North_AddExtension_Config_To_Compilation_0300,
+    Function | MediumTest | Level1)
+{
+    OH_NNCompilation *compilation = nullptr;
+    OH_NNModel *model = nullptr;
+    ConstructCompilation(&compilation, &model);
+
+    const char *configName = "";
+    int num = 10;
+    const void *configValue = &num;
+    const size_t configValueSize = sizeof(num);
+
+    OH_NN_ReturnCode ret = OH_NNCompilation_AddExtensionConfig(compilation, configName, configValue, configValueSize);
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, ret);
+    OH_NNModel_Destroy(&model);
+    OH_NNCompilation_Destroy(&compilation);
+}
+
+/**
+ * @tc.name: SUB_AI_NNRt_Core_Func_North_AddExtension_Config_To_Compilation_0400
+ * @tc.desc: Create a compilation and add an extension config with a null configValue; an error is returned
+ * @tc.type: FUNC
+ */
+HWTEST_F(CompilationTest, SUB_AI_NNRt_Core_Func_North_AddExtension_Config_To_Compilation_0400,
+    Function | MediumTest | Level1)
+{
+    OH_NNCompilation *compilation = nullptr;
+    OH_NNModel *model = nullptr;
+    ConstructCompilation(&compilation, &model);
+
+    const char *configName = "test";
+    const size_t configValueSize = 1;
+    OH_NN_ReturnCode ret = OH_NNCompilation_AddExtensionConfig(compilation, configName, nullptr, configValueSize);
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, ret);
+    OH_NNModel_Destroy(&model);
+    OH_NNCompilation_Destroy(&compilation);
+}
+
+/**
+ * @tc.name: SUB_AI_NNRt_Core_Func_North_AddExtension_Config_To_Compilation_0500
+ * @tc.desc: Create a compilation and add an extension config with configValueSize set to 0; an error is returned
+ * @tc.type: FUNC
+ */
+HWTEST_F(CompilationTest, SUB_AI_NNRt_Core_Func_North_AddExtension_Config_To_Compilation_0500,
+    Function | MediumTest | Level1)
+{
+    OH_NNCompilation *compilation = nullptr;
+    OH_NNModel *model = nullptr;
+    ConstructCompilation(&compilation, &model);
+
+    const char *configName = "test";
+    const void *configValue = reinterpret_cast<void *>(10);
+    const size_t configValueSize = 0;
+    OH_NN_ReturnCode ret = OH_NNCompilation_AddExtensionConfig(compilation, configName, configValue, configValueSize);
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, ret);
+    OH_NNModel_Destroy(&model);
+    OH_NNCompilation_Destroy(&compilation);
+}
+
+/**
+ * @tc.name: SUB_AI_NNRt_Core_Func_North_Construct_Compilation_With_OfflineModel_File_0100
+ * @tc.desc: Pass a null file path; unsupported is returned
+ * @tc.type: FUNC
+ */
+HWTEST_F(CompilationTest, SUB_AI_NNRt_Core_Func_North_Construct_Compilation_With_OfflineModel_File_0100,
+    Function | MediumTest | Level1)
+{
+    OH_NNCompilation *compilation = OH_NNCompilation_ConstructWithOfflineModelFile(nullptr);
+    ASSERT_EQ(nullptr, compilation);
+}
+
+/**
+ * @tc.name: SUB_AI_NNRt_Core_Func_North_Construct_Compilation_With_OfflineModel_File_0200
+ * @tc.desc: Pass a valid offline model file; unsupported is returned
+ * @tc.type: FUNC
+ */
+HWTEST_F(CompilationTest, SUB_AI_NNRt_Core_Func_North_Construct_Compilation_With_OfflineModel_File_0200,
+    Function | MediumTest | Level1)
+{
+    SaveSupportModel();
+    OH_NNCompilation *compilation = OH_NNCompilation_ConstructWithOfflineModelFile(SUPPORTMODELPATH.c_str());
+    ASSERT_NE(nullptr, compilation);
+
+    ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation));
+    ASSERT_EQ(OH_NN_FAILED, OH_NNCompilation_Build(compilation));
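+    // Build is expected to fail above: the test-vendor mock device used by these cases does not
+    // support building offline models, so the compilation reports OH_NN_FAILED.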
+ DeleteFile(SUPPORTMODELPATH); + OH_NNCompilation_Destroy(&compilation); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Construct_Compilation_With_Offline_ModelBuffer_0100 + * @tc.desc: 传入modelData为空指针,返回错误 + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, SUB_AI_NNRt_Core_Func_North_Construct_Compilation_With_Offline_ModelBuffer_0100, + Function | MediumTest | Level1) +{ + int modelSize = 0; + const void *buffer = nullptr; + OH_NNCompilation *compilation = OH_NNCompilation_ConstructWithOfflineModelBuffer(buffer, modelSize); + ASSERT_EQ(nullptr, compilation); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Construct_Compilation_With_Offline_ModelBuffer_0200 + * @tc.desc: 传入modelData为合法离线模型buffer,返回不支持 + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, SUB_AI_NNRt_Core_Func_North_Construct_Compilation_With_Offline_ModelBuffer_0200, + Function | MediumTest | Level1) +{ + OH_NNCompilation *compilation = + OH_NNCompilation_ConstructWithOfflineModelBuffer(reinterpret_cast(TEST_BUFFER), 28); + ASSERT_NE(nullptr, compilation); + ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation)); + ASSERT_EQ(OH_NN_FAILED, OH_NNCompilation_Build(compilation)); + OH_NNCompilation_Destroy(&compilation); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Export_Compilation_Cache_To_Buffer_0100 + * @tc.desc: 传入空指针返回失败 + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, SUB_AI_NNRt_Core_Func_North_Export_Compilation_Cache_To_Buffer_0100, + Function | MediumTest | Level1) +{ + const char *any = "123456789"; + const void *buffer = reinterpret_cast(any); + size_t length = 10; + size_t *modelSize = &length; + OH_NN_ReturnCode ret = OH_NNCompilation_ExportCacheToBuffer(nullptr, buffer, length, modelSize); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Export_Compilation_Cache_To_Buffer_0200 + * @tc.desc: 参数正确,nnrt模型返回不支持 + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, SUB_AI_NNRt_Core_Func_North_Export_Compilation_Cache_To_Buffer_0200, + Function | MediumTest | Level1) +{ + OH_NNCompilation *compilation = nullptr; + OH_NNModel *model = nullptr; + ConstructCompilation(&compilation, &model); + ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(compilation)); + + const char *any = "123456789"; + const void *buffer = reinterpret_cast(any); + size_t length = 10; + size_t *modelSize = &length; + OH_NN_ReturnCode ret = OH_NNCompilation_ExportCacheToBuffer(compilation, buffer, length, modelSize); + ASSERT_EQ(OH_NN_UNSUPPORTED, ret); + OH_NNModel_Destroy(&model); + OH_NNCompilation_Destroy(&compilation); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Import_Compilation_Cache_From_Buffer_0100 + * @tc.desc: buffer为空,返回错误 + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, SUB_AI_NNRt_Core_Func_North_Import_Compilation_Cache_From_Buffer_0100, + Function | MediumTest | Level1) +{ + OH_NNCompilation *compilation = nullptr; + OH_NNModel *model = nullptr; + ConstructCompilation(&compilation, &model); + + const void *buffer = nullptr; + size_t modelSize = MODEL_SIZE; + OH_NN_ReturnCode ret = OH_NNCompilation_ImportCacheFromBuffer(compilation, buffer, modelSize); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, ret); + OH_NNModel_Destroy(&model); + OH_NNCompilation_Destroy(&compilation); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Import_Compilation_Cache_From_Buffer_0200 + * @tc.desc: modelSize为0,返回错误 + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, SUB_AI_NNRt_Core_Func_North_Import_Compilation_Cache_From_Buffer_0200, + Function | 
MediumTest | Level1) +{ + OH_NNCompilation *compilation = nullptr; + OH_NNModel *model = nullptr; + ConstructCompilation(&compilation, &model); + const char *any = "123456789"; + const void *buffer = reinterpret_cast(any); + size_t modelSize = ZERO; + OH_NN_ReturnCode ret = OH_NNCompilation_ImportCacheFromBuffer(compilation, buffer, modelSize); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, ret); + OH_NNModel_Destroy(&model); + OH_NNCompilation_Destroy(&compilation); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Import_Compilation_Cache_From_Buffer_0300 + * @tc.desc: 参数正确,返回不支持 + * @tc.type: FUNC + */ +HWTEST_F(CompilationTest, SUB_AI_NNRt_Core_Func_North_Import_Compilation_Cache_From_Buffer_0300, + Function | MediumTest | Level1) +{ + OH_NNCompilation *compilation = nullptr; + OH_NNModel *model = nullptr; + ConstructCompilation(&compilation, &model); + + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetCache(compilation, CACHE_DIR.c_str(), CACHEVERSION)); + ASSERT_EQ(OH_NN_SUCCESS, SetDevice(compilation)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetPerformanceMode(compilation, OH_NN_PERFORMANCE_EXTREME)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetPriority(compilation, OH_NN_PRIORITY_HIGH)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_EnableFloat16(compilation, false)); + + const char *any = "123456789"; + const void *buffer = reinterpret_cast(any); + size_t modelSize = MODEL_SIZE; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_ImportCacheFromBuffer(compilation, buffer, modelSize)); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNCompilation_Build(compilation)); + OH_NNModel_Destroy(&model); + OH_NNCompilation_Destroy(&compilation); +} +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/nncoretest/src/HdiExecutorTest.cpp b/test/nnrt_xts_acts/nncore/nncoretest/src/HdiExecutorTest.cpp new file mode 100644 index 0000000..422de5c --- /dev/null +++ b/test/nnrt_xts_acts/nncore/nncoretest/src/HdiExecutorTest.cpp @@ -0,0 +1,1032 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include + +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; + +namespace OHOS::NeuralNetworkCore { +void RunDone(void *pointer, OH_NN_ReturnCode returnCode, void* pointerArray[], int32_t intNum) +{ + return; +} + +void ServiceDied(void* point) +{ + return; +} +class ExecutorTest : public testing::Test { +protected: + OHNNCompileParam m_compileParam; + AddModel addModel; + OHNNGraphArgs graphArgs = addModel.graphArgs; +}; + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Construct_Executor_0100 + * @tc.desc: compilation为空,返回失败 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Construct_Executor_0100, Function | MediumTest | Level1) +{ + OH_NNCompilation *compilation = nullptr; + ASSERT_EQ(nullptr, OH_NNExecutor_Construct(compilation)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Destroy_Executor_0100 + * @tc.desc: 重复释放executor,返回失败 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Destroy_Executor_0100, Function | MediumTest | Level1) +{ + OH_NNExecutor *executor = nullptr; + CreateExecutor(&executor); + + OH_NNExecutor_Destroy(&executor); + ASSERT_EQ(nullptr, executor); + OH_NNExecutor_Destroy(&executor); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Destroy_Executor_0200 + * @tc.desc: 正常释放,检查executor为空 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Destroy_Executor_0200, Function | MediumTest | Level1) +{ + OH_NNExecutor *executor = nullptr; + CreateExecutor(&executor); + + OH_NNExecutor_Destroy(&executor); + ASSERT_EQ(nullptr, executor); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Set_Executor_OnRunDone_0100 + * @tc.desc: 在推理完成时设置executor,executor为空,返回失败 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Set_Executor_OnRunDone_0100, Function | MediumTest | Level1) +{ + NN_OnRunDone onRunDone = RunDone; + OH_NNExecutor *executor = nullptr; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetOnRunDone(executor, onRunDone)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Set_Executor_OnRunDone_0200 + * @tc.desc: 在推理完成时设置executor,合法参数返回不支持 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Set_Executor_OnRunDone_0200, Function | MediumTest | Level1) +{ + NN_OnRunDone onRunDone= RunDone; + OH_NNExecutor *executor = nullptr; + CreateExecutor(&executor); + ASSERT_EQ(OH_NN_OPERATION_FORBIDDEN, OH_NNExecutor_SetOnRunDone(executor, onRunDone)); + OH_NNExecutor_Destroy(&executor); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Set_Executor_Service_Died_0100 + * @tc.desc: executor为空,返回失败 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Set_Executor_Service_Died_0100, Function | MediumTest | Level1) +{ + NN_OnServiceDied onServiceDied = ServiceDied; + OH_NNExecutor *executor = nullptr; + + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_SetOnServiceDied(executor, onServiceDied)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Set_Executor_Service_Died_0200 + * @tc.desc: 合法参数,返回不支持 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Set_Executor_Service_Died_0200, Function | MediumTest | Level1) +{ + NN_OnServiceDied onServiceDied = ServiceDied; + OH_NNExecutor *executor = nullptr; + CreateExecutor(&executor); + + ASSERT_EQ(OH_NN_OPERATION_FORBIDDEN, OH_NNExecutor_SetOnServiceDied(executor, onServiceDied)); + OH_NNExecutor_Destroy(&executor); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0100 + 
* @tc.desc: executor is nullptr, failure is returned
+ * @tc.type: FUNC
+ */
+HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0100, Function | MediumTest | Level1)
+{
+    vector<NN_Tensor*> inputTensors, outputTensors;
+    size_t inputCount = 0;
+    size_t outputCount = 0;
+    OH_NNExecutor* executor = nullptr;
+    CreateExecutor(&executor);
+    GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount);
+    OH_NNExecutor_Destroy(&executor);
+    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors));
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunSync(executor, inputTensors.data(), inputCount,
+        outputTensors.data(), outputCount));
+}
+
+/**
+ * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0200
+ * @tc.desc: executor sync inference with a null inputTensor array, failure is returned
+ * @tc.type: FUNC
+ */
+HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0200, Function | MediumTest | Level1)
+{
+    vector<NN_Tensor*> inputTensors, outputTensors;
+    size_t inputCount = 0;
+    size_t outputCount = 0;
+    OH_NNExecutor* executor = nullptr;
+    CreateExecutor(&executor);
+    GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount);
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunSync(executor, nullptr, inputCount,
+        outputTensors.data(), outputCount));
+    OH_NNExecutor_Destroy(&executor);
+    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors));
+}
+
+/**
+ * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0300
+ * @tc.desc: executor sync inference with a null outputTensor array, failure is returned
+ * @tc.type: FUNC
+ */
+HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0300, Function | MediumTest | Level1)
+{
+    vector<NN_Tensor*> inputTensors, outputTensors;
+    size_t inputCount = 0;
+    size_t outputCount = 0;
+    OH_NNExecutor* executor = nullptr;
+    CreateExecutor(&executor);
+    GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount);
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunSync(executor, inputTensors.data(), inputCount,
+        nullptr, outputCount));
+    OH_NNExecutor_Destroy(&executor);
+    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors));
+}
+
+/**
+ * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0400
+ * @tc.desc: executor sync inference with inputCount set to 0, failure is returned
+ * @tc.type: FUNC
+ */
+HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0400, Function | MediumTest | Level1)
+{
+    vector<NN_Tensor*> inputTensors, outputTensors;
+    size_t inputCount = 0;
+    size_t outputCount = 0;
+    OH_NNExecutor* executor = nullptr;
+    CreateExecutor(&executor);
+    GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount);
+    inputCount = 0;
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunSync(executor, inputTensors.data(), inputCount,
+        outputTensors.data(), outputCount));
+    OH_NNExecutor_Destroy(&executor);
+    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors));
+}
+
+/**
+ * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0500
+ * @tc.desc: executor sync inference with outputCount set to 0, failure is returned
+ * @tc.type: FUNC
+ */
+HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0500, Function | MediumTest | Level1)
+{
+    vector<NN_Tensor*> inputTensors, outputTensors;
+    size_t inputCount = 0;
+    size_t outputCount = 0;
+    OH_NNExecutor* executor = nullptr;
+    CreateExecutor(&executor);
+    GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount);
+    outputCount = 0;
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunSync(executor, inputTensors.data(), 
inputCount, + outputTensors.data(), outputCount)); + OH_NNExecutor_Destroy(&executor); + ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0600 + * @tc.desc: executor sync推理,inputTensor个数不足,小于正确的输入数量,返回错误 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0600, Function | MediumTest | Level1) +{ + vector inputTensors, outputTensors; + size_t inputCount = 0; + size_t outputCount = 0; + OH_NNExecutor* executor = nullptr; + CreateExecutor(&executor); + GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunSync(executor, inputTensors.data(), inputCount - 1, + outputTensors.data(), outputCount)); + OH_NNExecutor_Destroy(&executor); + ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0700 + * @tc.desc: executor sync推理,inputTensor数组个数超过inputNum,返回错误 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0700, Function | MediumTest | Level1) +{ + vector inputTensors, outputTensors; + size_t inputCount = 0; + size_t outputCount = 0; + OH_NNExecutor* executor = nullptr; + CreateExecutor(&executor); + GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunSync(executor, inputTensors.data(), inputCount + 1, + outputTensors.data(), outputCount)); + OH_NNExecutor_Destroy(&executor); + ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0800 + * @tc.desc: executor sync推理,outputTensor个数不足,小于正确的输入数量,返回错误 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0800, Function | MediumTest | Level1) +{ + vector inputTensors, outputTensors; + size_t inputCount = 0; + size_t outputCount = 0; + OH_NNExecutor* executor = nullptr; + CreateExecutor(&executor); + GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunSync(executor, inputTensors.data(), inputCount, + outputTensors.data(), outputCount - 1)); + OH_NNExecutor_Destroy(&executor); + ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0900 + * @tc.desc: executor sync推理,outputTensor数组个数超过outputNum,返回错误 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_RunSync_0900, Function | MediumTest | Level1) +{ + vector inputTensors, outputTensors; + size_t inputCount = 0; + size_t outputCount = 0; + OH_NNExecutor* executor = nullptr; + CreateExecutor(&executor); + GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunSync(executor, inputTensors.data(), inputCount, + outputTensors.data(), outputCount + 1)); + OH_NNExecutor_Destroy(&executor); + ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_RunASync_0100 + * @tc.desc: executor async推理,executor为空,返回失败 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_RunASync_0100, Function | MediumTest | Level1) +{ + vector inputTensors, outputTensors; + 
size_t inputCount = 0; + size_t outputCount = 0; + OH_NNExecutor* executor = nullptr; + CreateExecutor(&executor); + int32_t timeout = 60; + void* userData = (void*) executor; + GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount); + OH_NNExecutor_Destroy(&executor); + ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors)); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunAsync(executor, inputTensors.data(), inputCount, + outputTensors.data(), outputCount, timeout, userData)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_RunASync_0200 + * @tc.desc: executor async推理,inputCount为0,返回失败 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_RunASync_0200, Function | MediumTest | Level1) +{ + vector inputTensors, outputTensors; + size_t inputCount = 0; + size_t outputCount = 0; + OH_NNExecutor* executor = nullptr; + CreateExecutor(&executor); + int32_t timeout = 60; + void* userData = (void*) executor; + GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount); + inputCount = 0; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunAsync(executor, inputTensors.data(), inputCount, + outputTensors.data(), outputCount, timeout, userData)); + OH_NNExecutor_Destroy(&executor); + ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_RunASync_0300 + * @tc.desc: executor async推理,outputCount为0,返回失败 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_RunASync_0300, Function | MediumTest | Level1) +{ + vector inputTensors, outputTensors; + size_t inputCount = 0; + size_t outputCount = 0; + OH_NNExecutor* executor = nullptr; + CreateExecutor(&executor); + int32_t timeout = 60; + void* userData = (void*) executor; + GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount); + outputCount = 0; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunAsync(executor, inputTensors.data(), inputCount, + outputTensors.data(), outputCount, timeout, userData)); + OH_NNExecutor_Destroy(&executor); + ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_RunASync_0400 + * @tc.desc: executor async推理,inputTensor为空指针 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_RunASync_0400, Function | MediumTest | Level1) +{ + vector inputTensors, outputTensors; + size_t inputCount = 0; + size_t outputCount = 0; + OH_NNExecutor* executor = nullptr; + CreateExecutor(&executor); + int32_t timeout = 60; + void* userData = (void*) executor; + GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunAsync(executor, nullptr, inputCount, + outputTensors.data(), outputCount, timeout, userData)); + OH_NNExecutor_Destroy(&executor); + ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_RunASync_0500 + * @tc.desc: executor async推理,outputTensor为空指针 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_RunASync_0500, Function | MediumTest | Level1) +{ + vector inputTensors, outputTensors; + size_t inputCount = 0; + size_t outputCount = 0; + OH_NNExecutor* executor = nullptr; + CreateExecutor(&executor); + int32_t timeout = 60; + void* userData = (void*) 
executor; + GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunAsync(executor, inputTensors.data(), inputCount, + nullptr, outputCount, timeout, userData)); + OH_NNExecutor_Destroy(&executor); + ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_RunASync_0600 + * @tc.desc: executor async推理,定长模型返回不支持 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_RunASync_0600, Function | MediumTest | Level1) +{ + vector inputTensors, outputTensors; + size_t inputCount = 0; + size_t outputCount = 0; + OH_NNExecutor* executor = nullptr; + CreateExecutor(&executor); + int32_t timeout = 60; + void* userData = (void*) executor; + GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount); + ASSERT_EQ(OH_NN_OPERATION_FORBIDDEN, OH_NNExecutor_RunAsync(executor, inputTensors.data(), inputCount, + outputTensors.data(), outputCount, + timeout, userData)); + OH_NNExecutor_Destroy(&executor); + ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Output_Shape_0100 + * @tc.desc: executor为空,返回失败 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Output_Shape_0100, Function | MediumTest | Level1) +{ + int32_t *outputDimensions = nullptr; + uint32_t outputDimensionCount = 0; + uint32_t outputIndex = 0; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetOutputShape(nullptr, outputIndex, + &outputDimensions, &outputDimensionCount)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Output_Shape_0200 + * @tc.desc: outputindex不存在,等于输出个数,返回失败 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Output_Shape_0200, Function | MediumTest | Level1) +{ + OH_NNExecutor* executor = nullptr; + CreateExecutor(&executor); + + int32_t *outputDimensions = nullptr; + uint32_t outputDimensionCount = 0; + uint32_t addOutputIndex = 4; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetOutputShape(executor, addOutputIndex, + &outputDimensions, &outputDimensionCount)); + OH_NNExecutor_Destroy(&executor); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Output_Shape_0300 + * @tc.desc: 定长模型推理成功,获取输出维度成功 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Output_Shape_0300, Function | MediumTest | Level1) +{ + OH_NNExecutor* executor = nullptr; + CreateExecutor(&executor); + + vector inputTensors; + vector outputTensors; + size_t inputCount = 0; + size_t outputCount = 0; + GetExecutorInputOutputTensor(executor, inputTensors, inputCount, outputTensors, outputCount); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_RunSync(executor, inputTensors.data(), inputCount, + outputTensors.data(), outputCount)); + + int32_t *outputDimensions = nullptr; + uint32_t outputDimensionCount = 0; + uint32_t addOutputIndex = 0; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetOutputShape(executor, addOutputIndex, + &outputDimensions, &outputDimensionCount)); + + // 销毁Executor + OH_NNExecutor_Destroy(&executor); + ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Output_Shape_0400 + * @tc.desc: 变长模型推理成功,获取输出维度成功 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Output_Shape_0400, Function 
| MediumTest | Level1)
+{
+    OH_NNExecutor* executor = nullptr;
+    CreateDynamicExecutor(&executor);
+
+    // Create the input and output tensor descriptions
+    vector<NN_TensorDesc*> inputTensorDescs;
+    vector<NN_TensorDesc*> outputTensorDescs;
+    size_t inputCount = 0;
+    size_t outputCount = 0;
+    GetExecutorInputOutputTensorDesc(executor, inputTensorDescs, inputCount, outputTensorDescs, outputCount);
+
+    // Set valid input dimensions (the minimum of each input's dimension range)
+    size_t *minInputDims = nullptr;
+    size_t *maxInputDims = nullptr;
+    size_t shapeLength = ZERO;
+    for (size_t i = 0; i < inputTensorDescs.size(); ++i) {
+        ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetInputDimRange(executor, i, &minInputDims,
+            &maxInputDims, &shapeLength));
+        std::vector<int32_t> minInputDimsT;
+        for (size_t j = 0; j < shapeLength; ++j) {
+            minInputDimsT.emplace_back(static_cast<int32_t>(minInputDims[j]));
+        }
+        ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetShape(inputTensorDescs[i], minInputDimsT.data(), shapeLength));
+    }
+    std::vector<int32_t> outputShape{1, 2, 2, 1};
+    for (size_t i = 0; i < outputTensorDescs.size(); ++i) {
+        ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetShape(outputTensorDescs[i],
+            outputShape.data(), outputShape.size()));
+    }
+
+    vector<NN_Tensor*> inputTensors;
+    vector<NN_Tensor*> outputTensors;
+    GetExecutorInputOutputTensorByDesc(executor, inputTensors, inputTensorDescs, outputTensors, outputTensorDescs);
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_RunSync(executor, inputTensors.data(), inputCount,
+        outputTensors.data(), outputCount));
+
+    int32_t *outputDimensions = nullptr;
+    uint32_t outputDimensionCount = 0;
+    uint32_t addOutputIndex = 0;
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetOutputShape(executor, addOutputIndex,
+        &outputDimensions, &outputDimensionCount));
+
+    // Destroy the executor
+    OH_NNExecutor_Destroy(&executor);
+    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensorDesc(inputTensorDescs, outputTensorDescs));
+    ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors));
+}
+
+/**
+ * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Count_0100
+ * @tc.desc: executor is nullptr, returns failure
+ * @tc.type: FUNC
+ */
+HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Count_0100, Function | MediumTest | Level1)
+{
+    size_t inputCount = ZERO;
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetInputCount(nullptr, &inputCount));
+}
+
+/**
+ * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Count_0200
+ * @tc.desc: inputCount is nullptr, returns failure
+ * @tc.type: FUNC
+ */
+HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Count_0200, Function | MediumTest | Level1)
+{
+    OH_NNExecutor* executor = nullptr;
+    CreateExecutor(&executor);
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetInputCount(executor, nullptr));
+    OH_NNExecutor_Destroy(&executor);
+}
+
+/**
+ * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Count_0300
+ * @tc.desc: get the input count, returns success
+ * @tc.type: FUNC
+ */
+HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Count_0300, Function | MediumTest | Level1)
+{
+    OH_NNExecutor* executor = nullptr;
+    CreateExecutor(&executor);
+
+    size_t inputCount = ZERO;
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetInputCount(executor, &inputCount));
+    ASSERT_LT(ZERO, inputCount);
+    OH_NNExecutor_Destroy(&executor);
+}
+
+/**
+ * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Output_Count_0100
+ * @tc.desc: executor is nullptr, returns failure
+ * @tc.type: FUNC
+ */
+HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Output_Count_0100, Function | MediumTest | Level1)
+{
+    size_t outputCount = 0;
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetOutputCount(nullptr, &outputCount));
+}
+
+/**
+ * @tc.name: 
SUB_AI_NNRt_Core_Func_North_Executor_Get_Output_Count_0200 + * @tc.desc: outputCount为空,返回失败 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Output_Count_0200, Function | MediumTest | Level1) +{ + OH_NNExecutor* executor = nullptr; + CreateExecutor(&executor); + + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetOutputCount(executor, nullptr)); + OH_NNExecutor_Destroy(&executor); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Output_Count_0300 + * @tc.desc: 获取输出个数,返回成功 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Output_Count_0300, Function | MediumTest | Level1) +{ + OH_NNExecutor* executor = nullptr; + CreateExecutor(&executor); + + size_t outputCount = ZERO; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetOutputCount(executor, &outputCount)); + ASSERT_LT(ZERO, outputCount); + OH_NNExecutor_Destroy(&executor); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Create_Input_TensorDesc_0100 + * @tc.desc: excutor为空,返回失败 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Create_Input_TensorDesc_0100, + Function | MediumTest | Level1) +{ + OH_NNExecutor* executor = nullptr; + CreateExecutor(&executor); + + size_t index = ZERO; + ASSERT_EQ(nullptr, OH_NNExecutor_CreateInputTensorDesc(nullptr, index)); + OH_NNExecutor_Destroy(&executor); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Create_Input_TensorDesc_0200 + * @tc.desc: 遍历创建输入tensorDesc,index小于输出个数,成功 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Create_Input_TensorDesc_0200, + Function | MediumTest | Level1) +{ + OH_NNExecutor* executor = nullptr; + CreateExecutor(&executor); + size_t inputCount = ZERO; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetInputCount(executor, &inputCount)); + NN_TensorDesc* tensorDesc = nullptr; + for (size_t i = 0; i < inputCount; i++) { + tensorDesc = OH_NNExecutor_CreateInputTensorDesc(executor, i); + ASSERT_NE(nullptr, tensorDesc); + OH_NNTensorDesc_Destroy(&tensorDesc); + } + OH_NNExecutor_Destroy(&executor); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Create_Input_TensorDesc_0300 + * @tc.desc: index等于输出个数,返回失败 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Create_Input_TensorDesc_0300, + Function | MediumTest | Level1) +{ + OH_NNExecutor* executor = nullptr; + CreateExecutor(&executor); + size_t inputCount = ZERO; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetInputCount(executor, &inputCount)); + + ASSERT_EQ(nullptr, OH_NNExecutor_CreateInputTensorDesc(executor, inputCount)); + OH_NNExecutor_Destroy(&executor); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Create_Output_TensorDesc_0100 + * @tc.desc: excutor为空,返回失败 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Create_Output_TensorDesc_0100, + Function | MediumTest | Level1) +{ + OH_NNExecutor* executor = nullptr; + CreateExecutor(&executor); + + size_t index = ZERO; + ASSERT_EQ(nullptr, OH_NNExecutor_CreateOutputTensorDesc(nullptr, index)); + OH_NNExecutor_Destroy(&executor); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Create_Output_TensorDesc_0200 + * @tc.desc: 遍历创建输入tensorDesc,index小于输出个数,成功 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Create_Output_TensorDesc_0200, + Function | MediumTest | Level1) +{ + OH_NNExecutor* executor = nullptr; + CreateExecutor(&executor); + size_t outputCount = ZERO; + 
ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetOutputCount(executor, &outputCount)); + NN_TensorDesc* tensorDesc = nullptr; + for (size_t i = 0; i < outputCount; i++) { + tensorDesc = OH_NNExecutor_CreateOutputTensorDesc(executor, i); + ASSERT_NE(nullptr, tensorDesc); + OH_NNTensorDesc_Destroy(&tensorDesc); + } + OH_NNExecutor_Destroy(&executor); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Create_Output_TensorDesc_0300 + * @tc.desc: index等于输出个数,返回失败 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Create_Output_TensorDesc_0300, + Function | MediumTest | Level1) +{ + OH_NNExecutor* executor = nullptr; + CreateExecutor(&executor); + size_t outputCount = ZERO; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetOutputCount(executor, &outputCount)); + ASSERT_EQ(nullptr, OH_NNExecutor_CreateOutputTensorDesc(executor, outputCount)); + OH_NNExecutor_Destroy(&executor); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0100 + * @tc.desc: excutor为空,返回失败 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0100, Function | MediumTest | Level1) +{ + OH_NNExecutor* executor = nullptr; + size_t index = ZERO; + size_t *minInputDims = nullptr; + size_t *maxInputDims = nullptr; + size_t shapeLength = ZERO; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetInputDimRange(executor, index, &minInputDims, + &maxInputDims, &shapeLength)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0200 + * @tc.desc: 获取动态输入范围,设置index等于输入个数,超出限制,获取失败 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0200, Function | MediumTest | Level1) +{ + OH_NNExecutor* executor = nullptr; + CreateDynamicExecutor(&executor); + + size_t index = 6; + size_t *minInputDims = nullptr; + size_t *maxInputDims = nullptr; + size_t shapeLength = ZERO; + + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_GetInputDimRange(executor, index, &minInputDims, + &maxInputDims, &shapeLength)); + OH_NNExecutor_Destroy(&executor); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0300 + * @tc.desc: 变长模型推理,获取输入维度,获取成功,设置输入维度等于最小临界值,成功 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0300, Function | MediumTest | Level1) +{ + OH_NNExecutor* executor = nullptr; + CreateDynamicExecutor(&executor); + + // 创建输入输出tensorDesc + vector inputTensorDescs; + vector outputTensorDescs; + size_t inputCount = 0; + size_t outputCount = 0; + GetExecutorInputOutputTensorDesc(executor, inputTensorDescs, inputCount, outputTensorDescs, outputCount); + + // 修改tensorDesc中shape为最小临界值 + size_t *minInputDims = nullptr; + size_t *maxInputDims = nullptr; + size_t shapeLength = ZERO; + for (size_t i = 0; i < inputTensorDescs.size(); ++i) { + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetInputDimRange(executor, i, &minInputDims, + &maxInputDims, &shapeLength)); + std::vector minInputDimsT; + for (size_t j = 0; j < shapeLength; ++j) { + minInputDimsT.emplace_back(static_cast(minInputDims[j])); + } + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetShape(inputTensorDescs[i], minInputDimsT.data(), shapeLength)); + } + std::vector outputShape{1, 2, 2, 1}; + for (size_t i = 0; i < outputTensorDescs.size(); ++i) { + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetShape(outputTensorDescs[i], + outputShape.data(), outputShape.size())); + } + + vector inputTensors; + vector outputTensors; + 
GetExecutorInputOutputTensorByDesc(executor, inputTensors, inputTensorDescs, outputTensors, outputTensorDescs); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_RunSync(executor, inputTensors.data(), inputCount, + outputTensors.data(), outputCount)); + + // 销毁Executor + OH_NNExecutor_Destroy(&executor); + ASSERT_EQ(OH_NN_SUCCESS, DestroyTensorDesc(inputTensorDescs, outputTensorDescs)); + ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0400 + * @tc.desc: 变长模型编推理,获取输入维度,获取成功,设置输入维度等于最大临界值,成功 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0400, Function | MediumTest | Level1) +{ + OH_NNExecutor* executor = nullptr; + CreateDynamicExecutor(&executor); + + // 创建输入输出tensorDesc + vector inputTensorDescs; + vector outputTensorDescs; + size_t inputCount = 0; + size_t outputCount = 0; + GetExecutorInputOutputTensorDesc(executor, inputTensorDescs, inputCount, outputTensorDescs, outputCount); + + // 修改tensorDesc中shape为最大临界值 + size_t *minInputDims = nullptr; + size_t *maxInputDims = nullptr; + size_t shapeLength = ZERO; + for (size_t i = 0; i < inputTensorDescs.size(); ++i) { + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetInputDimRange(executor, i, &minInputDims, + &maxInputDims, &shapeLength)); + std::vector maxInputDimsT; + for (size_t j = 0; j < shapeLength; ++j) { + maxInputDimsT.emplace_back(static_cast(maxInputDims[j])); + } + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetShape(inputTensorDescs[i], maxInputDimsT.data(), shapeLength)); + } + std::vector outputShape{1, 2, 2, 1}; + for (size_t i = 0; i < outputTensorDescs.size(); ++i) { + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetShape(outputTensorDescs[i], + outputShape.data(), outputShape.size())); + } + + vector inputTensors; + vector outputTensors; + GetExecutorInputOutputTensorByDesc(executor, inputTensors, inputTensorDescs, outputTensors, outputTensorDescs); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_RunSync(executor, inputTensors.data(), inputCount, + outputTensors.data(), outputCount)); + + // 销毁Executor + OH_NNExecutor_Destroy(&executor); + ASSERT_EQ(OH_NN_SUCCESS, DestroyTensorDesc(inputTensorDescs, outputTensorDescs)); + ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0500 + * @tc.desc: 变长模型推理,获取输入维度,获取成功,设置输入维度小于最小临界值,推理失败 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0500, Function | MediumTest | Level1) +{ + OH_NNExecutor* executor = nullptr; + CreateDynamicExecutor(&executor); + + // 创建输入输出tensorDesc + vector inputTensorDescs; + vector outputTensorDescs; + size_t inputCount = 0; + size_t outputCount = 0; + GetExecutorInputOutputTensorDesc(executor, inputTensorDescs, inputCount, outputTensorDescs, outputCount); + + // 设置输入维度小于最小临界值 + size_t *minInputDims = nullptr; + size_t *maxInputDims = nullptr; + size_t shapeLength = ZERO; + for (size_t i = 0; i < inputTensorDescs.size(); ++i) { + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetInputDimRange(executor, i, &minInputDims, + &maxInputDims, &shapeLength)); + std::vector minInputDimsT; + for (size_t j = 0; j < shapeLength; ++j) { + minInputDimsT.emplace_back(static_cast(minInputDims[j] - 1)); + } + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetShape(inputTensorDescs[i], minInputDimsT.data(), shapeLength)); + } + std::vector outputShape{1, 2, 2, 1}; + for (size_t i = 0; i < 
outputTensorDescs.size(); ++i) { + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetShape(outputTensorDescs[i], + outputShape.data(), outputShape.size())); + } + + vector inputTensors; + vector outputTensors; + GetExecutorInputOutputTensorByDesc(executor, inputTensors, inputTensorDescs, outputTensors, outputTensorDescs); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunSync(executor, inputTensors.data(), inputCount, + outputTensors.data(), outputCount)); + + // 销毁Executor + OH_NNExecutor_Destroy(&executor); + ASSERT_EQ(OH_NN_SUCCESS, DestroyTensorDesc(inputTensorDescs, outputTensorDescs)); + ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0600 + * @tc.desc: 变长模型推理,依次获取输入维度,获取成功,设置输入维度大于最大临界值,推理失败 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0600, Function | MediumTest | Level1) +{ + OH_NNExecutor* executor = nullptr; + CreateDynamicExecutor(&executor); + + // 创建输入输出tensorDesc + vector inputTensorDescs; + vector outputTensorDescs; + size_t inputCount = 0; + size_t outputCount = 0; + GetExecutorInputOutputTensorDesc(executor, inputTensorDescs, inputCount, outputTensorDescs, outputCount); + + // 设置输入维度大于最大临界值 + size_t *minInputDims = nullptr; + size_t *maxInputDims = nullptr; + size_t shapeLength = ZERO; + for (size_t i = 0; i < inputTensorDescs.size(); ++i) { + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetInputDimRange(executor, i, &minInputDims, + &maxInputDims, &shapeLength)); + std::vector maxInputDimsT; + for (size_t j = 0; j < shapeLength; ++j) { + maxInputDimsT.emplace_back(static_cast(maxInputDims[j] + 1)); + } + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetShape(inputTensorDescs[i], maxInputDimsT.data(), shapeLength)); + } + std::vector outputShape{1, 2, 2, 1}; + for (size_t i = 0; i < outputTensorDescs.size(); ++i) { + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetShape(outputTensorDescs[i], + outputShape.data(), outputShape.size())); + } + + vector inputTensors; + vector outputTensors; + GetExecutorInputOutputTensorByDesc(executor, inputTensors, inputTensorDescs, outputTensors, outputTensorDescs); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunSync(executor, inputTensors.data(), inputCount, + outputTensors.data(), outputCount)); + + // 销毁Executor + OH_NNExecutor_Destroy(&executor); + ASSERT_EQ(OH_NN_SUCCESS, DestroyTensorDesc(inputTensorDescs, outputTensorDescs)); + ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0700 + * @tc.desc: 变长模型推理,依次获取输入维度,获取成功,设置输入shape个数超过shapeNum,推理失败 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0700, Function | MediumTest | Level1) +{ + OH_NNExecutor* executor = nullptr; + CreateDynamicExecutor(&executor); + + // 创建输入输出tensorDesc + vector inputTensorDescs; + vector outputTensorDescs; + size_t inputCount = 0; + size_t outputCount = 0; + GetExecutorInputOutputTensorDesc(executor, inputTensorDescs, inputCount, outputTensorDescs, outputCount); + + // 修改tensorDesc中shape为最大临界值 + size_t *minInputDims = nullptr; + size_t *maxInputDims = nullptr; + size_t shapeLength = ZERO; + for (size_t i = 0; i < inputTensorDescs.size(); ++i) { + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetInputDimRange(executor, i, &minInputDims, + &maxInputDims, &shapeLength)); + std::vector maxInputDimsT; + for (size_t j = 0; j < shapeLength; ++j) { + 
maxInputDimsT.emplace_back(static_cast(maxInputDims[j])); + } + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetShape(inputTensorDescs[i], maxInputDimsT.data(), shapeLength)); + } + std::vector outputShape{1, 2, 2, 1}; + for (size_t i = 0; i < outputTensorDescs.size(); ++i) { + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetShape(outputTensorDescs[i], + outputShape.data(), outputShape.size())); + } + + vector inputTensors; + vector outputTensors; + GetExecutorInputOutputTensorByDesc(executor, inputTensors, inputTensorDescs, outputTensors, outputTensorDescs); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNExecutor_RunSync(executor, inputTensors.data(), inputCount + 1, + outputTensors.data(), outputCount)); + + // 销毁Executor + OH_NNExecutor_Destroy(&executor); + ASSERT_EQ(OH_NN_SUCCESS, DestroyTensorDesc(inputTensorDescs, outputTensorDescs)); + ASSERT_EQ(OH_NN_SUCCESS, DestroyTensor(inputTensors, outputTensors)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0800 + * @tc.desc: 定长模型推理调用,返回正确 + * @tc.type: FUNC + */ +HWTEST_F(ExecutorTest, SUB_AI_NNRt_Core_Func_North_Executor_Get_Input_Dim_Range_0800, Function | MediumTest | Level1) +{ + OH_NNExecutor* executor = nullptr; + CreateExecutor(&executor); + + size_t index = ZERO; + size_t *minInputDims = nullptr; + size_t *maxInputDims = nullptr; + size_t shapeLength = ZERO; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_GetInputDimRange(executor, index, &minInputDims, + &maxInputDims, &shapeLength)); + OH_NNExecutor_Destroy(&executor); +} +} // namespace OHOS::NeuralNetworkCore diff --git a/test/nnrt_xts_acts/nncore/nncoretest/src/HdiModelTest.cpp b/test/nnrt_xts_acts/nncore/nncoretest/src/HdiModelTest.cpp new file mode 100644 index 0000000..440541e --- /dev/null +++ b/test/nnrt_xts_acts/nncore/nncoretest/src/HdiModelTest.cpp @@ -0,0 +1,420 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+#include <gtest/gtest.h>
+
+#include "nncore_utils.h"
+
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Test;
+using namespace OHOS::HDI::Nnrt::V2_0;
+
+namespace {
+
+class ModelTest : public testing::Test {
+protected:
+    AddModel addModel;
+    OHNNGraphArgs graphArgs = addModel.graphArgs;
+};
+
+void BuildAddTopKGraph(OH_NNModel *model)
+{
+    AddTopKModel addTopKModel;
+    OHNNGraphArgsMulti graphArgsMulti = addTopKModel.graphArgs;
+    ASSERT_EQ(OH_NN_SUCCESS, BuildMultiOpGraph(model, graphArgsMulti));
+}
+
+void BuildModel(OH_NNModel *model, const OHNNGraphArgs &graphArgs)
+{
+    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+}
+
+} // namespace
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Model_CreateQuantParam_0100
+ * @tc.desc: create quantization parameters
+ * @tc.type: FUNC
+ */
+HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_CreateQuantParam_0100, Function | MediumTest | Level0)
+{
+    NN_QuantParam* quantParam = OH_NNQuantParam_Create();
+    ASSERT_NE(nullptr, quantParam);
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNQuantParam_Destroy(&quantParam));
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Model_DestroyQuantParam_0100
+ * @tc.desc: destroy quantization parameters, *quantParam is nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_DestroyQuantParam_0100, Function | MediumTest | Level0)
+{
+    NN_QuantParam* quantParam = nullptr;
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNQuantParam_Destroy(&quantParam));
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Model_DestroyQuantParam_0200
+ * @tc.desc: destroy quantization parameters, quantParam is nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_DestroyQuantParam_0200, Function | MediumTest | Level0)
+{
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNQuantParam_Destroy(nullptr));
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Model_SetQuantParam_0100
+ * @tc.desc: set quantization parameters, quantParam is nullptr, returns failure
+ * @tc.type: FUNC
+ */
+HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SetQuantParam_0100, Function | MediumTest | Level0)
+{
+    double scales = 0.2;
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNQuantParam_SetScales(nullptr, &scales, 1));
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Model_SetQuantParam_0200
+ * @tc.desc: set quantization parameters with valid arguments, count = 1, tensor channel = 1, call succeeds
+ * @tc.type: FUNC
+ */
+HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SetQuantParam_0200, Function | MediumTest | Level0)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    ASSERT_NE(nullptr, model);
+    int32_t inputDims[4] = {1, 1, 2, 3};
+    NN_TensorDesc* tensorDesc = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW);
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc));
+    NN_QuantParam* quantParam = OH_NNQuantParam_Create();
+    double scales = 0.2;
+    int32_t zeroPoints = 0;
+    uint32_t numBits = 8;
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNQuantParam_SetScales(quantParam, &scales, 1));
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNQuantParam_SetZeroPoints(quantParam, &zeroPoints, 1));
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNQuantParam_SetNumBits(quantParam, &numBits, 1));
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorQuantParams(model, 0, quantParam));
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNQuantParam_Destroy(&quantParam));
+    OH_NNModel_Destroy(&model);
+    OH_NNTensorDesc_Destroy(&tensorDesc);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Model_SetQuantParam_0300
+ * @tc.desc: set quantization parameters, count = 1, channel = 2, per-layer mode, set succeeds
+ * @tc.type: FUNC
+ */
+HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SetQuantParam_0300, Function | MediumTest | Level0)
+{
+    OH_NNModel *model = 
OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + int32_t inputDims[4] = {1, 2, 2, 3}; + NN_TensorDesc* tensorDesc = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + NN_QuantParam* quantParam = OH_NNQuantParam_Create(); + double scales = 0.2; + int32_t zeroPoints = 0; + uint32_t numBits = 8; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNQuantParam_SetScales(quantParam, &scales, 1)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNQuantParam_SetZeroPoints(quantParam, &zeroPoints, 1)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNQuantParam_SetNumBits(quantParam, &numBits, 1)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorQuantParams(model, 0, quantParam)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNQuantParam_Destroy(&quantParam)); + OH_NNModel_Destroy(&model); + OH_NNTensorDesc_Destroy(&tensorDesc); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SetQuantParam_0400 + * @tc.desc: 设置量化参数,参数合法,count=2,参数的channel=1,调用失败 + * @tc.type: FUNC + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SetQuantParam_0400, Function | MediumTest | Level0) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + int32_t inputDims[4] = {1, 1, 2, 3}; + NN_TensorDesc* tensorDesc = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + NN_QuantParam* quantParam = OH_NNQuantParam_Create(); + double scales = 0.2; + int32_t zeroPoints = 0; + uint32_t numBits = 8; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNQuantParam_SetScales(quantParam, &scales, 2)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNQuantParam_SetZeroPoints(quantParam, &zeroPoints, 2)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNQuantParam_SetNumBits(quantParam, &numBits, 2)); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorQuantParams(model, 0, quantParam)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNQuantParam_Destroy(&quantParam)); + OH_NNModel_Destroy(&model); + OH_NNTensorDesc_Destroy(&tensorDesc); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SetQuantParam_0500 + * @tc.desc: 设置量化参数,参数合法,参数count不一致,调用失败 + * @tc.type: FUNC + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SetQuantParam_0500, Function | MediumTest | Level0) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + int32_t inputDims[4] = {1, 1, 2, 3}; + NN_TensorDesc* tensorDesc = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + NN_QuantParam* quantParam = OH_NNQuantParam_Create(); + double scales = 0.2; + int32_t zeroPoints = 0; + uint32_t numBits = 8; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNQuantParam_SetScales(quantParam, &scales, 1)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNQuantParam_SetZeroPoints(quantParam, &zeroPoints, 2)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNQuantParam_SetNumBits(quantParam, &numBits, 1)); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorQuantParams(model, 0, quantParam)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNQuantParam_Destroy(&quantParam)); + OH_NNModel_Destroy(&model); + OH_NNTensorDesc_Destroy(&tensorDesc); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SetQuantParam_0600 + * @tc.desc: 设置量化参数,参数合法,不设置zeroPoints,调用失败 + * @tc.type: FUNC + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SetQuantParam_0600, Function | MediumTest | Level0) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + int32_t inputDims[4] = {1, 1, 2, 3}; + NN_TensorDesc* tensorDesc = 
createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + NN_QuantParam* quantParam = OH_NNQuantParam_Create(); + double scales = 0.2; + uint32_t numBits = 8; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNQuantParam_SetScales(quantParam, &scales, 1)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNQuantParam_SetNumBits(quantParam, &numBits, 1)); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorQuantParams(model, 0, quantParam)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNQuantParam_Destroy(&quantParam)); + OH_NNModel_Destroy(&model); + OH_NNTensorDesc_Destroy(&tensorDesc); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SetQuantParam_0700 + * @tc.desc: 设置量化参数,参数合法,不设置numBits,调用失败 + * @tc.type: FUNC + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SetQuantParam_0700, Function | MediumTest | Level0) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + int32_t inputDims[4] = {1, 1, 2, 3}; + NN_TensorDesc* tensorDesc = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + NN_QuantParam* quantParam = OH_NNQuantParam_Create(); + double scales = 0.2; + int32_t zeroPoints = 0; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNQuantParam_SetScales(quantParam, &scales, 1)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNQuantParam_SetZeroPoints(quantParam, &zeroPoints, 1)); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorQuantParams(model, 0, quantParam)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNQuantParam_Destroy(&quantParam)); + OH_NNModel_Destroy(&model); + OH_NNTensorDesc_Destroy(&tensorDesc); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SetQuantParam_0800 + * @tc.desc: 设置量化参数,参数合法,不设置scales,调用失败 + * @tc.type: FUNC + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SetQuantParam_0800, Function | MediumTest | Level0) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + int32_t inputDims[4] = {1, 1, 2, 3}; + NN_TensorDesc* tensorDesc = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + NN_QuantParam* quantParam = OH_NNQuantParam_Create(); + int32_t zeroPoints = 0; + uint32_t numBits = 8; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNQuantParam_SetZeroPoints(quantParam, &zeroPoints, 1)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNQuantParam_SetNumBits(quantParam, &numBits, 1)); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorQuantParams(model, 0, quantParam)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNQuantParam_Destroy(&quantParam)); + OH_NNModel_Destroy(&model); + OH_NNTensorDesc_Destroy(&tensorDesc); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SetQuantParam_0900 + * @tc.desc: 设置量化参数,参数合法,tensor下标等于当前tensor个数,返回失败 + * @tc.type: FUNC + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SetQuantParam_0900, Function | MediumTest | Level0) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + int32_t inputDims[4] = {1, 1, 2, 3}; + NN_TensorDesc* tensorDesc = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + NN_QuantParam* quantParam = OH_NNQuantParam_Create(); + double scales = 0.2; + int32_t zeroPoints = 0; + uint32_t numBits = 8; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNQuantParam_SetScales(quantParam, &scales, 1)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNQuantParam_SetZeroPoints(quantParam, &zeroPoints, 1)); + 
ASSERT_EQ(OH_NN_SUCCESS, OH_NNQuantParam_SetNumBits(quantParam, &numBits, 1)); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorQuantParams(model, 1, quantParam)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNQuantParam_Destroy(&quantParam)); + OH_NNModel_Destroy(&model); + OH_NNTensorDesc_Destroy(&tensorDesc); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddTensorToModel_0100 + * @tc.desc: 添加tensor到模型,model为空 + * @tc.type: FUNC + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddTensorToModel_0100, Function | MediumTest | Level0) +{ + int32_t inputDims[4] = {1, 1, 2, 3}; + NN_TensorDesc* tensorDesc = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddTensorToModel(nullptr, tensorDesc)); + OH_NNTensorDesc_Destroy(&tensorDesc); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_AddTensorToModel_0200 + * @tc.desc: 添加tensor到模型,tensorDesc为空 + * @tc.type: FUNC + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_AddTensorToModel_0200, Function | MediumTest | Level0) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddTensorToModel(model, nullptr)); + OH_NNModel_Destroy(&model); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SetTensorData_0100 + * @tc.desc: 设置操作数值,model为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SetTensorData_0100, Function | MediumTest | Level0) +{ + int8_t activationValue{0}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(nullptr, 0, (void *)&activationValue, sizeof(int8_t))); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SetTensorData_0200 + * @tc.desc: 设置操作数值,databuffer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SetTensorData_0200, Function | MediumTest | Level0) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + int32_t inputDims[4] = {1, 1, 2, 3}; + NN_TensorDesc* tensorDesc = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 0, nullptr, sizeof(int8_t))); + OH_NNModel_Destroy(&model); + OH_NNTensorDesc_Destroy(&tensorDesc); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SetTensorData_0300 + * @tc.desc: 设置操作数值,length = 0 + * @tc.type: FUNC + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SetTensorData_0300, Function | MediumTest | Level0) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + int32_t inputDims[4] = {1, 1, 2, 3}; + NN_TensorDesc* tensorDesc = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + int8_t activationValue{0}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 0, (void *)&activationValue, 0)); + OH_NNModel_Destroy(&model); + OH_NNTensorDesc_Destroy(&tensorDesc); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SetTensorData_0400 + * @tc.desc: 设置操作数值,index大于tensor个数 + * @tc.type: FUNC + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SetTensorData_0400, Function | MediumTest | Level0) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + int32_t inputDims[4] = {1, 1, 2, 3}; + NN_TensorDesc* tensorDesc = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + ASSERT_EQ(OH_NN_SUCCESS, 
OH_NNModel_AddTensorToModel(model, tensorDesc)); + int8_t activationValue{0}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000, (void *)&activationValue, sizeof(int8_t))); + OH_NNModel_Destroy(&model); + OH_NNTensorDesc_Destroy(&tensorDesc); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SetTensorType_0100 + * @tc.desc: 设置tensortype,model为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SetTensorType_0100, Function | MediumTest | Level0) +{ + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorType(nullptr, 0, OH_NN_TENSOR)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SetTensorType_0200 + * @tc.desc: 设置tensortype,index大于tensor个数 + * @tc.type: FUNC + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SetTensorType_0200, Function | MediumTest | Level0) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + int32_t inputDims[4] = {1, 1, 2, 3}; + NN_TensorDesc* tensorDesc = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorType(model, 1000, OH_NN_TENSOR)); + OH_NNModel_Destroy(&model); + OH_NNTensorDesc_Destroy(&tensorDesc); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Model_SetTensorType_0300 + * @tc.desc: 设置tensortype,遍历设置tensorType + * @tc.type: FUNC + */ +HWTEST_F(ModelTest, SUB_AI_NNRt_Func_North_Model_SetTensorType_0300, Function | MediumTest | Level0) +{ + OH_NNModel *model = OH_NNModel_Construct(); + ASSERT_NE(nullptr, model); + int32_t inputDims[4] = {1, 1, 2, 3}; + NN_TensorDesc* tensorDesc = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + for (int tensorType = 0; tensorType < 77; tensorType++) { + ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, 0, static_cast(tensorType))); + } + OH_NNModel_Destroy(&model); + OH_NNTensorDesc_Destroy(&tensorDesc); +} diff --git a/test/nnrt_xts_acts/nncore/nncoretest/src/HdiTensorDescTest.cpp b/test/nnrt_xts_acts/nncore/nncoretest/src/HdiTensorDescTest.cpp new file mode 100644 index 0000000..92c299a --- /dev/null +++ b/test/nnrt_xts_acts/nncore/nncoretest/src/HdiTensorDescTest.cpp @@ -0,0 +1,427 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+#include <gtest/gtest.h>
+#include <string>
+
+#include "nncore_utils.h"
+
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Test;
+using namespace OHOS::HDI::Nnrt::V2_0;
+class TensorDescTest : public testing::Test {};
+
+
+/**
+ * @tc.name: SUB_AI_NNRt_Core_Func_North_TensorDescCreate_0100
+ * @tc.desc: create a TensorDesc
+ * @tc.type: FUNC
+ */
+HWTEST_F(TensorDescTest, SUB_AI_NNRt_Core_Func_North_TensorDescCreate_0100, Function | MediumTest | Level1)
+{
+    NN_TensorDesc* tensorDesc = OH_NNTensorDesc_Create();
+    ASSERT_NE(nullptr, tensorDesc);
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc));
+}
+
+/**
+ * @tc.name: SUB_AI_NNRt_Core_Func_North_TensorDescDestroy_0100
+ * @tc.desc: destroy a TensorDesc, TensorDesc is nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(TensorDescTest, SUB_AI_NNRt_Core_Func_North_TensorDescDestroy_0100, Function | MediumTest | Level1)
+{
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNTensorDesc_Destroy(nullptr));
+}
+
+/**
+ * @tc.name: SUB_AI_NNRt_Core_Func_North_TensorDescDestroy_0200
+ * @tc.desc: destroy a TensorDesc, *TensorDesc is nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(TensorDescTest, SUB_AI_NNRt_Core_Func_North_TensorDescDestroy_0200, Function | MediumTest | Level1)
+{
+    NN_TensorDesc* tensorDesc = nullptr;
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNTensorDesc_Destroy(&tensorDesc));
+}
+
+/**
+ * @tc.name: SUB_AI_NNRt_Core_Func_North_TensorDescDestroy_0300
+ * @tc.desc: destroy a TensorDesc, normal case
+ * @tc.type: FUNC
+ */
+HWTEST_F(TensorDescTest, SUB_AI_NNRt_Core_Func_North_TensorDescDestroy_0300, Function | MediumTest | Level1)
+{
+    NN_TensorDesc* tensorDesc = OH_NNTensorDesc_Create();
+    ASSERT_NE(nullptr, tensorDesc);
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc));
+}
+
+/**
+ * @tc.name: SUB_AI_NNRt_Core_Func_North_TensorDescSetDataType_0100
+ * @tc.desc: set the TensorDesc data type, TensorDesc is nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(TensorDescTest, SUB_AI_NNRt_Core_Func_North_TensorDescSetDataType_0100, Function | MediumTest | Level1)
+{
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNTensorDesc_SetDataType(nullptr, OH_NN_UNKNOWN));
+}
+
+/**
+ * @tc.name: SUB_AI_NNRt_Core_Func_North_TensorDescSetDataType_0200
+ * @tc.desc: set the TensorDesc data type, iterate over DataType values
+ * @tc.type: FUNC
+ */
+HWTEST_F(TensorDescTest, SUB_AI_NNRt_Core_Func_North_TensorDescSetDataType_0200, Function | MediumTest | Level1)
+{
+    NN_TensorDesc* tensorDesc = OH_NNTensorDesc_Create();
+    ASSERT_NE(nullptr, tensorDesc);
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetDataType(tensorDesc, OH_NN_UNKNOWN));
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetDataType(tensorDesc, OH_NN_FLOAT64));
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetDataType(tensorDesc, OH_NN_BOOL));
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc));
+}
+
+/**
+ * @tc.name: SUB_AI_NNRt_Core_Func_North_TensorDescSetDataType_0300
+ * @tc.desc: set the TensorDesc data type, DataType is invalid
+ * @tc.type: FUNC
+ */
+HWTEST_F(TensorDescTest, SUB_AI_NNRt_Core_Func_North_TensorDescSetDataType_0300, Function | MediumTest | Level1)
+{
+    NN_TensorDesc* tensorDesc = OH_NNTensorDesc_Create();
+    ASSERT_NE(nullptr, tensorDesc);
+    int dataType = static_cast<int>(OH_NN_FLOAT64) + 1;
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNTensorDesc_SetDataType(tensorDesc, static_cast<OH_NN_DataType>(dataType)));
+    dataType = static_cast<int>(OH_NN_UNKNOWN) - 1;
+    ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNTensorDesc_SetDataType(tensorDesc, static_cast<OH_NN_DataType>(dataType)));
+    ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc));
+}
+
+/**
+ * @tc.name: SUB_AI_NNRt_Core_Func_North_TensorDescGetDataType_0100
+ * @tc.desc: 
获取TensorDesc数据类型,TensorDesc为空 + * @tc.type: FUNC + */ +HWTEST_F(TensorDescTest, SUB_AI_NNRt_Core_Func_North_TensorDescGetDataType_0100, Function | MediumTest | Level1) +{ + OH_NN_DataType dataType; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNTensorDesc_GetDataType(nullptr, &dataType)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_TensorDescGetDataType_0200 + * @tc.desc: 获取TensorDesc数据类型,未设置DataType + * @tc.type: FUNC + */ +HWTEST_F(TensorDescTest, SUB_AI_NNRt_Core_Func_North_TensorDescGetDataType_0200, Function | MediumTest | Level1) +{ + NN_TensorDesc* tensorDesc = OH_NNTensorDesc_Create(); + ASSERT_NE(nullptr, tensorDesc); + OH_NN_DataType dataType = OH_NN_UNKNOWN; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_GetDataType(tensorDesc, &dataType)); + ASSERT_EQ(OH_NN_UNKNOWN, dataType); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_TensorDescSetShape_0100 + * @tc.desc: 设置TensorDescShape,TensorDesc为空 + * @tc.type: FUNC + */ +HWTEST_F(TensorDescTest, SUB_AI_NNRt_Core_Func_North_TensorDescSetShape_0100, Function | MediumTest | Level1) +{ + int32_t inputDims[4] = {1, 2, 2, 3}; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNTensorDesc_SetShape(nullptr, inputDims, 4)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_TensorDescSetShape_0200 + * @tc.desc: 设置TensorDescShape,shape为空 + * @tc.type: FUNC + */ +HWTEST_F(TensorDescTest, SUB_AI_NNRt_Core_Func_North_TensorDescSetShape_0200, Function | MediumTest | Level1) +{ + NN_TensorDesc* tensorDesc = OH_NNTensorDesc_Create(); + ASSERT_NE(nullptr, tensorDesc); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNTensorDesc_SetShape(tensorDesc, nullptr, 0)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_TensorDescGetShape_0100 + * @tc.desc: 获取TensorDescShape,TensorDesc为空 + * @tc.type: FUNC + */ +HWTEST_F(TensorDescTest, SUB_AI_NNRt_Core_Func_North_TensorDescGetShape_0100, Function | MediumTest | Level1) +{ + size_t shapeLength = 0; + int32_t* shape = nullptr; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNTensorDesc_GetShape(nullptr, &shape, &shapeLength)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_TensorDescGetShape_0200 + * @tc.desc: 获取TensorDescShape,未设置shape + * @tc.type: FUNC + */ +HWTEST_F(TensorDescTest, SUB_AI_NNRt_Core_Func_North_TensorDescGetShape_0200, Function | MediumTest | Level1) +{ + NN_TensorDesc* tensorDesc = OH_NNTensorDesc_Create(); + ASSERT_NE(nullptr, tensorDesc); + size_t shapeLength = 0; + int32_t* shape = nullptr; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_GetShape(tensorDesc, &shape, &shapeLength)); + ASSERT_EQ(nullptr, shape); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_TensorDescGetShape_0300 + * @tc.desc: 获取TensorDescShape,合法设置返回成功 + * @tc.type: FUNC + */ +HWTEST_F(TensorDescTest, SUB_AI_NNRt_Core_Func_North_TensorDescGetShape_0300, Function | MediumTest | Level1) +{ + NN_TensorDesc* tensorDesc = OH_NNTensorDesc_Create(); + ASSERT_NE(nullptr, tensorDesc); + int32_t inputDims[4] = {1, 2, 2, 3}; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetShape(tensorDesc, inputDims, 4)); + size_t shapeLength = 0; + int32_t* shape = nullptr; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_GetShape(tensorDesc, &shape, &shapeLength)); + ASSERT_EQ(*inputDims, *shape); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_TensorDescSetFormat_0100 + * @tc.desc: 
设置TensorDescFormat,TensorDesc为空 + * @tc.type: FUNC + */ +HWTEST_F(TensorDescTest, SUB_AI_NNRt_Core_Func_North_TensorDescSetFormat_0100, Function | MediumTest | Level1) +{ + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNTensorDesc_SetFormat(nullptr, OH_NN_FORMAT_NONE)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_TensorDescSetFormat_0200 + * @tc.desc: 设置TensorDescFormat,遍历设置format + * @tc.type: FUNC + */ +HWTEST_F(TensorDescTest, SUB_AI_NNRt_Core_Func_North_TensorDescSetFormat_0200, Function | MediumTest | Level1) +{ + NN_TensorDesc* tensorDesc = OH_NNTensorDesc_Create(); + ASSERT_NE(nullptr, tensorDesc); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetFormat(tensorDesc, OH_NN_FORMAT_NONE)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetFormat(tensorDesc, OH_NN_FORMAT_NCHW)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetFormat(tensorDesc, OH_NN_FORMAT_NHWC)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetFormat(tensorDesc, OH_NN_FORMAT_ND)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_TensorDescGetFormat_0100 + * @tc.desc: 获取TensorDescFormat,TensorDesc为空 + * @tc.type: FUNC + */ +HWTEST_F(TensorDescTest, SUB_AI_NNRt_Core_Func_North_TensorDescGetFormat_0100, Function | MediumTest | Level1) +{ + OH_NN_Format format = OH_NN_FORMAT_NONE; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNTensorDesc_GetFormat(nullptr, &format)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_TensorDescGetFormat_0200 + * @tc.desc: 获取TensorDescFormat,未设置Format + * @tc.type: FUNC + */ +HWTEST_F(TensorDescTest, SUB_AI_NNRt_Core_Func_North_TensorDescGetFormat_0200, Function | MediumTest | Level1) +{ + NN_TensorDesc* tensorDesc = OH_NNTensorDesc_Create(); + ASSERT_NE(nullptr, tensorDesc); + OH_NN_Format format = OH_NN_FORMAT_NONE; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_GetFormat(tensorDesc, &format)); + ASSERT_EQ(OH_NN_FORMAT_NONE, format); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_TensorDescGetFormat_0300 + * @tc.desc: 获取TensorDescFormat,合法获取,返回成功 + * @tc.type: FUNC + */ +HWTEST_F(TensorDescTest, SUB_AI_NNRt_Core_Func_North_TensorDescGetFormat_0300, Function | MediumTest | Level1) +{ + NN_TensorDesc* tensorDesc = OH_NNTensorDesc_Create(); + ASSERT_NE(nullptr, tensorDesc); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetFormat(tensorDesc, OH_NN_FORMAT_NCHW)); + OH_NN_Format format = OH_NN_FORMAT_NONE; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_GetFormat(tensorDesc, &format)); + ASSERT_EQ(OH_NN_FORMAT_NCHW, format); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_TensorDescGetElementCount_0100 + * @tc.desc: 获取TensorDescElementCount,TensorDesc为空 + * @tc.type: FUNC + */ +HWTEST_F(TensorDescTest, SUB_AI_NNRt_Core_Func_North_TensorDescGetElementCount_0100, Function | MediumTest | Level1) +{ + size_t elementCount = 0; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNTensorDesc_GetElementCount(nullptr, &elementCount)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_TensorDescGetElementCount_0200 + * @tc.desc: 获取TensorDescElementCount,合理设置返回正确 + * @tc.type: FUNC + */ +HWTEST_F(TensorDescTest, SUB_AI_NNRt_Core_Func_North_TensorDescGetElementCount_0200, Function | MediumTest | Level1) +{ + NN_TensorDesc* tensorDesc = OH_NNTensorDesc_Create(); + ASSERT_NE(nullptr, tensorDesc); + int32_t inputDims[4] = {1, 2, 2, 3}; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetShape(tensorDesc, inputDims, 4)); + size_t elementCount = 
0; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_GetElementCount(tensorDesc, &elementCount)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_TensorDescGetElementCount_0300 + * @tc.desc: 获取TensorDescElementCount,不设置,返回错误 + * @tc.type: FUNC + */ +HWTEST_F(TensorDescTest, SUB_AI_NNRt_Core_Func_North_TensorDescGetElementCount_0300, Function | MediumTest | Level1) +{ + NN_TensorDesc* tensorDesc = OH_NNTensorDesc_Create(); + ASSERT_NE(nullptr, tensorDesc); + size_t elementCount = 0; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNTensorDesc_GetElementCount(tensorDesc, &elementCount)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_TensorDescGetByteSize_0100 + * @tc.desc: 获取TensorDescByteSize,TensorDesc为空 + * @tc.type: FUNC + */ +HWTEST_F(TensorDescTest, SUB_AI_NNRt_Core_Func_North_TensorDescGetByteSize_0100, Function | MediumTest | Level1) +{ + size_t byteSize = 0; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNTensorDesc_GetByteSize(nullptr, &byteSize)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_TensorDescGetByteSize_0200 + * @tc.desc: 获取TensorDescByteSize,合理设置返回正确 + * @tc.type: FUNC + */ +HWTEST_F(TensorDescTest, SUB_AI_NNRt_Core_Func_North_TensorDescGetByteSize_0200, Function | MediumTest | Level1) +{ + NN_TensorDesc* tensorDesc = OH_NNTensorDesc_Create(); + ASSERT_NE(nullptr, tensorDesc); + int32_t inputDims[4] = {1, 2, 2, 3}; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetDataType(tensorDesc, OH_NN_FLOAT32)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetShape(tensorDesc, inputDims, 4)); + size_t byteSize = 0; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_GetByteSize(tensorDesc, &byteSize)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_TensorDescGetByteSize_0300 + * @tc.desc: 获取TensorDescByteSize,不设置,返回错误 + * @tc.type: FUNC + */ +HWTEST_F(TensorDescTest, SUB_AI_NNRt_Core_Func_North_TensorDescGetByteSize_0300, Function | MediumTest | Level1) +{ + NN_TensorDesc* tensorDesc = OH_NNTensorDesc_Create(); + ASSERT_NE(nullptr, tensorDesc); + size_t byteSize = 0; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNTensorDesc_GetByteSize(tensorDesc, &byteSize)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_TensorDescSetName_0100 + * @tc.desc: 设置TensorDescName,TensorDesc为空 + * @tc.type: FUNC + */ +HWTEST_F(TensorDescTest, SUB_AI_NNRt_Core_Func_North_TensorDescSetName_0100, Function | MediumTest | Level1) +{ + const char* name = "tensorDesc"; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNTensorDesc_SetName(nullptr, name)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_TensorDescSetName_0200 + * @tc.desc: 设置TensorDescName,name为空 + * @tc.type: FUNC + */ +HWTEST_F(TensorDescTest, SUB_AI_NNRt_Core_Func_North_TensorDescSetName_0200, Function | MediumTest | Level1) +{ + NN_TensorDesc* tensorDesc = OH_NNTensorDesc_Create(); + ASSERT_NE(nullptr, tensorDesc); + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNTensorDesc_SetName(tensorDesc, nullptr)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_TensorDescGetName_0100 + * @tc.desc: 获取TensorDescName,TensorDesc为空 + * @tc.type: FUNC + */ +HWTEST_F(TensorDescTest, SUB_AI_NNRt_Core_Func_North_TensorDescGetName_0100, Function | MediumTest | Level1) +{ + const char* name = nullptr; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, 
OH_NNTensorDesc_GetName(nullptr, &name)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_TensorDescGetName_0200 + * @tc.desc: 获取TensorDescName,合理设置返回正确 + * @tc.type: FUNC + */ +HWTEST_F(TensorDescTest, SUB_AI_NNRt_Core_Func_North_TensorDescGetName_0200, Function | MediumTest | Level1) +{ + NN_TensorDesc* tensorDesc = OH_NNTensorDesc_Create(); + ASSERT_NE(nullptr, tensorDesc); + const char* nameIn = "tensorDesc"; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_SetName(tensorDesc, nameIn)); + const char* nameOut = nullptr; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_GetName(tensorDesc, &nameOut)); + ASSERT_EQ(*nameIn, *nameOut); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); +} + +/** + * @tc.name: SUB_AI_NNRt_Core_Func_North_TensorDescGetName_0300 + * @tc.desc: 获取TensorDescName,不设置,返回错误 + * @tc.type: FUNC + */ +HWTEST_F(TensorDescTest, SUB_AI_NNRt_Core_Func_North_TensorDescGetName_0300, Function | MediumTest | Level1) +{ + NN_TensorDesc* tensorDesc = OH_NNTensorDesc_Create(); + ASSERT_NE(nullptr, tensorDesc); + const char* nameOut = nullptr; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_GetName(tensorDesc, &nameOut)); + std::string name(nameOut); + std::string empty(""); + ASSERT_EQ(empty, name); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); +} + diff --git a/test/nnrt_xts_acts/nncore/nncoretest/src/HdiTensorTest.cpp b/test/nnrt_xts_acts/nncore/nncoretest/src/HdiTensorTest.cpp new file mode 100644 index 0000000..d94c791 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/nncoretest/src/HdiTensorTest.cpp @@ -0,0 +1,547 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include +#include + +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +using namespace OHOS::HDI::Nnrt::V2_0; + +namespace { + +class TensorTest : public testing::Test { +protected: + AddModel addModel; + OHNNGraphArgs graphArgs = addModel.graphArgs; +}; + +void BuildAddTopKGraph(OH_NNModel *model) +{ + AddTopKModel addTopKModel; + OHNNGraphArgsMulti graphArgsMulti = addTopKModel.graphArgs; + ASSERT_EQ(OH_NN_SUCCESS, BuildMultiOpGraph(model, graphArgsMulti)); +} + +void BuildModel(OH_NNModel *model, const OHNNGraphArgs &graphArgs) +{ + ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); +} + +} // namespace + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Tensor_CreateTensor_0100 + * @tc.desc: 创建Tensor,TensorDesc为nullptr + * @tc.type: FUNC + */ +HWTEST_F(TensorTest, SUB_AI_NNRt_Func_North_Tensor_CreateTensor_0100, Function | MediumTest | Level0) +{ + size_t deviceId = 0; + ASSERT_EQ(OH_NN_SUCCESS, GetDeviceID(&deviceId)); + NN_Tensor* tensor = OH_NNTensor_Create(deviceId, nullptr); + ASSERT_EQ(nullptr, tensor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Tensor_CreateTensor_0200 + * @tc.desc: 创建Tensor,deviceId=0,设备找到,创建成功 + * @tc.type: FUNC + */ +HWTEST_F(TensorTest, SUB_AI_NNRt_Func_North_Tensor_CreateTensor_0200, Function | MediumTest | Level0) +{ + int32_t inputDims[4] = {1, 1, 2, 3}; + NN_TensorDesc* tensorDesc = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + size_t deviceId = 0; + NN_Tensor* tensor = OH_NNTensor_Create(deviceId, tensorDesc); + ASSERT_NE(nullptr, tensor); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensor_Destroy(&tensor)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Tensor_CreateTensor_0300 + * @tc.desc: 创建Tensor,成功创建 + * @tc.type: FUNC + */ +HWTEST_F(TensorTest, SUB_AI_NNRt_Func_North_Tensor_CreateTensor_0300, Function | MediumTest | Level0) +{ + int32_t inputDims[4] = {1, 1, 2, 3}; + NN_TensorDesc* tensorDesc = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + size_t deviceId = 0; + ASSERT_EQ(OH_NN_SUCCESS, GetDeviceID(&deviceId)); + NN_Tensor* tensor = OH_NNTensor_Create(deviceId, tensorDesc); + ASSERT_NE(nullptr, tensor); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensor_Destroy(&tensor)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Tensor_CreateTensorWithSize_0100 + * @tc.desc: 根据size创建Tensor,TensorDesc为nullptr + * @tc.type: FUNC + */ +HWTEST_F(TensorTest, SUB_AI_NNRt_Func_North_Tensor_CreateTensorWithSize_0100, Function | MediumTest | Level0) +{ + size_t deviceId = 0; + ASSERT_EQ(OH_NN_SUCCESS, GetDeviceID(&deviceId)); + size_t size = 4; + NN_Tensor* tensor = OH_NNTensor_CreateWithSize(deviceId, nullptr, size); + ASSERT_EQ(nullptr, tensor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Tensor_CreateTensorWithSize_0200 + * @tc.desc: 根据size创建Tensor,size = 0 + * @tc.type: FUNC + */ +HWTEST_F(TensorTest, SUB_AI_NNRt_Func_North_Tensor_CreateTensorWithSize_0200, Function | MediumTest | Level0) +{ + int32_t inputDims[4] = {1, 1, 2, 3}; + NN_TensorDesc* tensorDesc = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + size_t deviceId = 0; + ASSERT_EQ(OH_NN_SUCCESS, GetDeviceID(&deviceId)); + size_t size = 0; + NN_Tensor* tensor = OH_NNTensor_CreateWithSize(deviceId, tensorDesc, size); + ASSERT_EQ(nullptr, tensor); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); +} + +/** + * 
@tc.number : SUB_AI_NNRt_Func_North_Tensor_CreateTensorWithSize_0300 + * @tc.desc: 根据size创建Tensor,size < tensor size + * @tc.type: FUNC + */ +HWTEST_F(TensorTest, SUB_AI_NNRt_Func_North_Tensor_CreateTensorWithSize_0300, Function | MediumTest | Level0) +{ + int32_t inputDims[4] = {1, 1, 2, 3}; + NN_TensorDesc* tensorDesc = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + size_t deviceId = 0; + ASSERT_EQ(OH_NN_SUCCESS, GetDeviceID(&deviceId)); + size_t byteSize = 0; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_GetByteSize(tensorDesc, &byteSize)); + size_t size = byteSize - 1; + NN_Tensor* tensor = OH_NNTensor_CreateWithSize(deviceId, tensorDesc, size); + ASSERT_EQ(nullptr, tensor); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Tensor_CreateTensorWithFd_0100 + * @tc.desc: 通过fd根据size创建Tensor,TensorDesc为nullptr + * @tc.type: FUNC + */ +HWTEST_F(TensorTest, SUB_AI_NNRt_Func_North_Tensor_CreateTensorWithFd_0100, Function | MediumTest | Level0) +{ + int32_t inputDims[4] = {1, 1, 2, 3}; + NN_TensorDesc* tensorDesc = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + size_t deviceId = 0; + ASSERT_EQ(OH_NN_SUCCESS, GetDeviceID(&deviceId)); + size_t size = 4; + NN_Tensor* tensor = OH_NNTensor_Create(deviceId, tensorDesc); + ASSERT_NE(nullptr, tensor); + int fd = -1; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensor_GetFd(tensor, &fd)); + size_t offset = 1; + NN_Tensor* tensortmp = OH_NNTensor_CreateWithFd(deviceId, nullptr, fd, size, offset); + ASSERT_EQ(nullptr, tensortmp); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensor_Destroy(&tensor)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Tensor_CreateTensorWithFd_0200 + * @tc.desc: 通过fd根据size创建Tensor,fd = -1 + * @tc.type: FUNC + */ +HWTEST_F(TensorTest, SUB_AI_NNRt_Func_North_Tensor_CreateTensorWithFd_0200, Function | MediumTest | Level0) +{ + int32_t inputDims[4] = {1, 1, 2, 3}; + NN_TensorDesc* tensorDesc = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + size_t deviceId = 0; + ASSERT_EQ(OH_NN_SUCCESS, GetDeviceID(&deviceId)); + size_t size = 4; + int fd = -1; + size_t offset = 1; + NN_Tensor* tensor = OH_NNTensor_CreateWithFd(deviceId, tensorDesc, fd, size, offset); + ASSERT_EQ(nullptr, tensor); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Tensor_CreateTensorWithFd_0300 + * @tc.desc: 通过fd根据size创建Tensor,size = 0 + * @tc.type: FUNC + */ +HWTEST_F(TensorTest, SUB_AI_NNRt_Func_North_Tensor_CreateTensorWithFd_0300, Function | MediumTest | Level0) +{ + int32_t inputDims[4] = {1, 1, 2, 3}; + NN_TensorDesc* tensorDesc = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + size_t deviceId = 0; + ASSERT_EQ(OH_NN_SUCCESS, GetDeviceID(&deviceId)); + NN_Tensor* tensor = OH_NNTensor_Create(deviceId, tensorDesc); + ASSERT_NE(nullptr, tensor); + int fd = -1; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensor_GetFd(tensor, &fd)); + size_t offset = 0; + NN_TensorDesc* tensorDescTmp = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + NN_Tensor* tensortmp = OH_NNTensor_CreateWithFd(deviceId, tensorDescTmp, fd, 0, offset); + ASSERT_EQ(nullptr, tensortmp); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensor_Destroy(&tensor)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDescTmp)); +} + +/** + * @tc.number : 
SUB_AI_NNRt_Func_North_Tensor_CreateTensorWithFd_0400 + * @tc.desc: 通过fd根据size创建Tensor,size = offset + * @tc.type: FUNC + */ +HWTEST_F(TensorTest, SUB_AI_NNRt_Func_North_Tensor_CreateTensorWithFd_0400, Function | MediumTest | Level0) +{ + int32_t inputDims[4] = {1, 1, 2, 3}; + NN_TensorDesc* tensorDesc = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + size_t deviceId = 0; + ASSERT_EQ(OH_NN_SUCCESS, GetDeviceID(&deviceId)); + size_t size = 4; + NN_Tensor* tensor = OH_NNTensor_Create(deviceId, tensorDesc); + ASSERT_NE(nullptr, tensor); + int fd = -1; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensor_GetFd(tensor, &fd)); + size_t offset = 4; + NN_TensorDesc* tensorDescTmp = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + NN_Tensor* tensortmp = OH_NNTensor_CreateWithFd(deviceId, tensorDescTmp, fd, size, offset); + ASSERT_EQ(nullptr, tensortmp); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensor_Destroy(&tensor)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDescTmp)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Tensor_CreateTensorWithFd_0500 + * @tc.desc: 通过fd根据size创建Tensor,size-offset < tensorDescBytesize + * @tc.type: FUNC + */ +HWTEST_F(TensorTest, SUB_AI_NNRt_Func_North_Tensor_CreateTensorWithFd_0500, Function | MediumTest | Level0) +{ + int32_t inputDims[4] = {1, 1, 2, 3}; + NN_TensorDesc* tensorDesc = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + size_t deviceId = 0; + ASSERT_EQ(OH_NN_SUCCESS, GetDeviceID(&deviceId)); + size_t byteSize = 0; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_GetByteSize(tensorDesc, &byteSize)); + size_t size = 10; + NN_Tensor* tensor = OH_NNTensor_Create(deviceId, tensorDesc); + ASSERT_NE(nullptr, tensor); + int fd = -1; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensor_GetFd(tensor, &fd)); + size_t offset = size - byteSize + 1; + NN_TensorDesc* tensorDescTmp = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + NN_Tensor* tensortmp = OH_NNTensor_CreateWithFd(deviceId, tensorDescTmp, fd, size, offset); + ASSERT_EQ(nullptr, tensortmp); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensor_Destroy(&tensor)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDescTmp)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Tensor_CreateTensorWithFd_0600 + * @tc.desc: 通过fd根据size创建Tensor,size-offset = tensorDescBytesize + * @tc.type: FUNC + */ +HWTEST_F(TensorTest, SUB_AI_NNRt_Func_North_Tensor_CreateTensorWithFd_0600, Function | MediumTest | Level0) +{ + int32_t inputDims[4] = {1, 1, 2, 515}; + NN_TensorDesc* tensorDesc = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + size_t deviceId = 0; + ASSERT_EQ(OH_NN_SUCCESS, GetDeviceID(&deviceId)); + size_t byteSize = 0; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_GetByteSize(tensorDesc, &byteSize)); + size_t size = byteSize; + NN_Tensor* tensor = OH_NNTensor_Create(deviceId, tensorDesc); + ASSERT_NE(nullptr, tensor); + int fd = -1; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensor_GetFd(tensor, &fd)); + inputDims[3] = 3; + NN_TensorDesc* tensorDescTmp = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + size_t byteSizeTmp = 0; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_GetByteSize(tensorDescTmp, &byteSizeTmp)); + size_t offset = size - byteSizeTmp; + NN_Tensor* tensortmp = OH_NNTensor_CreateWithFd(deviceId, tensorDescTmp, fd, size, offset); + ASSERT_NE(nullptr, tensortmp); + ASSERT_EQ(OH_NN_SUCCESS, 
OH_NNTensor_Destroy(&tensor)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensor_Destroy(&tensortmp)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDescTmp)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Tensor_GetDataBuffer_0100 + * @tc.desc: 获取databuffer,tensor为空 + * @tc.type: FUNC + */ +HWTEST_F(TensorTest, SUB_AI_NNRt_Func_North_Tensor_GetDataBuffer_0100, Function | MediumTest | Level0) +{ + ASSERT_EQ(nullptr, OH_NNTensor_GetDataBuffer(nullptr)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Tensor_GetDataBuffer_0200 + * @tc.desc: 获取databuffer,返回正确 + * @tc.type: FUNC + */ +HWTEST_F(TensorTest, SUB_AI_NNRt_Func_North_Tensor_GetDataBuffer_0200, Function | MediumTest | Level0) +{ + int32_t inputDims[4] = {1, 1, 2, 3}; + NN_TensorDesc* tensorDesc = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + size_t deviceId = 0; + ASSERT_EQ(OH_NN_SUCCESS, GetDeviceID(&deviceId)); + NN_Tensor* tensor = OH_NNTensor_Create(deviceId, tensorDesc); + ASSERT_NE(nullptr, tensor); + ASSERT_NE(nullptr, OH_NNTensor_GetDataBuffer(tensor)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensor_Destroy(&tensor)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Tensor_Destroy_0100 + * @tc.desc: 销毁tensor,tensor为空 + * @tc.type: FUNC + */ +HWTEST_F(TensorTest, SUB_AI_NNRt_Func_North_Tensor_Destroy_0100, Function | MediumTest | Level0) +{ + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNTensor_Destroy(nullptr)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Tensor_Destroy_0200 + * @tc.desc: 销毁tensor,tensor为空 + * @tc.type: FUNC + */ +HWTEST_F(TensorTest, SUB_AI_NNRt_Func_North_Tensor_Destroy_0200, Function | MediumTest | Level0) +{ + NN_Tensor* tensor = nullptr; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNTensor_Destroy(&tensor)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Tensor_GetTensorDesc_0100 + * @tc.desc: 获取TensorDesc,tensor为空 + * @tc.type: FUNC + */ +HWTEST_F(TensorTest, SUB_AI_NNRt_Func_North_Tensor_GetTensorDesc_0100, Function | MediumTest | Level0) +{ + ASSERT_EQ(nullptr, OH_NNTensor_GetTensorDesc(nullptr)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Tensor_GetTensorDesc_0200 + * @tc.desc: 获取TensorDesc,返回正确 + * @tc.type: FUNC + */ +HWTEST_F(TensorTest, SUB_AI_NNRt_Func_North_Tensor_GetTensorDesc_0200, Function | MediumTest | Level0) +{ + int32_t inputDims[4] = {1, 1, 2, 3}; + NN_TensorDesc* tensorDesc = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + size_t deviceId = 0; + ASSERT_EQ(OH_NN_SUCCESS, GetDeviceID(&deviceId)); + NN_Tensor* tensor = OH_NNTensor_Create(deviceId, tensorDesc); + ASSERT_NE(nullptr, tensor); + ASSERT_NE(nullptr, OH_NNTensor_GetTensorDesc(tensor)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensor_Destroy(&tensor)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Tensor_GetSize_0100 + * @tc.desc: 获取SIZE,tensor为空 + * @tc.type: FUNC + */ +HWTEST_F(TensorTest, SUB_AI_NNRt_Func_North_Tensor_GetSize_0100, Function | MediumTest | Level0) +{ + size_t size = 0; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNTensor_GetSize(nullptr, &size)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Tensor_GetSize_0200 + * @tc.desc: 获取SIZE,OH_NNTensor_Create创建 + * @tc.type: FUNC + */ +HWTEST_F(TensorTest, SUB_AI_NNRt_Func_North_Tensor_GetSize_0200, Function | MediumTest | Level0) +{ + int32_t inputDims[4] = {1, 1, 2, 3}; + NN_TensorDesc* tensorDesc = createTensorDesc(inputDims, 4, 
OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + size_t deviceId = 0; + ASSERT_EQ(OH_NN_SUCCESS, GetDeviceID(&deviceId)); + NN_Tensor* tensor = OH_NNTensor_Create(deviceId, tensorDesc); + ASSERT_NE(nullptr, tensor); + size_t size = 0; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensor_GetSize(tensor, &size)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensor_Destroy(&tensor)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Tensor_GetSize_0300 + * @tc.desc: 获取SIZE,OH_NNTensor_CreateWithSize创建 + * @tc.type: FUNC + */ +HWTEST_F(TensorTest, SUB_AI_NNRt_Func_North_Tensor_GetSize_0300, Function | MediumTest | Level0) +{ + int32_t inputDims[4] = {1, 1, 2, 3}; + NN_TensorDesc* tensorDesc = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + size_t deviceId = 0; + ASSERT_EQ(OH_NN_SUCCESS, GetDeviceID(&deviceId)); + size_t byteSize = 0; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_GetByteSize(tensorDesc, &byteSize)); + size_t size = byteSize + 2; + NN_Tensor* tensor = OH_NNTensor_CreateWithSize(deviceId, tensorDesc, size); + ASSERT_NE(nullptr, tensor); + size_t sizeTmp = 0; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensor_GetSize(tensor, &sizeTmp)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensor_Destroy(&tensor)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Tensor_GetSize_0400 + * @tc.desc: 获取SIZE,OH_NNTensor_CreateWithFd创建 + * @tc.type: FUNC + */ +HWTEST_F(TensorTest, SUB_AI_NNRt_Func_North_Tensor_GetSize_0400, Function | MediumTest | Level0) +{ + int32_t inputDims[4] = {1, 1, 2, 515}; + NN_TensorDesc* tensorDesc = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + size_t deviceId = 0; + ASSERT_EQ(OH_NN_SUCCESS, GetDeviceID(&deviceId)); + NN_Tensor* tensor = OH_NNTensor_Create(deviceId, tensorDesc); + ASSERT_NE(nullptr, tensor); + int fd = -1; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensor_GetFd(tensor, &fd)); + size_t byteSize = 0; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_GetByteSize(tensorDesc, &byteSize)); + size_t size = byteSize; + inputDims[3] = 3; + NN_TensorDesc* tensorDescTmp = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + size_t byteSizeTmp = 0; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_GetByteSize(tensorDescTmp, &byteSizeTmp)); + size_t offset = size - byteSizeTmp; + NN_Tensor* tensortmp = OH_NNTensor_CreateWithFd(deviceId, tensorDescTmp, fd, size, offset); + size_t sizeTmp = 0; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensor_GetSize(tensortmp, &sizeTmp)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensor_Destroy(&tensor)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensor_Destroy(&tensortmp)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDescTmp)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Tensor_GetFd_0100 + * @tc.desc: 获取Fd,tensor为空 + * @tc.type: FUNC + */ +HWTEST_F(TensorTest, SUB_AI_NNRt_Func_North_Tensor_GetFd_0100, Function | MediumTest | Level0) +{ + int fd = -1; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNTensor_GetFd(nullptr, &fd)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Tensor_GetFd_0200 + * @tc.desc: 获取Fd,返回成功 + * @tc.type: FUNC + */ +HWTEST_F(TensorTest, SUB_AI_NNRt_Func_North_Tensor_GetFd_0200, Function | MediumTest | Level0) +{ + int32_t inputDims[4] = {1, 1, 2, 3}; + NN_TensorDesc* tensorDesc = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + size_t deviceId = 0; + ASSERT_EQ(OH_NN_SUCCESS, GetDeviceID(&deviceId)); + NN_Tensor* tensor = 
OH_NNTensor_Create(deviceId, tensorDesc); + ASSERT_NE(nullptr, tensor); + int fd = -1; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensor_GetFd(tensor, &fd)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensor_Destroy(&tensor)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Tensor_GetOffset_0100 + * @tc.desc: 获取offset,tensor为空 + * @tc.type: FUNC + */ +HWTEST_F(TensorTest, SUB_AI_NNRt_Func_North_Tensor_GetOffset_0100, Function | MediumTest | Level0) +{ + size_t offset = 0; + ASSERT_EQ(OH_NN_INVALID_PARAMETER, OH_NNTensor_GetOffset(nullptr, &offset)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Tensor_GetOffset_0200 + * @tc.desc: 获取offset,返回成功,offset = 0 + * @tc.type: FUNC + */ +HWTEST_F(TensorTest, SUB_AI_NNRt_Func_North_Tensor_GetOffset_0200, Function | MediumTest | Level0) +{ + int32_t inputDims[4] = {1, 1, 2, 3}; + NN_TensorDesc* tensorDesc = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + size_t deviceId = 0; + ASSERT_EQ(OH_NN_SUCCESS, GetDeviceID(&deviceId)); + NN_Tensor* tensor = OH_NNTensor_Create(deviceId, tensorDesc); + ASSERT_NE(nullptr, tensor); + size_t offset = 0; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensor_GetOffset(tensor, &offset)); + ASSERT_EQ(0, offset); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensor_Destroy(&tensor)); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Tensor_GetOffset_0300 + * @tc.desc: 获取offset,通过OH_NNTensor_CreateWithFd创建,返回成功,offset = 设置值 + * @tc.type: FUNC + */ +HWTEST_F(TensorTest, SUB_AI_NNRt_Func_North_Tensor_GetOffset_0300, Function | MediumTest | Level0) +{ + int32_t inputDims[4] = {1, 1, 2, 515}; + NN_TensorDesc* tensorDesc = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + size_t deviceId = 0; + ASSERT_EQ(OH_NN_SUCCESS, GetDeviceID(&deviceId)); + size_t byteSize = 0; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_GetByteSize(tensorDesc, &byteSize)); + size_t size = byteSize; + NN_Tensor* tensor = OH_NNTensor_Create(deviceId, tensorDesc); + ASSERT_NE(nullptr, tensor); + int fd = -1; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensor_GetFd(tensor, &fd)); + inputDims[3] = 3; + NN_TensorDesc* tensorDescTmp = createTensorDesc(inputDims, 4, OH_NN_FLOAT32, OH_NN_FORMAT_NCHW); + size_t byteSizeTmp = 0; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_GetByteSize(tensorDescTmp, &byteSizeTmp)); + size_t offset = size - byteSizeTmp; + NN_Tensor* tensortmp = OH_NNTensor_CreateWithFd(deviceId, tensorDescTmp, fd, size, offset); + ASSERT_NE(nullptr, tensortmp); + size_t offsettmp = 0; + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensor_GetOffset(tensortmp, &offsettmp)); + ASSERT_EQ(offset, offsettmp); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensor_Destroy(&tensor)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensor_Destroy(&tensortmp)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDesc)); + ASSERT_EQ(OH_NN_SUCCESS, OH_NNTensorDesc_Destroy(&tensorDescTmp)); +} diff --git a/test/nnrt_xts_acts/nncore/opstest/BUILD.gn b/test/nnrt_xts_acts/nncore/opstest/BUILD.gn new file mode 100644 index 0000000..02e776c --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/BUILD.gn @@ -0,0 +1,100 @@ +# Copyright (c) 2024 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import("//build/test.gni") +module_output_path = "neural_network_runtime/neural_network_runtime" + +config("ops_config") { + include_dirs = [ "include" ] + cflags = [ "-Wno-error" ] + cflags_cc = [ "-fexceptions" ] +} + +ohos_systemtest("ActsNnrtOpsTest") { + module_out_path = module_output_path + sources = [ + "src/abs_test.cpp", + "src/all_test.cpp", + "src/assert_test.cpp", + "src/broadcast_to_test.cpp", + "src/ceil_test.cpp", + "src/clip_test.cpp", + "src/constant_of_shape_test.cpp", + "src/cos_test.cpp", + "src/crop_test.cpp", + "src/depth_to_space_test.cpp", + "src/detection_post_process_test.cpp", + "src/equal_test.cpp", + "src/erf_test.cpp", + "src/exp_test.cpp", + "src/flatten_test.cpp", + "src/floor_test.cpp", + "src/gather_nd_test.cpp", + "src/greater_equal_test.cpp", + "src/greater_test.cpp", + "src/hard_sigmoid_test.cpp", + "src/instance_norm_test.cpp", + "src/l2_normalize_test.cpp", + "src/leaky_relu_test.cpp", + "src/less_test.cpp", + "src/log_softmax_test.cpp", + "src/log_test.cpp", + "src/logical_and_test.cpp", + "src/logical_not_test.cpp", + "src/logical_or_test.cpp", + "src/lrn_test.cpp", + "src/lstm_test.cpp", + "src/minimum_test.cpp", + "src/mock_idevice.cpp", + "src/mod_test.cpp", + "src/neg_test.cpp", + "src/nncore_utils.cpp", + "src/not_equal_test.cpp", + "src/pow_test.cpp", + "src/quant_dtype_cast_test.cpp", + "src/range_test.cpp", + "src/rank_test.cpp", + "src/reciprocal_test.cpp", + "src/reducel2_test.cpp", + "src/reducemax_test.cpp", + "src/reducemin_test.cpp", + "src/reducesum_test.cpp", + "src/round_test.cpp", + "src/scatter_nd_test.cpp", + "src/select_test.cpp", + "src/sin_test.cpp", + "src/space_to_depth_test.cpp", + "src/sparse_to_dense_test.cpp", + "src/square_test.cpp", + "src/swish_test.cpp", + "src/unstack_test.cpp", + "src/where_test.cpp", + ] + + configs = [ ":ops_config" ] + + external_deps = [ + "c_utils:utils", + "drivers_interface_nnrt:libnnrt_proxy_2.1", + "googletest:gmock_main", + "googletest:gtest_main", + "hilog:libhilog", + "ipc:ipc_single", + "mindspore:mindir_lib", + "neural_network_runtime:libneural_network_core", + "neural_network_runtime:libneural_network_runtime", + ] + + subsystem_name = "ai" + part_name = "neural_network_runtime" +} diff --git a/test/nnrt_xts_acts/nncore/opstest/include/mock_idevice.h b/test/nnrt_xts_acts/nncore/opstest/include/mock_idevice.h new file mode 100644 index 0000000..5c8212c --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/include/mock_idevice.h @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+#ifndef MOCK_IDEVICE_H
+#define MOCK_IDEVICE_H
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include "mindir_lite_graph.h"
+#include "mindir.h"
+
+#include "securec.h"
+#include "refbase.h"
+#include "log.h"
+#include "ashmem.h"
+
+#include
+#include
+#include
+
+namespace OHOS {
+namespace HDI {
+namespace Nnrt {
+namespace V2_1 {
+
+class MockIDevice : public INnrtDevice {
+public:
+    int32_t GetSupportedOperation(const Model& model, std::vector<bool>& ops) override;
+
+    int32_t IsFloat16PrecisionSupported(bool& isSupported) override;
+
+    int32_t IsPerformanceModeSupported(bool& isSupported) override;
+
+    int32_t IsPrioritySupported(bool& isSupported) override;
+
+    int32_t IsDynamicInputSupported(bool& isSupported) override;
+
+    int32_t IsModelCacheSupported(bool& isSupported) override;
+
+    int32_t AllocateBuffer(uint32_t length, SharedBuffer &buffer) override;
+
+    int32_t ReleaseBuffer(const SharedBuffer &buffer) override;
+
+    int32_t GetDeviceName(std::string& name) override;
+
+    int32_t GetVendorName(std::string& name) override;
+
+    int32_t GetDeviceType(DeviceType& deviceType) override;
+
+    int32_t GetDeviceStatus(DeviceStatus& status) override;
+
+    int32_t GetVersion(uint32_t &majorVersion, uint32_t &minorVersion) override;
+
+    int32_t PrepareModel(const Model& model, const ModelConfig& config, sptr<IPreparedModel>& preparedModel) override;
+
+    int32_t PrepareOfflineModel(const std::vector<SharedBuffer>& offlineModels, const ModelConfig& config,
+        sptr<IPreparedModel>& preparedModel) override;
+
+    int32_t PrepareModelFromModelCache(const std::vector<SharedBuffer>& modelCache, const ModelConfig& config,
+        sptr<IPreparedModel>& preparedModel) override;
+
+    int32_t MemoryCopy(float* data, uint32_t length);
+
+    void SetFP16Supported(bool isSupported);
+
+    void SetPerformanceSupported(bool isSupported);
+
+    void SetPrioritySupported(bool isSupported);
+
+    void SetModelCacheSupported(bool isSupported);
+
+    void SetOperationsSupported(std::vector<bool> isSupported);
+
+    void SetDynamicInputSupported(bool isSupported);
+
+    static MockIDevice *GetInstance();
+
+    MockIDevice();
+    virtual ~MockIDevice();
+
+private:
+    std::unordered_set<int> m_fds;
+    int m_bufferFd;
+    bool m_fp16 = true;
+    bool m_performance = true;
+    bool m_priority = true;
+    bool m_cache = true;
+    bool m_dynamic = true;
+    std::vector<bool> m_operations {true};
+    std::mutex m_mtx;
+};
+
+class MockIPreparedModel : public IPreparedModel {
+public:
+    int32_t ExportModelCache(std::vector<SharedBuffer>& modelCache) override;
+    int32_t Run(const std::vector<IOTensor>& inputs, const std::vector<IOTensor>& outputs,
+        std::vector<std::vector<int32_t>>& outputsDims) override;
+    int32_t GetInputDimRanges(std::vector<std::vector<uint32_t>>& minInputDims,
+        std::vector<std::vector<uint32_t>>& maxInputDims) override;
+    int32_t GetVersion(uint32_t &majorVersion, uint32_t &minorVersion) override;
+    MockIPreparedModel() = default;
+    virtual ~MockIPreparedModel();
+private:
+    std::unordered_set<int> m_fds;
+};
+
+} // namespace V2_1
+} // namespace Nnrt
+} // namespace HDI
+} // namespace OHOS
+#endif // MOCK_IDEVICE_H
diff --git a/test/nnrt_xts_acts/nncore/opstest/include/nncore_const.h b/test/nnrt_xts_acts/nncore/opstest/include/nncore_const.h
new file mode 100644
index 0000000..97ad90b
--- /dev/null
+++ b/test/nnrt_xts_acts/nncore/opstest/include/nncore_const.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2024 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef CONST_H
+#define CONST_H
+
+#include
+#include
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace Test {
+
+const uint32_t NO_DEVICE_COUNT = 0;
+
+} // namespace Test
+} // namespace NeuralNetworkRuntime
+} // namespace OHOS
+
+#endif // CONST_H
\ No newline at end of file
diff --git a/test/nnrt_xts_acts/nncore/opstest/include/nncore_utils.h b/test/nnrt_xts_acts/nncore/opstest/include/nncore_utils.h
new file mode 100644
index 0000000..58f23ca
--- /dev/null
+++ b/test/nnrt_xts_acts/nncore/opstest/include/nncore_utils.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2024 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef NNRT_UTILS_H
+#define NNRT_UTILS_H
+
+#include
+#include
+#include
+
+#include "neural_network_runtime/neural_network_runtime.h"
+#include "log.h"
+#include "nncore_const.h"
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace Test {
+struct OHNNOperandTest {
+    OH_NN_DataType dataType;
+    OH_NN_TensorType type;
+    std::vector<int32_t> shape;
+    void *data {nullptr};
+    int32_t length {0};
+    OH_NN_Format format = OH_NN_FORMAT_NONE;
+    const OH_NN_QuantParam *quantParam = nullptr;
+};
+
+struct OHNNGraphArgs {
+    OH_NN_OperationType operationType;
+    std::vector<OHNNOperandTest> operands;
+    std::vector<uint32_t> paramIndices;
+    std::vector<uint32_t> inputIndices;
+    std::vector<uint32_t> outputIndices;
+    bool build = true;
+    bool specifyIO = true;
+    bool addOperation = true;
+};
+
+struct OHNNGraphArgsMulti {
+    std::vector<OH_NN_OperationType> operationTypes;
+    std::vector<std::vector<OHNNOperandTest>> operands;
+    std::vector<std::vector<uint32_t>> paramIndices;
+    std::vector<std::vector<uint32_t>> inputIndices;
+    std::vector<std::vector<uint32_t>> outputIndices;
+    std::vector<uint32_t> graphInput;
+    std::vector<uint32_t> graphOutput;
+};
+
+struct OHNNCompileParam {
+    int32_t deviceId = 0;
+    std::string cacheDir;
+    uint32_t cacheVersion = 0;
+    OH_NN_PerformanceMode performanceMode = OH_NN_PERFORMANCE_NONE;
+    OH_NN_Priority priority = OH_NN_PRIORITY_NONE;
+    bool enableFp16 = false;
+};
+
+OH_NN_UInt32Array TransformUInt32Array(const std::vector<uint32_t>& vector);
+NN_TensorDesc* createTensorDesc(const int32_t* shape, size_t shapeNum, OH_NN_DataType dataType, OH_NN_Format format);
+int SingleModelBuildEndStep(OH_NNModel *model, const OHNNGraphArgs &graphArgs);
+int BuildSingleOpGraph(OH_NNModel *model, const OHNNGraphArgs &graphArgs);
+void Free(OH_NNModel *model = nullptr, OH_NNCompilation *compilation = nullptr, OH_NNExecutor *executor = nullptr);
+void FreeTensorDescVec(std::vector<NN_TensorDesc*> tensorDescVec);
+int CompileGraphMock(OH_NNCompilation *compilation, const OHNNCompileParam &compileParam);
+OH_NN_UInt32Array GetUInt32Array(std::vector<uint32_t> indices);
+OH_NN_ReturnCode GetDeviceID(size_t *deviceId);
+
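// A minimal usage sketch of the helpers declared above. It is illustrative only and hedged:
// it assumes the mock driver registered by mock_idevice.cpp is active, uses only the public
// OH_NN* C API plus these declarations, and the names "absIn"/"absOut" and the literal values
// are examples, not part of the committed tests.
//
//     float in[3] = {-1.0f, 0.0f, 2.0f};
//     float out[3] = {0.0f};
//     OHNNOperandTest absIn  = {OH_NN_FLOAT32, OH_NN_TENSOR, {3}, in,  3 * sizeof(float)};
//     OHNNOperandTest absOut = {OH_NN_FLOAT32, OH_NN_TENSOR, {3}, out, 3 * sizeof(float)};
//     OHNNGraphArgs args = {.operationType = OH_NN_OPS_ABS,
//                           .operands = {absIn, absOut},
//                           .inputIndices = {0},
//                           .outputIndices = {1}};
//     OH_NNModel *model = OH_NNModel_Construct();
//     BuildSingleOpGraph(model, args);                    // adds tensors, sets IO, finishes the graph
//     OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
//     OHNNCompileParam param = {.performanceMode = OH_NN_PERFORMANCE_HIGH};
//     CompileGraphMock(compilation, param);               // selects the device and builds
//     Free(model, compilation);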
+} // namespace Test +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NNRT_UTILS_H \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/abs_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/abs_test.cpp new file mode 100644 index 0000000..0f10aa1 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/abs_test.cpp @@ -0,0 +1,757 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class AbsTest : public testing::Test {}; + +struct AbsModel1 { + const std::vector tensor_shape = {7}; + float inputValue[7] = {-3, -2.5, -1, 0, 1, 2, 3}; + float outputValue[7] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 7*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 7*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_ABS, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + + +struct AbsModel2 { + const std::vector tensor_shape = {}; + float* inputValue = {}; + float* outputValue = {}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 0*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 0*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_ABS, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Abs_Build_01 + * @tc.desc: AbsModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(AbsTest, SUB_AI_NNRt_Func_North_Abs_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AbsModel1 absModel; + OHNNGraphArgs graphArgs = absModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Abs_Build_02 + * @tc.desc: AbsModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(AbsTest, SUB_AI_NNRt_Func_North_Abs_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AbsModel2 absModel; + OHNNGraphArgs graphArgs = absModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + 
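// CompileGraphMock() (declared in nncore_utils.h) is assumed here to apply the OHNNCompileParam
// set below and then build the compilation; a rough sketch, not the verified implementation:
//     OH_NNCompilation_SetDevice(compilation, deviceId);   // deviceId obtained via GetDeviceID()
//     OH_NNCompilation_SetPerformanceMode(compilation, param.performanceMode);
//     OH_NNCompilation_SetPriority(compilation, param.priority);
//     OH_NNCompilation_EnableFloat16(compilation, param.enableFp16);
//     return OH_NNCompilation_Build(compilation);
// Under that assumption, a failure at this step points at device selection or the build call
// rather than at the model construction above.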
EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Abs_Build_03 + * @tc.desc: AbsModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(AbsTest, SUB_AI_NNRt_Func_North_Abs_Build_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AbsModel1 absModel; + OHNNGraphArgs graphArgs = absModel.graphArgs; + graphArgs.operands = {absModel.input, absModel.input, absModel.output}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Abs_Build_04 + * @tc.desc: AbsModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(AbsTest, SUB_AI_NNRt_Func_North_Abs_Build_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AbsModel1 absModel; + OHNNGraphArgs graphArgs = absModel.graphArgs; + graphArgs.operands = {absModel.input, absModel.output, absModel.output}; + graphArgs.inputIndices = {0}; + graphArgs.outputIndices = {1, 2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Abs_Build_05 + * @tc.desc: AbsModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(AbsTest, SUB_AI_NNRt_Func_North_Abs_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AbsModel1 absModel; + OHNNGraphArgs graphArgs = absModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {absModel.input, absModel.output, activation}; + graphArgs.paramIndices = {2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Abs_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(AbsTest, SUB_AI_NNRt_Func_North_Abs_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Abs_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(AbsTest, SUB_AI_NNRt_Func_North_Abs_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AbsModel1 absModel; + OHNNGraphArgs graphArgs = absModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Abs_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(AbsTest, SUB_AI_NNRt_Func_North_Abs_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = 
OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AbsModel1 absModel; + OHNNGraphArgs graphArgs = absModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Abs_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(AbsTest, SUB_AI_NNRt_Func_North_Abs_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AbsModel1 absModel; + OHNNGraphArgs graphArgs = absModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Abs_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(AbsTest, SUB_AI_NNRt_Func_North_Abs_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AbsModel1 absModel; + OHNNGraphArgs graphArgs = absModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Abs_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(AbsTest, SUB_AI_NNRt_Func_North_Abs_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AbsModel1 absModel; + OHNNGraphArgs graphArgs = absModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, 
OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Abs_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(AbsTest, SUB_AI_NNRt_Func_North_Abs_Model_SpecifyInputsAndOutputs_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AbsModel1 absModel; + OHNNGraphArgs graphArgs = absModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Abs_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(AbsTest, SUB_AI_NNRt_Func_North_Abs_Model_SpecifyInputsAndOutputs_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AbsModel1 absModel; + OHNNGraphArgs graphArgs = absModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Abs_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(AbsTest, SUB_AI_NNRt_Func_North_Abs_Model_SpecifyInputsAndOutputs_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AbsModel1 absModel; + OHNNGraphArgs graphArgs = absModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Abs_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(AbsTest, SUB_AI_NNRt_Func_North_Abs_Model_SpecifyInputsAndOutputs_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AbsModel1 absModel; + OHNNGraphArgs graphArgs = absModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = 
TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Abs_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(AbsTest, SUB_AI_NNRt_Func_North_Abs_Model_SpecifyInputsAndOutputs_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AbsModel1 absModel; + OHNNGraphArgs graphArgs = absModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Abs_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(AbsTest, SUB_AI_NNRt_Func_North_Abs_Model_SpecifyInputsAndOutputs_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AbsModel1 absModel; + OHNNGraphArgs graphArgs = absModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Abs_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(AbsTest, SUB_AI_NNRt_Func_North_Abs_Model_SpecifyInputsAndOutputs_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AbsModel1 absModel; + OHNNGraphArgs graphArgs = absModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Abs_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(AbsTest, SUB_AI_NNRt_Func_North_Abs_Model_SpecifyInputsAndOutputs_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AbsModel1 absModel; + OHNNGraphArgs graphArgs = absModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, 
OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Abs_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(AbsTest, SUB_AI_NNRt_Func_North_Abs_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AbsModel1 absModel; + OHNNGraphArgs graphArgs = absModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Abs_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(AbsTest, SUB_AI_NNRt_Func_North_Abs_Model_AddOperation_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AbsModel1 absModel; + OHNNGraphArgs graphArgs = absModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Abs_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(AbsTest, SUB_AI_NNRt_Func_North_Abs_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AbsModel1 absModel; + OHNNGraphArgs graphArgs = absModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Abs_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(AbsTest, SUB_AI_NNRt_Func_North_Abs_Model_AddOperation_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AbsModel1 absModel; + OHNNGraphArgs graphArgs = absModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = 
TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Abs_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(AbsTest, SUB_AI_NNRt_Func_North_Abs_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AbsModel1 absModel; + OHNNGraphArgs graphArgs = absModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Abs_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(AbsTest, SUB_AI_NNRt_Func_North_Abs_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AbsModel1 absModel; + OHNNGraphArgs graphArgs = absModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Abs_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(AbsTest, SUB_AI_NNRt_Func_North_Abs_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AbsModel1 absModel; + OHNNGraphArgs graphArgs = absModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Abs_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(AbsTest, SUB_AI_NNRt_Func_North_Abs_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AbsModel1 absModel; + OHNNGraphArgs graphArgs = absModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = 
false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Abs_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(AbsTest, SUB_AI_NNRt_Func_North_Abs_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AbsModel1 absModel; + OHNNGraphArgs graphArgs = absModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/all_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/all_test.cpp new file mode 100644 index 0000000..e228e12 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/all_test.cpp @@ -0,0 +1,817 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <algorithm> +#include <vector> +#include <gtest/gtest.h> +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class AllTest : public testing::Test {}; + +struct AllModel1 { + const std::vector<int32_t> input_shape = {2, 2}; + const std::vector<int32_t> output_shape = {1}; + std::vector<int64_t> keepDimsValue = {0}; + bool inputValue[2][2] = {{true, true}, {true, false}}; + int64_t* axisValue = {}; + bool outputValue[1] = {false}; + + OHNNOperandTest input = {OH_NN_BOOL, OH_NN_TENSOR, input_shape, inputValue, 4*sizeof(bool)}; + OHNNOperandTest axis = {OH_NN_INT64, OH_NN_TENSOR, {}, axisValue, 0*sizeof(int64_t)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, output_shape, outputValue, sizeof(bool)}; + OHNNOperandTest keepDims = {OH_NN_INT64, OH_NN_ALL_KEEP_DIMS, {1}, &keepDimsValue, sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_ALL, + .operands = {input, axis, output, keepDims}, + .paramIndices = {3}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct AllModel2 { + const std::vector<int32_t> input_shape = {2, 3}; + const std::vector<int32_t> output_shape = {2, 1}; + std::vector<int64_t> keepDimsValue = {1}; + bool inputValue[2][3] = {{true, false, true}, {true, true, true}}; + int64_t axisValue[1] = {1}; + bool outputValue[2][1] = {false}; + + OHNNOperandTest input = {OH_NN_BOOL, OH_NN_TENSOR, input_shape, inputValue, 6*sizeof(float)}; + OHNNOperandTest axis = {OH_NN_INT64, OH_NN_TENSOR, {1}, axisValue, sizeof(int64_t)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, output_shape, outputValue, 2*sizeof(float)}; + OHNNOperandTest keepDims = {OH_NN_INT64, OH_NN_ALL_KEEP_DIMS, {1}, &keepDimsValue, sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_ALL, + .operands = {input, axis, output, keepDims}, + .paramIndices = {3}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct AllModel3 { + const std::vector<int32_t> input_shape = {2, 2, 2}; + const std::vector<int32_t> output_shape = {1}; + std::vector<int64_t> keepDimsValue = {1}; + bool inputValue[2][2][2] = {{{true, true}, {true, true}}, {{true, true}, {true, true}}}; + int64_t* axisValue = {}; + bool outputValue[1] = {false}; + + OHNNOperandTest input = {OH_NN_BOOL, OH_NN_TENSOR, input_shape, inputValue, 8*sizeof(float)}; + OHNNOperandTest axis = {OH_NN_INT64, OH_NN_TENSOR, {}, axisValue, 0*sizeof(int64_t)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, output_shape, outputValue, sizeof(float)}; + OHNNOperandTest keepDims = {OH_NN_INT64, OH_NN_ALL_KEEP_DIMS, {1}, &keepDimsValue, sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_ALL, + .operands = {input, axis, output, keepDims}, + .paramIndices = {3}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_All_Build_01 + * @tc.desc: AllModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(AllTest, SUB_AI_NNRt_Func_North_All_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AllModel1 allModel; + OHNNGraphArgs graphArgs = allModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + 
Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_All_Build_02 + * @tc.desc: AllModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(AllTest, SUB_AI_NNRt_Func_North_All_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AllModel2 allModel; + OHNNGraphArgs graphArgs = allModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_All_Build_03 + * @tc.desc: AllModel3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(AllTest, SUB_AI_NNRt_Func_North_All_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AllModel3 allModel; + OHNNGraphArgs graphArgs = allModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_All_Build_04 + * @tc.desc: AllModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(AllTest, SUB_AI_NNRt_Func_North_All_Build_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AllModel1 allModel; + OHNNGraphArgs graphArgs = allModel.graphArgs; + graphArgs.operands = {allModel.input, allModel.input, allModel.axis, + allModel.output, allModel.keepDims}; + graphArgs.inputIndices = {0, 1, 2}; + graphArgs.outputIndices = {3}; + graphArgs.paramIndices = {4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_All_Build_05 + * @tc.desc: AllModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(AllTest, SUB_AI_NNRt_Func_North_All_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AllModel1 allModel; + OHNNGraphArgs graphArgs = allModel.graphArgs; + graphArgs.operands = {allModel.input, allModel.axis, allModel.output, + allModel.output, allModel.keepDims}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2, 3}; + graphArgs.paramIndices = {4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_All_Build_06 + * @tc.desc: AllModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(AllTest, SUB_AI_NNRt_Func_North_All_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AllModel1 allModel; + OHNNGraphArgs graphArgs = allModel.graphArgs; + + int8_t activationValue = 
OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {allModel.input, allModel.axis, allModel.output, allModel.keepDims, activation}; + graphArgs.paramIndices = {3, 4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_All_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(AllTest, SUB_AI_NNRt_Func_North_All_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_All_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(AllTest, SUB_AI_NNRt_Func_North_All_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AllModel1 allModel; + OHNNGraphArgs graphArgs = allModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_All_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(AllTest, SUB_AI_NNRt_Func_North_All_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AllModel1 allModel; + OHNNGraphArgs graphArgs = allModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_All_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(AllTest, SUB_AI_NNRt_Func_North_All_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AllModel1 allModel; + OHNNGraphArgs graphArgs = allModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_All_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buffer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(AllTest, SUB_AI_NNRt_Func_North_All_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AllModel1 allModel; + OHNNGraphArgs graphArgs = allModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + 
const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_All_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(AllTest, SUB_AI_NNRt_Func_North_All_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AllModel1 allModel; + OHNNGraphArgs graphArgs = allModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_All_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(AllTest, SUB_AI_NNRt_Func_North_All_Model_SpecifyInputsAndOutputs_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AllModel1 allModel; + OHNNGraphArgs graphArgs = allModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_All_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(AllTest, SUB_AI_NNRt_Func_North_All_Model_SpecifyInputsAndOutputs_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AllModel1 allModel; + OHNNGraphArgs graphArgs = allModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, 
&outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_All_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(AllTest, SUB_AI_NNRt_Func_North_All_Model_SpecifyInputsAndOutputs_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AllModel1 allModel; + OHNNGraphArgs graphArgs = allModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_All_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(AllTest, SUB_AI_NNRt_Func_North_All_Model_SpecifyInputsAndOutputs_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AllModel1 allModel; + OHNNGraphArgs graphArgs = allModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_All_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(AllTest, SUB_AI_NNRt_Func_North_All_Model_SpecifyInputsAndOutputs_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AllModel1 allModel; + OHNNGraphArgs graphArgs = allModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_All_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(AllTest, SUB_AI_NNRt_Func_North_All_Model_SpecifyInputsAndOutputs_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AllModel1 allModel; + OHNNGraphArgs graphArgs = allModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_All_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 
设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(AllTest, SUB_AI_NNRt_Func_North_All_Model_SpecifyInputsAndOutputs_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AllModel1 allModel; + OHNNGraphArgs graphArgs = allModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_All_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(AllTest, SUB_AI_NNRt_Func_North_All_Model_SpecifyInputsAndOutputs_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AllModel1 allModel; + OHNNGraphArgs graphArgs = allModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_All_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(AllTest, SUB_AI_NNRt_Func_North_All_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AllModel1 allModel; + OHNNGraphArgs graphArgs = allModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_All_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(AllTest, SUB_AI_NNRt_Func_North_All_Model_AddOperation_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AllModel1 allModel; + OHNNGraphArgs graphArgs = allModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_All_Model_AddOperation_03 + * @tc.desc: 
添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(AllTest, SUB_AI_NNRt_Func_North_All_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AllModel1 allModel; + OHNNGraphArgs graphArgs = allModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_All_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(AllTest, SUB_AI_NNRt_Func_North_All_Model_AddOperation_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AllModel1 allModel; + OHNNGraphArgs graphArgs = allModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_All_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(AllTest, SUB_AI_NNRt_Func_North_All_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AllModel1 allModel; + OHNNGraphArgs graphArgs = allModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_All_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(AllTest, SUB_AI_NNRt_Func_North_All_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AllModel1 allModel; + OHNNGraphArgs graphArgs = allModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, 
graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_All_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(AllTest, SUB_AI_NNRt_Func_North_All_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AllModel1 allModel; + OHNNGraphArgs graphArgs = allModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_All_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(AllTest, SUB_AI_NNRt_Func_North_All_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AllModel1 allModel; + OHNNGraphArgs graphArgs = allModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_All_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(AllTest, SUB_AI_NNRt_Func_North_All_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AllModel1 allModel; + OHNNGraphArgs graphArgs = allModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/assert_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/assert_test.cpp new file mode 100644 index 0000000..51aa9e2 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/assert_test.cpp @@ -0,0 +1,769 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <algorithm> +#include <vector> +#include <gtest/gtest.h> +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class AssertTest : public testing::Test {}; + +struct AssertModel1 { + const std::vector<int32_t> tensor_shape = {2, 3}; + const std::vector<int32_t> condition_shape = {1}; + std::vector<int64_t> summarizeValue = {3}; + bool conditionValue[1] = {false}; + int64_t inputValue[2][3] = {{1, 2, 3}, {4, 5, 6}}; + int64_t outputValue[2][3] = {0}; + + OHNNOperandTest input = {OH_NN_INT64, OH_NN_TENSOR, tensor_shape, inputValue, 6*sizeof(int64_t)}; + OHNNOperandTest condition = {OH_NN_BOOL, OH_NN_TENSOR, condition_shape, conditionValue, sizeof(bool)}; + OHNNOperandTest output = {OH_NN_INT64, OH_NN_TENSOR, tensor_shape, outputValue, 6*sizeof(int64_t)}; + OHNNOperandTest summarize = {OH_NN_INT64, OH_NN_ASSERT_SUMMARIZE, {1}, &summarizeValue, sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_ASSERT, + .operands = {input, condition, output, summarize}, + .paramIndices = {3}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct AssertModel2 { + const std::vector<int32_t> tensor_shape = {2, 3}; + const std::vector<int32_t> condition_shape = {1}; + std::vector<int64_t> summarizeValue = {2}; + bool conditionValue[1] = {true}; + int64_t inputValue[2][3] = {{7, 8, 9}, {10, 11, 12}}; + int64_t outputValue[2][3] = {0}; + + OHNNOperandTest input = {OH_NN_INT64, OH_NN_TENSOR, tensor_shape, inputValue, 6*sizeof(int64_t)}; + OHNNOperandTest condition = {OH_NN_BOOL, OH_NN_TENSOR, condition_shape, conditionValue, sizeof(bool)}; + OHNNOperandTest output = {OH_NN_INT64, OH_NN_TENSOR, tensor_shape, outputValue, 6*sizeof(int64_t)}; + OHNNOperandTest summarize = {OH_NN_INT64, OH_NN_ASSERT_SUMMARIZE, {1}, &summarizeValue, sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_ASSERT, + .operands = {input, condition, output, summarize}, + .paramIndices = {3}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Assert_Build_01 + * @tc.desc: AssertModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(AssertTest, SUB_AI_NNRt_Func_North_Assert_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AssertModel1 assertModel; + OHNNGraphArgs graphArgs = assertModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Assert_Build_02 + * @tc.desc: AssertModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(AssertTest, SUB_AI_NNRt_Func_North_Assert_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + 
EXPECT_NE(nullptr, model); + + AssertModel2 assertModel; + OHNNGraphArgs graphArgs = assertModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Assert_Build_03 + * @tc.desc: AssertModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(AssertTest, SUB_AI_NNRt_Func_North_Assert_Build_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AssertModel1 assertModel; + OHNNGraphArgs graphArgs = assertModel.graphArgs; + graphArgs.operands = {assertModel.input, assertModel.input, assertModel.condition, + assertModel.output, assertModel.summarize}; + graphArgs.inputIndices = {0, 1, 2}; + graphArgs.outputIndices = {3}; + graphArgs.paramIndices = {4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Assert_Build_04 + * @tc.desc: AssertModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(AssertTest, SUB_AI_NNRt_Func_North_Assert_Build_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AssertModel1 assertModel; + OHNNGraphArgs graphArgs = assertModel.graphArgs; + graphArgs.operands = {assertModel.input, assertModel.condition, assertModel.output, + assertModel.output, assertModel.summarize}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2, 3}; + graphArgs.paramIndices = {4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Assert_Build_05 + * @tc.desc: AssertModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(AssertTest, SUB_AI_NNRt_Func_North_Assert_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AssertModel1 assertModel; + OHNNGraphArgs graphArgs = assertModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {assertModel.input, assertModel.condition, assertModel.output, + assertModel.summarize, activation}; + graphArgs.paramIndices = {3, 4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Assert_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(AssertTest, SUB_AI_NNRt_Func_North_Assert_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Assert_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(AssertTest, SUB_AI_NNRt_Func_North_Assert_Model_Finish_02, Function | 
MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AssertModel1 assertModel; + OHNNGraphArgs graphArgs = assertModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Assert_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(AssertTest, SUB_AI_NNRt_Func_North_Assert_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AssertModel1 assertModel; + OHNNGraphArgs graphArgs = assertModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Assert_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(AssertTest, SUB_AI_NNRt_Func_North_Assert_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AssertModel1 assertModel; + OHNNGraphArgs graphArgs = assertModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Assert_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buffer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(AssertTest, SUB_AI_NNRt_Func_North_Assert_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AssertModel1 assertModel; + OHNNGraphArgs graphArgs = assertModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Assert_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(AssertTest, SUB_AI_NNRt_Func_North_Assert_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + 
OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AssertModel1 assertModel; + OHNNGraphArgs graphArgs = assertModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Assert_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(AssertTest, SUB_AI_NNRt_Func_North_Assert_Model_SpecifyInputsAndOutputs_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AssertModel1 assertModel; + OHNNGraphArgs graphArgs = assertModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Assert_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(AssertTest, SUB_AI_NNRt_Func_North_Assert_Model_SpecifyInputsAndOutputs_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AssertModel1 assertModel; + OHNNGraphArgs graphArgs = assertModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Assert_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(AssertTest, SUB_AI_NNRt_Func_North_Assert_Model_SpecifyInputsAndOutputs_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AssertModel1 assertModel; + OHNNGraphArgs graphArgs = assertModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + 
Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Assert_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(AssertTest, SUB_AI_NNRt_Func_North_Assert_Model_SpecifyInputsAndOutputs_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AssertModel1 assertModel; + OHNNGraphArgs graphArgs = assertModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Assert_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(AssertTest, SUB_AI_NNRt_Func_North_Assert_Model_SpecifyInputsAndOutputs_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AssertModel1 assertModel; + OHNNGraphArgs graphArgs = assertModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Assert_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(AssertTest, SUB_AI_NNRt_Func_North_Assert_Model_SpecifyInputsAndOutputs_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AssertModel1 assertModel; + OHNNGraphArgs graphArgs = assertModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Assert_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(AssertTest, SUB_AI_NNRt_Func_North_Assert_Model_SpecifyInputsAndOutputs_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AssertModel1 assertModel; + OHNNGraphArgs graphArgs = assertModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : 
SUB_AI_NNRt_Func_North_Assert_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(AssertTest, SUB_AI_NNRt_Func_North_Assert_Model_SpecifyInputsAndOutputs_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AssertModel1 assertModel; + OHNNGraphArgs graphArgs = assertModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Assert_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(AssertTest, SUB_AI_NNRt_Func_North_Assert_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AssertModel1 assertModel; + OHNNGraphArgs graphArgs = assertModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Assert_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(AssertTest, SUB_AI_NNRt_Func_North_Assert_Model_AddOperation_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AssertModel1 assertModel; + OHNNGraphArgs graphArgs = assertModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Assert_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(AssertTest, SUB_AI_NNRt_Func_North_Assert_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AssertModel1 assertModel; + OHNNGraphArgs graphArgs = assertModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + 
EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Assert_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(AssertTest, SUB_AI_NNRt_Func_North_Assert_Model_AddOperation_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AssertModel1 assertModel; + OHNNGraphArgs graphArgs = assertModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Assert_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(AssertTest, SUB_AI_NNRt_Func_North_Assert_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AssertModel1 assertModel; + OHNNGraphArgs graphArgs = assertModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Assert_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(AssertTest, SUB_AI_NNRt_Func_North_Assert_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AssertModel1 assertModel; + OHNNGraphArgs graphArgs = assertModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Assert_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(AssertTest, SUB_AI_NNRt_Func_North_Assert_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AssertModel1 assertModel; + OHNNGraphArgs graphArgs = assertModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, 
BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Assert_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(AssertTest, SUB_AI_NNRt_Func_North_Assert_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AssertModel1 assertModel; + OHNNGraphArgs graphArgs = assertModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Assert_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(AssertTest, SUB_AI_NNRt_Func_North_Assert_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + AssertModel1 assertModel; + OHNNGraphArgs graphArgs = assertModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/broadcast_to_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/broadcast_to_test.cpp new file mode 100644 index 0000000..65c7ec4 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/broadcast_to_test.cpp @@ -0,0 +1,819 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class BroadcastToTest : public testing::Test {}; + +struct BroadcastToModel1 { + const std::vector input_shape = {3}; + const std::vector output_shape = {3, 3}; + std::vector shapeValue = {3, 3}; + float inputValue[3] = {1, 2, 3}; + float outputValue[9] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 3*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 9*sizeof(float)}; + OHNNOperandTest shape = {OH_NN_INT64, OH_NN_BROADCAST_TO_SHAPE, {2}, &shapeValue, 2*sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_BROADCAST_TO, + .operands = {input, output, shape}, + .paramIndices = {2}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct BroadcastToModel2 { + const std::vector input_shape = {3}; + const std::vector output_shape = {2, 2}; + std::vector shapeValue = {2, 2}; + float inputValue[3] = {1, 2, 3}; + float* outputValue = {}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 3*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 4*sizeof(float)}; + OHNNOperandTest shape = {OH_NN_INT64, OH_NN_BROADCAST_TO_SHAPE, {2}, &shapeValue, 2*sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_BROADCAST_TO, + .operands = {input, output, shape}, + .paramIndices = {2}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct BroadcastToModel3 { + const std::vector input_shape = {3}; + const std::vector output_shape = {1, 3}; + std::vector shapeValue = {0, 3}; + float inputValue[3] = {1, 2, 3}; + float* outputValue = {}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 3*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 3*sizeof(float)}; + OHNNOperandTest shape = {OH_NN_INT64, OH_NN_BROADCAST_TO_SHAPE, {2}, &shapeValue, 2*sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_BROADCAST_TO, + .operands = {input, output, shape}, + .paramIndices = {2}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_BroadcastTo_Build_01 + * @tc.desc: BroadcastToModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToTest, SUB_AI_NNRt_Func_North_BroadcastTo_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + BroadcastToModel1 broadcastToModel; + OHNNGraphArgs graphArgs = broadcastToModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_BroadcastTo_Build_02 + * @tc.desc: BroadcastToModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToTest, SUB_AI_NNRt_Func_North_BroadcastTo_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + BroadcastToModel2 
broadcastToModel; + OHNNGraphArgs graphArgs = broadcastToModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_BroadcastTo_Build_03 + * @tc.desc: BroadcastToModel3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToTest, SUB_AI_NNRt_Func_North_BroadcastTo_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + BroadcastToModel3 broadcastToModel; + OHNNGraphArgs graphArgs = broadcastToModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_BroadcastTo_Build_04 + * @tc.desc: BroadcastToModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToTest, SUB_AI_NNRt_Func_North_BroadcastTo_Build_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + BroadcastToModel1 broadcastToModel; + OHNNGraphArgs graphArgs = broadcastToModel.graphArgs; + graphArgs.operands = {broadcastToModel.input, broadcastToModel.input, + broadcastToModel.output, broadcastToModel.shape}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2}; + graphArgs.paramIndices = {3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_BroadcastTo_Build_05 + * @tc.desc: BroadcastToModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToTest, SUB_AI_NNRt_Func_North_BroadcastTo_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + BroadcastToModel1 broadcastToModel; + OHNNGraphArgs graphArgs = broadcastToModel.graphArgs; + graphArgs.operands = {broadcastToModel.input, broadcastToModel.output, + broadcastToModel.output, broadcastToModel.shape}; + graphArgs.inputIndices = {0}; + graphArgs.outputIndices = {1, 2}; + graphArgs.paramIndices = {3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_BroadcastTo_Build_06 + * @tc.desc: BroadcastToModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToTest, SUB_AI_NNRt_Func_North_BroadcastTo_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + BroadcastToModel1 broadcastToModel; + OHNNGraphArgs graphArgs = broadcastToModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, 
OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {broadcastToModel.input, broadcastToModel.output, broadcastToModel.shape, activation}; + graphArgs.paramIndices = {2, 3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_BroadcastTo_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToTest, SUB_AI_NNRt_Func_North_BroadcastTo_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_BroadcastTo_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToTest, SUB_AI_NNRt_Func_North_BroadcastTo_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + BroadcastToModel1 broadcastToModel; + OHNNGraphArgs graphArgs = broadcastToModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_BroadcastTo_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToTest, SUB_AI_NNRt_Func_North_BroadcastTo_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + BroadcastToModel1 broadcastToModel; + OHNNGraphArgs graphArgs = broadcastToModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_BroadcastTo_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToTest, SUB_AI_NNRt_Func_North_BroadcastTo_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + BroadcastToModel1 broadcastToModel; + OHNNGraphArgs graphArgs = broadcastToModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_BroadcastTo_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buffer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToTest, SUB_AI_NNRt_Func_North_BroadcastTo_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + BroadcastToModel1 broadcastToModel; + OHNNGraphArgs graphArgs = 
broadcastToModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_BroadcastTo_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToTest, SUB_AI_NNRt_Func_North_BroadcastTo_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + BroadcastToModel1 broadcastToModel; + OHNNGraphArgs graphArgs = broadcastToModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_BroadcastTo_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToTest, SUB_AI_NNRt_Func_North_BroadcastTo_Model_SpecifyInputsAndOutputs_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + BroadcastToModel1 broadcastToModel; + OHNNGraphArgs graphArgs = broadcastToModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_BroadcastTo_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToTest, SUB_AI_NNRt_Func_North_BroadcastTo_Model_SpecifyInputsAndOutputs_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + BroadcastToModel1 broadcastToModel; + OHNNGraphArgs graphArgs = broadcastToModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, 
BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_BroadcastTo_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToTest, SUB_AI_NNRt_Func_North_BroadcastTo_Model_SpecifyInputsAndOutputs_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + BroadcastToModel1 broadcastToModel; + OHNNGraphArgs graphArgs = broadcastToModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_BroadcastTo_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToTest, SUB_AI_NNRt_Func_North_BroadcastTo_Model_SpecifyInputsAndOutputs_04, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + BroadcastToModel1 broadcastToModel; + OHNNGraphArgs graphArgs = broadcastToModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_BroadcastTo_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToTest, SUB_AI_NNRt_Func_North_BroadcastTo_Model_SpecifyInputsAndOutputs_05, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + BroadcastToModel1 broadcastToModel; + OHNNGraphArgs graphArgs = broadcastToModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_BroadcastTo_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToTest, SUB_AI_NNRt_Func_North_BroadcastTo_Model_SpecifyInputsAndOutputs_06, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + BroadcastToModel1 broadcastToModel; + OHNNGraphArgs graphArgs = broadcastToModel.graphArgs; + graphArgs.specifyIO = false; + 
graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_BroadcastTo_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToTest, SUB_AI_NNRt_Func_North_BroadcastTo_Model_SpecifyInputsAndOutputs_07, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + BroadcastToModel1 broadcastToModel; + OHNNGraphArgs graphArgs = broadcastToModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_BroadcastTo_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToTest, SUB_AI_NNRt_Func_North_BroadcastTo_Model_SpecifyInputsAndOutputs_08, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + BroadcastToModel1 broadcastToModel; + OHNNGraphArgs graphArgs = broadcastToModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_BroadcastTo_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToTest, SUB_AI_NNRt_Func_North_BroadcastTo_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + BroadcastToModel1 broadcastToModel; + OHNNGraphArgs graphArgs = broadcastToModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_BroadcastTo_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToTest, SUB_AI_NNRt_Func_North_BroadcastTo_Model_AddOperation_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + BroadcastToModel1 broadcastToModel; + OHNNGraphArgs graphArgs 
= broadcastToModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_BroadcastTo_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToTest, SUB_AI_NNRt_Func_North_BroadcastTo_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + BroadcastToModel1 broadcastToModel; + OHNNGraphArgs graphArgs = broadcastToModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_BroadcastTo_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToTest, SUB_AI_NNRt_Func_North_BroadcastTo_Model_AddOperation_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + BroadcastToModel1 broadcastToModel; + OHNNGraphArgs graphArgs = broadcastToModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_BroadcastTo_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToTest, SUB_AI_NNRt_Func_North_BroadcastTo_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + BroadcastToModel1 broadcastToModel; + OHNNGraphArgs graphArgs = broadcastToModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number 
: SUB_AI_NNRt_Func_North_BroadcastTo_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToTest, SUB_AI_NNRt_Func_North_BroadcastTo_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + BroadcastToModel1 broadcastToModel; + OHNNGraphArgs graphArgs = broadcastToModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_BroadcastTo_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToTest, SUB_AI_NNRt_Func_North_BroadcastTo_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + BroadcastToModel1 broadcastToModel; + OHNNGraphArgs graphArgs = broadcastToModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_BroadcastTo_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToTest, SUB_AI_NNRt_Func_North_BroadcastTo_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + BroadcastToModel1 broadcastToModel; + OHNNGraphArgs graphArgs = broadcastToModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_BroadcastTo_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToTest, SUB_AI_NNRt_Func_North_BroadcastTo_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + BroadcastToModel1 broadcastToModel; + OHNNGraphArgs graphArgs = broadcastToModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + 
EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/ceil_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/ceil_test.cpp new file mode 100644 index 0000000..4362a23 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/ceil_test.cpp @@ -0,0 +1,757 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class CeilTest : public testing::Test {}; + +struct CeilModel1 { + const std::vector tensor_shape = {5}; + float inputValue[5] = {-2.5, -1.1, 0, 1.1, 2.9}; + float outputValue[5] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 5*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 5*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_CEIL, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + + +struct CeilModel2 { + const std::vector tensor_shape = {4}; + float inputValue[4] = {3.5, 4.7, 5.1, 6.6}; + float outputValue[4] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 4*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 4*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_CEIL, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Ceil_Build_01 + * @tc.desc: CeilModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(CeilTest, SUB_AI_NNRt_Func_North_Ceil_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CeilModel1 ceilModel; + OHNNGraphArgs graphArgs = ceilModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Ceil_Build_02 + * @tc.desc: CeilModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(CeilTest, SUB_AI_NNRt_Func_North_Ceil_Build_02, Function | MediumTest 
| Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CeilModel2 ceilModel; + OHNNGraphArgs graphArgs = ceilModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Ceil_Build_03 + * @tc.desc: CeilModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(CeilTest, SUB_AI_NNRt_Func_North_Ceil_Build_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CeilModel1 ceilModel; + OHNNGraphArgs graphArgs = ceilModel.graphArgs; + graphArgs.operands = {ceilModel.input, ceilModel.input, ceilModel.output}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Ceil_Build_04 + * @tc.desc: CeilModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(CeilTest, SUB_AI_NNRt_Func_North_Ceil_Build_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CeilModel1 ceilModel; + OHNNGraphArgs graphArgs = ceilModel.graphArgs; + graphArgs.operands = {ceilModel.input, ceilModel.output, ceilModel.output}; + graphArgs.inputIndices = {0}; + graphArgs.outputIndices = {1, 2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Ceil_Build_05 + * @tc.desc: CeilModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(CeilTest, SUB_AI_NNRt_Func_North_Ceil_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CeilModel1 ceilModel; + OHNNGraphArgs graphArgs = ceilModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {ceilModel.input, ceilModel.output, activation}; + graphArgs.paramIndices = {2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Ceil_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(CeilTest, SUB_AI_NNRt_Func_North_Ceil_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Ceil_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(CeilTest, SUB_AI_NNRt_Func_North_Ceil_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CeilModel1 ceilModel; + OHNNGraphArgs graphArgs = ceilModel.graphArgs; + graphArgs.specifyIO = false; + 
EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Ceil_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(CeilTest, SUB_AI_NNRt_Func_North_Ceil_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CeilModel1 ceilModel; + OHNNGraphArgs graphArgs = ceilModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Ceil_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(CeilTest, SUB_AI_NNRt_Func_North_Ceil_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CeilModel1 ceilModel; + OHNNGraphArgs graphArgs = ceilModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Ceil_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(CeilTest, SUB_AI_NNRt_Func_North_Ceil_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CeilModel1 ceilModel; + OHNNGraphArgs graphArgs = ceilModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Ceil_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(CeilTest, SUB_AI_NNRt_Func_North_Ceil_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CeilModel1 ceilModel; + OHNNGraphArgs graphArgs = ceilModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < 
graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Ceil_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(CeilTest, SUB_AI_NNRt_Func_North_Ceil_Model_SpecifyInputsAndOutputs_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CeilModel1 ceilModel; + OHNNGraphArgs graphArgs = ceilModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Ceil_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(CeilTest, SUB_AI_NNRt_Func_North_Ceil_Model_SpecifyInputsAndOutputs_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CeilModel1 ceilModel; + OHNNGraphArgs graphArgs = ceilModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Ceil_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(CeilTest, SUB_AI_NNRt_Func_North_Ceil_Model_SpecifyInputsAndOutputs_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CeilModel1 ceilModel; + OHNNGraphArgs graphArgs = ceilModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Ceil_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(CeilTest, SUB_AI_NNRt_Func_North_Ceil_Model_SpecifyInputsAndOutputs_04, Function | MediumTest 
| Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CeilModel1 ceilModel; + OHNNGraphArgs graphArgs = ceilModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Ceil_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(CeilTest, SUB_AI_NNRt_Func_North_Ceil_Model_SpecifyInputsAndOutputs_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CeilModel1 ceilModel; + OHNNGraphArgs graphArgs = ceilModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Ceil_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(CeilTest, SUB_AI_NNRt_Func_North_Ceil_Model_SpecifyInputsAndOutputs_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CeilModel1 ceilModel; + OHNNGraphArgs graphArgs = ceilModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Ceil_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(CeilTest, SUB_AI_NNRt_Func_North_Ceil_Model_SpecifyInputsAndOutputs_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CeilModel1 ceilModel; + OHNNGraphArgs graphArgs = ceilModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Ceil_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(CeilTest, SUB_AI_NNRt_Func_North_Ceil_Model_SpecifyInputsAndOutputs_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + 
CeilModel1 ceilModel; + OHNNGraphArgs graphArgs = ceilModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Ceil_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(CeilTest, SUB_AI_NNRt_Func_North_Ceil_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CeilModel1 ceilModel; + OHNNGraphArgs graphArgs = ceilModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Ceil_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(CeilTest, SUB_AI_NNRt_Func_North_Ceil_Model_AddOperation_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CeilModel1 ceilModel; + OHNNGraphArgs graphArgs = ceilModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Ceil_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(CeilTest, SUB_AI_NNRt_Func_North_Ceil_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CeilModel1 ceilModel; + OHNNGraphArgs graphArgs = ceilModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Ceil_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(CeilTest, SUB_AI_NNRt_Func_North_Ceil_Model_AddOperation_04, Function | MediumTest | 
Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CeilModel1 ceilModel; + OHNNGraphArgs graphArgs = ceilModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Ceil_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(CeilTest, SUB_AI_NNRt_Func_North_Ceil_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CeilModel1 ceilModel; + OHNNGraphArgs graphArgs = ceilModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Ceil_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(CeilTest, SUB_AI_NNRt_Func_North_Ceil_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CeilModel1 ceilModel; + OHNNGraphArgs graphArgs = ceilModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Ceil_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(CeilTest, SUB_AI_NNRt_Func_North_Ceil_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CeilModel1 ceilModel; + OHNNGraphArgs graphArgs = ceilModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number 
: SUB_AI_NNRt_Func_North_Ceil_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(CeilTest, SUB_AI_NNRt_Func_North_Ceil_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CeilModel1 ceilModel; + OHNNGraphArgs graphArgs = ceilModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Ceil_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(CeilTest, SUB_AI_NNRt_Func_North_Ceil_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CeilModel1 ceilModel; + OHNNGraphArgs graphArgs = ceilModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/clip_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/clip_test.cpp new file mode 100644 index 0000000..ef0e46b --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/clip_test.cpp @@ -0,0 +1,911 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include <gtest/gtest.h>
+#include <vector>
+#include <algorithm>
+#include "nncore_utils.h"
+
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Test;
+class ClipTest : public testing::Test {};
+
+struct ClipModel1 {
+    const std::vector<int32_t> input_shape = {7};
+    const std::vector<int32_t> output_shape = {7};
+    float minValue[1] = {-1};
+    float maxValue[1] = {1};
+    float inputValue[7] = {-3, -2, -1, 0, 1, 2, 3};
+    float outputValue[7] = {0};
+
+    OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 7*sizeof(float)};
+    OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 7*sizeof(float)};
+    OHNNOperandTest min = {OH_NN_FLOAT32, OH_NN_CLIP_MIN, {1}, minValue, sizeof(float)};
+    OHNNOperandTest max = {OH_NN_FLOAT32, OH_NN_CLIP_MAX, {1}, maxValue, sizeof(float)};
+    OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_CLIP,
+                               .operands = {input, output, min, max},
+                               .paramIndices = {2, 3},
+                               .inputIndices = {0},
+                               .outputIndices = {1}};
+};
+
+struct ClipModel2 {
+    const std::vector<int32_t> input_shape = {7};
+    const std::vector<int32_t> output_shape = {7};
+    float minValue[1] = {1};
+    float maxValue[1] = {1};
+    float inputValue[7] = {-3, -2, -1, 0, 1, 2, 3};
+    float outputValue[7] = {0};
+
+    OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 7*sizeof(float)};
+    OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 7*sizeof(float)};
+    OHNNOperandTest min = {OH_NN_FLOAT32, OH_NN_CLIP_MIN, {1}, minValue, sizeof(float)};
+    OHNNOperandTest max = {OH_NN_FLOAT32, OH_NN_CLIP_MAX, {1}, maxValue, sizeof(float)};
+    OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_CLIP,
+                               .operands = {input, output, min, max},
+                               .paramIndices = {2, 3},
+                               .inputIndices = {0},
+                               .outputIndices = {1}};
+};
+
+struct ClipModel3 {
+    const std::vector<int32_t> input_shape = {7};
+    const std::vector<int32_t> output_shape = {7};
+    float minValue[1] = {1};
+    float maxValue[1] = {0};
+    float inputValue[7] = {-3, -2, -1, 0, 1, 2, 3};
+    float outputValue[7] = {0};
+
+    OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 7*sizeof(float)};
+    OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 7*sizeof(float)};
+    OHNNOperandTest min = {OH_NN_FLOAT32, OH_NN_CLIP_MIN, {1}, minValue, sizeof(float)};
+    OHNNOperandTest max = {OH_NN_FLOAT32, OH_NN_CLIP_MAX, {1}, maxValue, sizeof(float)};
+    OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_CLIP,
+                               .operands = {input, output, min, max},
+                               .paramIndices = {2, 3},
+                               .inputIndices = {0},
+                               .outputIndices = {1}};
+};
+
+struct ClipModel4 {
+    const std::vector<int32_t> input_shape = {7};
+    const std::vector<int32_t> output_shape = {7};
+    float minValue[1] = {1};
+    float maxValue[1] = {1.1};
+    float inputValue[7] = {-3, -2, -1, 0, 1, 2, 3};
+    float outputValue[7] = {0};
+
+    OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 7*sizeof(float)};
+    OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 7*sizeof(float)};
+    OHNNOperandTest min = {OH_NN_FLOAT32, OH_NN_CLIP_MIN, {1}, minValue, sizeof(float)};
+    OHNNOperandTest max = {OH_NN_FLOAT32, OH_NN_CLIP_MAX, {1}, maxValue, sizeof(float)};
+    OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_CLIP,
+                               .operands = {input, output, min, max},
+                               .paramIndices = {2, 3},
+                               .inputIndices = {0},
+                               .outputIndices = {1}};
+};
+
+struct ClipModel5 {
+    const std::vector<int32_t> input_shape = {};
+    const std::vector<int32_t> output_shape = {};
+    float minValue[1] = {1};
+    float maxValue[1] = {1.1};
+    float* inputValue = {};
float* outputValue = {}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 0*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 0*sizeof(float)}; + OHNNOperandTest min = {OH_NN_FLOAT32, OH_NN_CLIP_MIN, {1}, minValue, sizeof(float)}; + OHNNOperandTest max = {OH_NN_FLOAT32, OH_NN_CLIP_MAX, {1}, maxValue, sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_CLIP, + .operands = {input, output, min, max}, + .paramIndices = {2, 3}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Clip_Build_01 + * @tc.desc: ClipModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ClipTest, SUB_AI_NNRt_Func_North_Clip_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ClipModel1 clipModel; + OHNNGraphArgs graphArgs = clipModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Clip_Build_02 + * @tc.desc: ClipModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ClipTest, SUB_AI_NNRt_Func_North_Clip_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ClipModel2 clipModel; + OHNNGraphArgs graphArgs = clipModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Clip_Build_03 + * @tc.desc: ClipModel3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ClipTest, SUB_AI_NNRt_Func_North_Clip_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ClipModel3 clipModel; + OHNNGraphArgs graphArgs = clipModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Clip_Build_04 + * @tc.desc: ClipModel4模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ClipTest, SUB_AI_NNRt_Func_North_Clip_Build_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ClipModel4 clipModel; + OHNNGraphArgs graphArgs = clipModel.graphArgs; 
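+    // ClipModel4 uses min = 1.0f and max = 1.1f, a valid clip range, so graph build and compilation are expected to succeed.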
+ EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Clip_Build_05 + * @tc.desc: ClipModel5模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ClipTest, SUB_AI_NNRt_Func_North_Clip_Build_05, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ClipModel5 clipModel; + OHNNGraphArgs graphArgs = clipModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Clip_Build_06 + * @tc.desc: ClipModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ClipTest, SUB_AI_NNRt_Func_North_Clip_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ClipModel1 clipModel; + OHNNGraphArgs graphArgs = clipModel.graphArgs; + graphArgs.operands = {clipModel.input, clipModel.input, clipModel.output, clipModel.min, clipModel.max}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2}; + graphArgs.paramIndices = {3, 4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Clip_Build_07 + * @tc.desc: ClipModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ClipTest, SUB_AI_NNRt_Func_North_Clip_Build_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ClipModel1 clipModel; + OHNNGraphArgs graphArgs = clipModel.graphArgs; + graphArgs.operands = {clipModel.input, clipModel.output, clipModel.output, clipModel.min, clipModel.max}; + graphArgs.inputIndices = {0}; + graphArgs.outputIndices = {1, 2}; + graphArgs.paramIndices = {3, 4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Clip_Build_08 + * @tc.desc: ClipModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ClipTest, SUB_AI_NNRt_Func_North_Clip_Build_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ClipModel1 clipModel; + OHNNGraphArgs graphArgs = clipModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {clipModel.input, clipModel.output, clipModel.min, clipModel.max, activation}; + graphArgs.paramIndices = {2, 3, 4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} 
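+// The cases below cover the OH_NNModel_* construction APIs step by step. BuildSingleOpGraph (from nncore_utils.h)
+// is assumed to honour the addOperation / specifyIO / build flags in OHNNGraphArgs and to stop before the skipped
+// steps, roughly:
+//     for each operand: createTensorDesc -> OH_NNModel_AddTensorToModel -> OH_NNModel_SetTensorType
+//                       (plus OH_NNModel_SetTensorData for parameter operands)
+//     if (graphArgs.addOperation) OH_NNModel_AddOperation(...)
+//     if (graphArgs.specifyIO)    OH_NNModel_SpecifyInputsAndOutputs(...)
+//     if (graphArgs.build)        OH_NNModel_Finish(model)
+// Each test then calls the skipped API directly with one invalid argument and expects OH_NN_INVALID_PARAMETER,
+// or OH_NN_OPERATION_FORBIDDEN when OH_NNModel_Finish runs without inputs and outputs having been specified.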
+ +/** + * @tc.number : SUB_AI_NNRt_Func_North_Clip_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(ClipTest, SUB_AI_NNRt_Func_North_Clip_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Clip_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(ClipTest, SUB_AI_NNRt_Func_North_Clip_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ClipModel1 clipModel; + OHNNGraphArgs graphArgs = clipModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Clip_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(ClipTest, SUB_AI_NNRt_Func_North_Clip_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ClipModel1 clipModel; + OHNNGraphArgs graphArgs = clipModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Clip_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(ClipTest, SUB_AI_NNRt_Func_North_Clip_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ClipModel1 clipModel; + OHNNGraphArgs graphArgs = clipModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Clip_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ClipTest, SUB_AI_NNRt_Func_North_Clip_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ClipModel1 clipModel; + OHNNGraphArgs graphArgs = clipModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, 
OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Clip_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(ClipTest, SUB_AI_NNRt_Func_North_Clip_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ClipModel1 clipModel; + OHNNGraphArgs graphArgs = clipModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Clip_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ClipTest, SUB_AI_NNRt_Func_North_Clip_Model_SpecifyInputsAndOutputs_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ClipModel1 clipModel; + OHNNGraphArgs graphArgs = clipModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Clip_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ClipTest, SUB_AI_NNRt_Func_North_Clip_Model_SpecifyInputsAndOutputs_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ClipModel1 clipModel; + OHNNGraphArgs graphArgs = clipModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Clip_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ClipTest, SUB_AI_NNRt_Func_North_Clip_Model_SpecifyInputsAndOutputs_03, Function | MediumTest | Level2) +{ + 
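+    // inputIndices is overwritten with 100000 below, an index that does not refer to any tensor added to the
+    // model, so OH_NNModel_SpecifyInputsAndOutputs is expected to return OH_NN_INVALID_PARAMETER.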
OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ClipModel1 clipModel; + OHNNGraphArgs graphArgs = clipModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Clip_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ClipTest, SUB_AI_NNRt_Func_North_Clip_Model_SpecifyInputsAndOutputs_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ClipModel1 clipModel; + OHNNGraphArgs graphArgs = clipModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Clip_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ClipTest, SUB_AI_NNRt_Func_North_Clip_Model_SpecifyInputsAndOutputs_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ClipModel1 clipModel; + OHNNGraphArgs graphArgs = clipModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Clip_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ClipTest, SUB_AI_NNRt_Func_North_Clip_Model_SpecifyInputsAndOutputs_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ClipModel1 clipModel; + OHNNGraphArgs graphArgs = clipModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Clip_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ClipTest, SUB_AI_NNRt_Func_North_Clip_Model_SpecifyInputsAndOutputs_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ClipModel1 clipModel; + OHNNGraphArgs graphArgs = 
clipModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Clip_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ClipTest, SUB_AI_NNRt_Func_North_Clip_Model_SpecifyInputsAndOutputs_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ClipModel1 clipModel; + OHNNGraphArgs graphArgs = clipModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Clip_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ClipTest, SUB_AI_NNRt_Func_North_Clip_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ClipModel1 clipModel; + OHNNGraphArgs graphArgs = clipModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Clip_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ClipTest, SUB_AI_NNRt_Func_North_Clip_Model_AddOperation_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ClipModel1 clipModel; + OHNNGraphArgs graphArgs = clipModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Clip_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ClipTest, SUB_AI_NNRt_Func_North_Clip_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ClipModel1 clipModel; + OHNNGraphArgs graphArgs = 
clipModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Clip_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ClipTest, SUB_AI_NNRt_Func_North_Clip_Model_AddOperation_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ClipModel1 clipModel; + OHNNGraphArgs graphArgs = clipModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Clip_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ClipTest, SUB_AI_NNRt_Func_North_Clip_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ClipModel1 clipModel; + OHNNGraphArgs graphArgs = clipModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Clip_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ClipTest, SUB_AI_NNRt_Func_North_Clip_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ClipModel1 clipModel; + OHNNGraphArgs graphArgs = clipModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Clip_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ 
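+// Here inputIndices.data holds 100000, which does not correspond to any tensor in the model.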
+HWTEST_F(ClipTest, SUB_AI_NNRt_Func_North_Clip_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ClipModel1 clipModel; + OHNNGraphArgs graphArgs = clipModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Clip_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ClipTest, SUB_AI_NNRt_Func_North_Clip_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ClipModel1 clipModel; + OHNNGraphArgs graphArgs = clipModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Clip_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ClipTest, SUB_AI_NNRt_Func_North_Clip_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ClipModel1 clipModel; + OHNNGraphArgs graphArgs = clipModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/constant_of_shape_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/constant_of_shape_test.cpp new file mode 100644 index 0000000..a9c6bdd --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/constant_of_shape_test.cpp @@ -0,0 +1,791 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class ConstantOfShapeTest : public testing::Test {}; + +struct ConstantOfShapeModel1 { + const std::vector input_shape = {2}; + const std::vector output_shape = {2, 3}; + const std::vector param_shape = {1}; + int64_t dataTypeValue[1] = {45}; + float valueValue[1] = {4.5}; + float inputValue[2] = {2, 3}; + float outputValue[2][3] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 2*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT16, OH_NN_TENSOR, output_shape, outputValue, 6*sizeof(float)}; + OHNNOperandTest dataType = {OH_NN_INT64, OH_NN_CONSTANT_OF_SHAPE_DATA_TYPE, {1}, dataTypeValue, sizeof(int64_t)}; + OHNNOperandTest value = {OH_NN_FLOAT32, OH_NN_CONSTANT_OF_SHAPE_VALUE, {1}, valueValue, sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_CONSTANT_OF_SHAPE, + .operands = {input, output, dataType, value}, + .paramIndices = {2, 3}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct ConstantOfShapeModel2 { + const std::vector input_shape = {1}; + const std::vector output_shape = {}; + int64_t dataTypeValue[1] = {45}; + float valueValue[1] = {4.5}; + float inputValue[1] = {0}; + float* outputValue = {}; + + OHNNOperandTest input = {OH_NN_FLOAT16, OH_NN_TENSOR, input_shape, inputValue, sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT16, OH_NN_TENSOR, output_shape, outputValue, 0*sizeof(float)}; + OHNNOperandTest dataType = {OH_NN_INT64, OH_NN_CONSTANT_OF_SHAPE_DATA_TYPE, {1}, dataTypeValue, sizeof(int64_t)}; + OHNNOperandTest value = {OH_NN_FLOAT32, OH_NN_CONSTANT_OF_SHAPE_VALUE, {1}, valueValue, sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_CONSTANT_OF_SHAPE, + .operands = {input, output, dataType, value}, + .paramIndices = {2, 3}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ConstantOfShape_Build_01 + * @tc.desc: ConstantOfShapeModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeTest, SUB_AI_NNRt_Func_North_ConstantOfShape_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ConstantOfShapeModel1 constantOfShapeModel; + OHNNGraphArgs graphArgs = constantOfShapeModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ConstantOfShape_Build_02 + * @tc.desc: ConstantOfShapeModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeTest, SUB_AI_NNRt_Func_North_ConstantOfShape_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ConstantOfShapeModel2 constantOfShapeModel; + OHNNGraphArgs graphArgs = constantOfShapeModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation 
*compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ConstantOfShape_Build_03 + * @tc.desc: ConstantOfShapeModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeTest, SUB_AI_NNRt_Func_North_ConstantOfShape_Build_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ConstantOfShapeModel1 constantOfShapeModel; + OHNNGraphArgs graphArgs = constantOfShapeModel.graphArgs; + graphArgs.operands = {constantOfShapeModel.input, constantOfShapeModel.input, constantOfShapeModel.output, + constantOfShapeModel.dataType, constantOfShapeModel.value}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2}; + graphArgs.paramIndices = {3, 4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ConstantOfShape_Build_04 + * @tc.desc: ConstantOfShapeModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeTest, SUB_AI_NNRt_Func_North_ConstantOfShape_Build_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ConstantOfShapeModel1 constantOfShapeModel; + OHNNGraphArgs graphArgs = constantOfShapeModel.graphArgs; + graphArgs.operands = {constantOfShapeModel.input, constantOfShapeModel.output, constantOfShapeModel.output, + constantOfShapeModel.dataType, constantOfShapeModel.value}; + graphArgs.inputIndices = {0}; + graphArgs.outputIndices = {1, 2}; + graphArgs.paramIndices = {3, 4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ConstantOfShape_Build_05 + * @tc.desc: ConstantOfShapeModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeTest, SUB_AI_NNRt_Func_North_ConstantOfShape_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ConstantOfShapeModel1 constantOfShapeModel; + OHNNGraphArgs graphArgs = constantOfShapeModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {constantOfShapeModel.input, constantOfShapeModel.output, constantOfShapeModel.dataType, + constantOfShapeModel.value, activation}; + graphArgs.paramIndices = {2, 3, 4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ConstantOfShape_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeTest, SUB_AI_NNRt_Func_North_ConstantOfShape_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : 
SUB_AI_NNRt_Func_North_ConstantOfShape_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeTest, SUB_AI_NNRt_Func_North_ConstantOfShape_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ConstantOfShapeModel1 constantOfShapeModel; + OHNNGraphArgs graphArgs = constantOfShapeModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ConstantOfShape_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeTest, SUB_AI_NNRt_Func_North_ConstantOfShape_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ConstantOfShapeModel1 constantOfShapeModel; + OHNNGraphArgs graphArgs = constantOfShapeModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ConstantOfShape_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeTest, SUB_AI_NNRt_Func_North_ConstantOfShape_Model_SetOperandValue_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ConstantOfShapeModel1 constantOfShapeModel; + OHNNGraphArgs graphArgs = constantOfShapeModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ConstantOfShape_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeTest, SUB_AI_NNRt_Func_North_ConstantOfShape_Model_SetOperandValue_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ConstantOfShapeModel1 constantOfShapeModel; + OHNNGraphArgs graphArgs = constantOfShapeModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + 
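+            // A null data buffer passed to OH_NNModel_SetTensorData is expected to be rejected with OH_NN_INVALID_PARAMETER.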
EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ConstantOfShape_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeTest, SUB_AI_NNRt_Func_North_ConstantOfShape_Model_SetOperandValue_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ConstantOfShapeModel1 constantOfShapeModel; + OHNNGraphArgs graphArgs = constantOfShapeModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ConstantOfShape_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeTest, SUB_AI_NNRt_Func_North_ConstantOfShape_Model_SpecifyInputsAndOutputs_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ConstantOfShapeModel1 constantOfShapeModel; + OHNNGraphArgs graphArgs = constantOfShapeModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ConstantOfShape_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeTest, SUB_AI_NNRt_Func_North_ConstantOfShape_Model_SpecifyInputsAndOutputs_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ConstantOfShapeModel1 constantOfShapeModel; + OHNNGraphArgs graphArgs = constantOfShapeModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ConstantOfShape_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeTest, 
SUB_AI_NNRt_Func_North_ConstantOfShape_Model_SpecifyInputsAndOutputs_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ConstantOfShapeModel1 constantOfShapeModel; + OHNNGraphArgs graphArgs = constantOfShapeModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ConstantOfShape_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeTest, SUB_AI_NNRt_Func_North_ConstantOfShape_Model_SpecifyInputsAndOutputs_04, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ConstantOfShapeModel1 constantOfShapeModel; + OHNNGraphArgs graphArgs = constantOfShapeModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ConstantOfShape_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeTest, SUB_AI_NNRt_Func_North_ConstantOfShape_Model_SpecifyInputsAndOutputs_05, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ConstantOfShapeModel1 constantOfShapeModel; + OHNNGraphArgs graphArgs = constantOfShapeModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ConstantOfShape_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeTest, SUB_AI_NNRt_Func_North_ConstantOfShape_Model_SpecifyInputsAndOutputs_06, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ConstantOfShapeModel1 constantOfShapeModel; + OHNNGraphArgs graphArgs = constantOfShapeModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : 
SUB_AI_NNRt_Func_North_ConstantOfShape_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeTest, SUB_AI_NNRt_Func_North_ConstantOfShape_Model_SpecifyInputsAndOutputs_07, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ConstantOfShapeModel1 constantOfShapeModel; + OHNNGraphArgs graphArgs = constantOfShapeModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ConstantOfShape_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeTest, SUB_AI_NNRt_Func_North_ConstantOfShape_Model_SpecifyInputsAndOutputs_08, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ConstantOfShapeModel1 constantOfShapeModel; + OHNNGraphArgs graphArgs = constantOfShapeModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ConstantOfShape_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeTest, SUB_AI_NNRt_Func_North_ConstantOfShape_Model_AddOperation_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ConstantOfShapeModel1 constantOfShapeModel; + OHNNGraphArgs graphArgs = constantOfShapeModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ConstantOfShape_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeTest, SUB_AI_NNRt_Func_North_ConstantOfShape_Model_AddOperation_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ConstantOfShapeModel1 constantOfShapeModel; + OHNNGraphArgs graphArgs = constantOfShapeModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto 
outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ConstantOfShape_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeTest, SUB_AI_NNRt_Func_North_ConstantOfShape_Model_AddOperation_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ConstantOfShapeModel1 constantOfShapeModel; + OHNNGraphArgs graphArgs = constantOfShapeModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ConstantOfShape_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeTest, SUB_AI_NNRt_Func_North_ConstantOfShape_Model_AddOperation_04, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ConstantOfShapeModel1 constantOfShapeModel; + OHNNGraphArgs graphArgs = constantOfShapeModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ConstantOfShape_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeTest, SUB_AI_NNRt_Func_North_ConstantOfShape_Model_AddOperation_05, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ConstantOfShapeModel1 constantOfShapeModel; + OHNNGraphArgs graphArgs = constantOfShapeModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ConstantOfShape_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeTest, SUB_AI_NNRt_Func_North_ConstantOfShape_Model_AddOperation_06, + Function | MediumTest | 
Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ConstantOfShapeModel1 constantOfShapeModel; + OHNNGraphArgs graphArgs = constantOfShapeModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ConstantOfShape_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeTest, SUB_AI_NNRt_Func_North_ConstantOfShape_Model_AddOperation_07, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ConstantOfShapeModel1 constantOfShapeModel; + OHNNGraphArgs graphArgs = constantOfShapeModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ConstantOfShape_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeTest, SUB_AI_NNRt_Func_North_ConstantOfShape_Model_AddOperation_08, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ConstantOfShapeModel1 constantOfShapeModel; + OHNNGraphArgs graphArgs = constantOfShapeModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ConstantOfShape_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeTest, SUB_AI_NNRt_Func_North_ConstantOfShape_Model_AddOperation_09, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ConstantOfShapeModel1 constantOfShapeModel; + OHNNGraphArgs graphArgs = constantOfShapeModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices 
= TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/cos_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/cos_test.cpp new file mode 100644 index 0000000..20de446 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/cos_test.cpp @@ -0,0 +1,755 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class CosTest : public testing::Test {}; + +struct CosModel1 { + const std::vector tensor_shape = {3}; + int64_t inputValue[3] = {0, 1, 2}; + float outputValue[3] = {0}; + + OHNNOperandTest input = {OH_NN_INT64, OH_NN_TENSOR, tensor_shape, inputValue, 3*sizeof(int64_t)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_COS, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct CosModel2 { + const std::vector tensor_shape = {2}; + bool inputValue[2] = {false, true}; + float outputValue[2] = {0}; + + OHNNOperandTest input = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, inputValue, 2*sizeof(bool)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 2*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_COS, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Cos_Build_01 + * @tc.desc: CosModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(CosTest, SUB_AI_NNRt_Func_North_Cos_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CosModel1 cosModel; + OHNNGraphArgs graphArgs = cosModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Cos_Build_02 + * @tc.desc: CosModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(CosTest, SUB_AI_NNRt_Func_North_Cos_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CosModel2 cosModel; + OHNNGraphArgs graphArgs = cosModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, 
BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Cos_Build_03 + * @tc.desc: CosModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(CosTest, SUB_AI_NNRt_Func_North_Cos_Build_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CosModel2 cosModel; + OHNNGraphArgs graphArgs = cosModel.graphArgs; + graphArgs.operands = {cosModel.input, cosModel.input, cosModel.output}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Cos_Build_04 + * @tc.desc: CosModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(CosTest, SUB_AI_NNRt_Func_North_Cos_Build_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CosModel2 cosModel; + OHNNGraphArgs graphArgs = cosModel.graphArgs; + graphArgs.operands = {cosModel.input, cosModel.output, cosModel.output}; + graphArgs.inputIndices = {0}; + graphArgs.outputIndices = {1, 2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Cos_Build_05 + * @tc.desc: CosModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(CosTest, SUB_AI_NNRt_Func_North_Cos_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CosModel2 cosModel; + OHNNGraphArgs graphArgs = cosModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {cosModel.input, cosModel.output, activation}; + graphArgs.paramIndices = {2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Cos_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(CosTest, SUB_AI_NNRt_Func_North_Cos_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Cos_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(CosTest, SUB_AI_NNRt_Func_North_Cos_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CosModel1 cosModel; + OHNNGraphArgs graphArgs = cosModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Cos_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(CosTest, 
SUB_AI_NNRt_Func_North_Cos_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CosModel1 cosModel; + OHNNGraphArgs graphArgs = cosModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Cos_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(CosTest, SUB_AI_NNRt_Func_North_Cos_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CosModel1 cosModel; + OHNNGraphArgs graphArgs = cosModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Cos_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buffer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(CosTest, SUB_AI_NNRt_Func_North_Cos_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CosModel1 cosModel; + OHNNGraphArgs graphArgs = cosModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Cos_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(CosTest, SUB_AI_NNRt_Func_North_Cos_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CosModel1 cosModel; + OHNNGraphArgs graphArgs = cosModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + 
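+ // For parameter tensors, OH_NNModel_SetTensorData is then called with an out-of-range index (1000 + i) and a zero length, and OH_NN_INVALID_PARAMETER is expected.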
EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Cos_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(CosTest, SUB_AI_NNRt_Func_North_Cos_Model_SpecifyInputsAndOutputs_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CosModel1 cosModel; + OHNNGraphArgs graphArgs = cosModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Cos_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(CosTest, SUB_AI_NNRt_Func_North_Cos_Model_SpecifyInputsAndOutputs_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CosModel1 cosModel; + OHNNGraphArgs graphArgs = cosModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Cos_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(CosTest, SUB_AI_NNRt_Func_North_Cos_Model_SpecifyInputsAndOutputs_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CosModel1 cosModel; + OHNNGraphArgs graphArgs = cosModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Cos_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(CosTest, SUB_AI_NNRt_Func_North_Cos_Model_SpecifyInputsAndOutputs_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CosModel1 cosModel; + OHNNGraphArgs graphArgs = cosModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto 
inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Cos_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(CosTest, SUB_AI_NNRt_Func_North_Cos_Model_SpecifyInputsAndOutputs_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CosModel1 cosModel; + OHNNGraphArgs graphArgs = cosModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Cos_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(CosTest, SUB_AI_NNRt_Func_North_Cos_Model_SpecifyInputsAndOutputs_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CosModel1 cosModel; + OHNNGraphArgs graphArgs = cosModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Cos_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(CosTest, SUB_AI_NNRt_Func_North_Cos_Model_SpecifyInputsAndOutputs_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CosModel1 cosModel; + OHNNGraphArgs graphArgs = cosModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Cos_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(CosTest, SUB_AI_NNRt_Func_North_Cos_Model_SpecifyInputsAndOutputs_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CosModel1 cosModel; + OHNNGraphArgs graphArgs = cosModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + 
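+ // Forcing size to 0 makes the output index array invalid, so OH_NNModel_SpecifyInputsAndOutputs is expected to return OH_NN_INVALID_PARAMETER.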
outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Cos_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(CosTest, SUB_AI_NNRt_Func_North_Cos_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CosModel1 cosModel; + OHNNGraphArgs graphArgs = cosModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Cos_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(CosTest, SUB_AI_NNRt_Func_North_Cos_Model_AddOperation_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CosModel1 cosModel; + OHNNGraphArgs graphArgs = cosModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Cos_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(CosTest, SUB_AI_NNRt_Func_North_Cos_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CosModel1 cosModel; + OHNNGraphArgs graphArgs = cosModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Cos_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(CosTest, SUB_AI_NNRt_Func_North_Cos_Model_AddOperation_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CosModel1 cosModel; + OHNNGraphArgs graphArgs = cosModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = 
TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Cos_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(CosTest, SUB_AI_NNRt_Func_North_Cos_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CosModel1 cosModel; + OHNNGraphArgs graphArgs = cosModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Cos_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(CosTest, SUB_AI_NNRt_Func_North_Cos_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CosModel1 cosModel; + OHNNGraphArgs graphArgs = cosModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Cos_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(CosTest, SUB_AI_NNRt_Func_North_Cos_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CosModel1 cosModel; + OHNNGraphArgs graphArgs = cosModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Cos_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(CosTest, SUB_AI_NNRt_Func_North_Cos_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CosModel1 cosModel; + OHNNGraphArgs graphArgs = 
cosModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Cos_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(CosTest, SUB_AI_NNRt_Func_North_Cos_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CosModel1 cosModel; + OHNNGraphArgs graphArgs = cosModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/crop_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/crop_test.cpp new file mode 100644 index 0000000..490fb30 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/crop_test.cpp @@ -0,0 +1,838 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class CropTest : public testing::Test {}; + +struct CropModel1 { + const std::vector input_shape = {2, 4, 4}; + const std::vector shape_shape = {3}; + const std::vector output_shape = {2, 2, 2}; + + int64_t axisValue[1] = {0}; + std::vector offsetsValue = {0, 1, 1}; + float inputValue[2][4][4] = {1}; + float shapeValue[3] = {2, 2, 2}; + float outputValue[2][2][2] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 32*sizeof(float)}; + OHNNOperandTest shape = {OH_NN_FLOAT32, OH_NN_TENSOR, shape_shape, shapeValue, 3*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 8*sizeof(float)}; + OHNNOperandTest axis = {OH_NN_INT64, OH_NN_CROP_AXIS, {1}, axisValue, sizeof(int64_t)}; + OHNNOperandTest offsets = {OH_NN_INT64, OH_NN_CROP_OFFSET, {3}, &offsetsValue, 3*sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_CROP, + .operands = {input, shape, output, axis, offsets}, + .paramIndices = {3, 4}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct CropModel2 { + const std::vector input_shape = {3, 2, 2}; + const std::vector shape_shape = {3}; + const std::vector output_shape = {3, 2, 2}; + + int64_t axisValue[1] = {1}; + std::vector offsetsValue = {0, 2, 2}; + float inputValue[3][2][2] = {1}; + float shapeValue[3] = {3, 2, 2}; + float outputValue[3][2][2] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 12*sizeof(float)}; + OHNNOperandTest shape = {OH_NN_FLOAT32, OH_NN_TENSOR, shape_shape, shapeValue, 3*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 12*sizeof(float)}; + OHNNOperandTest axis = {OH_NN_INT64, OH_NN_CROP_AXIS, {1}, axisValue, sizeof(int64_t)}; + OHNNOperandTest offsets = {OH_NN_INT64, OH_NN_CROP_OFFSET, {3}, &offsetsValue, 3*sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_CROP, + .operands = {input, shape, output, axis, offsets}, + .paramIndices = {3, 4}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct CropModel3 { + const std::vector input_shape = {3, 2, 2}; + const std::vector shape_shape = {3}; + const std::vector output_shape = {4, 2, 2}; + + int64_t axisValue[1] = {1}; + std::vector offsetsValue = {0, 1, 1}; + float inputValue[3][2][2] = {1}; + float shapeValue[3] = {4, 2, 2}; + float outputValue[4][2][2] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 12*sizeof(float)}; + OHNNOperandTest shape = {OH_NN_FLOAT32, OH_NN_TENSOR, shape_shape, shapeValue, 3*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 16*sizeof(float)}; + OHNNOperandTest axis = {OH_NN_INT64, OH_NN_CROP_AXIS, {1}, axisValue, sizeof(int64_t)}; + OHNNOperandTest offsets = {OH_NN_INT64, OH_NN_CROP_OFFSET, {3}, &offsetsValue, 3*sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_CROP, + .operands = {input, shape, output, axis, offsets}, + .paramIndices = {3, 4}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Crop_Build_01 + * @tc.desc: CropModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(CropTest, SUB_AI_NNRt_Func_North_Crop_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + 
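+ // Positive path: build the single-op CROP graph, compile it on the mock device via CompileGraphMock, and construct an executor.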
CropModel1 cropModel; + OHNNGraphArgs graphArgs = cropModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Crop_Build_02 + * @tc.desc: CropModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(CropTest, SUB_AI_NNRt_Func_North_Crop_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CropModel2 cropModel; + OHNNGraphArgs graphArgs = cropModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Crop_Build_03 + * @tc.desc: CropModel3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(CropTest, SUB_AI_NNRt_Func_North_Crop_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CropModel3 cropModel; + OHNNGraphArgs graphArgs = cropModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Crop_Build_04 + * @tc.desc: CropModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(CropTest, SUB_AI_NNRt_Func_North_Crop_Build_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CropModel1 cropModel; + OHNNGraphArgs graphArgs = cropModel.graphArgs; + graphArgs.operands = {cropModel.input, cropModel.input, cropModel.shape, cropModel.output, + cropModel.axis, cropModel.offsets}; + graphArgs.inputIndices = {0, 1, 2}; + graphArgs.outputIndices = {3}; + graphArgs.paramIndices = {4, 5}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Crop_Build_05 + * @tc.desc: CropModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(CropTest, SUB_AI_NNRt_Func_North_Crop_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CropModel1 cropModel; + OHNNGraphArgs graphArgs = cropModel.graphArgs; + graphArgs.operands = {cropModel.input, cropModel.shape, cropModel.output, cropModel.output, + cropModel.axis, cropModel.offsets}; + 
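+ // An extra copy of the output operand is registered and exposed as a second output index, so graph building is expected to fail with OH_NN_INVALID_PARAMETER.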
graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2, 3}; + graphArgs.paramIndices = {4, 5}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Crop_Build_06 + * @tc.desc: CropModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(CropTest, SUB_AI_NNRt_Func_North_Crop_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CropModel1 cropModel; + OHNNGraphArgs graphArgs = cropModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {cropModel.input, cropModel.shape, cropModel.output, cropModel.axis, + cropModel.offsets, activation}; + graphArgs.paramIndices = {3, 4, 5}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Crop_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(CropTest, SUB_AI_NNRt_Func_North_Crop_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Crop_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(CropTest, SUB_AI_NNRt_Func_North_Crop_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CropModel1 cropModel; + OHNNGraphArgs graphArgs = cropModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Crop_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(CropTest, SUB_AI_NNRt_Func_North_Crop_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CropModel1 cropModel; + OHNNGraphArgs graphArgs = cropModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Crop_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(CropTest, SUB_AI_NNRt_Func_North_Crop_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CropModel1 cropModel; + OHNNGraphArgs graphArgs = cropModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, 
OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Crop_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buffer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(CropTest, SUB_AI_NNRt_Func_North_Crop_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CropModel1 cropModel; + OHNNGraphArgs graphArgs = cropModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Crop_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(CropTest, SUB_AI_NNRt_Func_North_Crop_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CropModel1 cropModel; + OHNNGraphArgs graphArgs = cropModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Crop_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(CropTest, SUB_AI_NNRt_Func_North_Crop_Model_SpecifyInputsAndOutputs_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CropModel1 cropModel; + OHNNGraphArgs graphArgs = cropModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Crop_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 
设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(CropTest, SUB_AI_NNRt_Func_North_Crop_Model_SpecifyInputsAndOutputs_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CropModel1 cropModel; + OHNNGraphArgs graphArgs = cropModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Crop_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(CropTest, SUB_AI_NNRt_Func_North_Crop_Model_SpecifyInputsAndOutputs_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CropModel1 cropModel; + OHNNGraphArgs graphArgs = cropModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Crop_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(CropTest, SUB_AI_NNRt_Func_North_Crop_Model_SpecifyInputsAndOutputs_04, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CropModel1 cropModel; + OHNNGraphArgs graphArgs = cropModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Crop_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(CropTest, SUB_AI_NNRt_Func_North_Crop_Model_SpecifyInputsAndOutputs_05, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CropModel1 cropModel; + OHNNGraphArgs graphArgs = cropModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Crop_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(CropTest, 
SUB_AI_NNRt_Func_North_Crop_Model_SpecifyInputsAndOutputs_06, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CropModel1 cropModel; + OHNNGraphArgs graphArgs = cropModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Crop_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(CropTest, SUB_AI_NNRt_Func_North_Crop_Model_SpecifyInputsAndOutputs_07, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CropModel1 cropModel; + OHNNGraphArgs graphArgs = cropModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Crop_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(CropTest, SUB_AI_NNRt_Func_North_Crop_Model_SpecifyInputsAndOutputs_08, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CropModel1 cropModel; + OHNNGraphArgs graphArgs = cropModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Crop_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(CropTest, SUB_AI_NNRt_Func_North_Crop_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CropModel1 cropModel; + OHNNGraphArgs graphArgs = cropModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Crop_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(CropTest, SUB_AI_NNRt_Func_North_Crop_Model_AddOperation_02, Function | MediumTest | Level2) +{ + 
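+ // paramIndices.data is nulled below while its size stays non-zero, so OH_NNModel_AddOperation is expected to reject it.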
OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CropModel1 cropModel; + OHNNGraphArgs graphArgs = cropModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Crop_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(CropTest, SUB_AI_NNRt_Func_North_Crop_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CropModel1 cropModel; + OHNNGraphArgs graphArgs = cropModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Crop_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(CropTest, SUB_AI_NNRt_Func_North_Crop_Model_AddOperation_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CropModel1 cropModel; + OHNNGraphArgs graphArgs = cropModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Crop_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(CropTest, SUB_AI_NNRt_Func_North_Crop_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CropModel1 cropModel; + OHNNGraphArgs graphArgs = cropModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : 
SUB_AI_NNRt_Func_North_Crop_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(CropTest, SUB_AI_NNRt_Func_North_Crop_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CropModel1 cropModel; + OHNNGraphArgs graphArgs = cropModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Crop_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(CropTest, SUB_AI_NNRt_Func_North_Crop_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CropModel1 cropModel; + OHNNGraphArgs graphArgs = cropModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Crop_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(CropTest, SUB_AI_NNRt_Func_North_Crop_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CropModel1 cropModel; + OHNNGraphArgs graphArgs = cropModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Crop_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(CropTest, SUB_AI_NNRt_Func_North_Crop_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + CropModel1 cropModel; + OHNNGraphArgs graphArgs = cropModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = 
TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/depth_to_space_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/depth_to_space_test.cpp new file mode 100644 index 0000000..4f843b4 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/depth_to_space_test.cpp @@ -0,0 +1,780 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class DepthToSpaceTest : public testing::Test {}; + +struct DepthToSpaceModel1 { + const std::vector input_shape = {1, 4, 4, 4}; + const std::vector output_shape = {1, 1, 8, 8}; + std::vector blockSizeValue = {2}; + float inputValue[1][4][4][4] = {{{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}, {13, 14, 15, 16}}, + {{17, 18, 19, 20}, {21, 22, 23, 24}, {25, 26, 27, 28}, {29, 30, 31, 32}}, + {{33, 34, 35, 36}, {37, 38, 39, 40}, {41, 42, 43, 44}, {45, 46, 47, 48}}, + {{49, 50, 51, 52}, {53, 54, 55, 56}, {57, 58, 59, 60}, {61, 62, 63, 64}}}}; + float outputValue[1][1][8][8] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 64*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 64*sizeof(float)}; + OHNNOperandTest blockSize = {OH_NN_INT64, OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE, {1}, &blockSizeValue, sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_DEPTH_TO_SPACE, + .operands = {input, output, blockSize}, + .paramIndices = {2}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct DepthToSpaceModel2 { + const std::vector tensor_shape = {1, 12, 1, 1}; + std::vector blockSizeValue = {0}; + float inputValue[1][12][1][1] = {{{{1}}, {{2}}, {{3}}, {{4}}, {{5}}, {{6}}, + {{7}}, {{8}}, {{9}}, {{10}}, {{11}}, {{12}}}}; + float outputValue[1][12][1][1] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 12*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 12*sizeof(float)}; + OHNNOperandTest blockSize = {OH_NN_INT64, OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE, {1}, &blockSizeValue, sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_DEPTH_TO_SPACE, + .operands = {input, output, blockSize}, + .paramIndices = {2}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DepthToSpace_Build_01 + * @tc.desc: DepthToSpaceModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceTest, SUB_AI_NNRt_Func_North_DepthToSpace_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DepthToSpaceModel1 depthToSpaceModel; + 
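+ // DepthToSpaceModel1 uses block_size 2, which folds the 4 input channels into 2x2 spatial blocks: [1, 4, 4, 4] -> [1, 1, 8, 8].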
OHNNGraphArgs graphArgs = depthToSpaceModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DepthToSpace_Build_02 + * @tc.desc: DepthToSpaceModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceTest, SUB_AI_NNRt_Func_North_DepthToSpace_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DepthToSpaceModel2 depthToSpaceModel; + OHNNGraphArgs graphArgs = depthToSpaceModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DepthToSpace_Build_03 + * @tc.desc: DepthToSpaceModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceTest, SUB_AI_NNRt_Func_North_DepthToSpace_Build_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DepthToSpaceModel1 depthToSpaceModel; + OHNNGraphArgs graphArgs = depthToSpaceModel.graphArgs; + graphArgs.operands = {depthToSpaceModel.input, depthToSpaceModel.input, + depthToSpaceModel.output, depthToSpaceModel.blockSize}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2}; + graphArgs.paramIndices = {3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DepthToSpace_Build_04 + * @tc.desc: DepthToSpaceModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceTest, SUB_AI_NNRt_Func_North_DepthToSpace_Build_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DepthToSpaceModel1 depthToSpaceModel; + OHNNGraphArgs graphArgs = depthToSpaceModel.graphArgs; + graphArgs.operands = {depthToSpaceModel.input, depthToSpaceModel.output, + depthToSpaceModel.output, depthToSpaceModel.blockSize}; + graphArgs.inputIndices = {0}; + graphArgs.outputIndices = {1, 2}; + graphArgs.paramIndices = {3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DepthToSpace_Build_05 + * @tc.desc: DepthToSpaceModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceTest, SUB_AI_NNRt_Func_North_DepthToSpace_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DepthToSpaceModel1 depthToSpaceModel; + OHNNGraphArgs graphArgs = depthToSpaceModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = 
{OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {depthToSpaceModel.input, depthToSpaceModel.output, + depthToSpaceModel.blockSize, activation}; + graphArgs.paramIndices = {2, 3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DepthToSpace_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceTest, SUB_AI_NNRt_Func_North_DepthToSpace_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DepthToSpace_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceTest, SUB_AI_NNRt_Func_North_DepthToSpace_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DepthToSpaceModel1 depthToSpaceModel; + OHNNGraphArgs graphArgs = depthToSpaceModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DepthToSpace_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceTest, SUB_AI_NNRt_Func_North_DepthToSpace_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DepthToSpaceModel1 depthToSpaceModel; + OHNNGraphArgs graphArgs = depthToSpaceModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DepthToSpace_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceTest, SUB_AI_NNRt_Func_North_DepthToSpace_Model_SetOperandValue_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DepthToSpaceModel1 depthToSpaceModel; + OHNNGraphArgs graphArgs = depthToSpaceModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DepthToSpace_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceTest, SUB_AI_NNRt_Func_North_DepthToSpace_Model_SetOperandValue_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + 
DepthToSpaceModel1 depthToSpaceModel; + OHNNGraphArgs graphArgs = depthToSpaceModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DepthToSpace_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceTest, SUB_AI_NNRt_Func_North_DepthToSpace_Model_SetOperandValue_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DepthToSpaceModel1 depthToSpaceModel; + OHNNGraphArgs graphArgs = depthToSpaceModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DepthToSpace_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceTest, SUB_AI_NNRt_Func_North_DepthToSpace_Model_SpecifyInputsAndOutputs_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DepthToSpaceModel1 depthToSpaceModel; + OHNNGraphArgs graphArgs = depthToSpaceModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DepthToSpace_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceTest, SUB_AI_NNRt_Func_North_DepthToSpace_Model_SpecifyInputsAndOutputs_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DepthToSpaceModel1 depthToSpaceModel; + OHNNGraphArgs graphArgs = depthToSpaceModel.graphArgs; + 
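+    // The specifyIO / build / addOperation switches on OHNNGraphArgs appear to control how far
+    // BuildSingleOpGraph goes: with specifyIO and build cleared it only registers the tensors,
+    // their data and the operation, leaving OH_NNModel_SpecifyInputsAndOutputs (and the final
+    // OH_NNModel_Finish) to be driven directly with the malformed index arrays below.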
graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DepthToSpace_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceTest, SUB_AI_NNRt_Func_North_DepthToSpace_Model_SpecifyInputsAndOutputs_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DepthToSpaceModel1 depthToSpaceModel; + OHNNGraphArgs graphArgs = depthToSpaceModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DepthToSpace_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceTest, SUB_AI_NNRt_Func_North_DepthToSpace_Model_SpecifyInputsAndOutputs_04, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DepthToSpaceModel1 depthToSpaceModel; + OHNNGraphArgs graphArgs = depthToSpaceModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DepthToSpace_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceTest, SUB_AI_NNRt_Func_North_DepthToSpace_Model_SpecifyInputsAndOutputs_05, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DepthToSpaceModel1 depthToSpaceModel; + OHNNGraphArgs graphArgs = depthToSpaceModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DepthToSpace_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceTest, SUB_AI_NNRt_Func_North_DepthToSpace_Model_SpecifyInputsAndOutputs_06, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DepthToSpaceModel1 
depthToSpaceModel; + OHNNGraphArgs graphArgs = depthToSpaceModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DepthToSpace_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceTest, SUB_AI_NNRt_Func_North_DepthToSpace_Model_SpecifyInputsAndOutputs_07, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DepthToSpaceModel1 depthToSpaceModel; + OHNNGraphArgs graphArgs = depthToSpaceModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DepthToSpace_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceTest, SUB_AI_NNRt_Func_North_DepthToSpace_Model_SpecifyInputsAndOutputs_08, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DepthToSpaceModel1 depthToSpaceModel; + OHNNGraphArgs graphArgs = depthToSpaceModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DepthToSpace_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceTest, SUB_AI_NNRt_Func_North_DepthToSpace_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DepthToSpaceModel1 depthToSpaceModel; + OHNNGraphArgs graphArgs = depthToSpaceModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DepthToSpace_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceTest, SUB_AI_NNRt_Func_North_DepthToSpace_Model_AddOperation_02, Function | MediumTest | Level2) +{ + 
OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    DepthToSpaceModel1 depthToSpaceModel;
+    OHNNGraphArgs graphArgs = depthToSpaceModel.graphArgs;
+    graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto inputIndices = TransformUInt32Array(graphArgs.inputIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    paramIndices.data = nullptr;
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        &paramIndices, &inputIndices, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_DepthToSpace_Model_AddOperation_03
+ * @tc.desc: Add an operation where an index in paramIndices does not correspond to any tensor
+ * @tc.type: FUNC
+ */
+HWTEST_F(DepthToSpaceTest, SUB_AI_NNRt_Func_North_DepthToSpace_Model_AddOperation_03, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    DepthToSpaceModel1 depthToSpaceModel;
+    OHNNGraphArgs graphArgs = depthToSpaceModel.graphArgs;
+    graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    graphArgs.paramIndices = {100000};
+    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto inputIndices = TransformUInt32Array(graphArgs.inputIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        &paramIndices, &inputIndices, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_DepthToSpace_Model_AddOperation_04
+ * @tc.desc: Add an operation where the size of paramIndices is 0
+ * @tc.type: FUNC
+ */
+HWTEST_F(DepthToSpaceTest, SUB_AI_NNRt_Func_North_DepthToSpace_Model_AddOperation_04, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    DepthToSpaceModel1 depthToSpaceModel;
+    OHNNGraphArgs graphArgs = depthToSpaceModel.graphArgs;
+    graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto inputIndices = TransformUInt32Array(graphArgs.inputIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    paramIndices.size = 0;
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        &paramIndices, &inputIndices, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_DepthToSpace_Model_AddOperation_05
+ * @tc.desc: Add an operation where inputIndices is nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(DepthToSpaceTest, SUB_AI_NNRt_Func_North_DepthToSpace_Model_AddOperation_05, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    DepthToSpaceModel1 depthToSpaceModel;
+    OHNNGraphArgs graphArgs = depthToSpaceModel.graphArgs;
+    graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+
EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DepthToSpace_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceTest, SUB_AI_NNRt_Func_North_DepthToSpace_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DepthToSpaceModel1 depthToSpaceModel; + OHNNGraphArgs graphArgs = depthToSpaceModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DepthToSpace_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceTest, SUB_AI_NNRt_Func_North_DepthToSpace_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DepthToSpaceModel1 depthToSpaceModel; + OHNNGraphArgs graphArgs = depthToSpaceModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DepthToSpace_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceTest, SUB_AI_NNRt_Func_North_DepthToSpace_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DepthToSpaceModel1 depthToSpaceModel; + OHNNGraphArgs graphArgs = depthToSpaceModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DepthToSpace_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceTest, SUB_AI_NNRt_Func_North_DepthToSpace_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + 
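+    // The corrupted-array cases above rely on TransformUInt32Array (see nncore_utils) handing
+    // back a view of the vector rather than a copy; a plausible implementation is roughly:
+    //
+    //     OH_NN_UInt32Array TransformUInt32Array(const std::vector<uint32_t> &vec)
+    //     {
+    //         uint32_t *data = vec.empty() ? nullptr : const_cast<uint32_t *>(vec.data());
+    //         return {data, static_cast<uint32_t>(vec.size())};  // size = element count
+    //     }
+    //
+    // so nulling .data or zeroing .size on the returned struct models a malformed argument
+    // without touching the vector itself.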
EXPECT_NE(nullptr, model); + + DepthToSpaceModel1 depthToSpaceModel; + OHNNGraphArgs graphArgs = depthToSpaceModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/detection_post_process_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/detection_post_process_test.cpp new file mode 100644 index 0000000..9a26aea --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/detection_post_process_test.cpp @@ -0,0 +1,803 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class DetectionPostProcessTest : public testing::Test {}; + +struct DetectionPostProcessModel1 { + const std::vector bbox_shape = {1, 16}; + const std::vector scores_shape = {1, 4}; + const std::vector anchors_shape = {1, 2, 8}; + const std::vector output_shape = {2, 8}; + + int64_t inputSizeValue[1] = {45}; + std::vector scaleValue = {10, 10, 5, 5}; + float nmsIoUThresholdValue[1] = {0.5}; + float nmsScoreThresholdValue[1] = {0.4}; + int64_t maxDetectionsValue[1] = {5}; + int64_t detectionsPerClassValue[1] = {2}; + int64_t maxClassesPerDetectionValue[1] = {1}; + int64_t numClassesValue[1] = {5}; + bool useRegularNmsValue[1] = {false}; + bool outQuantizedValue[1] = {false}; + + float bboxValue[1][16] = {1}; + float scoresValue[1][4] = {1}; + float anchorsValue[1][2][8] = {1}; + + float bboxesValue[1][16] = {0}; + float classesValue[1][4] = {0}; + float confidencesValue[1][16] = {0}; + float numDetectionsValue[1][16] = {0}; + + OHNNOperandTest bbox = {OH_NN_FLOAT32, OH_NN_TENSOR, bbox_shape, bboxValue, 16*sizeof(float)}; + OHNNOperandTest scores = {OH_NN_FLOAT32, OH_NN_TENSOR, scores_shape, scoresValue, 4*sizeof(float)}; + OHNNOperandTest anchors = {OH_NN_FLOAT32, OH_NN_TENSOR, anchors_shape, anchorsValue, 16*sizeof(float)}; + + OHNNOperandTest bboxes = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, bboxesValue, 16*sizeof(float)}; + OHNNOperandTest classes = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, classesValue, 16*sizeof(float)}; + OHNNOperandTest confidences = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, confidencesValue, 16*sizeof(float)}; + OHNNOperandTest numDetections = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, numDetectionsValue, 16*sizeof(float)}; + + OHNNOperandTest inputSize = {OH_NN_INT64, OH_NN_DETECTION_POST_PROCESS_INPUT_SIZE, {1}, + inputSizeValue, sizeof(int64_t)}; + OHNNOperandTest scale = {OH_NN_FLOAT32, 
OH_NN_DETECTION_POST_PROCESS_SCALE, {4}, &scaleValue, 4*sizeof(float)}; + OHNNOperandTest nmsIoUThreshold = {OH_NN_FLOAT32, OH_NN_DETECTION_POST_PROCESS_NMS_IOU_THRESHOLD, {1}, + nmsIoUThresholdValue, sizeof(float)}; + OHNNOperandTest nmsScoreThreshold = {OH_NN_FLOAT32, OH_NN_DETECTION_POST_PROCESS_NMS_SCORE_THRESHOLD, {1}, + nmsScoreThresholdValue, sizeof(float)}; + OHNNOperandTest maxDetections = {OH_NN_INT64, OH_NN_DETECTION_POST_PROCESS_MAX_DETECTIONS, {1}, + maxDetectionsValue, sizeof(int64_t)}; + OHNNOperandTest detectionsPerClass = {OH_NN_INT64, OH_NN_DETECTION_POST_PROCESS_DETECTIONS_PER_CLASS, {1}, + detectionsPerClassValue, sizeof(int64_t)}; + OHNNOperandTest maxClassesPerDetection = {OH_NN_INT64, OH_NN_DETECTION_POST_PROCESS_MAX_CLASSES_PER_DETECTION, {1}, + maxClassesPerDetectionValue, sizeof(int64_t)}; + OHNNOperandTest numClasses = {OH_NN_INT64, OH_NN_DETECTION_POST_PROCESS_NUM_CLASSES, {1}, + numClassesValue, sizeof(int64_t)}; + OHNNOperandTest useRegularNms = {OH_NN_BOOL, OH_NN_DETECTION_POST_PROCESS_USE_REGULAR_NMS, {1}, + useRegularNmsValue, sizeof(bool)}; + OHNNOperandTest outQuantized = {OH_NN_BOOL, OH_NN_DETECTION_POST_PROCESS_OUT_QUANTIZED, {1}, + outQuantizedValue, sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_DETECTION_POST_PROCESS, + .operands = {bbox, scores, anchors, bboxes, classes, confidences, numDetections, + inputSize, scale, nmsIoUThreshold, nmsScoreThreshold, maxDetections, + detectionsPerClass, maxClassesPerDetection, numClasses, useRegularNms, + outQuantized}, + .paramIndices = {7, 8, 9, 10, 11, 12, 13, 14, 15, 16}, + .inputIndices = {0, 1, 2}, + .outputIndices = {3, 4, 5, 6}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DetectionPostProcess_Build_01 + * @tc.desc: DetectionPostProcessModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessTest, SUB_AI_NNRt_Func_North_DetectionPostProcess_Build_01, + Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DetectionPostProcessModel1 dppModel; + OHNNGraphArgs graphArgs = dppModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DetectionPostProcess_Build_02 + * @tc.desc: DetectionPostProcessModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessTest, SUB_AI_NNRt_Func_North_DetectionPostProcess_Build_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DetectionPostProcessModel1 dppModel; + OHNNGraphArgs graphArgs = dppModel.graphArgs; + graphArgs.operands = {dppModel.bbox, dppModel.bbox, dppModel.scores, dppModel.anchors, dppModel.bboxes, + dppModel.classes, dppModel.confidences, dppModel.numDetections, dppModel.inputSize, + dppModel.scale, dppModel.nmsIoUThreshold, dppModel.nmsScoreThreshold, dppModel.maxDetections, + dppModel.detectionsPerClass, dppModel.maxClassesPerDetection, dppModel.numClasses, + dppModel.useRegularNms, dppModel.outQuantized}; + graphArgs.inputIndices = {0, 1, 2, 3}; + 
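+    // Duplicating the bbox operand shifts every index up by one, so the graph now declares four
+    // inputs even though DETECTION_POST_PROCESS takes exactly three (bbox, scores, anchors);
+    // the build below is therefore expected to fail with OH_NN_INVALID_PARAMETER.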
graphArgs.outputIndices = {4, 5, 6, 7}; + graphArgs.paramIndices = {8, 9, 10, 11, 12, 13, 14, 15, 16, 17}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DetectionPostProcess_Build_03 + * @tc.desc: DetectionPostProcessModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessTest, SUB_AI_NNRt_Func_North_DetectionPostProcess_Build_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DetectionPostProcessModel1 dppModel; + OHNNGraphArgs graphArgs = dppModel.graphArgs; + graphArgs.operands = {dppModel.bbox, dppModel.scores, dppModel.anchors, dppModel.bboxes, dppModel.bboxes, + dppModel.classes, dppModel.confidences, dppModel.numDetections, dppModel.inputSize, + dppModel.scale, dppModel.nmsIoUThreshold, dppModel.nmsScoreThreshold, dppModel.maxDetections, + dppModel.detectionsPerClass, dppModel.maxClassesPerDetection, dppModel.numClasses, + dppModel.useRegularNms, dppModel.outQuantized}; + graphArgs.inputIndices = {0, 1, 2}; + graphArgs.outputIndices = {3, 4, 5, 6, 7}; + graphArgs.paramIndices = {8, 9, 10, 11, 12, 13, 14, 15, 16, 17}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DetectionPostProcess_Build_04 + * @tc.desc: DetectionPostProcessModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessTest, SUB_AI_NNRt_Func_North_DetectionPostProcess_Build_04, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DetectionPostProcessModel1 dppModel; + OHNNGraphArgs graphArgs = dppModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {dppModel.bbox, dppModel.scores, dppModel.anchors, dppModel.bboxes, dppModel.classes, + dppModel.confidences, dppModel.numDetections, dppModel.inputSize, dppModel.scale, + dppModel.nmsIoUThreshold, dppModel.nmsScoreThreshold, dppModel.maxDetections, + dppModel.detectionsPerClass, dppModel.maxClassesPerDetection, dppModel.numClasses, + dppModel.useRegularNms, dppModel.outQuantized, activation}; + graphArgs.paramIndices = {7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessTest, SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_Finish_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessTest, SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_Finish_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DetectionPostProcessModel1 dppModel; + OHNNGraphArgs graphArgs = dppModel.graphArgs; + graphArgs.specifyIO = false; + 
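+    // Skipping OH_NNModel_SpecifyInputsAndOutputs is an ordering violation rather than a bad
+    // argument, so finishing the model is expected to report OH_NN_OPERATION_FORBIDDEN instead
+    // of OH_NN_INVALID_PARAMETER.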
EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessTest, SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_Finish_03, + Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DetectionPostProcessModel1 dppModel; + OHNNGraphArgs graphArgs = dppModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessTest, SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_SetOperandValue_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DetectionPostProcessModel1 dppModel; + OHNNGraphArgs graphArgs = dppModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessTest, SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_SetOperandValue_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DetectionPostProcessModel1 dppModel; + OHNNGraphArgs graphArgs = dppModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessTest, SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_SetOperandValue_03, + Function | MediumTest | Level2) +{ + OH_NNModel 
*model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DetectionPostProcessModel1 dppModel; + OHNNGraphArgs graphArgs = dppModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessTest, SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_SpecifyInputsAndOutputs_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DetectionPostProcessModel1 dppModel; + OHNNGraphArgs graphArgs = dppModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessTest, SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_SpecifyInputsAndOutputs_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DetectionPostProcessModel1 dppModel; + OHNNGraphArgs graphArgs = dppModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessTest, SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_SpecifyInputsAndOutputs_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DetectionPostProcessModel1 dppModel; + OHNNGraphArgs graphArgs = dppModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = 
TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessTest, SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_SpecifyInputsAndOutputs_04, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DetectionPostProcessModel1 dppModel; + OHNNGraphArgs graphArgs = dppModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessTest, SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_SpecifyInputsAndOutputs_05, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DetectionPostProcessModel1 dppModel; + OHNNGraphArgs graphArgs = dppModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessTest, SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_SpecifyInputsAndOutputs_06, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DetectionPostProcessModel1 dppModel; + OHNNGraphArgs graphArgs = dppModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessTest, SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_SpecifyInputsAndOutputs_07, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DetectionPostProcessModel1 dppModel; + OHNNGraphArgs graphArgs = dppModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + 
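+    // DetectionPostProcessModel1 registers 17 tensors (indices 0-16), so the out-of-range index
+    // set below cannot refer to any of them and OH_NNModel_SpecifyInputsAndOutputs is expected
+    // to reject it.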
+ graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessTest, SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_SpecifyInputsAndOutputs_08, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DetectionPostProcessModel1 dppModel; + OHNNGraphArgs graphArgs = dppModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessTest, SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_AddOperation_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DetectionPostProcessModel1 dppModel; + OHNNGraphArgs graphArgs = dppModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessTest, SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_AddOperation_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DetectionPostProcessModel1 dppModel; + OHNNGraphArgs graphArgs = dppModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessTest, SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_AddOperation_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = 
OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DetectionPostProcessModel1 dppModel; + OHNNGraphArgs graphArgs = dppModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessTest, SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_AddOperation_04, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DetectionPostProcessModel1 dppModel; + OHNNGraphArgs graphArgs = dppModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessTest, SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_AddOperation_05, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DetectionPostProcessModel1 dppModel; + OHNNGraphArgs graphArgs = dppModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessTest, SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_AddOperation_06, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DetectionPostProcessModel1 dppModel; + OHNNGraphArgs graphArgs = dppModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + 
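+    // Unlike AddOperation_05, which passes a null OH_NN_UInt32Array pointer, this case hands in
+    // a valid struct whose data member is null while its size is still non-zero; both malformed
+    // forms are expected to be rejected with OH_NN_INVALID_PARAMETER.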
EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessTest, SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_AddOperation_07, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DetectionPostProcessModel1 dppModel; + OHNNGraphArgs graphArgs = dppModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessTest, SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_AddOperation_08, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DetectionPostProcessModel1 dppModel; + OHNNGraphArgs graphArgs = dppModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(DetectionPostProcessTest, SUB_AI_NNRt_Func_North_DetectionPostProcess_Model_AddOperation_09, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + DetectionPostProcessModel1 dppModel; + OHNNGraphArgs graphArgs = dppModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/equal_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/equal_test.cpp new file mode 100644 index 0000000..d1cd43c --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/equal_test.cpp @@ -0,0 +1,850 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class EqualTest : public testing::Test {}; + +struct EqualModel1 { + const std::vector tensor_shape = {3}; + float input0Value[3] = {1, 2, 3}; + float input1Value[3] = {4, 5, 6}; + bool outputValue[3] = {0}; + + OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input0Value, 3*sizeof(float)}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input1Value, 3*sizeof(float)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_EQUAL, + .operands = {input0, input1, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct EqualModel2 { + const std::vector tensor_shape = {}; + float input0Value[1] = {1}; + float input1Value[1] = {1}; + bool outputValue[1] = {0}; + + OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input0Value, sizeof(float)}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input1Value, sizeof(float)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_EQUAL, + .operands = {input0, input1, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct EqualModel3 { + const std::vector tensor_shape = {3}; + const std::vector input1_shape = {4}; + float input0Value[3] = {1, 2, 3}; + float input1Value[4] = {4, 5, 6, 7}; + bool outputValue[3] = {0}; + + OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input0Value, 3*sizeof(float)}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, input1_shape, input1Value, 4*sizeof(float)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_EQUAL, + .operands = {input0, input1, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct EqualModel4 { + const std::vector tensor_shape = {}; + float* input0Value = {}; + float* input1Value = {}; + bool* outputValue = {}; + + OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input0Value, 0*sizeof(float)}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input1Value, 0*sizeof(float)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, 0*sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_EQUAL, + .operands = {input0, input1, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Equal_Build_01 + * @tc.desc: EqualModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(EqualTest, SUB_AI_NNRt_Func_North_Equal_Build_01, Function | MediumTest 
| Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + EqualModel1 equalModel; + OHNNGraphArgs graphArgs = equalModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Equal_Build_02 + * @tc.desc: EqualModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(EqualTest, SUB_AI_NNRt_Func_North_Equal_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + EqualModel2 equalModel; + OHNNGraphArgs graphArgs = equalModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Equal_Build_03 + * @tc.desc: EqualModel3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(EqualTest, SUB_AI_NNRt_Func_North_Equal_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + EqualModel3 equalModel; + OHNNGraphArgs graphArgs = equalModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Equal_Build_04 + * @tc.desc: EqualModel4模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(EqualTest, SUB_AI_NNRt_Func_North_Equal_Build_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + EqualModel4 equalModel; + OHNNGraphArgs graphArgs = equalModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Equal_Build_05 + * @tc.desc: EqualModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(EqualTest, SUB_AI_NNRt_Func_North_Equal_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = 
OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + EqualModel2 equalModel; + OHNNGraphArgs graphArgs = equalModel.graphArgs; + graphArgs.operands = {equalModel.input0, equalModel.input1, equalModel.input1, equalModel.output}; + graphArgs.inputIndices = {0, 1, 2}; + graphArgs.outputIndices = {3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Equal_Build_06 + * @tc.desc: EqualModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(EqualTest, SUB_AI_NNRt_Func_North_Equal_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + EqualModel2 equalModel; + OHNNGraphArgs graphArgs = equalModel.graphArgs; + graphArgs.operands = {equalModel.input0, equalModel.input1, equalModel.output, equalModel.output}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2, 3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Equal_Build_07 + * @tc.desc: EqualModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(EqualTest, SUB_AI_NNRt_Func_North_Equal_Build_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + EqualModel2 equalModel; + OHNNGraphArgs graphArgs = equalModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {equalModel.input0, equalModel.input1, equalModel.output, activation}; + graphArgs.paramIndices = {3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Equal_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(EqualTest, SUB_AI_NNRt_Func_North_Equal_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Equal_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(EqualTest, SUB_AI_NNRt_Func_North_Equal_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + EqualModel1 equalModel; + OHNNGraphArgs graphArgs = equalModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Equal_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(EqualTest, SUB_AI_NNRt_Func_North_Equal_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + EqualModel1 equalModel; + OHNNGraphArgs graphArgs = equalModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Equal_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(EqualTest, SUB_AI_NNRt_Func_North_Equal_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + 
OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + EqualModel1 equalModel; + OHNNGraphArgs graphArgs = equalModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector<NN_TensorDesc*> tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Equal_Model_SetOperandValue_02 + * @tc.desc: Set operand value with a nullptr buffer + * @tc.type: FUNC + */ +HWTEST_F(EqualTest, SUB_AI_NNRt_Func_North_Equal_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + EqualModel1 equalModel; + OHNNGraphArgs graphArgs = equalModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector<NN_TensorDesc*> tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Equal_Model_SetOperandValue_03 + * @tc.desc: Set operand value with length 0 + * @tc.type: FUNC + */ +HWTEST_F(EqualTest, SUB_AI_NNRt_Func_North_Equal_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + EqualModel1 equalModel; + OHNNGraphArgs graphArgs = equalModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector<NN_TensorDesc*> tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Equal_Model_SpecifyInputsAndOutputs_01 + *
@tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(EqualTest, SUB_AI_NNRt_Func_North_Equal_Model_SpecifyInputsAndOutputs_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + EqualModel1 equalModel; + OHNNGraphArgs graphArgs = equalModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Equal_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(EqualTest, SUB_AI_NNRt_Func_North_Equal_Model_SpecifyInputsAndOutputs_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + EqualModel1 equalModel; + OHNNGraphArgs graphArgs = equalModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Equal_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(EqualTest, SUB_AI_NNRt_Func_North_Equal_Model_SpecifyInputsAndOutputs_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + EqualModel1 equalModel; + OHNNGraphArgs graphArgs = equalModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Equal_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(EqualTest, SUB_AI_NNRt_Func_North_Equal_Model_SpecifyInputsAndOutputs_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + EqualModel1 equalModel; + OHNNGraphArgs graphArgs = equalModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Equal_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(EqualTest, 
SUB_AI_NNRt_Func_North_Equal_Model_SpecifyInputsAndOutputs_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + EqualModel1 equalModel; + OHNNGraphArgs graphArgs = equalModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Equal_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(EqualTest, SUB_AI_NNRt_Func_North_Equal_Model_SpecifyInputsAndOutputs_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + EqualModel1 equalModel; + OHNNGraphArgs graphArgs = equalModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Equal_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(EqualTest, SUB_AI_NNRt_Func_North_Equal_Model_SpecifyInputsAndOutputs_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + EqualModel1 equalModel; + OHNNGraphArgs graphArgs = equalModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Equal_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(EqualTest, SUB_AI_NNRt_Func_North_Equal_Model_SpecifyInputsAndOutputs_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + EqualModel1 equalModel; + OHNNGraphArgs graphArgs = equalModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Equal_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(EqualTest, SUB_AI_NNRt_Func_North_Equal_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = 
OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + EqualModel1 equalModel; + OHNNGraphArgs graphArgs = equalModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Equal_Model_AddOperation_02 + * @tc.desc: Add operation with paramIndices.data being nullptr + * @tc.type: FUNC + */ +HWTEST_F(EqualTest, SUB_AI_NNRt_Func_North_Equal_Model_AddOperation_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + EqualModel1 equalModel; + OHNNGraphArgs graphArgs = equalModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Equal_Model_AddOperation_03 + * @tc.desc: Add operation with paramIndices.data referring to a nonexistent index + * @tc.type: FUNC + */ +HWTEST_F(EqualTest, SUB_AI_NNRt_Func_North_Equal_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + EqualModel1 equalModel; + OHNNGraphArgs graphArgs = equalModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Equal_Model_AddOperation_04 + * @tc.desc: Add operation with paramIndices.size being 0 + * @tc.type: FUNC + */ +HWTEST_F(EqualTest, SUB_AI_NNRt_Func_North_Equal_Model_AddOperation_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + EqualModel1 equalModel; + OHNNGraphArgs graphArgs = equalModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number :
SUB_AI_NNRt_Func_North_Equal_Model_AddOperation_05 + * @tc.desc: Add operation with inputIndices being nullptr + * @tc.type: FUNC + */ +HWTEST_F(EqualTest, SUB_AI_NNRt_Func_North_Equal_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + EqualModel1 equalModel; + OHNNGraphArgs graphArgs = equalModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Equal_Model_AddOperation_06 + * @tc.desc: Add operation with inputIndices.data being nullptr + * @tc.type: FUNC + */ +HWTEST_F(EqualTest, SUB_AI_NNRt_Func_North_Equal_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + EqualModel1 equalModel; + OHNNGraphArgs graphArgs = equalModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Equal_Model_AddOperation_07 + * @tc.desc: Add operation with inputIndices.data referring to a nonexistent index + * @tc.type: FUNC + */ +HWTEST_F(EqualTest, SUB_AI_NNRt_Func_North_Equal_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + EqualModel1 equalModel; + OHNNGraphArgs graphArgs = equalModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Equal_Model_AddOperation_08 + * @tc.desc: Add operation with inputIndices.size being 0 + * @tc.type: FUNC + */ +HWTEST_F(EqualTest, SUB_AI_NNRt_Func_North_Equal_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + EqualModel1 equalModel; + OHNNGraphArgs graphArgs = equalModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices =
TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Equal_Model_AddOperation_09 + * @tc.desc: Add operation with outputIndices being nullptr + * @tc.type: FUNC + */ +HWTEST_F(EqualTest, SUB_AI_NNRt_Func_North_Equal_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + EqualModel1 equalModel; + OHNNGraphArgs graphArgs = equalModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + &paramIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/erf_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/erf_test.cpp new file mode 100644 index 0000000..91944b9 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/erf_test.cpp @@ -0,0 +1,799 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#include <gtest/gtest.h> +#include <algorithm> +#include <limits> +#include <vector> +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class ErfTest : public testing::Test {}; + +struct ErfModel1 { + const std::vector<int32_t> tensor_shape = {4}; + float inputValue[4] = {0, -1, 1, 10}; + float outputValue[4] = {0}; + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 4*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 4*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_ERF, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct ErfModel2 { + const std::vector<int32_t> tensor_shape = {}; + float* inputValue = {}; + float* outputValue = {}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 0*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 0*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_ERF, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct ErfModel3 { + const std::vector<int32_t> tensor_shape = {5}; + float inputValue[5] = {std::numeric_limits<float>::quiet_NaN(), std::numeric_limits<float>::infinity(), + -std::numeric_limits<float>::infinity(), 1.0e20f, -1.0e20f}; + float outputValue[5] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 5*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 5*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_ERF, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Erf_Build_01 + * @tc.desc: Build test with ErfModel1 + * @tc.type: FUNC + */ +HWTEST_F(ErfTest, SUB_AI_NNRt_Func_North_Erf_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ErfModel1 erfModel; + OHNNGraphArgs graphArgs = erfModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Erf_Build_02 + * @tc.desc: Build test with ErfModel2 + * @tc.type: FUNC + */ +HWTEST_F(ErfTest, SUB_AI_NNRt_Func_North_Erf_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ErfModel2 erfModel; + OHNNGraphArgs graphArgs = erfModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number :
SUB_AI_NNRt_Func_North_Erf_Build_03 + * @tc.desc: ErfModel3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ErfTest, SUB_AI_NNRt_Func_North_Erf_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ErfModel3 erfModel; + OHNNGraphArgs graphArgs = erfModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Erf_Build_04 + * @tc.desc: ErfModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ErfTest, SUB_AI_NNRt_Func_North_Erf_Build_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ErfModel1 erfModel; + OHNNGraphArgs graphArgs = erfModel.graphArgs; + graphArgs.operands = {erfModel.input, erfModel.input, erfModel.output}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Erf_Build_05 + * @tc.desc: ErfModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ErfTest, SUB_AI_NNRt_Func_North_Erf_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ErfModel1 erfModel; + OHNNGraphArgs graphArgs = erfModel.graphArgs; + graphArgs.operands = {erfModel.input, erfModel.output, erfModel.output}; + graphArgs.inputIndices = {0}; + graphArgs.outputIndices = {1, 2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Erf_Build_06 + * @tc.desc: ErfModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ErfTest, SUB_AI_NNRt_Func_North_Erf_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ErfModel1 erfModel; + OHNNGraphArgs graphArgs = erfModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {erfModel.input, erfModel.output, activation}; + graphArgs.paramIndices = {2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Erf_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(ErfTest, SUB_AI_NNRt_Func_North_Erf_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Erf_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(ErfTest, SUB_AI_NNRt_Func_North_Erf_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + 
ErfModel1 erfModel; + OHNNGraphArgs graphArgs = erfModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Erf_Model_Finish_03 + * @tc.desc: Build the model graph with inputs and outputs specified; graph is built successfully + * @tc.type: FUNC + */ +HWTEST_F(ErfTest, SUB_AI_NNRt_Func_North_Erf_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ErfModel1 erfModel; + OHNNGraphArgs graphArgs = erfModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Erf_Model_SetOperandValue_01 + * @tc.desc: Set operand value for an operand that does not exist + * @tc.type: FUNC + */ +HWTEST_F(ErfTest, SUB_AI_NNRt_Func_North_Erf_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ErfModel1 erfModel; + OHNNGraphArgs graphArgs = erfModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector<NN_TensorDesc*> tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Erf_Model_SetOperandValue_02 + * @tc.desc: Set operand value with a nullptr buffer + * @tc.type: FUNC + */ +HWTEST_F(ErfTest, SUB_AI_NNRt_Func_North_Erf_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ErfModel1 erfModel; + OHNNGraphArgs graphArgs = erfModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector<NN_TensorDesc*> tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Erf_Model_SetOperandValue_03 + * @tc.desc: Set operand value with length 0 + * @tc.type: FUNC + */ +HWTEST_F(ErfTest, SUB_AI_NNRt_Func_North_Erf_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ErfModel1 erfModel; + OHNNGraphArgs graphArgs = erfModel.graphArgs; + + NN_TensorDesc* tensorDesc
= nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Erf_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ErfTest, SUB_AI_NNRt_Func_North_Erf_Model_SpecifyInputsAndOutputs_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ErfModel1 erfModel; + OHNNGraphArgs graphArgs = erfModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Erf_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ErfTest, SUB_AI_NNRt_Func_North_Erf_Model_SpecifyInputsAndOutputs_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ErfModel1 erfModel; + OHNNGraphArgs graphArgs = erfModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Erf_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ErfTest, SUB_AI_NNRt_Func_North_Erf_Model_SpecifyInputsAndOutputs_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ErfModel1 erfModel; + OHNNGraphArgs graphArgs = erfModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Erf_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ErfTest, 
SUB_AI_NNRt_Func_North_Erf_Model_SpecifyInputsAndOutputs_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ErfModel1 erfModel; + OHNNGraphArgs graphArgs = erfModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Erf_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ErfTest, SUB_AI_NNRt_Func_North_Erf_Model_SpecifyInputsAndOutputs_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ErfModel1 erfModel; + OHNNGraphArgs graphArgs = erfModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Erf_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ErfTest, SUB_AI_NNRt_Func_North_Erf_Model_SpecifyInputsAndOutputs_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ErfModel1 erfModel; + OHNNGraphArgs graphArgs = erfModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Erf_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ErfTest, SUB_AI_NNRt_Func_North_Erf_Model_SpecifyInputsAndOutputs_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ErfModel1 erfModel; + OHNNGraphArgs graphArgs = erfModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Erf_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ErfTest, SUB_AI_NNRt_Func_North_Erf_Model_SpecifyInputsAndOutputs_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + 
EXPECT_NE(nullptr, model); + + ErfModel1 erfModel; + OHNNGraphArgs graphArgs = erfModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Erf_Model_AddOperation_01 + * @tc.desc: Add operation with paramIndices being nullptr + * @tc.type: FUNC + */ +HWTEST_F(ErfTest, SUB_AI_NNRt_Func_North_Erf_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ErfModel1 erfModel; + OHNNGraphArgs graphArgs = erfModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Erf_Model_AddOperation_02 + * @tc.desc: Add operation with paramIndices.data being nullptr + * @tc.type: FUNC + */ +HWTEST_F(ErfTest, SUB_AI_NNRt_Func_North_Erf_Model_AddOperation_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ErfModel1 erfModel; + OHNNGraphArgs graphArgs = erfModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Erf_Model_AddOperation_03 + * @tc.desc: Add operation with paramIndices.data referring to a nonexistent index + * @tc.type: FUNC + */ +HWTEST_F(ErfTest, SUB_AI_NNRt_Func_North_Erf_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ErfModel1 erfModel; + OHNNGraphArgs graphArgs = erfModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Erf_Model_AddOperation_04 + * @tc.desc: Add operation with paramIndices.size being 0 + * @tc.type: FUNC + */ +HWTEST_F(ErfTest, SUB_AI_NNRt_Func_North_Erf_Model_AddOperation_04, Function |
MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ErfModel1 erfModel; + OHNNGraphArgs graphArgs = erfModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Erf_Model_AddOperation_05 + * @tc.desc: Add operation with inputIndices being nullptr + * @tc.type: FUNC + */ +HWTEST_F(ErfTest, SUB_AI_NNRt_Func_North_Erf_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ErfModel1 erfModel; + OHNNGraphArgs graphArgs = erfModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Erf_Model_AddOperation_06 + * @tc.desc: Add operation with inputIndices.data being nullptr + * @tc.type: FUNC + */ +HWTEST_F(ErfTest, SUB_AI_NNRt_Func_North_Erf_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ErfModel1 erfModel; + OHNNGraphArgs graphArgs = erfModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Erf_Model_AddOperation_07 + * @tc.desc: Add operation with inputIndices.data referring to a nonexistent index + * @tc.type: FUNC + */ +HWTEST_F(ErfTest, SUB_AI_NNRt_Func_North_Erf_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ErfModel1 erfModel; + OHNNGraphArgs graphArgs = erfModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number :
SUB_AI_NNRt_Func_North_Erf_Model_AddOperation_08 + * @tc.desc: Add operation with inputIndices.size being 0 + * @tc.type: FUNC + */ +HWTEST_F(ErfTest, SUB_AI_NNRt_Func_North_Erf_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ErfModel1 erfModel; + OHNNGraphArgs graphArgs = erfModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Erf_Model_AddOperation_09 + * @tc.desc: Add operation with outputIndices being nullptr + * @tc.type: FUNC + */ +HWTEST_F(ErfTest, SUB_AI_NNRt_Func_North_Erf_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ErfModel1 erfModel; + OHNNGraphArgs graphArgs = erfModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + &paramIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/exp_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/exp_test.cpp new file mode 100644 index 0000000..f3ef22d --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/exp_test.cpp @@ -0,0 +1,864 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#include <gtest/gtest.h> +#include <algorithm> +#include <vector> +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class ExpTest : public testing::Test {}; + +struct ExpModel1 { + const std::vector<int32_t> tensor_shape = {3, 3}; + float baseValue = 1; + float scaleValue = 1.5; + float shiftValue = 1; + float inputValue[3][3] = {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}; + float outputValue[3][3] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 9*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 9*sizeof(float)}; + OHNNOperandTest base = {OH_NN_FLOAT32, OH_NN_EXP_BASE, {1}, &baseValue, sizeof(float)}; + OHNNOperandTest scale = {OH_NN_FLOAT32, OH_NN_EXP_SCALE, {1}, &scaleValue, sizeof(float)}; + OHNNOperandTest shift = {OH_NN_FLOAT32, OH_NN_EXP_SHIFT, {1}, &shiftValue, sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_EXP, + .operands = {input, output, base, scale, shift}, + .paramIndices = {2, 3, 4}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct ExpModel2 { + const std::vector<int32_t> tensor_shape = {3}; + float inputValue[3] = {0, 1, 2}; + float outputValue[3] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 3*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_EXP, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct ExpModel3 { + const std::vector<int32_t> tensor_shape = {3, 3}; + float baseValue = 0; + float scaleValue = 1.5; + float shiftValue = 1; + float inputValue[3][3] = {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}; + float outputValue[3][3] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 9*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 9*sizeof(float)}; + OHNNOperandTest base = {OH_NN_FLOAT32, OH_NN_EXP_BASE, {1}, &baseValue, sizeof(float)}; + OHNNOperandTest scale = {OH_NN_FLOAT32, OH_NN_EXP_SCALE, {1}, &scaleValue, sizeof(float)}; + OHNNOperandTest shift = {OH_NN_FLOAT32, OH_NN_EXP_SHIFT, {1}, &shiftValue, sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_EXP, + .operands = {input, output, base, scale, shift}, + .paramIndices = {2, 3, 4}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct ExpModel4 { + const std::vector<int32_t> tensor_shape = {}; + float baseValue = 1; + float scaleValue = 1.5; + float shiftValue = 1; + float* inputValue = {}; + float* outputValue = {}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 0*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 0*sizeof(float)}; + OHNNOperandTest base = {OH_NN_FLOAT32, OH_NN_EXP_BASE, {1}, &baseValue, sizeof(float)}; + OHNNOperandTest scale = {OH_NN_FLOAT32, OH_NN_EXP_SCALE, {1}, &scaleValue, sizeof(float)}; + OHNNOperandTest shift = {OH_NN_FLOAT32, OH_NN_EXP_SHIFT, {1}, &shiftValue, sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_EXP, + .operands = {input, output, base, scale, shift}, + .paramIndices = {2, 3, 4}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Exp_Build_01 + * @tc.desc: Build test with ExpModel1 + * @tc.type: FUNC + */ +HWTEST_F(ExpTest, SUB_AI_NNRt_Func_North_Exp_Build_01, Function | MediumTest
| Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ExpModel1 expModel; + OHNNGraphArgs graphArgs = expModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Exp_Build_02 + * @tc.desc: ExpModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ExpTest, SUB_AI_NNRt_Func_North_Exp_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ExpModel2 expModel; + OHNNGraphArgs graphArgs = expModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Exp_Build_03 + * @tc.desc: ExpModel3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ExpTest, SUB_AI_NNRt_Func_North_Exp_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ExpModel3 expModel; + OHNNGraphArgs graphArgs = expModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Exp_Build_04 + * @tc.desc: ExpModel4模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ExpTest, SUB_AI_NNRt_Func_North_Exp_Build_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ExpModel4 expModel; + OHNNGraphArgs graphArgs = expModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Exp_Build_05 + * @tc.desc: ExpModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ExpTest, SUB_AI_NNRt_Func_North_Exp_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + 
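+    // The positive Build_01..Build_04 cases above hand the whole graph over to the
+    // BuildSingleOpGraph() helper from nncore_utils; the cases from here on feed it
+    // deliberately malformed graphArgs instead. As a hedged sketch only (the helper's
+    // implementation is not part of this file), it is assumed to walk graphArgs.operands
+    // and drive the public OH_NNModel API roughly like this:
+    //
+    //     for (size_t i = 0; i < graphArgs.operands.size(); ++i) {
+    //         const OHNNOperandTest &op = graphArgs.operands[i];
+    //         NN_TensorDesc *desc = createTensorDesc(op.shape.data(),
+    //             static_cast<uint32_t>(op.shape.size()), op.dataType, op.format);
+    //         OH_NNModel_AddTensorToModel(model, desc);                    // register tensor i
+    //         OH_NNModel_SetTensorType(model, i, op.type);
+    //         if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) !=
+    //             graphArgs.paramIndices.end()) {
+    //             OH_NNModel_SetTensorData(model, i, op.data, op.length);  // constant parameter
+    //         }
+    //     }
+    //     OH_NN_UInt32Array params  = TransformUInt32Array(graphArgs.paramIndices);
+    //     OH_NN_UInt32Array inputs  = TransformUInt32Array(graphArgs.inputIndices);
+    //     OH_NN_UInt32Array outputs = TransformUInt32Array(graphArgs.outputIndices);
+    //     OH_NNModel_AddOperation(model, graphArgs.operationType, &params, &inputs, &outputs);
+    //     OH_NNModel_SpecifyInputsAndOutputs(model, &inputs, &outputs);
+    //     OH_NNModel_Finish(model);
+    //
+    // For OH_NN_OPS_EXP the parameter tensors are OH_NN_EXP_BASE, OH_NN_EXP_SCALE and
+    // OH_NN_EXP_SHIFT; the operator is documented as computing, element-wise, roughly
+    // output = base ^ (shift + scale * input), with a base of -1 standing for e.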
+ ExpModel1 expModel; + OHNNGraphArgs graphArgs = expModel.graphArgs; + graphArgs.operands = {expModel.input, expModel.input, expModel.output, + expModel.base, expModel.scale, expModel.shift}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2}; + graphArgs.paramIndices = {3, 4, 5}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Exp_Build_06 + * @tc.desc: ExpModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ExpTest, SUB_AI_NNRt_Func_North_Exp_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ExpModel1 expModel; + OHNNGraphArgs graphArgs = expModel.graphArgs; + graphArgs.operands = {expModel.input, expModel.output, expModel.output, + expModel.base, expModel.scale, expModel.shift}; + graphArgs.inputIndices = {0}; + graphArgs.outputIndices = {1, 2}; + graphArgs.paramIndices = {3, 4, 5}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Exp_Build_07 + * @tc.desc: ExpModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ExpTest, SUB_AI_NNRt_Func_North_Exp_Build_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ExpModel1 expModel; + OHNNGraphArgs graphArgs = expModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {expModel.input, expModel.output, expModel.base, + expModel.scale, expModel.shift, activation}; + graphArgs.paramIndices = {2, 3, 4, 5}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Exp_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(ExpTest, SUB_AI_NNRt_Func_North_Exp_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Exp_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(ExpTest, SUB_AI_NNRt_Func_North_Exp_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ExpModel1 expModel; + OHNNGraphArgs graphArgs = expModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Exp_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(ExpTest, SUB_AI_NNRt_Func_North_Exp_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ExpModel1 expModel; + OHNNGraphArgs graphArgs = expModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Exp_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(ExpTest, SUB_AI_NNRt_Func_North_Exp_Model_SetOperandValue_01, Function | 
MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ExpModel1 expModel; + OHNNGraphArgs graphArgs = expModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Exp_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ExpTest, SUB_AI_NNRt_Func_North_Exp_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ExpModel1 expModel; + OHNNGraphArgs graphArgs = expModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Exp_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(ExpTest, SUB_AI_NNRt_Func_North_Exp_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ExpModel1 expModel; + OHNNGraphArgs graphArgs = expModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Exp_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 
设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ExpTest, SUB_AI_NNRt_Func_North_Exp_Model_SpecifyInputsAndOutputs_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ExpModel1 expModel; + OHNNGraphArgs graphArgs = expModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Exp_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ExpTest, SUB_AI_NNRt_Func_North_Exp_Model_SpecifyInputsAndOutputs_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ExpModel1 expModel; + OHNNGraphArgs graphArgs = expModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Exp_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ExpTest, SUB_AI_NNRt_Func_North_Exp_Model_SpecifyInputsAndOutputs_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ExpModel1 expModel; + OHNNGraphArgs graphArgs = expModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Exp_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ExpTest, SUB_AI_NNRt_Func_North_Exp_Model_SpecifyInputsAndOutputs_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ExpModel1 expModel; + OHNNGraphArgs graphArgs = expModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Exp_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ExpTest, SUB_AI_NNRt_Func_North_Exp_Model_SpecifyInputsAndOutputs_05, Function | MediumTest 
| Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ExpModel1 expModel; + OHNNGraphArgs graphArgs = expModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Exp_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ExpTest, SUB_AI_NNRt_Func_North_Exp_Model_SpecifyInputsAndOutputs_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ExpModel1 expModel; + OHNNGraphArgs graphArgs = expModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Exp_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ExpTest, SUB_AI_NNRt_Func_North_Exp_Model_SpecifyInputsAndOutputs_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ExpModel1 expModel; + OHNNGraphArgs graphArgs = expModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Exp_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ExpTest, SUB_AI_NNRt_Func_North_Exp_Model_SpecifyInputsAndOutputs_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ExpModel1 expModel; + OHNNGraphArgs graphArgs = expModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Exp_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ExpTest, SUB_AI_NNRt_Func_North_Exp_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ExpModel1 expModel; + OHNNGraphArgs graphArgs = expModel.graphArgs; + 
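+    // The SpecifyInputsAndOutputs_01..08 cases above and the AddOperation_01..09 cases
+    // (this one and those that follow) all probe the same OH_NN_UInt32Array contract for
+    // this operator: a null array pointer, a null .data field, a .size of 0, or an index
+    // that does not name a tensor already added to the model is expected to come back as
+    // OH_NN_INVALID_PARAMETER. TransformUInt32Array() is assumed to be a thin adapter
+    // along these lines (sketch only; the vector keeps ownership of the storage):
+    //
+    //     OH_NN_UInt32Array TransformUInt32Array(std::vector<uint32_t> &vec)
+    //     {
+    //         OH_NN_UInt32Array array;
+    //         array.data = vec.data();                         // may be nullptr for an empty vector
+    //         array.size = static_cast<uint32_t>(vec.size());
+    //         return array;
+    //     }
+    //
+    // which is why the tests can overwrite .data and .size on the returned value before
+    // each negative call.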
graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Exp_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ExpTest, SUB_AI_NNRt_Func_North_Exp_Model_AddOperation_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ExpModel1 expModel; + OHNNGraphArgs graphArgs = expModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Exp_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ExpTest, SUB_AI_NNRt_Func_North_Exp_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ExpModel1 expModel; + OHNNGraphArgs graphArgs = expModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Exp_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ExpTest, SUB_AI_NNRt_Func_North_Exp_Model_AddOperation_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ExpModel1 expModel; + OHNNGraphArgs graphArgs = expModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Exp_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ExpTest, 
SUB_AI_NNRt_Func_North_Exp_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ExpModel1 expModel; + OHNNGraphArgs graphArgs = expModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Exp_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ExpTest, SUB_AI_NNRt_Func_North_Exp_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ExpModel1 expModel; + OHNNGraphArgs graphArgs = expModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Exp_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ExpTest, SUB_AI_NNRt_Func_North_Exp_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ExpModel1 expModel; + OHNNGraphArgs graphArgs = expModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Exp_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ExpTest, SUB_AI_NNRt_Func_North_Exp_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ExpModel1 expModel; + OHNNGraphArgs graphArgs = expModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, 
&outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Exp_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ExpTest, SUB_AI_NNRt_Func_North_Exp_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ExpModel1 expModel; + OHNNGraphArgs graphArgs = expModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/flatten_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/flatten_test.cpp new file mode 100644 index 0000000..5dd5fea --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/flatten_test.cpp @@ -0,0 +1,844 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class FlattenTest : public testing::Test {}; + +struct FlattenModel1 { + const std::vector input_shape = {3, 3}; + const std::vector output_shape = {9}; + float inputValue[3][3] = {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}; + float outputValue[9] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 9*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 9*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_FLATTEN, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct FlattenModel2 { + const std::vector input_shape = {2, 2, 2}; + const std::vector output_shape = {8}; + float inputValue[2][2][2] = {{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}; + float outputValue[8] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 8*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 8*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_FLATTEN, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct FlattenModel3 { + const std::vector input_shape = {9}; + const std::vector output_shape = {9}; + float inputValue[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9}; + float outputValue[9] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 9*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 9*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_FLATTEN, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct FlattenModel4 { + const std::vector tensor_shape = {}; + float* inputValue = {}; + float* outputValue = {}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 0*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 0*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_FLATTEN, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Flatten_Build_01 + * @tc.desc: FlattenModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(FlattenTest, SUB_AI_NNRt_Func_North_Flatten_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FlattenModel1 flattenModel; + OHNNGraphArgs graphArgs = flattenModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Flatten_Build_02 + * @tc.desc: FlattenModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(FlattenTest, SUB_AI_NNRt_Func_North_Flatten_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = 
OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FlattenModel2 flattenModel; + OHNNGraphArgs graphArgs = flattenModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Flatten_Build_03 + * @tc.desc: FlattenModel3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(FlattenTest, SUB_AI_NNRt_Func_North_Flatten_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FlattenModel3 flattenModel; + OHNNGraphArgs graphArgs = flattenModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Flatten_Build_04 + * @tc.desc: FlattenModel4模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(FlattenTest, SUB_AI_NNRt_Func_North_Flatten_Build_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FlattenModel4 flattenModel; + OHNNGraphArgs graphArgs = flattenModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Flatten_Build_05 + * @tc.desc: FlattenModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(FlattenTest, SUB_AI_NNRt_Func_North_Flatten_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FlattenModel1 flattenModel; + OHNNGraphArgs graphArgs = flattenModel.graphArgs; + graphArgs.operands = {flattenModel.input, flattenModel.input, flattenModel.output}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Flatten_Build_06 + * @tc.desc: FlattenModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(FlattenTest, SUB_AI_NNRt_Func_North_Flatten_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FlattenModel1 flattenModel; + OHNNGraphArgs graphArgs = flattenModel.graphArgs; + graphArgs.operands = {flattenModel.input, flattenModel.output, 
flattenModel.output}; + graphArgs.inputIndices = {0}; + graphArgs.outputIndices = {1, 2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Flatten_Build_07 + * @tc.desc: FlattenModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(FlattenTest, SUB_AI_NNRt_Func_North_Flatten_Build_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FlattenModel1 flattenModel; + OHNNGraphArgs graphArgs = flattenModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {flattenModel.input, flattenModel.output, activation}; + graphArgs.paramIndices = {2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Flatten_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(FlattenTest, SUB_AI_NNRt_Func_North_Flatten_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Flatten_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(FlattenTest, SUB_AI_NNRt_Func_North_Flatten_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FlattenModel1 flattenModel; + OHNNGraphArgs graphArgs = flattenModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Flatten_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(FlattenTest, SUB_AI_NNRt_Func_North_Flatten_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FlattenModel1 flattenModel; + OHNNGraphArgs graphArgs = flattenModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Flatten_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(FlattenTest, SUB_AI_NNRt_Func_North_Flatten_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FlattenModel1 flattenModel; + OHNNGraphArgs graphArgs = flattenModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, 
OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Flatten_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(FlattenTest, SUB_AI_NNRt_Func_North_Flatten_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FlattenModel1 flattenModel; + OHNNGraphArgs graphArgs = flattenModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Flatten_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(FlattenTest, SUB_AI_NNRt_Func_North_Flatten_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FlattenModel1 flattenModel; + OHNNGraphArgs graphArgs = flattenModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Flatten_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(FlattenTest, SUB_AI_NNRt_Func_North_Flatten_Model_SpecifyInputsAndOutputs_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FlattenModel1 flattenModel; + OHNNGraphArgs graphArgs = flattenModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : 
SUB_AI_NNRt_Func_North_Flatten_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(FlattenTest, SUB_AI_NNRt_Func_North_Flatten_Model_SpecifyInputsAndOutputs_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FlattenModel1 flattenModel; + OHNNGraphArgs graphArgs = flattenModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Flatten_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(FlattenTest, SUB_AI_NNRt_Func_North_Flatten_Model_SpecifyInputsAndOutputs_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FlattenModel1 flattenModel; + OHNNGraphArgs graphArgs = flattenModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Flatten_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(FlattenTest, SUB_AI_NNRt_Func_North_Flatten_Model_SpecifyInputsAndOutputs_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FlattenModel1 flattenModel; + OHNNGraphArgs graphArgs = flattenModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Flatten_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(FlattenTest, SUB_AI_NNRt_Func_North_Flatten_Model_SpecifyInputsAndOutputs_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FlattenModel1 flattenModel; + OHNNGraphArgs graphArgs = flattenModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : 
SUB_AI_NNRt_Func_North_Flatten_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(FlattenTest, SUB_AI_NNRt_Func_North_Flatten_Model_SpecifyInputsAndOutputs_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FlattenModel1 flattenModel; + OHNNGraphArgs graphArgs = flattenModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Flatten_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(FlattenTest, SUB_AI_NNRt_Func_North_Flatten_Model_SpecifyInputsAndOutputs_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FlattenModel1 flattenModel; + OHNNGraphArgs graphArgs = flattenModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Flatten_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(FlattenTest, SUB_AI_NNRt_Func_North_Flatten_Model_SpecifyInputsAndOutputs_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FlattenModel1 flattenModel; + OHNNGraphArgs graphArgs = flattenModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Flatten_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(FlattenTest, SUB_AI_NNRt_Func_North_Flatten_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FlattenModel1 flattenModel; + OHNNGraphArgs graphArgs = flattenModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : 
SUB_AI_NNRt_Func_North_Flatten_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(FlattenTest, SUB_AI_NNRt_Func_North_Flatten_Model_AddOperation_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FlattenModel1 flattenModel; + OHNNGraphArgs graphArgs = flattenModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Flatten_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(FlattenTest, SUB_AI_NNRt_Func_North_Flatten_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FlattenModel1 flattenModel; + OHNNGraphArgs graphArgs = flattenModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Flatten_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(FlattenTest, SUB_AI_NNRt_Func_North_Flatten_Model_AddOperation_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FlattenModel1 flattenModel; + OHNNGraphArgs graphArgs = flattenModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Flatten_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(FlattenTest, SUB_AI_NNRt_Func_North_Flatten_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FlattenModel1 flattenModel; + OHNNGraphArgs graphArgs = flattenModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = 
TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Flatten_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(FlattenTest, SUB_AI_NNRt_Func_North_Flatten_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FlattenModel1 flattenModel; + OHNNGraphArgs graphArgs = flattenModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Flatten_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(FlattenTest, SUB_AI_NNRt_Func_North_Flatten_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FlattenModel1 flattenModel; + OHNNGraphArgs graphArgs = flattenModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Flatten_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(FlattenTest, SUB_AI_NNRt_Func_North_Flatten_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FlattenModel1 flattenModel; + OHNNGraphArgs graphArgs = flattenModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Flatten_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(FlattenTest, SUB_AI_NNRt_Func_North_Flatten_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + 
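+    // Unlike OH_NN_OPS_EXP earlier in this patch, OH_NN_OPS_FLATTEN carries no parameter
+    // tensors, so an empty paramIndices (null .data, .size of 0) is valid input to
+    // OH_NNModel_AddOperation; that is why AddOperation_02 and AddOperation_04 above
+    // expect OH_NN_SUCCESS where the Exp counterparts expect OH_NN_INVALID_PARAMETER.
+    // For the positive Build_01..Build_04 cases, CompileGraphMock() is assumed to apply
+    // the OHNNCompileParam fields through the public compilation API, roughly as follows
+    // (hedged sketch; device discovery goes through the mock HDI device from nncore_utils):
+    //
+    //     const size_t *devicesID{nullptr};
+    //     uint32_t devicesCount{0};
+    //     OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount);      // pick a device
+    //     OH_NNCompilation_SetDevice(compilation, devicesID[0]);
+    //     OH_NNCompilation_SetPerformanceMode(compilation, compileParam.performanceMode);
+    //     OH_NNCompilation_SetPriority(compilation, compileParam.priority);
+    //     OH_NNCompilation_Build(compilation);                         // compile on the mock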
EXPECT_NE(nullptr, model); + + FlattenModel1 flattenModel; + OHNNGraphArgs graphArgs = flattenModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/floor_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/floor_test.cpp new file mode 100644 index 0000000..ea05d4a --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/floor_test.cpp @@ -0,0 +1,757 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class FloorTest : public testing::Test {}; + +struct FloorModel1 { + const std::vector tensor_shape = {5}; + float inputValue[5] = {-2.5, -1.1, 0, 1.1, 2.9}; + float outputValue[5] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 5*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 5*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_FLOOR, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + + +struct FloorModel2 { + const std::vector tensor_shape = {4}; + float inputValue[4] = {3.5, 4.7, 5.1, 6.6}; + float outputValue[4] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 4*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 4*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_FLOOR, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Floor_Build_01 + * @tc.desc: FloorModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(FloorTest, SUB_AI_NNRt_Func_North_Floor_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FloorModel1 floorModel; + OHNNGraphArgs graphArgs = floorModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, 
compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Floor_Build_02 + * @tc.desc: FloorModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(FloorTest, SUB_AI_NNRt_Func_North_Floor_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FloorModel2 floorModel; + OHNNGraphArgs graphArgs = floorModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Floor_Build_03 + * @tc.desc: FloorModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(FloorTest, SUB_AI_NNRt_Func_North_Floor_Build_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FloorModel1 floorModel; + OHNNGraphArgs graphArgs = floorModel.graphArgs; + graphArgs.operands = {floorModel.input, floorModel.input, floorModel.output}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Floor_Build_04 + * @tc.desc: FloorModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(FloorTest, SUB_AI_NNRt_Func_North_Floor_Build_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FloorModel1 floorModel; + OHNNGraphArgs graphArgs = floorModel.graphArgs; + graphArgs.operands = {floorModel.input, floorModel.output, floorModel.output}; + graphArgs.inputIndices = {0}; + graphArgs.outputIndices = {1, 2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Floor_Build_05 + * @tc.desc: FloorModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(FloorTest, SUB_AI_NNRt_Func_North_Floor_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FloorModel1 floorModel; + OHNNGraphArgs graphArgs = floorModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {floorModel.input, floorModel.output, activation}; + graphArgs.paramIndices = {2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Floor_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(FloorTest, SUB_AI_NNRt_Func_North_Floor_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Floor_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(FloorTest, 
SUB_AI_NNRt_Func_North_Floor_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FloorModel1 floorModel; + OHNNGraphArgs graphArgs = floorModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Floor_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(FloorTest, SUB_AI_NNRt_Func_North_Floor_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FloorModel1 floorModel; + OHNNGraphArgs graphArgs = floorModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Floor_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(FloorTest, SUB_AI_NNRt_Func_North_Floor_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FloorModel1 floorModel; + OHNNGraphArgs graphArgs = floorModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Floor_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(FloorTest, SUB_AI_NNRt_Func_North_Floor_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FloorModel1 floorModel; + OHNNGraphArgs graphArgs = floorModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Floor_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(FloorTest, SUB_AI_NNRt_Func_North_Floor_Model_SetOperandValue_03, 
Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FloorModel1 floorModel; + OHNNGraphArgs graphArgs = floorModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Floor_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(FloorTest, SUB_AI_NNRt_Func_North_Floor_Model_SpecifyInputsAndOutputs_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FloorModel1 floorModel; + OHNNGraphArgs graphArgs = floorModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Floor_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(FloorTest, SUB_AI_NNRt_Func_North_Floor_Model_SpecifyInputsAndOutputs_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FloorModel1 floorModel; + OHNNGraphArgs graphArgs = floorModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Floor_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(FloorTest, SUB_AI_NNRt_Func_North_Floor_Model_SpecifyInputsAndOutputs_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FloorModel1 floorModel; + OHNNGraphArgs graphArgs = floorModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, 
&outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Floor_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(FloorTest, SUB_AI_NNRt_Func_North_Floor_Model_SpecifyInputsAndOutputs_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FloorModel1 floorModel; + OHNNGraphArgs graphArgs = floorModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Floor_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(FloorTest, SUB_AI_NNRt_Func_North_Floor_Model_SpecifyInputsAndOutputs_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FloorModel1 floorModel; + OHNNGraphArgs graphArgs = floorModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Floor_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(FloorTest, SUB_AI_NNRt_Func_North_Floor_Model_SpecifyInputsAndOutputs_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FloorModel1 floorModel; + OHNNGraphArgs graphArgs = floorModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Floor_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(FloorTest, SUB_AI_NNRt_Func_North_Floor_Model_SpecifyInputsAndOutputs_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FloorModel1 floorModel; + OHNNGraphArgs graphArgs = floorModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * 
@tc.number : SUB_AI_NNRt_Func_North_Floor_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(FloorTest, SUB_AI_NNRt_Func_North_Floor_Model_SpecifyInputsAndOutputs_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FloorModel1 floorModel; + OHNNGraphArgs graphArgs = floorModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Floor_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(FloorTest, SUB_AI_NNRt_Func_North_Floor_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FloorModel1 floorModel; + OHNNGraphArgs graphArgs = floorModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Floor_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(FloorTest, SUB_AI_NNRt_Func_North_Floor_Model_AddOperation_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FloorModel1 floorModel; + OHNNGraphArgs graphArgs = floorModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Floor_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(FloorTest, SUB_AI_NNRt_Func_North_Floor_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FloorModel1 floorModel; + OHNNGraphArgs graphArgs = floorModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, 
OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Floor_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(FloorTest, SUB_AI_NNRt_Func_North_Floor_Model_AddOperation_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FloorModel1 floorModel; + OHNNGraphArgs graphArgs = floorModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Floor_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(FloorTest, SUB_AI_NNRt_Func_North_Floor_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FloorModel1 floorModel; + OHNNGraphArgs graphArgs = floorModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Floor_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(FloorTest, SUB_AI_NNRt_Func_North_Floor_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FloorModel1 floorModel; + OHNNGraphArgs graphArgs = floorModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Floor_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(FloorTest, SUB_AI_NNRt_Func_North_Floor_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FloorModel1 floorModel; + OHNNGraphArgs graphArgs = floorModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto 
paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Floor_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(FloorTest, SUB_AI_NNRt_Func_North_Floor_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FloorModel1 floorModel; + OHNNGraphArgs graphArgs = floorModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Floor_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(FloorTest, SUB_AI_NNRt_Func_North_Floor_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + FloorModel1 floorModel; + OHNNGraphArgs graphArgs = floorModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + &paramIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/gather_nd_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/gather_nd_test.cpp new file mode 100644 index 0000000..73d8d34 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/gather_nd_test.cpp @@ -0,0 +1,765 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <gtest/gtest.h> +#include <vector> +#include <algorithm> +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class GatherNDTest : public testing::Test {}; + +struct GatherNDModel1 { + const std::vector<int32_t> input_shape = {2, 3}; + const std::vector<int32_t> indices_shape = {2, 2}; + const std::vector<int32_t> output_shape = {2}; + float indicesValue[2][2] = {{0, 0}, {1, 1}}; + float inputValue[2][3] = {{-0.1, 0.3, 3.6}, {0.4, 0.5, -3.2}}; + float outputValue[2] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 6*sizeof(float)}; + OHNNOperandTest indices = {OH_NN_FLOAT32, OH_NN_TENSOR, indices_shape, indicesValue, 4*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 2*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_GATHER_ND, + .operands = {input, indices, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + + +struct GatherNDModel2 { + const std::vector<int32_t> input_shape = {2, 3}; + const std::vector<int32_t> indices_shape = {2, 2}; + const std::vector<int32_t> output_shape = {2}; + float indicesValue[2][2] = {{1, 0}, {1, 0}}; + float inputValue[2][3] = {{-0.1, 0.3, 3.6}, {0.4, 0.5, -3.2}}; + float outputValue[2] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 6*sizeof(float)}; + OHNNOperandTest indices = {OH_NN_FLOAT32, OH_NN_TENSOR, indices_shape, indicesValue, 4*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 2*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_GATHER_ND, + .operands = {input, indices, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GatherND_Build_01 + * @tc.desc: GatherNDModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(GatherNDTest, SUB_AI_NNRt_Func_North_GatherND_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GatherNDModel1 gatherNDModel; + OHNNGraphArgs graphArgs = gatherNDModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GatherND_Build_02 + * @tc.desc: GatherNDModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(GatherNDTest, SUB_AI_NNRt_Func_North_GatherND_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GatherNDModel2 gatherNDModel; + OHNNGraphArgs graphArgs = gatherNDModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, 
executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GatherND_Build_03 + * @tc.desc: GatherNDModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(GatherNDTest, SUB_AI_NNRt_Func_North_GatherND_Build_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GatherNDModel1 gatherNDModel; + OHNNGraphArgs graphArgs = gatherNDModel.graphArgs; + graphArgs.operands = {gatherNDModel.input, gatherNDModel.input, gatherNDModel.indices, gatherNDModel.output}; + graphArgs.inputIndices = {0, 1, 2}; + graphArgs.outputIndices = {3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GatherND_Build_04 + * @tc.desc: GatherNDModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(GatherNDTest, SUB_AI_NNRt_Func_North_GatherND_Build_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GatherNDModel1 gatherNDModel; + OHNNGraphArgs graphArgs = gatherNDModel.graphArgs; + graphArgs.operands = {gatherNDModel.input, gatherNDModel.indices, gatherNDModel.output, gatherNDModel.output}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2, 3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GatherND_Build_05 + * @tc.desc: GatherNDModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(GatherNDTest, SUB_AI_NNRt_Func_North_GatherND_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GatherNDModel1 gatherNDModel; + OHNNGraphArgs graphArgs = gatherNDModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {gatherNDModel.input, gatherNDModel.indices, gatherNDModel.output, activation}; + graphArgs.paramIndices = {3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GatherND_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(GatherNDTest, SUB_AI_NNRt_Func_North_GatherND_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GatherND_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(GatherNDTest, SUB_AI_NNRt_Func_North_GatherND_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GatherNDModel1 gatherNDModel; + OHNNGraphArgs graphArgs = gatherNDModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GatherND_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(GatherNDTest, SUB_AI_NNRt_Func_North_GatherND_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GatherNDModel1 gatherNDModel; + 
OHNNGraphArgs graphArgs = gatherNDModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GatherND_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(GatherNDTest, SUB_AI_NNRt_Func_North_GatherND_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GatherNDModel1 gatherNDModel; + OHNNGraphArgs graphArgs = gatherNDModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GatherND_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(GatherNDTest, SUB_AI_NNRt_Func_North_GatherND_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GatherNDModel1 gatherNDModel; + OHNNGraphArgs graphArgs = gatherNDModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GatherND_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(GatherNDTest, SUB_AI_NNRt_Func_North_GatherND_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GatherNDModel1 gatherNDModel; + OHNNGraphArgs graphArgs = gatherNDModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + 
EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GatherND_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(GatherNDTest, SUB_AI_NNRt_Func_North_GatherND_Model_SpecifyInputsAndOutputs_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GatherNDModel1 gatherNDModel; + OHNNGraphArgs graphArgs = gatherNDModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GatherND_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(GatherNDTest, SUB_AI_NNRt_Func_North_GatherND_Model_SpecifyInputsAndOutputs_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GatherNDModel1 gatherNDModel; + OHNNGraphArgs graphArgs = gatherNDModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GatherND_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(GatherNDTest, SUB_AI_NNRt_Func_North_GatherND_Model_SpecifyInputsAndOutputs_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GatherNDModel1 gatherNDModel; + OHNNGraphArgs graphArgs = gatherNDModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GatherND_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(GatherNDTest, SUB_AI_NNRt_Func_North_GatherND_Model_SpecifyInputsAndOutputs_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GatherNDModel1 gatherNDModel; + OHNNGraphArgs graphArgs = gatherNDModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, 
BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GatherND_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(GatherNDTest, SUB_AI_NNRt_Func_North_GatherND_Model_SpecifyInputsAndOutputs_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GatherNDModel1 gatherNDModel; + OHNNGraphArgs graphArgs = gatherNDModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GatherND_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(GatherNDTest, SUB_AI_NNRt_Func_North_GatherND_Model_SpecifyInputsAndOutputs_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GatherNDModel1 gatherNDModel; + OHNNGraphArgs graphArgs = gatherNDModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GatherND_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(GatherNDTest, SUB_AI_NNRt_Func_North_GatherND_Model_SpecifyInputsAndOutputs_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GatherNDModel1 gatherNDModel; + OHNNGraphArgs graphArgs = gatherNDModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GatherND_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(GatherNDTest, SUB_AI_NNRt_Func_North_GatherND_Model_SpecifyInputsAndOutputs_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GatherNDModel1 gatherNDModel; + OHNNGraphArgs graphArgs = gatherNDModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + 
EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GatherND_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(GatherNDTest, SUB_AI_NNRt_Func_North_GatherND_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GatherNDModel1 gatherNDModel; + OHNNGraphArgs graphArgs = gatherNDModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GatherND_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(GatherNDTest, SUB_AI_NNRt_Func_North_GatherND_Model_AddOperation_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GatherNDModel1 gatherNDModel; + OHNNGraphArgs graphArgs = gatherNDModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GatherND_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(GatherNDTest, SUB_AI_NNRt_Func_North_GatherND_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GatherNDModel1 gatherNDModel; + OHNNGraphArgs graphArgs = gatherNDModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GatherND_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(GatherNDTest, SUB_AI_NNRt_Func_North_GatherND_Model_AddOperation_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = 
OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GatherNDModel1 gatherNDModel; + OHNNGraphArgs graphArgs = gatherNDModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GatherND_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(GatherNDTest, SUB_AI_NNRt_Func_North_GatherND_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GatherNDModel1 gatherNDModel; + OHNNGraphArgs graphArgs = gatherNDModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GatherND_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(GatherNDTest, SUB_AI_NNRt_Func_North_GatherND_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GatherNDModel1 gatherNDModel; + OHNNGraphArgs graphArgs = gatherNDModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GatherND_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(GatherNDTest, SUB_AI_NNRt_Func_North_GatherND_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GatherNDModel1 gatherNDModel; + OHNNGraphArgs graphArgs = gatherNDModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + 
Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GatherND_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(GatherNDTest, SUB_AI_NNRt_Func_North_GatherND_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GatherNDModel1 gatherNDModel; + OHNNGraphArgs graphArgs = gatherNDModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GatherND_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(GatherNDTest, SUB_AI_NNRt_Func_North_GatherND_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GatherNDModel1 gatherNDModel; + OHNNGraphArgs graphArgs = gatherNDModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/greater_equal_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/greater_equal_test.cpp new file mode 100644 index 0000000..32035f8 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/greater_equal_test.cpp @@ -0,0 +1,863 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <gtest/gtest.h> +#include <vector> +#include <algorithm> +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class GreaterEqualTest : public testing::Test {}; + +struct GreaterEqualModel1 { + const std::vector<int32_t> tensor_shape = {3}; + float input0Value[3] = {1, 2, 3}; + float input1Value[3] = {4, 2, 6}; + bool outputValue[3] = {0}; + + OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input0Value, 3*sizeof(float)}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input1Value, 3*sizeof(float)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_GREATER_EQUAL, + .operands = {input0, input1, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct GreaterEqualModel2 { + const std::vector<int32_t> tensor_shape = {1}; + float input0Value[1] = {1}; + float input1Value[1] = {1}; + bool outputValue[1] = {0}; + + OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input0Value, sizeof(float)}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input1Value, sizeof(float)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_GREATER_EQUAL, + .operands = {input0, input1, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct GreaterEqualModel3 { + const std::vector<int32_t> tensor_shape = {3}; + const std::vector<int32_t> input1_shape = {4}; + float input0Value[3] = {1, 2, 3}; + float input1Value[4] = {4, 5, 6, 7}; + bool outputValue[3] = {0}; + + OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input0Value, 3*sizeof(float)}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, input1_shape, input1Value, 4*sizeof(float)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_GREATER_EQUAL, + .operands = {input0, input1, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct GreaterEqualModel4 { + const std::vector<int32_t> tensor_shape = {}; + float* input0Value = {}; + float* input1Value = {}; + bool* outputValue = {}; + + OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input0Value, 0*sizeof(float)}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input1Value, 0*sizeof(float)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, 0*sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_GREATER_EQUAL, + .operands = {input0, input1, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GreaterEqual_Build_01 + * @tc.desc: GreaterEqualModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(GreaterEqualTest, SUB_AI_NNRt_Func_North_GreaterEqual_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterEqualModel1 greaterEqualModel; + OHNNGraphArgs graphArgs = greaterEqualModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = 
OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GreaterEqual_Build_02 + * @tc.desc: GreaterEqualModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(GreaterEqualTest, SUB_AI_NNRt_Func_North_GreaterEqual_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterEqualModel2 greaterEqualModel; + OHNNGraphArgs graphArgs = greaterEqualModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GreaterEqual_Build_03 + * @tc.desc: GreaterEqualModel3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(GreaterEqualTest, SUB_AI_NNRt_Func_North_GreaterEqual_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterEqualModel3 greaterEqualModel; + OHNNGraphArgs graphArgs = greaterEqualModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GreaterEqual_Build_04 + * @tc.desc: GreaterEqualModel4模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(GreaterEqualTest, SUB_AI_NNRt_Func_North_GreaterEqual_Build_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterEqualModel4 greaterEqualModel; + OHNNGraphArgs graphArgs = greaterEqualModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GreaterEqual_Build_05 + * @tc.desc: GreaterEqualModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(GreaterEqualTest, SUB_AI_NNRt_Func_North_GreaterEqual_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterEqualModel2 greaterEqualModel; + OHNNGraphArgs graphArgs = greaterEqualModel.graphArgs; + graphArgs.operands = {greaterEqualModel.input0, greaterEqualModel.input1, + greaterEqualModel.input1, 
greaterEqualModel.output}; + graphArgs.inputIndices = {0, 1, 2}; + graphArgs.outputIndices = {3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GreaterEqual_Build_06 + * @tc.desc: GreaterEqualModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(GreaterEqualTest, SUB_AI_NNRt_Func_North_GreaterEqual_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterEqualModel2 greaterEqualModel; + OHNNGraphArgs graphArgs = greaterEqualModel.graphArgs; + graphArgs.operands = {greaterEqualModel.input0, greaterEqualModel.input1, + greaterEqualModel.output, greaterEqualModel.output}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2, 3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GreaterEqual_Build_07 + * @tc.desc: GreaterEqualModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(GreaterEqualTest, SUB_AI_NNRt_Func_North_GreaterEqual_Build_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterEqualModel2 greaterEqualModel; + OHNNGraphArgs graphArgs = greaterEqualModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {greaterEqualModel.input0, greaterEqualModel.input1, greaterEqualModel.output, activation}; + graphArgs.paramIndices = {3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GreaterEqual_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(GreaterEqualTest, SUB_AI_NNRt_Func_North_GreaterEqual_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GreaterEqual_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(GreaterEqualTest, SUB_AI_NNRt_Func_North_GreaterEqual_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterEqualModel1 greaterEqualModel; + OHNNGraphArgs graphArgs = greaterEqualModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GreaterEqual_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(GreaterEqualTest, SUB_AI_NNRt_Func_North_GreaterEqual_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterEqualModel1 greaterEqualModel; + OHNNGraphArgs graphArgs = greaterEqualModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GreaterEqual_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(GreaterEqualTest, 
SUB_AI_NNRt_Func_North_GreaterEqual_Model_SetOperandValue_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterEqualModel1 greaterEqualModel; + OHNNGraphArgs graphArgs = greaterEqualModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector<NN_TensorDesc*> tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GreaterEqual_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buffer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(GreaterEqualTest, SUB_AI_NNRt_Func_North_GreaterEqual_Model_SetOperandValue_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterEqualModel1 greaterEqualModel; + OHNNGraphArgs graphArgs = greaterEqualModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector<NN_TensorDesc*> tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GreaterEqual_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(GreaterEqualTest, SUB_AI_NNRt_Func_North_GreaterEqual_Model_SetOperandValue_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterEqualModel1 greaterEqualModel; + OHNNGraphArgs graphArgs = greaterEqualModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector<NN_TensorDesc*> tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER,
OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GreaterEqual_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(GreaterEqualTest, SUB_AI_NNRt_Func_North_GreaterEqual_Model_SpecifyInputsAndOutputs_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterEqualModel1 greaterEqualModel; + OHNNGraphArgs graphArgs = greaterEqualModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GreaterEqual_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(GreaterEqualTest, SUB_AI_NNRt_Func_North_GreaterEqual_Model_SpecifyInputsAndOutputs_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterEqualModel1 greaterEqualModel; + OHNNGraphArgs graphArgs = greaterEqualModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GreaterEqual_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(GreaterEqualTest, SUB_AI_NNRt_Func_North_GreaterEqual_Model_SpecifyInputsAndOutputs_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterEqualModel1 greaterEqualModel; + OHNNGraphArgs graphArgs = greaterEqualModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GreaterEqual_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(GreaterEqualTest, SUB_AI_NNRt_Func_North_GreaterEqual_Model_SpecifyInputsAndOutputs_04, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterEqualModel1 greaterEqualModel; + OHNNGraphArgs graphArgs = greaterEqualModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = 
TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GreaterEqual_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(GreaterEqualTest, SUB_AI_NNRt_Func_North_GreaterEqual_Model_SpecifyInputsAndOutputs_05, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterEqualModel1 greaterEqualModel; + OHNNGraphArgs graphArgs = greaterEqualModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GreaterEqual_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(GreaterEqualTest, SUB_AI_NNRt_Func_North_GreaterEqual_Model_SpecifyInputsAndOutputs_06, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterEqualModel1 greaterEqualModel; + OHNNGraphArgs graphArgs = greaterEqualModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GreaterEqual_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(GreaterEqualTest, SUB_AI_NNRt_Func_North_GreaterEqual_Model_SpecifyInputsAndOutputs_07, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterEqualModel1 greaterEqualModel; + OHNNGraphArgs graphArgs = greaterEqualModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GreaterEqual_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(GreaterEqualTest, SUB_AI_NNRt_Func_North_GreaterEqual_Model_SpecifyInputsAndOutputs_08, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterEqualModel1 greaterEqualModel; + OHNNGraphArgs graphArgs = greaterEqualModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto 
inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GreaterEqual_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(GreaterEqualTest, SUB_AI_NNRt_Func_North_GreaterEqual_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterEqualModel1 greaterEqualModel; + OHNNGraphArgs graphArgs = greaterEqualModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GreaterEqual_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(GreaterEqualTest, SUB_AI_NNRt_Func_North_GreaterEqual_Model_AddOperation_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterEqualModel1 greaterEqualModel; + OHNNGraphArgs graphArgs = greaterEqualModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GreaterEqual_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(GreaterEqualTest, SUB_AI_NNRt_Func_North_GreaterEqual_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterEqualModel1 greaterEqualModel; + OHNNGraphArgs graphArgs = greaterEqualModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GreaterEqual_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(GreaterEqualTest, SUB_AI_NNRt_Func_North_GreaterEqual_Model_AddOperation_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = 
OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterEqualModel1 greaterEqualModel; + OHNNGraphArgs graphArgs = greaterEqualModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GreaterEqual_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(GreaterEqualTest, SUB_AI_NNRt_Func_North_GreaterEqual_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterEqualModel1 greaterEqualModel; + OHNNGraphArgs graphArgs = greaterEqualModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GreaterEqual_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(GreaterEqualTest, SUB_AI_NNRt_Func_North_GreaterEqual_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterEqualModel1 greaterEqualModel; + OHNNGraphArgs graphArgs = greaterEqualModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GreaterEqual_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(GreaterEqualTest, SUB_AI_NNRt_Func_North_GreaterEqual_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterEqualModel1 greaterEqualModel; + OHNNGraphArgs graphArgs = greaterEqualModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER,
OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GreaterEqual_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(GreaterEqualTest, SUB_AI_NNRt_Func_North_GreaterEqual_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterEqualModel1 greaterEqualModel; + OHNNGraphArgs graphArgs = greaterEqualModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_GreaterEqual_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(GreaterEqualTest, SUB_AI_NNRt_Func_North_GreaterEqual_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterEqualModel1 greaterEqualModel; + OHNNGraphArgs graphArgs = greaterEqualModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/greater_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/greater_test.cpp new file mode 100644 index 0000000..02f3e46 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/greater_test.cpp @@ -0,0 +1,850 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <vector> +#include <algorithm> +#include <gtest/gtest.h> +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class GreaterTest : public testing::Test {}; + +struct GreaterModel1 { + const std::vector<int32_t> tensor_shape = {3}; + float input0Value[3] = {1, 2, 3}; + float input1Value[3] = {4, 5, 6}; + bool outputValue[3] = {0}; + + OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input0Value, 3*sizeof(float)}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input1Value, 3*sizeof(float)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_GREATER, + .operands = {input0, input1, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct GreaterModel2 { + const std::vector<int32_t> tensor_shape = {1}; + float input0Value[1] = {1}; + float input1Value[1] = {1}; + bool outputValue[1] = {0}; + + OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input0Value, sizeof(float)}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input1Value, sizeof(float)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_GREATER, + .operands = {input0, input1, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct GreaterModel3 { + const std::vector<int32_t> tensor_shape = {3}; + const std::vector<int32_t> input1_shape = {4}; + float input0Value[3] = {1, 2, 3}; + float input1Value[4] = {4, 5, 6, 7}; + bool outputValue[3] = {0}; + + OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input0Value, 3*sizeof(float)}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, input1_shape, input1Value, 4*sizeof(float)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_GREATER, + .operands = {input0, input1, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct GreaterModel4 { + const std::vector<int32_t> tensor_shape = {}; + float* input0Value = {}; + float* input1Value = {}; + bool* outputValue = {}; + + OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input0Value, 0*sizeof(float)}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input1Value, 0*sizeof(float)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, 0*sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_GREATER, + .operands = {input0, input1, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Greater_Build_01 + * @tc.desc: GreaterModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(GreaterTest, SUB_AI_NNRt_Func_North_Greater_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterModel1 greaterModel; + OHNNGraphArgs graphArgs = greaterModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation,
compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Greater_Build_02 + * @tc.desc: GreaterModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(GreaterTest, SUB_AI_NNRt_Func_North_Greater_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterModel2 greaterModel; + OHNNGraphArgs graphArgs = greaterModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Greater_Build_03 + * @tc.desc: GreaterModel3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(GreaterTest, SUB_AI_NNRt_Func_North_Greater_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterModel3 greaterModel; + OHNNGraphArgs graphArgs = greaterModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Greater_Build_04 + * @tc.desc: GreaterModel4模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(GreaterTest, SUB_AI_NNRt_Func_North_Greater_Build_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterModel4 greaterModel; + OHNNGraphArgs graphArgs = greaterModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Greater_Build_05 + * @tc.desc: GreaterModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(GreaterTest, SUB_AI_NNRt_Func_North_Greater_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterModel2 greaterModel; + OHNNGraphArgs graphArgs = greaterModel.graphArgs; + graphArgs.operands = {greaterModel.input0, greaterModel.input1, greaterModel.input1, greaterModel.output}; + graphArgs.inputIndices = {0, 1, 2}; + graphArgs.outputIndices = {3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : 
SUB_AI_NNRt_Func_North_Greater_Build_06 + * @tc.desc: GreaterModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(GreaterTest, SUB_AI_NNRt_Func_North_Greater_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterModel2 greaterModel; + OHNNGraphArgs graphArgs = greaterModel.graphArgs; + graphArgs.operands = {greaterModel.input0, greaterModel.input1, greaterModel.output, greaterModel.output}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2, 3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Greater_Build_07 + * @tc.desc: GreaterModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(GreaterTest, SUB_AI_NNRt_Func_North_Greater_Build_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterModel2 greaterModel; + OHNNGraphArgs graphArgs = greaterModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {greaterModel.input0, greaterModel.input1, greaterModel.output, activation}; + graphArgs.paramIndices = {3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Greater_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(GreaterTest, SUB_AI_NNRt_Func_North_Greater_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Greater_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(GreaterTest, SUB_AI_NNRt_Func_North_Greater_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterModel1 greaterModel; + OHNNGraphArgs graphArgs = greaterModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Greater_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(GreaterTest, SUB_AI_NNRt_Func_North_Greater_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterModel1 greaterModel; + OHNNGraphArgs graphArgs = greaterModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Greater_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(GreaterTest, SUB_AI_NNRt_Func_North_Greater_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterModel1 greaterModel; + OHNNGraphArgs graphArgs = greaterModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = 
createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Greater_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buffer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(GreaterTest, SUB_AI_NNRt_Func_North_Greater_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterModel1 greaterModel; + OHNNGraphArgs graphArgs = greaterModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector<NN_TensorDesc*> tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Greater_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(GreaterTest, SUB_AI_NNRt_Func_North_Greater_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterModel1 greaterModel; + OHNNGraphArgs graphArgs = greaterModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector<NN_TensorDesc*> tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Greater_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(GreaterTest, SUB_AI_NNRt_Func_North_Greater_Model_SpecifyInputsAndOutputs_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterModel1 greaterModel; + OHNNGraphArgs graphArgs =
greaterModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Greater_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(GreaterTest, SUB_AI_NNRt_Func_North_Greater_Model_SpecifyInputsAndOutputs_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterModel1 greaterModel; + OHNNGraphArgs graphArgs = greaterModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Greater_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(GreaterTest, SUB_AI_NNRt_Func_North_Greater_Model_SpecifyInputsAndOutputs_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterModel1 greaterModel; + OHNNGraphArgs graphArgs = greaterModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Greater_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(GreaterTest, SUB_AI_NNRt_Func_North_Greater_Model_SpecifyInputsAndOutputs_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterModel1 greaterModel; + OHNNGraphArgs graphArgs = greaterModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Greater_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(GreaterTest, SUB_AI_NNRt_Func_North_Greater_Model_SpecifyInputsAndOutputs_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterModel1 greaterModel; + OHNNGraphArgs graphArgs = greaterModel.graphArgs; + graphArgs.specifyIO = false; + 
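// specifyIO and build stay false here so BuildSingleOpGraph stops before the specify-IO and finish steps, letting the test call OH_NNModel_SpecifyInputsAndOutputs below with a nullptr outputIndices argument. +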
graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Greater_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(GreaterTest, SUB_AI_NNRt_Func_North_Greater_Model_SpecifyInputsAndOutputs_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterModel1 greaterModel; + OHNNGraphArgs graphArgs = greaterModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Greater_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(GreaterTest, SUB_AI_NNRt_Func_North_Greater_Model_SpecifyInputsAndOutputs_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterModel1 greaterModel; + OHNNGraphArgs graphArgs = greaterModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Greater_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(GreaterTest, SUB_AI_NNRt_Func_North_Greater_Model_SpecifyInputsAndOutputs_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterModel1 greaterModel; + OHNNGraphArgs graphArgs = greaterModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Greater_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(GreaterTest, SUB_AI_NNRt_Func_North_Greater_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterModel1 greaterModel; + OHNNGraphArgs graphArgs = greaterModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + 
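// addOperation, specifyIO and build are all disabled so only the tensors are registered, and the test can then invoke OH_NNModel_AddOperation itself with an invalid (nullptr) paramIndices argument. +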
EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Greater_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(GreaterTest, SUB_AI_NNRt_Func_North_Greater_Model_AddOperation_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterModel1 greaterModel; + OHNNGraphArgs graphArgs = greaterModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Greater_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(GreaterTest, SUB_AI_NNRt_Func_North_Greater_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterModel1 greaterModel; + OHNNGraphArgs graphArgs = greaterModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Greater_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(GreaterTest, SUB_AI_NNRt_Func_North_Greater_Model_AddOperation_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterModel1 greaterModel; + OHNNGraphArgs graphArgs = greaterModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Greater_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(GreaterTest, SUB_AI_NNRt_Func_North_Greater_Model_AddOperation_05, Function |
MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterModel1 greaterModel; + OHNNGraphArgs graphArgs = greaterModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Greater_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(GreaterTest, SUB_AI_NNRt_Func_North_Greater_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterModel1 greaterModel; + OHNNGraphArgs graphArgs = greaterModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Greater_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(GreaterTest, SUB_AI_NNRt_Func_North_Greater_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterModel1 greaterModel; + OHNNGraphArgs graphArgs = greaterModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Greater_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(GreaterTest, SUB_AI_NNRt_Func_North_Greater_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterModel1 greaterModel; + OHNNGraphArgs graphArgs = greaterModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, 
&inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Greater_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(GreaterTest, SUB_AI_NNRt_Func_North_Greater_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + GreaterModel1 greaterModel; + OHNNGraphArgs graphArgs = greaterModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/hard_sigmoid_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/hard_sigmoid_test.cpp new file mode 100644 index 0000000..ce5f297 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/hard_sigmoid_test.cpp @@ -0,0 +1,764 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <vector> +#include <algorithm> +#include <gtest/gtest.h> +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class HardSigmoidTest : public testing::Test {}; + +struct HardSigmoidModel1 { + const std::vector<int32_t> tensor_shape = {1, 4}; + float inputValue[1][4] = {{0, 0, 0, 0}}; + float outputValue[1][4] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 4*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 4*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_HARD_SIGMOID, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + + +struct HardSigmoidModel2 { + const std::vector<int32_t> tensor_shape = {1, 4}; + float inputValue[1][4] = {{1, 1, 1, 1}}; + float outputValue[1][4] = {0}; + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 4*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 4*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_HARD_SIGMOID, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_HardSigmoid_Build_01 + * @tc.desc: HardSigmoidModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(HardSigmoidTest, SUB_AI_NNRt_Func_North_HardSigmoid_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + HardSigmoidModel1 hardSigmoidModel; + OHNNGraphArgs graphArgs = hardSigmoidModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_HardSigmoid_Build_02 + * @tc.desc: HardSigmoidModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(HardSigmoidTest, SUB_AI_NNRt_Func_North_HardSigmoid_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + HardSigmoidModel2 hardSigmoidModel; + OHNNGraphArgs graphArgs = hardSigmoidModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_HardSigmoid_Build_03 + * @tc.desc: HardSigmoidModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(HardSigmoidTest, SUB_AI_NNRt_Func_North_HardSigmoid_Build_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + HardSigmoidModel1 hardSigmoidModel; + OHNNGraphArgs graphArgs = hardSigmoidModel.graphArgs; + graphArgs.operands =
{hardSigmoidModel.input, hardSigmoidModel.input, hardSigmoidModel.output}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_HardSigmoid_Build_04 + * @tc.desc: HardSigmoidModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(HardSigmoidTest, SUB_AI_NNRt_Func_North_HardSigmoid_Build_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + HardSigmoidModel1 hardSigmoidModel; + OHNNGraphArgs graphArgs = hardSigmoidModel.graphArgs; + graphArgs.operands = {hardSigmoidModel.input, hardSigmoidModel.output, hardSigmoidModel.output}; + graphArgs.inputIndices = {0}; + graphArgs.outputIndices = {1, 2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_HardSigmoid_Build_05 + * @tc.desc: HardSigmoidModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(HardSigmoidTest, SUB_AI_NNRt_Func_North_HardSigmoid_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + HardSigmoidModel1 hardSigmoidModel; + OHNNGraphArgs graphArgs = hardSigmoidModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {hardSigmoidModel.input, hardSigmoidModel.output, activation}; + graphArgs.paramIndices = {2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_HardSigmoid_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(HardSigmoidTest, SUB_AI_NNRt_Func_North_HardSigmoid_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_HardSigmoid_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(HardSigmoidTest, SUB_AI_NNRt_Func_North_HardSigmoid_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + HardSigmoidModel1 hardSigmoidModel; + OHNNGraphArgs graphArgs = hardSigmoidModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_HardSigmoid_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(HardSigmoidTest, SUB_AI_NNRt_Func_North_HardSigmoid_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + HardSigmoidModel1 hardSigmoidModel; + OHNNGraphArgs graphArgs = hardSigmoidModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_HardSigmoid_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(HardSigmoidTest, SUB_AI_NNRt_Func_North_HardSigmoid_Model_SetOperandValue_01, Function | MediumTest | 
Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + HardSigmoidModel1 hardSigmoidModel; + OHNNGraphArgs graphArgs = hardSigmoidModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_HardSigmoid_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(HardSigmoidTest, SUB_AI_NNRt_Func_North_HardSigmoid_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + HardSigmoidModel1 hardSigmoidModel; + OHNNGraphArgs graphArgs = hardSigmoidModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_HardSigmoid_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(HardSigmoidTest, SUB_AI_NNRt_Func_North_HardSigmoid_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + HardSigmoidModel1 hardSigmoidModel; + OHNNGraphArgs graphArgs = hardSigmoidModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, 
nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_HardSigmoid_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(HardSigmoidTest, SUB_AI_NNRt_Func_North_HardSigmoid_Model_SpecifyInputsAndOutputs_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + HardSigmoidModel1 hardSigmoidModel; + OHNNGraphArgs graphArgs = hardSigmoidModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_HardSigmoid_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(HardSigmoidTest, SUB_AI_NNRt_Func_North_HardSigmoid_Model_SpecifyInputsAndOutputs_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + HardSigmoidModel1 hardSigmoidModel; + OHNNGraphArgs graphArgs = hardSigmoidModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_HardSigmoid_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(HardSigmoidTest, SUB_AI_NNRt_Func_North_HardSigmoid_Model_SpecifyInputsAndOutputs_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + HardSigmoidModel1 hardSigmoidModel; + OHNNGraphArgs graphArgs = hardSigmoidModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_HardSigmoid_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(HardSigmoidTest, SUB_AI_NNRt_Func_North_HardSigmoid_Model_SpecifyInputsAndOutputs_04, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + HardSigmoidModel1 hardSigmoidModel; + OHNNGraphArgs graphArgs = hardSigmoidModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, 
&inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_HardSigmoid_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(HardSigmoidTest, SUB_AI_NNRt_Func_North_HardSigmoid_Model_SpecifyInputsAndOutputs_05, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + HardSigmoidModel1 hardSigmoidModel; + OHNNGraphArgs graphArgs = hardSigmoidModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_HardSigmoid_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(HardSigmoidTest, SUB_AI_NNRt_Func_North_HardSigmoid_Model_SpecifyInputsAndOutputs_06, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + HardSigmoidModel1 hardSigmoidModel; + OHNNGraphArgs graphArgs = hardSigmoidModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_HardSigmoid_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(HardSigmoidTest, SUB_AI_NNRt_Func_North_HardSigmoid_Model_SpecifyInputsAndOutputs_07, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + HardSigmoidModel1 hardSigmoidModel; + OHNNGraphArgs graphArgs = hardSigmoidModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_HardSigmoid_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(HardSigmoidTest, SUB_AI_NNRt_Func_North_HardSigmoid_Model_SpecifyInputsAndOutputs_08, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + HardSigmoidModel1 hardSigmoidModel; + OHNNGraphArgs graphArgs = hardSigmoidModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size 
= 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_HardSigmoid_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(HardSigmoidTest, SUB_AI_NNRt_Func_North_HardSigmoid_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + HardSigmoidModel1 hardSigmoidModel; + OHNNGraphArgs graphArgs = hardSigmoidModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_HardSigmoid_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(HardSigmoidTest, SUB_AI_NNRt_Func_North_HardSigmoid_Model_AddOperation_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + HardSigmoidModel1 hardSigmoidModel; + OHNNGraphArgs graphArgs = hardSigmoidModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_HardSigmoid_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(HardSigmoidTest, SUB_AI_NNRt_Func_North_HardSigmoid_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + HardSigmoidModel1 hardSigmoidModel; + OHNNGraphArgs graphArgs = hardSigmoidModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_HardSigmoid_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(HardSigmoidTest, SUB_AI_NNRt_Func_North_HardSigmoid_Model_AddOperation_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + HardSigmoidModel1 hardSigmoidModel; + OHNNGraphArgs graphArgs = hardSigmoidModel.graphArgs; + graphArgs.addOperation = 
false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_HardSigmoid_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(HardSigmoidTest, SUB_AI_NNRt_Func_North_HardSigmoid_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + HardSigmoidModel1 hardSigmoidModel; + OHNNGraphArgs graphArgs = hardSigmoidModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_HardSigmoid_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(HardSigmoidTest, SUB_AI_NNRt_Func_North_HardSigmoid_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + HardSigmoidModel1 hardSigmoidModel; + OHNNGraphArgs graphArgs = hardSigmoidModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_HardSigmoid_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(HardSigmoidTest, SUB_AI_NNRt_Func_North_HardSigmoid_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + HardSigmoidModel1 hardSigmoidModel; + OHNNGraphArgs graphArgs = hardSigmoidModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_HardSigmoid_Model_AddOperation_08 + 
* @tc.desc: Add an operation with inputIndices size set to 0
+ * @tc.type: FUNC
+ */
+HWTEST_F(HardSigmoidTest, SUB_AI_NNRt_Func_North_HardSigmoid_Model_AddOperation_08, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    HardSigmoidModel1 hardSigmoidModel;
+    OHNNGraphArgs graphArgs = hardSigmoidModel.graphArgs;
+    graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto inputIndices = TransformUInt32Array(graphArgs.inputIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    inputIndices.size = 0;
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        &paramIndices, &inputIndices, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_HardSigmoid_Model_AddOperation_09
+ * @tc.desc: Add an operation with outputIndices set to nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(HardSigmoidTest, SUB_AI_NNRt_Func_North_HardSigmoid_Model_AddOperation_09, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    HardSigmoidModel1 hardSigmoidModel;
+    OHNNGraphArgs graphArgs = hardSigmoidModel.graphArgs;
+    graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto inputIndices = TransformUInt32Array(graphArgs.inputIndices);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        &paramIndices, &inputIndices, nullptr));
+
+    Free(model, nullptr, nullptr);
+}
\ No newline at end of file
diff --git a/test/nnrt_xts_acts/nncore/opstest/src/instance_norm_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/instance_norm_test.cpp
new file mode 100644
index 0000000..5a44695
--- /dev/null
+++ b/test/nnrt_xts_acts/nncore/opstest/src/instance_norm_test.cpp
@@ -0,0 +1,791 @@
+/*
+ * Copyright (c) 2024 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vector>
+#include <algorithm>
+#include <gtest/gtest.h>
+#include "nncore_utils.h"
+
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Test;
+class InstanceNormTest : public testing::Test {};
+
+struct InstanceNormModel1 {
+    const std::vector<int32_t> input_shape = {2, 3, 2, 2};
+    const std::vector<int32_t> channel_shape = {3};
+    const std::vector<int32_t> output_shape = {4};
+    float epsilonValue[1] = {1.0e-6f};
+    float inputValue[2][3][2][2] = {{{{1, 1}, {1, 1}}, {{1, 1}, {1, 1}}, {{1, 1}, {1, 1}}},
+                                    {{{1, 1}, {1, 1}}, {{1, 1}, {1, 1}}, {{1, 1}, {1, 1}}}};
+    float scaleValue[3] = {1};
+    float biasValue[3] = {1};
+    float outputValue[4] = {0};
+
+    OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 24*sizeof(float)};
+    OHNNOperandTest scale = {OH_NN_FLOAT32, OH_NN_TENSOR, channel_shape, scaleValue, 3*sizeof(float)};
+    OHNNOperandTest bias = {OH_NN_FLOAT32, OH_NN_TENSOR, channel_shape, biasValue, 3*sizeof(float)};
+
+    OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 4*sizeof(float)};
+    OHNNOperandTest epsilon = {OH_NN_FLOAT32, OH_NN_INSTANCE_NORM_EPSILON, {1}, &epsilonValue, sizeof(float)};
+    OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_INSTANCE_NORM,
+                               .operands = {input, scale, bias, output, epsilon},
+                               .paramIndices = {4},
+                               .inputIndices = {0, 1, 2},
+                               .outputIndices = {3}};
+};
+
+struct InstanceNormModel2 {
+    const std::vector<int32_t> input_shape = {};
+    const std::vector<int32_t> channel_shape = {};
+    const std::vector<int32_t> output_shape = {};
+    float epsilonValue[1] = {1.0e-6f};
+    float* inputValue = {};
+    float* scaleValue = {};
+    float* biasValue = {};
+    float* outputValue = {};
+
+    OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 0*sizeof(float)};
+    OHNNOperandTest scale = {OH_NN_FLOAT32, OH_NN_TENSOR, channel_shape, scaleValue, 0*sizeof(float)};
+    OHNNOperandTest bias = {OH_NN_FLOAT32, OH_NN_TENSOR, channel_shape, biasValue, 0*sizeof(float)};
+
+    OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 0*sizeof(float)};
+    OHNNOperandTest epsilon = {OH_NN_FLOAT32, OH_NN_INSTANCE_NORM_EPSILON, {1}, &epsilonValue, sizeof(float)};
+    OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_INSTANCE_NORM,
+                               .operands = {input, scale, bias, output, epsilon},
+                               .paramIndices = {4},
+                               .inputIndices = {0, 1, 2},
+                               .outputIndices = {3}};
+};
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_InstanceNorm_Build_01
+ * @tc.desc: Build test with InstanceNormModel1
+ * @tc.type: FUNC
+ */
+HWTEST_F(InstanceNormTest, SUB_AI_NNRt_Func_North_InstanceNorm_Build_01, Function | MediumTest | Level1)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    InstanceNormModel1 instanceNormModel;
+    OHNNGraphArgs graphArgs = instanceNormModel.graphArgs;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    EXPECT_NE(nullptr, compilation);
+
+    OHNNCompileParam compileParam{
+        .performanceMode = OH_NN_PERFORMANCE_HIGH,
+        .priority = OH_NN_PRIORITY_HIGH,
+    };
+    EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam));
+
+    OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation);
+    EXPECT_NE(nullptr, executor);
+
+    Free(model, compilation, executor);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_InstanceNorm_Build_02
+ * @tc.desc: Build test with InstanceNormModel2
+ * @tc.type: FUNC
+ */
+HWTEST_F(InstanceNormTest, SUB_AI_NNRt_Func_North_InstanceNorm_Build_02, Function | MediumTest | Level1)
+{
+    OH_NNModel *model =
OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + InstanceNormModel2 instanceNormModel; + OHNNGraphArgs graphArgs = instanceNormModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_InstanceNorm_Build_03 + * @tc.desc: InstanceNormModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormTest, SUB_AI_NNRt_Func_North_InstanceNorm_Build_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + InstanceNormModel1 instanceNormModel; + OHNNGraphArgs graphArgs = instanceNormModel.graphArgs; + graphArgs.operands = {instanceNormModel.input, instanceNormModel.input, instanceNormModel.scale, + instanceNormModel.bias, instanceNormModel.output, instanceNormModel.epsilon}; + graphArgs.inputIndices = {0, 1, 2, 3}; + graphArgs.outputIndices = {4}; + graphArgs.paramIndices = {5}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_InstanceNorm_Build_04 + * @tc.desc: InstanceNormModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormTest, SUB_AI_NNRt_Func_North_InstanceNorm_Build_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + InstanceNormModel1 instanceNormModel; + OHNNGraphArgs graphArgs = instanceNormModel.graphArgs; + graphArgs.operands = {instanceNormModel.input, instanceNormModel.scale, instanceNormModel.bias, + instanceNormModel.output, instanceNormModel.output, instanceNormModel.epsilon}; + graphArgs.inputIndices = {0, 1, 2}; + graphArgs.outputIndices = {3, 4}; + graphArgs.paramIndices = {5}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_InstanceNorm_Build_05 + * @tc.desc: InstanceNormModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormTest, SUB_AI_NNRt_Func_North_InstanceNorm_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + InstanceNormModel1 instanceNormModel; + OHNNGraphArgs graphArgs = instanceNormModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {instanceNormModel.input, instanceNormModel.scale, instanceNormModel.bias, + instanceNormModel.output, instanceNormModel.epsilon, activation}; + graphArgs.paramIndices = {4, 5}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_InstanceNorm_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormTest, SUB_AI_NNRt_Func_North_InstanceNorm_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs 
graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_InstanceNorm_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormTest, SUB_AI_NNRt_Func_North_InstanceNorm_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + InstanceNormModel1 instanceNormModel; + OHNNGraphArgs graphArgs = instanceNormModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_InstanceNorm_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormTest, SUB_AI_NNRt_Func_North_InstanceNorm_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + InstanceNormModel1 instanceNormModel; + OHNNGraphArgs graphArgs = instanceNormModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_InstanceNorm_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormTest, SUB_AI_NNRt_Func_North_InstanceNorm_Model_SetOperandValue_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + InstanceNormModel1 instanceNormModel; + OHNNGraphArgs graphArgs = instanceNormModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_InstanceNorm_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormTest, SUB_AI_NNRt_Func_North_InstanceNorm_Model_SetOperandValue_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + InstanceNormModel1 instanceNormModel; + OHNNGraphArgs graphArgs = instanceNormModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), 
graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_InstanceNorm_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormTest, SUB_AI_NNRt_Func_North_InstanceNorm_Model_SetOperandValue_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + InstanceNormModel1 instanceNormModel; + OHNNGraphArgs graphArgs = instanceNormModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_InstanceNorm_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormTest, SUB_AI_NNRt_Func_North_InstanceNorm_Model_SpecifyInputsAndOutputs_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + InstanceNormModel1 instanceNormModel; + OHNNGraphArgs graphArgs = instanceNormModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_InstanceNorm_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormTest, SUB_AI_NNRt_Func_North_InstanceNorm_Model_SpecifyInputsAndOutputs_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + InstanceNormModel1 instanceNormModel; + OHNNGraphArgs graphArgs = instanceNormModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_InstanceNorm_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormTest, 
SUB_AI_NNRt_Func_North_InstanceNorm_Model_SpecifyInputsAndOutputs_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + InstanceNormModel1 instanceNormModel; + OHNNGraphArgs graphArgs = instanceNormModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_InstanceNorm_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormTest, SUB_AI_NNRt_Func_North_InstanceNorm_Model_SpecifyInputsAndOutputs_04, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + InstanceNormModel1 instanceNormModel; + OHNNGraphArgs graphArgs = instanceNormModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_InstanceNorm_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormTest, SUB_AI_NNRt_Func_North_InstanceNorm_Model_SpecifyInputsAndOutputs_05, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + InstanceNormModel1 instanceNormModel; + OHNNGraphArgs graphArgs = instanceNormModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_InstanceNorm_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormTest, SUB_AI_NNRt_Func_North_InstanceNorm_Model_SpecifyInputsAndOutputs_06, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + InstanceNormModel1 instanceNormModel; + OHNNGraphArgs graphArgs = instanceNormModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_InstanceNorm_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 
设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormTest, SUB_AI_NNRt_Func_North_InstanceNorm_Model_SpecifyInputsAndOutputs_07, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + InstanceNormModel1 instanceNormModel; + OHNNGraphArgs graphArgs = instanceNormModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_InstanceNorm_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormTest, SUB_AI_NNRt_Func_North_InstanceNorm_Model_SpecifyInputsAndOutputs_08, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + InstanceNormModel1 instanceNormModel; + OHNNGraphArgs graphArgs = instanceNormModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_InstanceNorm_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormTest, SUB_AI_NNRt_Func_North_InstanceNorm_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + InstanceNormModel1 instanceNormModel; + OHNNGraphArgs graphArgs = instanceNormModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_InstanceNorm_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormTest, SUB_AI_NNRt_Func_North_InstanceNorm_Model_AddOperation_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + InstanceNormModel1 instanceNormModel; + OHNNGraphArgs graphArgs = instanceNormModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, 
graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_InstanceNorm_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormTest, SUB_AI_NNRt_Func_North_InstanceNorm_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + InstanceNormModel1 instanceNormModel; + OHNNGraphArgs graphArgs = instanceNormModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_InstanceNorm_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormTest, SUB_AI_NNRt_Func_North_InstanceNorm_Model_AddOperation_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + InstanceNormModel1 instanceNormModel; + OHNNGraphArgs graphArgs = instanceNormModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_InstanceNorm_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormTest, SUB_AI_NNRt_Func_North_InstanceNorm_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + InstanceNormModel1 instanceNormModel; + OHNNGraphArgs graphArgs = instanceNormModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_InstanceNorm_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormTest, SUB_AI_NNRt_Func_North_InstanceNorm_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + InstanceNormModel1 instanceNormModel; + OHNNGraphArgs graphArgs = instanceNormModel.graphArgs; + graphArgs.addOperation = false; + 
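+    // With addOperation, specifyIO and build disabled, BuildSingleOpGraph only adds tensors and their data,
+    // so OH_NNModel_AddOperation can be exercised directly below.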
graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_InstanceNorm_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormTest, SUB_AI_NNRt_Func_North_InstanceNorm_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + InstanceNormModel1 instanceNormModel; + OHNNGraphArgs graphArgs = instanceNormModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_InstanceNorm_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormTest, SUB_AI_NNRt_Func_North_InstanceNorm_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + InstanceNormModel1 instanceNormModel; + OHNNGraphArgs graphArgs = instanceNormModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_InstanceNorm_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormTest, SUB_AI_NNRt_Func_North_InstanceNorm_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + InstanceNormModel1 instanceNormModel; + OHNNGraphArgs graphArgs = instanceNormModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git 
a/test/nnrt_xts_acts/nncore/opstest/src/l2_normalize_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/l2_normalize_test.cpp
new file mode 100644
index 0000000..e037e22
--- /dev/null
+++ b/test/nnrt_xts_acts/nncore/opstest/src/l2_normalize_test.cpp
@@ -0,0 +1,826 @@
+/*
+ * Copyright (c) 2024 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vector>
+#include <algorithm>
+#include <gtest/gtest.h>
+#include "nncore_utils.h"
+
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Test;
+class L2NormalizeTest : public testing::Test {};
+
+struct L2NormalizeModel1 {
+    const std::vector<int32_t> tensor_shape = {2, 3};
+    int64_t axisValue[1] = {1};
+    float epsilonValue[1] = {1e-10};
+
+    float inputValue[2][3] = {{-1, -2, -3}, {4, 5, 6}};
+    float outputValue[2][3] = {0};
+
+    OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 6*sizeof(float)};
+    OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 6*sizeof(float)};
+    OHNNOperandTest axis = {OH_NN_INT64, OH_NN_L2_NORMALIZE_AXIS, {1}, axisValue, sizeof(int64_t)};
+    OHNNOperandTest epsilon = {OH_NN_FLOAT32, OH_NN_L2_NORMALIZE_EPSILON, {1}, epsilonValue, sizeof(float)};
+    OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_L2_NORMALIZE,
+                               .operands = {input, output, axis, epsilon},
+                               .paramIndices = {2, 3},
+                               .inputIndices = {0},
+                               .outputIndices = {1}};
+};
+
+struct L2NormalizeModel2 {
+    const std::vector<int32_t> tensor_shape = {2, 3};
+    int64_t axisValue[1] = {0};
+    float epsilonValue[1] = {1e-10};
+
+    float inputValue[2][3] = {{-1, -2, -3}, {4, 5, 6}};
+    float outputValue[2][3] = {0};
+
+    OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 6*sizeof(float)};
+    OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 6*sizeof(float)};
+    OHNNOperandTest axis = {OH_NN_INT64, OH_NN_L2_NORMALIZE_AXIS, {1}, axisValue, sizeof(int64_t)};
+    OHNNOperandTest epsilon = {OH_NN_FLOAT32, OH_NN_L2_NORMALIZE_EPSILON, {1}, epsilonValue, sizeof(float)};
+    OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_L2_NORMALIZE,
+                               .operands = {input, output, axis, epsilon},
+                               .paramIndices = {2, 3},
+                               .inputIndices = {0},
+                               .outputIndices = {1}};
+};
+
+struct L2NormalizeModel3 {
+    const std::vector<int32_t> tensor_shape = {2, 3};
+    int64_t axisValue[1] = {2};
+    float epsilonValue[1] = {1e-10};
+
+    float inputValue[2][3] = {{-1, -2, -3}, {4, 5, 6}};
+    float outputValue[2][3] = {0};
+
+    OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 6*sizeof(float)};
+    OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 6*sizeof(float)};
+    OHNNOperandTest axis = {OH_NN_INT64, OH_NN_L2_NORMALIZE_AXIS, {1}, axisValue, sizeof(int64_t)};
+    OHNNOperandTest epsilon = {OH_NN_FLOAT32, OH_NN_L2_NORMALIZE_EPSILON, {1}, epsilonValue, sizeof(float)};
+    OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_L2_NORMALIZE,
+                               .operands = {input, output, axis, epsilon},
+                               .paramIndices = {2, 3},
+                               .inputIndices = {0},
+                               .outputIndices
= {1}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_L2Normalize_Build_01 + * @tc.desc: L2NormalizeModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeTest, SUB_AI_NNRt_Func_North_L2Normalize_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + L2NormalizeModel1 l2NormalizeModel; + OHNNGraphArgs graphArgs = l2NormalizeModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_L2Normalize_Build_02 + * @tc.desc: L2NormalizeModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeTest, SUB_AI_NNRt_Func_North_L2Normalize_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + L2NormalizeModel2 l2NormalizeModel; + OHNNGraphArgs graphArgs = l2NormalizeModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_L2Normalize_Build_03 + * @tc.desc: L2NormalizeModel3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeTest, SUB_AI_NNRt_Func_North_L2Normalize_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + L2NormalizeModel3 l2NormalizeModel; + OHNNGraphArgs graphArgs = l2NormalizeModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_L2Normalize_Build_04 + * @tc.desc: L2NormalizeModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeTest, SUB_AI_NNRt_Func_North_L2Normalize_Build_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + L2NormalizeModel1 l2NormalizeModel; + OHNNGraphArgs graphArgs = l2NormalizeModel.graphArgs; + graphArgs.operands = {l2NormalizeModel.input, l2NormalizeModel.input, l2NormalizeModel.output, + l2NormalizeModel.axis, l2NormalizeModel.epsilon}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2}; + graphArgs.paramIndices = {3, 4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); 
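+    // Only the model needs to be released here: the duplicated input tensor makes the build fail,
+    // so no compilation or executor was created in this negative case.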
+} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_L2Normalize_Build_05 + * @tc.desc: L2NormalizeModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeTest, SUB_AI_NNRt_Func_North_L2Normalize_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + L2NormalizeModel1 l2NormalizeModel; + OHNNGraphArgs graphArgs = l2NormalizeModel.graphArgs; + graphArgs.operands = {l2NormalizeModel.input, l2NormalizeModel.output, l2NormalizeModel.output, + l2NormalizeModel.axis, l2NormalizeModel.epsilon}; + graphArgs.inputIndices = {0}; + graphArgs.outputIndices = {1, 2}; + graphArgs.paramIndices = {3, 4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_L2Normalize_Build_06 + * @tc.desc: L2NormalizeModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeTest, SUB_AI_NNRt_Func_North_L2Normalize_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + L2NormalizeModel1 l2NormalizeModel; + OHNNGraphArgs graphArgs = l2NormalizeModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {l2NormalizeModel.input, l2NormalizeModel.output, l2NormalizeModel.axis, + l2NormalizeModel.epsilon, activation}; + graphArgs.paramIndices = {2, 3, 4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_L2Normalize_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeTest, SUB_AI_NNRt_Func_North_L2Normalize_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_L2Normalize_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeTest, SUB_AI_NNRt_Func_North_L2Normalize_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + L2NormalizeModel1 l2NormalizeModel; + OHNNGraphArgs graphArgs = l2NormalizeModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_L2Normalize_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeTest, SUB_AI_NNRt_Func_North_L2Normalize_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + L2NormalizeModel1 l2NormalizeModel; + OHNNGraphArgs graphArgs = l2NormalizeModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_L2Normalize_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeTest, SUB_AI_NNRt_Func_North_L2Normalize_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + L2NormalizeModel1 
l2NormalizeModel; + OHNNGraphArgs graphArgs = l2NormalizeModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_L2Normalize_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeTest, SUB_AI_NNRt_Func_North_L2Normalize_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + L2NormalizeModel1 l2NormalizeModel; + OHNNGraphArgs graphArgs = l2NormalizeModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_L2Normalize_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeTest, SUB_AI_NNRt_Func_North_L2Normalize_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + L2NormalizeModel1 l2NormalizeModel; + OHNNGraphArgs graphArgs = l2NormalizeModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_L2Normalize_Model_SpecifyInputsAndOutputs_01 + * 
@tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeTest, SUB_AI_NNRt_Func_North_L2Normalize_Model_SpecifyInputsAndOutputs_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + L2NormalizeModel1 l2NormalizeModel; + OHNNGraphArgs graphArgs = l2NormalizeModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_L2Normalize_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeTest, SUB_AI_NNRt_Func_North_L2Normalize_Model_SpecifyInputsAndOutputs_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + L2NormalizeModel1 l2NormalizeModel; + OHNNGraphArgs graphArgs = l2NormalizeModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_L2Normalize_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeTest, SUB_AI_NNRt_Func_North_L2Normalize_Model_SpecifyInputsAndOutputs_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + L2NormalizeModel1 l2NormalizeModel; + OHNNGraphArgs graphArgs = l2NormalizeModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_L2Normalize_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeTest, SUB_AI_NNRt_Func_North_L2Normalize_Model_SpecifyInputsAndOutputs_04, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + L2NormalizeModel1 l2NormalizeModel; + OHNNGraphArgs graphArgs = l2NormalizeModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : 
SUB_AI_NNRt_Func_North_L2Normalize_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeTest, SUB_AI_NNRt_Func_North_L2Normalize_Model_SpecifyInputsAndOutputs_05, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + L2NormalizeModel1 l2NormalizeModel; + OHNNGraphArgs graphArgs = l2NormalizeModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_L2Normalize_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeTest, SUB_AI_NNRt_Func_North_L2Normalize_Model_SpecifyInputsAndOutputs_06, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + L2NormalizeModel1 l2NormalizeModel; + OHNNGraphArgs graphArgs = l2NormalizeModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_L2Normalize_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeTest, SUB_AI_NNRt_Func_North_L2Normalize_Model_SpecifyInputsAndOutputs_07, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + L2NormalizeModel1 l2NormalizeModel; + OHNNGraphArgs graphArgs = l2NormalizeModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_L2Normalize_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeTest, SUB_AI_NNRt_Func_North_L2Normalize_Model_SpecifyInputsAndOutputs_08, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + L2NormalizeModel1 l2NormalizeModel; + OHNNGraphArgs graphArgs = l2NormalizeModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + 
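+    // Even though outputIndices.data still points at valid memory, a size of 0 is rejected by
+    // OH_NNModel_SpecifyInputsAndOutputs as an invalid index list.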
Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_L2Normalize_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeTest, SUB_AI_NNRt_Func_North_L2Normalize_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + L2NormalizeModel1 l2NormalizeModel; + OHNNGraphArgs graphArgs = l2NormalizeModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_L2Normalize_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeTest, SUB_AI_NNRt_Func_North_L2Normalize_Model_AddOperation_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + L2NormalizeModel1 l2NormalizeModel; + OHNNGraphArgs graphArgs = l2NormalizeModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_L2Normalize_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeTest, SUB_AI_NNRt_Func_North_L2Normalize_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + L2NormalizeModel1 l2NormalizeModel; + OHNNGraphArgs graphArgs = l2NormalizeModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_L2Normalize_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeTest, SUB_AI_NNRt_Func_North_L2Normalize_Model_AddOperation_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + L2NormalizeModel1 l2NormalizeModel; + OHNNGraphArgs graphArgs = l2NormalizeModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, 
BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_L2Normalize_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeTest, SUB_AI_NNRt_Func_North_L2Normalize_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + L2NormalizeModel1 l2NormalizeModel; + OHNNGraphArgs graphArgs = l2NormalizeModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_L2Normalize_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeTest, SUB_AI_NNRt_Func_North_L2Normalize_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + L2NormalizeModel1 l2NormalizeModel; + OHNNGraphArgs graphArgs = l2NormalizeModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_L2Normalize_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeTest, SUB_AI_NNRt_Func_North_L2Normalize_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + L2NormalizeModel1 l2NormalizeModel; + OHNNGraphArgs graphArgs = l2NormalizeModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_L2Normalize_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ 
+HWTEST_F(L2NormalizeTest, SUB_AI_NNRt_Func_North_L2Normalize_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + L2NormalizeModel1 l2NormalizeModel; + OHNNGraphArgs graphArgs = l2NormalizeModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_L2Normalize_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(L2NormalizeTest, SUB_AI_NNRt_Func_North_L2Normalize_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + L2NormalizeModel1 l2NormalizeModel; + OHNNGraphArgs graphArgs = l2NormalizeModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/leaky_relu_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/leaky_relu_test.cpp new file mode 100644 index 0000000..d8e3abe --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/leaky_relu_test.cpp @@ -0,0 +1,866 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class LeakyReluTest : public testing::Test {}; + +struct LeakyReluModel1 { + const std::vector input_shape = {3}; + const std::vector output_shape = {3}; + float negativeSlopeValue[1] = {0.01}; + float inputValue[3] = {0.5, 1.0, 1.5}; + float outputValue[3] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 3*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 3*sizeof(float)}; + OHNNOperandTest negativeSlope = { + OH_NN_FLOAT32, OH_NN_LEAKY_RELU_NEGATIVE_SLOPE, {1}, negativeSlopeValue, sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_LEAKY_RELU, + .operands = {input, output, negativeSlope}, + .paramIndices = {2}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct LeakyReluModel2 { + const std::vector input_shape = {3}; + const std::vector output_shape = {3}; + float negativeSlopeValue[1] = {0.01}; + float inputValue[3] = {-0.5, -1.0, -1.5}; + float outputValue[3] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 3*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 3*sizeof(float)}; + OHNNOperandTest negativeSlope = { + OH_NN_FLOAT32, OH_NN_LEAKY_RELU_NEGATIVE_SLOPE, {1}, negativeSlopeValue, sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_LEAKY_RELU, + .operands = {input, output, negativeSlope}, + .paramIndices = {2}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct LeakyReluModel3 { + const std::vector input_shape = {3}; + const std::vector output_shape = {3}; + float negativeSlopeValue[1] = {0}; + float inputValue[3] = {-0.5, -1.0, -1.5}; + float outputValue[3] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 3*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 3*sizeof(float)}; + OHNNOperandTest negativeSlope = { + OH_NN_FLOAT32, OH_NN_LEAKY_RELU_NEGATIVE_SLOPE, {1}, negativeSlopeValue, sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_LEAKY_RELU, + .operands = {input, output, negativeSlope}, + .paramIndices = {2}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct LeakyReluModel4 { + const std::vector input_shape = {}; + const std::vector output_shape = {}; + float* inputValue = {}; + float* outputValue = {}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 0*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 0*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_LEAKY_RELU, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LeakyRelu_Build_01 + * @tc.desc: LeakyReluModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(LeakyReluTest, SUB_AI_NNRt_Func_North_LeakyRelu_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LeakyReluModel1 leakyReluModel; + OHNNGraphArgs graphArgs = leakyReluModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + 
.performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LeakyRelu_Build_02 + * @tc.desc: LeakyReluModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(LeakyReluTest, SUB_AI_NNRt_Func_North_LeakyRelu_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LeakyReluModel2 leakyReluModel; + OHNNGraphArgs graphArgs = leakyReluModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LeakyRelu_Build_03 + * @tc.desc: LeakyReluModel3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(LeakyReluTest, SUB_AI_NNRt_Func_North_LeakyRelu_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LeakyReluModel3 leakyReluModel; + OHNNGraphArgs graphArgs = leakyReluModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LeakyRelu_Build_04 + * @tc.desc: LeakyReluModel4模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(LeakyReluTest, SUB_AI_NNRt_Func_North_LeakyRelu_Build_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LeakyReluModel4 leakyReluModel; + OHNNGraphArgs graphArgs = leakyReluModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LeakyRelu_Build_05 + * @tc.desc: LeakyReluModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(LeakyReluTest, SUB_AI_NNRt_Func_North_LeakyRelu_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LeakyReluModel1 leakyReluModel; + OHNNGraphArgs graphArgs = leakyReluModel.graphArgs; + graphArgs.operands = {leakyReluModel.input, leakyReluModel.input, + leakyReluModel.output, leakyReluModel.negativeSlope}; + 
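+    // The input operand is deliberately listed twice; LEAKY_RELU takes a single input, so the
+    // build below is expected to fail.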
graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2}; + graphArgs.paramIndices = {3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LeakyRelu_Build_06 + * @tc.desc: LeakyReluModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(LeakyReluTest, SUB_AI_NNRt_Func_North_LeakyRelu_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LeakyReluModel1 leakyReluModel; + OHNNGraphArgs graphArgs = leakyReluModel.graphArgs; + graphArgs.operands = {leakyReluModel.input, leakyReluModel.output, + leakyReluModel.output, leakyReluModel.negativeSlope}; + graphArgs.inputIndices = {0}; + graphArgs.outputIndices = {1, 2}; + graphArgs.paramIndices = {3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LeakyRelu_Build_07 + * @tc.desc: LeakyReluModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(LeakyReluTest, SUB_AI_NNRt_Func_North_LeakyRelu_Build_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LeakyReluModel1 leakyReluModel; + OHNNGraphArgs graphArgs = leakyReluModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {leakyReluModel.input, leakyReluModel.output, leakyReluModel.negativeSlope, activation}; + graphArgs.paramIndices = {2, 3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LeakyRelu_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(LeakyReluTest, SUB_AI_NNRt_Func_North_LeakyRelu_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LeakyRelu_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(LeakyReluTest, SUB_AI_NNRt_Func_North_LeakyRelu_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LeakyReluModel1 leakyReluModel; + OHNNGraphArgs graphArgs = leakyReluModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LeakyRelu_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(LeakyReluTest, SUB_AI_NNRt_Func_North_LeakyRelu_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LeakyReluModel1 leakyReluModel; + OHNNGraphArgs graphArgs = leakyReluModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LeakyRelu_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(LeakyReluTest, SUB_AI_NNRt_Func_North_LeakyRelu_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + 
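+    // Each operand's descriptor is added to the model below; for parameter operands,
+    // OH_NNModel_SetTensorData is then called with an out-of-range tensor index (1000 + i)
+    // and must return OH_NN_INVALID_PARAMETER.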
OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    LeakyReluModel1 leakyReluModel;
+    OHNNGraphArgs graphArgs = leakyReluModel.graphArgs;
+
+    NN_TensorDesc* tensorDesc = nullptr;
+    std::vector<NN_TensorDesc*> tensorDescVec;
+
+    for (size_t i = 0; i < graphArgs.operands.size(); i++) {
+        const OHNNOperandTest &operandTem = graphArgs.operands[i];
+        tensorDesc = createTensorDesc(operandTem.shape.data(),
+                                      (uint32_t) operandTem.shape.size(),
+                                      operandTem.dataType, operandTem.format);
+        tensorDescVec.emplace_back(tensorDesc);
+        EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc));
+        EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type));
+
+        if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) !=
+            graphArgs.paramIndices.end()) {
+            EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(
+                model, 1000+i, operandTem.data, operandTem.length));
+        }
+    }
+
+    FreeTensorDescVec(tensorDescVec);
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_LeakyRelu_Model_SetOperandValue_02
+ * @tc.desc: 设置操作数值,buffer为nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(LeakyReluTest, SUB_AI_NNRt_Func_North_LeakyRelu_Model_SetOperandValue_02, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    LeakyReluModel1 leakyReluModel;
+    OHNNGraphArgs graphArgs = leakyReluModel.graphArgs;
+
+    NN_TensorDesc* tensorDesc = nullptr;
+    std::vector<NN_TensorDesc*> tensorDescVec;
+
+    for (size_t i = 0; i < graphArgs.operands.size(); i++) {
+        const OHNNOperandTest &operandTem = graphArgs.operands[i];
+        tensorDesc = createTensorDesc(operandTem.shape.data(),
+                                      (uint32_t) operandTem.shape.size(),
+                                      operandTem.dataType, operandTem.format);
+        tensorDescVec.emplace_back(tensorDesc);
+        EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc));
+        EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type));
+
+        if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) !=
+            graphArgs.paramIndices.end()) {
+            EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length));
+        }
+    }
+
+    FreeTensorDescVec(tensorDescVec);
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_LeakyRelu_Model_SetOperandValue_03
+ * @tc.desc: 设置操作数值,length为0
+ * @tc.type: FUNC
+ */
+HWTEST_F(LeakyReluTest, SUB_AI_NNRt_Func_North_LeakyRelu_Model_SetOperandValue_03, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    LeakyReluModel1 leakyReluModel;
+    OHNNGraphArgs graphArgs = leakyReluModel.graphArgs;
+
+    NN_TensorDesc* tensorDesc = nullptr;
+    std::vector<NN_TensorDesc*> tensorDescVec;
+
+    for (size_t i = 0; i < graphArgs.operands.size(); i++) {
+        const OHNNOperandTest &operandTem = graphArgs.operands[i];
+        tensorDesc = createTensorDesc(operandTem.shape.data(),
+                                      (uint32_t) operandTem.shape.size(),
+                                      operandTem.dataType, operandTem.format);
+        tensorDescVec.emplace_back(tensorDesc);
+        EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc));
+        EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type));
+
+        if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) !=
+            graphArgs.paramIndices.end()) {
+            EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0));
+        }
+    }
+
+    FreeTensorDescVec(tensorDescVec);
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number :
SUB_AI_NNRt_Func_North_LeakyRelu_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LeakyReluTest, SUB_AI_NNRt_Func_North_LeakyRelu_Model_SpecifyInputsAndOutputs_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LeakyReluModel1 leakyReluModel; + OHNNGraphArgs graphArgs = leakyReluModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LeakyRelu_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LeakyReluTest, SUB_AI_NNRt_Func_North_LeakyRelu_Model_SpecifyInputsAndOutputs_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LeakyReluModel1 leakyReluModel; + OHNNGraphArgs graphArgs = leakyReluModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LeakyRelu_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LeakyReluTest, SUB_AI_NNRt_Func_North_LeakyRelu_Model_SpecifyInputsAndOutputs_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LeakyReluModel1 leakyReluModel; + OHNNGraphArgs graphArgs = leakyReluModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LeakyRelu_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LeakyReluTest, SUB_AI_NNRt_Func_North_LeakyRelu_Model_SpecifyInputsAndOutputs_04, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LeakyReluModel1 leakyReluModel; + OHNNGraphArgs graphArgs = leakyReluModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : 
SUB_AI_NNRt_Func_North_LeakyRelu_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LeakyReluTest, SUB_AI_NNRt_Func_North_LeakyRelu_Model_SpecifyInputsAndOutputs_05, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LeakyReluModel1 leakyReluModel; + OHNNGraphArgs graphArgs = leakyReluModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LeakyRelu_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LeakyReluTest, SUB_AI_NNRt_Func_North_LeakyRelu_Model_SpecifyInputsAndOutputs_06, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LeakyReluModel1 leakyReluModel; + OHNNGraphArgs graphArgs = leakyReluModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LeakyRelu_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LeakyReluTest, SUB_AI_NNRt_Func_North_LeakyRelu_Model_SpecifyInputsAndOutputs_07, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LeakyReluModel1 leakyReluModel; + OHNNGraphArgs graphArgs = leakyReluModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LeakyRelu_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LeakyReluTest, SUB_AI_NNRt_Func_North_LeakyRelu_Model_SpecifyInputsAndOutputs_08, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LeakyReluModel1 leakyReluModel; + OHNNGraphArgs graphArgs = leakyReluModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number 
: SUB_AI_NNRt_Func_North_LeakyRelu_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LeakyReluTest, SUB_AI_NNRt_Func_North_LeakyRelu_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LeakyReluModel1 leakyReluModel; + OHNNGraphArgs graphArgs = leakyReluModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LeakyRelu_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LeakyReluTest, SUB_AI_NNRt_Func_North_LeakyRelu_Model_AddOperation_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LeakyReluModel1 leakyReluModel; + OHNNGraphArgs graphArgs = leakyReluModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LeakyRelu_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LeakyReluTest, SUB_AI_NNRt_Func_North_LeakyRelu_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LeakyReluModel1 leakyReluModel; + OHNNGraphArgs graphArgs = leakyReluModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LeakyRelu_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LeakyReluTest, SUB_AI_NNRt_Func_North_LeakyRelu_Model_AddOperation_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LeakyReluModel1 leakyReluModel; + OHNNGraphArgs graphArgs = leakyReluModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto 
inputIndices = TransformUInt32Array(graphArgs.inputIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    paramIndices.size = 0;
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        &paramIndices, &inputIndices, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_LeakyRelu_Model_AddOperation_05
+ * @tc.desc: 添加算子,inputindices为nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(LeakyReluTest, SUB_AI_NNRt_Func_North_LeakyRelu_Model_AddOperation_05, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    LeakyReluModel1 leakyReluModel;
+    OHNNGraphArgs graphArgs = leakyReluModel.graphArgs;
+    graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        &paramIndices, nullptr, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_LeakyRelu_Model_AddOperation_06
+ * @tc.desc: 添加算子,inputindices中data为nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(LeakyReluTest, SUB_AI_NNRt_Func_North_LeakyRelu_Model_AddOperation_06, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    LeakyReluModel1 leakyReluModel;
+    OHNNGraphArgs graphArgs = leakyReluModel.graphArgs;
+    graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto inputIndices = TransformUInt32Array(graphArgs.inputIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    inputIndices.data = nullptr;
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        &paramIndices, &inputIndices, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_LeakyRelu_Model_AddOperation_07
+ * @tc.desc: 添加算子,inputindices中data对应序号不存在
+ * @tc.type: FUNC
+ */
+HWTEST_F(LeakyReluTest, SUB_AI_NNRt_Func_North_LeakyRelu_Model_AddOperation_07, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    LeakyReluModel1 leakyReluModel;
+    OHNNGraphArgs graphArgs = leakyReluModel.graphArgs;
+    graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    graphArgs.inputIndices = {100000};
+    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto inputIndices = TransformUInt32Array(graphArgs.inputIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        &paramIndices, &inputIndices, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_LeakyRelu_Model_AddOperation_08
+ * @tc.desc: 添加算子,inputindices中size为0
+ * @tc.type: FUNC
+ */
+HWTEST_F(LeakyReluTest, SUB_AI_NNRt_Func_North_LeakyRelu_Model_AddOperation_08, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+
EXPECT_NE(nullptr, model); + + LeakyReluModel1 leakyReluModel; + OHNNGraphArgs graphArgs = leakyReluModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LeakyRelu_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LeakyReluTest, SUB_AI_NNRt_Func_North_LeakyRelu_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LeakyReluModel1 leakyReluModel; + OHNNGraphArgs graphArgs = leakyReluModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/less_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/less_test.cpp new file mode 100644 index 0000000..6cdb8b7 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/less_test.cpp @@ -0,0 +1,851 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class LessTest : public testing::Test {}; + +struct LessModel1 { + const std::vector tensor_shape = {3}; + float input0Value[3] = {1, 2, 3}; + float input1Value[3] = {3, 2, 1}; + bool outputValue[3] = {0}; + + OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input0Value, 3*sizeof(float)}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input1Value, 3*sizeof(float)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_LESS, + .operands = {input0, input1, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct LessModel2 { + const std::vector tensor_shape = {3}; + const std::vector input1_shape = {3, 1}; + float input0Value[3] = {1, 2, 3}; + float input1Value[3][1] = {{3}, {2}, {1}}; + bool outputValue[3] = {0}; + + OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input0Value, 3*sizeof(float)}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, input1_shape, input1Value, 3*sizeof(float)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, 9*sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_LESS, + .operands = {input0, input1, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct LessModel3 { + const std::vector tensor_shape = {3}; + const std::vector input1_shape = {4}; + float input0Value[3] = {1, 2, 3}; + float input1Value[4] = {3, 2, 1, 4}; + bool outputValue[3] = {0}; + + OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input0Value, 3*sizeof(float)}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, input1_shape, input1Value, 4*sizeof(float)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, 4*sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_LESS, + .operands = {input0, input1, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct LessModel4 { + const std::vector tensor_shape = {}; + float* input0Value = {}; + float* input1Value = {}; + bool* outputValue = {}; + + OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input0Value, 0*sizeof(float)}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input1Value, 0*sizeof(float)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, 0*sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_LESS, + .operands = {input0, input1, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Less_Build_01 + * @tc.desc: LessModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(LessTest, SUB_AI_NNRt_Func_North_Less_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LessModel1 lessModel; + OHNNGraphArgs graphArgs = lessModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, 
CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Less_Build_02 + * @tc.desc: LessModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(LessTest, SUB_AI_NNRt_Func_North_Less_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LessModel2 lessModel; + OHNNGraphArgs graphArgs = lessModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Less_Build_03 + * @tc.desc: LessModel3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(LessTest, SUB_AI_NNRt_Func_North_Less_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LessModel3 lessModel; + OHNNGraphArgs graphArgs = lessModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Less_Build_04 + * @tc.desc: LessModel4模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(LessTest, SUB_AI_NNRt_Func_North_Less_Build_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LessModel4 lessModel; + OHNNGraphArgs graphArgs = lessModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Less_Build_05 + * @tc.desc: LessModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(LessTest, SUB_AI_NNRt_Func_North_Less_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LessModel2 lessModel; + OHNNGraphArgs graphArgs = lessModel.graphArgs; + graphArgs.operands = {lessModel.input0, lessModel.input1, lessModel.input1, lessModel.output}; + graphArgs.inputIndices = {0, 1, 2}; + graphArgs.outputIndices = {3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Less_Build_06 + * @tc.desc: 
LessModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(LessTest, SUB_AI_NNRt_Func_North_Less_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LessModel2 lessModel; + OHNNGraphArgs graphArgs = lessModel.graphArgs; + graphArgs.operands = {lessModel.input0, lessModel.input1, lessModel.output, lessModel.output}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2, 3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Less_Build_07 + * @tc.desc: LessModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(LessTest, SUB_AI_NNRt_Func_North_Less_Build_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LessModel2 lessModel; + OHNNGraphArgs graphArgs = lessModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {lessModel.input0, lessModel.input1, lessModel.output, activation}; + graphArgs.paramIndices = {3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Less_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(LessTest, SUB_AI_NNRt_Func_North_Less_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Less_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(LessTest, SUB_AI_NNRt_Func_North_Less_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LessModel1 lessModel; + OHNNGraphArgs graphArgs = lessModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Less_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(LessTest, SUB_AI_NNRt_Func_North_Less_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LessModel1 lessModel; + OHNNGraphArgs graphArgs = lessModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Less_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(LessTest, SUB_AI_NNRt_Func_North_Less_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LessModel1 lessModel; + OHNNGraphArgs graphArgs = lessModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + 
EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Less_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buffer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LessTest, SUB_AI_NNRt_Func_North_Less_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LessModel1 lessModel; + OHNNGraphArgs graphArgs = lessModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector<NN_TensorDesc*> tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Less_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(LessTest, SUB_AI_NNRt_Func_North_Less_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LessModel1 lessModel; + OHNNGraphArgs graphArgs = lessModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector<NN_TensorDesc*> tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Less_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LessTest, SUB_AI_NNRt_Func_North_Less_Model_SpecifyInputsAndOutputs_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LessModel1 lessModel; + OHNNGraphArgs graphArgs = lessModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto
outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Less_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LessTest, SUB_AI_NNRt_Func_North_Less_Model_SpecifyInputsAndOutputs_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LessModel1 lessModel; + OHNNGraphArgs graphArgs = lessModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Less_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LessTest, SUB_AI_NNRt_Func_North_Less_Model_SpecifyInputsAndOutputs_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LessModel1 lessModel; + OHNNGraphArgs graphArgs = lessModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Less_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LessTest, SUB_AI_NNRt_Func_North_Less_Model_SpecifyInputsAndOutputs_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LessModel1 lessModel; + OHNNGraphArgs graphArgs = lessModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Less_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LessTest, SUB_AI_NNRt_Func_North_Less_Model_SpecifyInputsAndOutputs_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LessModel1 lessModel; + OHNNGraphArgs graphArgs = lessModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, 
OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Less_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LessTest, SUB_AI_NNRt_Func_North_Less_Model_SpecifyInputsAndOutputs_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LessModel1 lessModel; + OHNNGraphArgs graphArgs = lessModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Less_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LessTest, SUB_AI_NNRt_Func_North_Less_Model_SpecifyInputsAndOutputs_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LessModel1 lessModel; + OHNNGraphArgs graphArgs = lessModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Less_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LessTest, SUB_AI_NNRt_Func_North_Less_Model_SpecifyInputsAndOutputs_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LessModel1 lessModel; + OHNNGraphArgs graphArgs = lessModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Less_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LessTest, SUB_AI_NNRt_Func_North_Less_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LessModel1 lessModel; + OHNNGraphArgs graphArgs = lessModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, 
nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Less_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LessTest, SUB_AI_NNRt_Func_North_Less_Model_AddOperation_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LessModel1 lessModel; + OHNNGraphArgs graphArgs = lessModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Less_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LessTest, SUB_AI_NNRt_Func_North_Less_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LessModel1 lessModel; + OHNNGraphArgs graphArgs = lessModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Less_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LessTest, SUB_AI_NNRt_Func_North_Less_Model_AddOperation_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LessModel1 lessModel; + OHNNGraphArgs graphArgs = lessModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Less_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LessTest, SUB_AI_NNRt_Func_North_Less_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LessModel1 lessModel; + OHNNGraphArgs graphArgs = lessModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto
outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Less_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LessTest, SUB_AI_NNRt_Func_North_Less_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LessModel1 lessModel; + OHNNGraphArgs graphArgs = lessModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Less_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LessTest, SUB_AI_NNRt_Func_North_Less_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LessModel1 lessModel; + OHNNGraphArgs graphArgs = lessModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Less_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LessTest, SUB_AI_NNRt_Func_North_Less_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LessModel1 lessModel; + OHNNGraphArgs graphArgs = lessModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Less_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LessTest, SUB_AI_NNRt_Func_North_Less_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LessModel1 lessModel; + OHNNGraphArgs graphArgs = lessModel.graphArgs; +
graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + &paramIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/log_softmax_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/log_softmax_test.cpp new file mode 100644 index 0000000..e788543 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/log_softmax_test.cpp @@ -0,0 +1,816 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class LogSoftmaxTest : public testing::Test {}; + +struct LogSoftmaxModel1 { + const std::vector<int32_t> tensor_shape = {4, 2}; + int64_t axisValue[1] = {1}; + float inputValue[4][2] = {{1, 2}, {3, 4}, {5, 6}, {7, 8}}; + float outputValue[4][2] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 8*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 8*sizeof(float)}; + OHNNOperandTest axis = {OH_NN_INT64, OH_NN_LOG_SOFTMAX_AXIS, {1}, axisValue, sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_LOG_SOFTMAX, + .operands = {input, output, axis}, + .paramIndices = {2}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct LogSoftmaxModel2 { + const std::vector<int32_t> tensor_shape = {4, 2}; + int64_t axisValue[1] = {0}; + float inputValue[4][2] = {{1, 2}, {3, 4}, {5, 6}, {7, 8}}; + float outputValue[4][2] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 8*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 8*sizeof(float)}; + OHNNOperandTest axis = {OH_NN_INT64, OH_NN_LOG_SOFTMAX_AXIS, {1}, axisValue, sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_LOG_SOFTMAX, + .operands = {input, output, axis}, + .paramIndices = {2}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct LogSoftmaxModel3 { + const std::vector<int32_t> tensor_shape = {4, 2}; + int64_t axisValue[1] = {2}; + float inputValue[4][2] = {{1, 2}, {3, 4}, {5, 6}, {7, 8}}; + float outputValue[4][2] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 8*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 8*sizeof(float)}; + OHNNOperandTest axis = {OH_NN_INT64, OH_NN_LOG_SOFTMAX_AXIS, {1}, axisValue, sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_LOG_SOFTMAX, + .operands = {input, output, axis}, + .paramIndices =
{2}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogSoftmax_Build_01 + * @tc.desc: LogSoftmaxModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxTest, SUB_AI_NNRt_Func_North_LogSoftmax_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogSoftmaxModel1 logSoftmaxModel; + OHNNGraphArgs graphArgs = logSoftmaxModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogSoftmax_Build_02 + * @tc.desc: LogSoftmaxModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxTest, SUB_AI_NNRt_Func_North_LogSoftmax_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogSoftmaxModel2 logSoftmaxModel; + OHNNGraphArgs graphArgs = logSoftmaxModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogSoftmax_Build_03 + * @tc.desc: LogSoftmaxModel3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxTest, SUB_AI_NNRt_Func_North_LogSoftmax_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogSoftmaxModel3 logSoftmaxModel; + OHNNGraphArgs graphArgs = logSoftmaxModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogSoftmax_Build_04 + * @tc.desc: LogSoftmaxModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxTest, SUB_AI_NNRt_Func_North_LogSoftmax_Build_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogSoftmaxModel1 logSoftmaxModel; + OHNNGraphArgs graphArgs = logSoftmaxModel.graphArgs; + graphArgs.operands = {logSoftmaxModel.input, logSoftmaxModel.input, + logSoftmaxModel.output, logSoftmaxModel.axis}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2}; + graphArgs.paramIndices = {3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * 
@tc.number : SUB_AI_NNRt_Func_North_LogSoftmax_Build_05 + * @tc.desc: LogSoftmaxModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxTest, SUB_AI_NNRt_Func_North_LogSoftmax_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogSoftmaxModel1 logSoftmaxModel; + OHNNGraphArgs graphArgs = logSoftmaxModel.graphArgs; + graphArgs.operands = {logSoftmaxModel.input, logSoftmaxModel.output, + logSoftmaxModel.output, logSoftmaxModel.axis}; + graphArgs.inputIndices = {0}; + graphArgs.outputIndices = {1, 2}; + graphArgs.paramIndices = {3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogSoftmax_Build_06 + * @tc.desc: LogSoftmaxModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxTest, SUB_AI_NNRt_Func_North_LogSoftmax_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogSoftmaxModel1 logSoftmaxModel; + OHNNGraphArgs graphArgs = logSoftmaxModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {logSoftmaxModel.input, logSoftmaxModel.output, logSoftmaxModel.axis, activation}; + graphArgs.paramIndices = {2, 3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogSoftmax_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxTest, SUB_AI_NNRt_Func_North_LogSoftmax_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogSoftmax_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxTest, SUB_AI_NNRt_Func_North_LogSoftmax_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogSoftmaxModel1 logSoftmaxModel; + OHNNGraphArgs graphArgs = logSoftmaxModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogSoftmax_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxTest, SUB_AI_NNRt_Func_North_LogSoftmax_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogSoftmaxModel1 logSoftmaxModel; + OHNNGraphArgs graphArgs = logSoftmaxModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogSoftmax_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxTest, SUB_AI_NNRt_Func_North_LogSoftmax_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogSoftmaxModel1 logSoftmaxModel; + OHNNGraphArgs graphArgs = logSoftmaxModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + 
std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogSoftmax_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buffer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxTest, SUB_AI_NNRt_Func_North_LogSoftmax_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogSoftmaxModel1 logSoftmaxModel; + OHNNGraphArgs graphArgs = logSoftmaxModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogSoftmax_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxTest, SUB_AI_NNRt_Func_North_LogSoftmax_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogSoftmaxModel1 logSoftmaxModel; + OHNNGraphArgs graphArgs = logSoftmaxModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogSoftmax_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxTest, 
SUB_AI_NNRt_Func_North_LogSoftmax_Model_SpecifyInputsAndOutputs_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogSoftmaxModel1 logSoftmaxModel; + OHNNGraphArgs graphArgs = logSoftmaxModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogSoftmax_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxTest, SUB_AI_NNRt_Func_North_LogSoftmax_Model_SpecifyInputsAndOutputs_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogSoftmaxModel1 logSoftmaxModel; + OHNNGraphArgs graphArgs = logSoftmaxModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogSoftmax_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxTest, SUB_AI_NNRt_Func_North_LogSoftmax_Model_SpecifyInputsAndOutputs_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogSoftmaxModel1 logSoftmaxModel; + OHNNGraphArgs graphArgs = logSoftmaxModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogSoftmax_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxTest, SUB_AI_NNRt_Func_North_LogSoftmax_Model_SpecifyInputsAndOutputs_04, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogSoftmaxModel1 logSoftmaxModel; + OHNNGraphArgs graphArgs = logSoftmaxModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogSoftmax_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ 
+HWTEST_F(LogSoftmaxTest, SUB_AI_NNRt_Func_North_LogSoftmax_Model_SpecifyInputsAndOutputs_05, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogSoftmaxModel1 logSoftmaxModel; + OHNNGraphArgs graphArgs = logSoftmaxModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogSoftmax_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxTest, SUB_AI_NNRt_Func_North_LogSoftmax_Model_SpecifyInputsAndOutputs_06, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogSoftmaxModel1 logSoftmaxModel; + OHNNGraphArgs graphArgs = logSoftmaxModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogSoftmax_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxTest, SUB_AI_NNRt_Func_North_LogSoftmax_Model_SpecifyInputsAndOutputs_07, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogSoftmaxModel1 logSoftmaxModel; + OHNNGraphArgs graphArgs = logSoftmaxModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogSoftmax_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxTest, SUB_AI_NNRt_Func_North_LogSoftmax_Model_SpecifyInputsAndOutputs_08, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogSoftmaxModel1 logSoftmaxModel; + OHNNGraphArgs graphArgs = logSoftmaxModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogSoftmax_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * 
@tc.type: FUNC + */ +HWTEST_F(LogSoftmaxTest, SUB_AI_NNRt_Func_North_LogSoftmax_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogSoftmaxModel1 logSoftmaxModel; + OHNNGraphArgs graphArgs = logSoftmaxModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogSoftmax_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxTest, SUB_AI_NNRt_Func_North_LogSoftmax_Model_AddOperation_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogSoftmaxModel1 logSoftmaxModel; + OHNNGraphArgs graphArgs = logSoftmaxModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogSoftmax_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxTest, SUB_AI_NNRt_Func_North_LogSoftmax_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogSoftmaxModel1 logSoftmaxModel; + OHNNGraphArgs graphArgs = logSoftmaxModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogSoftmax_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxTest, SUB_AI_NNRt_Func_North_LogSoftmax_Model_AddOperation_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogSoftmaxModel1 logSoftmaxModel; + OHNNGraphArgs graphArgs = logSoftmaxModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices =
TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogSoftmax_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxTest, SUB_AI_NNRt_Func_North_LogSoftmax_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogSoftmaxModel1 logSoftmaxModel; + OHNNGraphArgs graphArgs = logSoftmaxModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogSoftmax_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxTest, SUB_AI_NNRt_Func_North_LogSoftmax_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogSoftmaxModel1 logSoftmaxModel; + OHNNGraphArgs graphArgs = logSoftmaxModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogSoftmax_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxTest, SUB_AI_NNRt_Func_North_LogSoftmax_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogSoftmaxModel1 logSoftmaxModel; + OHNNGraphArgs graphArgs = logSoftmaxModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogSoftmax_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxTest, SUB_AI_NNRt_Func_North_LogSoftmax_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogSoftmaxModel1 logSoftmaxModel; +
OHNNGraphArgs graphArgs = logSoftmaxModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogSoftmax_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogSoftmaxTest, SUB_AI_NNRt_Func_North_LogSoftmax_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogSoftmaxModel1 logSoftmaxModel; + OHNNGraphArgs graphArgs = logSoftmaxModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + &paramIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/log_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/log_test.cpp new file mode 100644 index 0000000..915acae --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/log_test.cpp @@ -0,0 +1,755 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class LogTest : public testing::Test {}; + +struct LogModel1 { + const std::vector tensor_shape = {3}; + float inputValue[3] = {1, 2, 3}; + float outputValue[3] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 3*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_LOG, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct LogModel2 { + const std::vector tensor_shape = {3}; + bool inputValue[3] = {false, false, true}; + bool outputValue[3] = {false}; + + OHNNOperandTest input = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, inputValue, 3*sizeof(bool)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_LOG, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Log_Build_01 + * @tc.desc: LogModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(LogTest, SUB_AI_NNRt_Func_North_Log_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogModel1 logModel; + OHNNGraphArgs graphArgs = logModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Log_Build_02 + * @tc.desc: LogModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(LogTest, SUB_AI_NNRt_Func_North_Log_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogModel2 logModel; + OHNNGraphArgs graphArgs = logModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Log_Build_03 + * @tc.desc: LogModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(LogTest, SUB_AI_NNRt_Func_North_Log_Build_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogModel2 logModel; + OHNNGraphArgs graphArgs = logModel.graphArgs; + graphArgs.operands = {logModel.input, logModel.input, logModel.output}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * 
@tc.number : SUB_AI_NNRt_Func_North_Log_Build_04 + * @tc.desc: LogModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(LogTest, SUB_AI_NNRt_Func_North_Log_Build_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogModel2 logModel; + OHNNGraphArgs graphArgs = logModel.graphArgs; + graphArgs.operands = {logModel.input, logModel.output, logModel.output}; + graphArgs.inputIndices = {0}; + graphArgs.outputIndices = {1, 2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Log_Build_05 + * @tc.desc: LogModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(LogTest, SUB_AI_NNRt_Func_North_Log_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogModel2 logModel; + OHNNGraphArgs graphArgs = logModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {logModel.input, logModel.output, activation}; + graphArgs.paramIndices = {2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Log_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(LogTest, SUB_AI_NNRt_Func_North_Log_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Log_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(LogTest, SUB_AI_NNRt_Func_North_Log_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogModel1 logModel; + OHNNGraphArgs graphArgs = logModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Log_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(LogTest, SUB_AI_NNRt_Func_North_Log_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogModel1 logModel; + OHNNGraphArgs graphArgs = logModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Log_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(LogTest, SUB_AI_NNRt_Func_North_Log_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogModel1 logModel; + OHNNGraphArgs graphArgs = logModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, 
OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Log_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogTest, SUB_AI_NNRt_Func_North_Log_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogModel1 logModel; + OHNNGraphArgs graphArgs = logModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Log_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(LogTest, SUB_AI_NNRt_Func_North_Log_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogModel1 logModel; + OHNNGraphArgs graphArgs = logModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Log_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogTest, SUB_AI_NNRt_Func_North_Log_Model_SpecifyInputsAndOutputs_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogModel1 logModel; + OHNNGraphArgs graphArgs = logModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = 
TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Log_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogTest, SUB_AI_NNRt_Func_North_Log_Model_SpecifyInputsAndOutputs_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogModel1 logModel; + OHNNGraphArgs graphArgs = logModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Log_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LogTest, SUB_AI_NNRt_Func_North_Log_Model_SpecifyInputsAndOutputs_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogModel1 logModel; + OHNNGraphArgs graphArgs = logModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Log_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LogTest, SUB_AI_NNRt_Func_North_Log_Model_SpecifyInputsAndOutputs_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogModel1 logModel; + OHNNGraphArgs graphArgs = logModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Log_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogTest, SUB_AI_NNRt_Func_North_Log_Model_SpecifyInputsAndOutputs_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogModel1 logModel; + OHNNGraphArgs graphArgs = logModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + 
+ Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Log_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogTest, SUB_AI_NNRt_Func_North_Log_Model_SpecifyInputsAndOutputs_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogModel1 logModel; + OHNNGraphArgs graphArgs = logModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Log_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LogTest, SUB_AI_NNRt_Func_North_Log_Model_SpecifyInputsAndOutputs_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogModel1 logModel; + OHNNGraphArgs graphArgs = logModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Log_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LogTest, SUB_AI_NNRt_Func_North_Log_Model_SpecifyInputsAndOutputs_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogModel1 logModel; + OHNNGraphArgs graphArgs = logModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Log_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogTest, SUB_AI_NNRt_Func_North_Log_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogModel1 logModel; + OHNNGraphArgs graphArgs = logModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Log_Model_AddOperation_02 
+ * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogTest, SUB_AI_NNRt_Func_North_Log_Model_AddOperation_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogModel1 logModel; + OHNNGraphArgs graphArgs = logModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Log_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LogTest, SUB_AI_NNRt_Func_North_Log_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogModel1 logModel; + OHNNGraphArgs graphArgs = logModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Log_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LogTest, SUB_AI_NNRt_Func_North_Log_Model_AddOperation_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogModel1 logModel; + OHNNGraphArgs graphArgs = logModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Log_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogTest, SUB_AI_NNRt_Func_North_Log_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogModel1 logModel; + OHNNGraphArgs graphArgs = logModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, 
graphArgs.operationType, + ¶mIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Log_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogTest, SUB_AI_NNRt_Func_North_Log_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogModel1 logModel; + OHNNGraphArgs graphArgs = logModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Log_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LogTest, SUB_AI_NNRt_Func_North_Log_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogModel1 logModel; + OHNNGraphArgs graphArgs = logModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Log_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LogTest, SUB_AI_NNRt_Func_North_Log_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogModel1 logModel; + OHNNGraphArgs graphArgs = logModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Log_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogTest, SUB_AI_NNRt_Func_North_Log_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogModel1 logModel; + OHNNGraphArgs graphArgs = logModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto 
paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto inputIndices = TransformUInt32Array(graphArgs.inputIndices);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType,
+        &paramIndices, &inputIndices, nullptr));
+
+    Free(model, nullptr, nullptr);
+}
\ No newline at end of file
diff --git a/test/nnrt_xts_acts/nncore/opstest/src/logical_and_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/logical_and_test.cpp
new file mode 100644
index 0000000..de89979
--- /dev/null
+++ b/test/nnrt_xts_acts/nncore/opstest/src/logical_and_test.cpp
@@ -0,0 +1,904 @@
+/*
+ * Copyright (c) 2024 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include
+#include
+#include
+#include "nncore_utils.h"
+
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Test;
+class LogicalAndTest : public testing::Test {};
+
+struct LogicalAndModel1 {
+    const std::vector<int32_t> tensor_shape = {3};
+    int64_t input0Value[3] = {1, 0, 1};
+    int64_t input1Value[3] = {0, 1, 1};
+    bool outputValue[3] = {false};
+
+    OHNNOperandTest input0 = {OH_NN_INT64, OH_NN_TENSOR, tensor_shape, input0Value, 3*sizeof(int64_t)};
+    OHNNOperandTest input1 = {OH_NN_INT64, OH_NN_TENSOR, tensor_shape, input1Value, 3*sizeof(int64_t)};
+    OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(bool)};
+    OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_LOGICAL_AND,
+                               .operands = {input0, input1, output},
+                               .paramIndices = {},
+                               .inputIndices = {0, 1},
+                               .outputIndices = {2}};
+};
+
+struct LogicalAndModel2 {
+    const std::vector<int32_t> tensor_shape = {3};
+    int64_t input0Value[3] = {1, 0, 1};
+    bool input1Value[1] = {false};
+    bool outputValue[3] = {false};
+
+    OHNNOperandTest input0 = {OH_NN_INT64, OH_NN_TENSOR, tensor_shape, input0Value, 3*sizeof(int64_t)};
+    OHNNOperandTest input1 = {OH_NN_BOOL, OH_NN_TENSOR, {1}, input1Value, sizeof(bool)};
+    OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(bool)};
+    OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_LOGICAL_AND,
+                               .operands = {input0, input1, output},
+                               .paramIndices = {},
+                               .inputIndices = {0, 1},
+                               .outputIndices = {2}};
+};
+
+struct LogicalAndModel3 {
+    const std::vector<int32_t> tensor_shape = {2};
+    bool input0Value[2] = {true, false};
+    bool input1Value[1] = {true};
+    bool outputValue[2] = {false};
+
+    OHNNOperandTest input0 = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, input0Value, 2*sizeof(bool)};
+    OHNNOperandTest input1 = {OH_NN_BOOL, OH_NN_TENSOR, {1}, input1Value, sizeof(bool)};
+    OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, 2*sizeof(bool)};
+    OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_LOGICAL_AND,
+                               .operands = {input0, input1, output},
+                               .paramIndices = {},
+                               .inputIndices = {0, 1},
+                               .outputIndices = {2}};
+};
+
+struct LogicalAndModel4 {
+    const std::vector<int32_t> tensor_shape = {2};
+    bool input0Value[1] = {false};
+    bool input1Value[2] = {true, false};
+    bool outputValue[2] = {false};
+
OHNNOperandTest input0 = {OH_NN_BOOL, OH_NN_TENSOR, {1}, input0Value, sizeof(bool)}; + OHNNOperandTest input1 = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, input1Value, 2*sizeof(bool)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, 2*sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_LOGICAL_AND, + .operands = {input0, input1, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct LogicalAndModel5 { + const std::vector tensor_shape = {3}; + bool input0Value[3] = {true, false, true}; + bool input1Value[3] = {false, false, true}; + bool outputValue[3] = {false}; + + OHNNOperandTest input0 = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, input0Value, 3*sizeof(bool)}; + OHNNOperandTest input1 = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, input1Value, 3*sizeof(bool)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_LOGICAL_AND, + .operands = {input0, input1, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalAnd_Build_01 + * @tc.desc: LogicalAndModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalAndModel1 logicalAndModel; + OHNNGraphArgs graphArgs = logicalAndModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalAnd_Build_02 + * @tc.desc: LogicalAndModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalAndModel2 logicalAndModel; + OHNNGraphArgs graphArgs = logicalAndModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalAnd_Build_03 + * @tc.desc: LogicalAndModel3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalAndModel3 logicalAndModel; + OHNNGraphArgs graphArgs = logicalAndModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + 
OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalAnd_Build_04 + * @tc.desc: LogicalAndModel4模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Build_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalAndModel4 logicalAndModel; + OHNNGraphArgs graphArgs = logicalAndModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalAnd_Build_05 + * @tc.desc: LogicalAndModel5模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Build_05, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalAndModel5 logicalAndModel; + OHNNGraphArgs graphArgs = logicalAndModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalAnd_Build_06 + * @tc.desc: LogicalAndModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalAndModel2 logicalAndModel; + OHNNGraphArgs graphArgs = logicalAndModel.graphArgs; + graphArgs.operands = {logicalAndModel.input0, logicalAndModel.input1, logicalAndModel.input1, + logicalAndModel.output}; + graphArgs.inputIndices = {0, 1, 2}; + graphArgs.outputIndices = {3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalAnd_Build_07 + * @tc.desc: LogicalAndModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Build_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalAndModel2 logicalAndModel; + OHNNGraphArgs graphArgs = logicalAndModel.graphArgs; + graphArgs.operands = {logicalAndModel.input0, logicalAndModel.input1, logicalAndModel.output, + logicalAndModel.output}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2, 3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, 
graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalAnd_Build_08 + * @tc.desc: LogicalAndModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Build_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalAndModel2 logicalAndModel; + OHNNGraphArgs graphArgs = logicalAndModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {logicalAndModel.input0, logicalAndModel.input1, logicalAndModel.output, activation}; + graphArgs.paramIndices = {3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalAnd_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalAnd_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalAndModel1 logicalAndModel; + OHNNGraphArgs graphArgs = logicalAndModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalAnd_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalAndModel1 logicalAndModel; + OHNNGraphArgs graphArgs = logicalAndModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalAnd_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalAndModel1 logicalAndModel; + OHNNGraphArgs graphArgs = logicalAndModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, 
operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalAnd_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalAndModel1 logicalAndModel; + OHNNGraphArgs graphArgs = logicalAndModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalAnd_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalAndModel1 logicalAndModel; + OHNNGraphArgs graphArgs = logicalAndModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalAnd_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Model_SpecifyInputsAndOutputs_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalAndModel1 logicalAndModel; + OHNNGraphArgs graphArgs = logicalAndModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : 
SUB_AI_NNRt_Func_North_LogicalAnd_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Model_SpecifyInputsAndOutputs_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalAndModel1 logicalAndModel; + OHNNGraphArgs graphArgs = logicalAndModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalAnd_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Model_SpecifyInputsAndOutputs_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalAndModel1 logicalAndModel; + OHNNGraphArgs graphArgs = logicalAndModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalAnd_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Model_SpecifyInputsAndOutputs_04, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalAndModel1 logicalAndModel; + OHNNGraphArgs graphArgs = logicalAndModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalAnd_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Model_SpecifyInputsAndOutputs_05, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalAndModel1 logicalAndModel; + OHNNGraphArgs graphArgs = logicalAndModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + 
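+/*
+ * Illustrative sketch only (not one of the archived XTS cases): the positive counterpart
+ * to the SpecifyInputsAndOutputs error paths above. It reuses LogicalAndModel1 and the
+ * same test helpers (BuildSingleOpGraph, TransformUInt32Array, Free); the case name and
+ * the OH_NN_SUCCESS expectation for a well-formed index set are assumptions rather than
+ * part of the original suite.
+ */
+HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Model_SpecifyInputsAndOutputs_Sketch,
+    Function | MediumTest | Level1)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    LogicalAndModel1 logicalAndModel;
+    OHNNGraphArgs graphArgs = logicalAndModel.graphArgs;
+    graphArgs.specifyIO = false;   // add tensors and the operation, but skip the I/O binding
+    graphArgs.build = false;       // and skip the final build step inside the helper
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    // Bind the declared inputs {0, 1} and output {2}; with valid, non-empty index arrays
+    // the call is expected to return OH_NN_SUCCESS.
+    auto inputIndices = TransformUInt32Array(graphArgs.inputIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+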
+/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalAnd_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Model_SpecifyInputsAndOutputs_06, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalAndModel1 logicalAndModel; + OHNNGraphArgs graphArgs = logicalAndModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalAnd_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Model_SpecifyInputsAndOutputs_07, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalAndModel1 logicalAndModel; + OHNNGraphArgs graphArgs = logicalAndModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalAnd_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Model_SpecifyInputsAndOutputs_08, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalAndModel1 logicalAndModel; + OHNNGraphArgs graphArgs = logicalAndModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalAnd_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalAndModel1 logicalAndModel; + OHNNGraphArgs graphArgs = logicalAndModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, 
&inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalAnd_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Model_AddOperation_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalAndModel1 logicalAndModel; + OHNNGraphArgs graphArgs = logicalAndModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalAnd_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalAndModel1 logicalAndModel; + OHNNGraphArgs graphArgs = logicalAndModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalAnd_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Model_AddOperation_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalAndModel1 logicalAndModel; + OHNNGraphArgs graphArgs = logicalAndModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalAnd_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalAndModel1 logicalAndModel; + OHNNGraphArgs graphArgs = logicalAndModel.graphArgs; + graphArgs.addOperation = false; + 
graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalAnd_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalAndModel1 logicalAndModel; + OHNNGraphArgs graphArgs = logicalAndModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalAnd_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalAndModel1 logicalAndModel; + OHNNGraphArgs graphArgs = logicalAndModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalAnd_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalAndModel1 logicalAndModel; + OHNNGraphArgs graphArgs = logicalAndModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalAnd_Model_AddOperation_09 + * @tc.desc: 
添加算子,outputindices为nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(LogicalAndTest, SUB_AI_NNRt_Func_North_LogicalAnd_Model_AddOperation_09, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    LogicalAndModel1 logicalAndModel;
+    OHNNGraphArgs graphArgs = logicalAndModel.graphArgs;
+    graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto inputIndices = TransformUInt32Array(graphArgs.inputIndices);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType,
+        &paramIndices, &inputIndices, nullptr));
+
+    Free(model, nullptr, nullptr);
+}
\ No newline at end of file
diff --git a/test/nnrt_xts_acts/nncore/opstest/src/logical_not_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/logical_not_test.cpp
new file mode 100644
index 0000000..2d9fdd0
--- /dev/null
+++ b/test/nnrt_xts_acts/nncore/opstest/src/logical_not_test.cpp
@@ -0,0 +1,892 @@
+/*
+ * Copyright (c) 2024 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include
+#include
+#include
+#include "nncore_utils.h"
+
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Test;
+class LogicalNotTest : public testing::Test {};
+
+struct LogicalNotModel1 {
+    const std::vector<int32_t> tensor_shape = {3};
+    int64_t inputValue[3] = {1, 0, 1};
+    bool outputValue[3] = {false};
+
+    OHNNOperandTest input = {OH_NN_INT64, OH_NN_TENSOR, tensor_shape, inputValue, 3*sizeof(int64_t)};
+    OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(bool)};
+    OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_LOGICAL_NOT,
+                               .operands = {input, output},
+                               .paramIndices = {},
+                               .inputIndices = {0},
+                               .outputIndices = {1}};
+};
+
+struct LogicalNotModel2 {
+    const std::vector<int32_t> tensor_shape = {1};
+    bool inputValue[1] = {true};
+    bool outputValue[1] = {false};
+
+    OHNNOperandTest input = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, inputValue, sizeof(bool)};
+    OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, sizeof(bool)};
+    OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_LOGICAL_NOT,
+                               .operands = {input, output},
+                               .paramIndices = {},
+                               .inputIndices = {0},
+                               .outputIndices = {1}};
+};
+
+struct LogicalNotModel3 {
+    const std::vector<int32_t> tensor_shape = {1};
+    bool inputValue[1] = {false};
+    bool outputValue[1] = {false};
+
+    OHNNOperandTest input = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, inputValue, sizeof(bool)};
+    OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, sizeof(bool)};
+    OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_LOGICAL_NOT,
+                               .operands = {input, output},
+                               .paramIndices = {},
+                               .inputIndices = {0},
+                               .outputIndices = {1}};
+};
+
+struct LogicalNotModel4 {
+    const std::vector<int32_t> tensor_shape = {2};
+    bool inputValue[2] = {true, false};
+    bool
outputValue[2] = {false, true}; + + OHNNOperandTest input = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, inputValue, 2*sizeof(bool)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, 2*sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_LOGICAL_NOT, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct LogicalNotModel5 { + const std::vector tensor_shape = {3}; + bool inputValue[3] = {0, 0, 1}; + bool outputValue[3] = {false}; + + OHNNOperandTest input = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, inputValue, 3*sizeof(bool)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_LOGICAL_NOT, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalNot_Build_01 + * @tc.desc: LogicalNotModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotTest, SUB_AI_NNRt_Func_North_LogicalNot_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalNotModel1 logicalNotModel; + OHNNGraphArgs graphArgs = logicalNotModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalNot_Build_02 + * @tc.desc: LogicalNotModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotTest, SUB_AI_NNRt_Func_North_LogicalNot_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalNotModel2 logicalNotModel; + OHNNGraphArgs graphArgs = logicalNotModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalNot_Build_03 + * @tc.desc: LogicalNotModel3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotTest, SUB_AI_NNRt_Func_North_LogicalNot_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalNotModel3 logicalNotModel; + OHNNGraphArgs graphArgs = logicalNotModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = 
OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalNot_Build_04 + * @tc.desc: LogicalNotModel4模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotTest, SUB_AI_NNRt_Func_North_LogicalNot_Build_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalNotModel4 logicalNotModel; + OHNNGraphArgs graphArgs = logicalNotModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalNot_Build_05 + * @tc.desc: LogicalNotModel5模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotTest, SUB_AI_NNRt_Func_North_LogicalNot_Build_05, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalNotModel5 logicalNotModel; + OHNNGraphArgs graphArgs = logicalNotModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalNot_Build_06 + * @tc.desc: LogicalNotModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotTest, SUB_AI_NNRt_Func_North_LogicalNot_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalNotModel2 logicalNotModel; + OHNNGraphArgs graphArgs = logicalNotModel.graphArgs; + graphArgs.operands = {logicalNotModel.input, logicalNotModel.input, logicalNotModel.output}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalNot_Build_07 + * @tc.desc: LogicalNotModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotTest, SUB_AI_NNRt_Func_North_LogicalNot_Build_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalNotModel2 logicalNotModel; + OHNNGraphArgs graphArgs = logicalNotModel.graphArgs; + graphArgs.operands = {logicalNotModel.input, logicalNotModel.output, logicalNotModel.output}; + graphArgs.inputIndices = {0}; + graphArgs.outputIndices = {1, 2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalNot_Build_08 + * @tc.desc: LogicalNotModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotTest, SUB_AI_NNRt_Func_North_LogicalNot_Build_08, Function | MediumTest 
| Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalNotModel2 logicalNotModel; + OHNNGraphArgs graphArgs = logicalNotModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {logicalNotModel.input, logicalNotModel.output, activation}; + graphArgs.paramIndices = {2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalNot_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotTest, SUB_AI_NNRt_Func_North_LogicalNot_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalNot_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotTest, SUB_AI_NNRt_Func_North_LogicalNot_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalNotModel1 logicalNotModel; + OHNNGraphArgs graphArgs = logicalNotModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalNot_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotTest, SUB_AI_NNRt_Func_North_LogicalNot_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalNotModel1 logicalNotModel; + OHNNGraphArgs graphArgs = logicalNotModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalNot_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotTest, SUB_AI_NNRt_Func_North_LogicalNot_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalNotModel1 logicalNotModel; + OHNNGraphArgs graphArgs = logicalNotModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalNot_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotTest, 
SUB_AI_NNRt_Func_North_LogicalNot_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalNotModel1 logicalNotModel; + OHNNGraphArgs graphArgs = logicalNotModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalNot_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotTest, SUB_AI_NNRt_Func_North_LogicalNot_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalNotModel1 logicalNotModel; + OHNNGraphArgs graphArgs = logicalNotModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalNot_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotTest, SUB_AI_NNRt_Func_North_LogicalNot_Model_SpecifyInputsAndOutputs_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalNotModel1 logicalNotModel; + OHNNGraphArgs graphArgs = logicalNotModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalNot_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotTest, SUB_AI_NNRt_Func_North_LogicalNot_Model_SpecifyInputsAndOutputs_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); 
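+ // Descriptive note: this case assembles the graph without specifying inputs/outputs or building, then calls OH_NNModel_SpecifyInputsAndOutputs with an index array whose data pointer is null, expecting OH_NN_INVALID_PARAMETER.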
+ EXPECT_NE(nullptr, model); + + LogicalNotModel1 logicalNotModel; + OHNNGraphArgs graphArgs = logicalNotModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalNot_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotTest, SUB_AI_NNRt_Func_North_LogicalNot_Model_SpecifyInputsAndOutputs_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalNotModel1 logicalNotModel; + OHNNGraphArgs graphArgs = logicalNotModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalNot_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotTest, SUB_AI_NNRt_Func_North_LogicalNot_Model_SpecifyInputsAndOutputs_04, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalNotModel1 logicalNotModel; + OHNNGraphArgs graphArgs = logicalNotModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalNot_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotTest, SUB_AI_NNRt_Func_North_LogicalNot_Model_SpecifyInputsAndOutputs_05, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalNotModel1 logicalNotModel; + OHNNGraphArgs graphArgs = logicalNotModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalNot_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotTest, SUB_AI_NNRt_Func_North_LogicalNot_Model_SpecifyInputsAndOutputs_06, + Function | MediumTest | Level2) +{ + OH_NNModel *model = 
OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalNotModel1 logicalNotModel; + OHNNGraphArgs graphArgs = logicalNotModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalNot_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotTest, SUB_AI_NNRt_Func_North_LogicalNot_Model_SpecifyInputsAndOutputs_07, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalNotModel1 logicalNotModel; + OHNNGraphArgs graphArgs = logicalNotModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalNot_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotTest, SUB_AI_NNRt_Func_North_LogicalNot_Model_SpecifyInputsAndOutputs_08, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalNotModel1 logicalNotModel; + OHNNGraphArgs graphArgs = logicalNotModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalNot_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotTest, SUB_AI_NNRt_Func_North_LogicalNot_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalNotModel1 logicalNotModel; + OHNNGraphArgs graphArgs = logicalNotModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalNot_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotTest, SUB_AI_NNRt_Func_North_LogicalNot_Model_AddOperation_02, Function | 
MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalNotModel1 logicalNotModel; + OHNNGraphArgs graphArgs = logicalNotModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalNot_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotTest, SUB_AI_NNRt_Func_North_LogicalNot_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalNotModel1 logicalNotModel; + OHNNGraphArgs graphArgs = logicalNotModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalNot_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotTest, SUB_AI_NNRt_Func_North_LogicalNot_Model_AddOperation_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalNotModel1 logicalNotModel; + OHNNGraphArgs graphArgs = logicalNotModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalNot_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotTest, SUB_AI_NNRt_Func_North_LogicalNot_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalNotModel1 logicalNotModel; + OHNNGraphArgs graphArgs = logicalNotModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, 
graphArgs.operationType, + ¶mIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalNot_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotTest, SUB_AI_NNRt_Func_North_LogicalNot_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalNotModel1 logicalNotModel; + OHNNGraphArgs graphArgs = logicalNotModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalNot_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotTest, SUB_AI_NNRt_Func_North_LogicalNot_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalNotModel1 logicalNotModel; + OHNNGraphArgs graphArgs = logicalNotModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalNot_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotTest, SUB_AI_NNRt_Func_North_LogicalNot_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalNotModel1 logicalNotModel; + OHNNGraphArgs graphArgs = logicalNotModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalNot_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogicalNotTest, SUB_AI_NNRt_Func_North_LogicalNot_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalNotModel1 logicalNotModel; + OHNNGraphArgs graphArgs = logicalNotModel.graphArgs; 
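+ // Descriptive note: addOperation, specifyIO and build are all skipped below, so only tensors are added; OH_NNModel_AddOperation is then invoked directly with a null model handle and a null output-index array and must return OH_NN_INVALID_PARAMETER.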
+ graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + &paramIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/logical_or_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/logical_or_test.cpp new file mode 100644 index 0000000..8b143f1 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/logical_or_test.cpp @@ -0,0 +1,767 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <gtest/gtest.h> +#include <algorithm> +#include <vector> +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class LogicalOrTest : public testing::Test {}; + +struct LogicalOrModel1 { + const std::vector<int32_t> tensor_shape = {3}; + bool input0Value[3] = {true, false, true}; + bool input1Value[3] = {false, false, true}; + bool outputValue[3] = {false}; + + OHNNOperandTest input0 = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, input0Value, 3*sizeof(bool)}; + OHNNOperandTest input1 = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, input1Value, 3*sizeof(bool)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_LOGICAL_OR, + .operands = {input0, input1, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct LogicalOrModel2 { + const std::vector<int32_t> tensor_shape = {3}; + int64_t input0Value[3] = {0, 1, 0}; + int64_t input1Value[3] = {1, 0, 1}; + bool outputValue[3] = {false}; + + OHNNOperandTest input0 = {OH_NN_INT64, OH_NN_TENSOR, tensor_shape, input0Value, 3*sizeof(int64_t)}; + OHNNOperandTest input1 = {OH_NN_INT64, OH_NN_TENSOR, tensor_shape, input1Value, 3*sizeof(int64_t)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_LOGICAL_OR, + .operands = {input0, input1, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalOr_Build_01 + * @tc.desc: Build test for LogicalOrModel1 + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrTest, SUB_AI_NNRt_Func_North_LogicalOr_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalOrModel1 logicalOrModel; + OHNNGraphArgs graphArgs = logicalOrModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = 
OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalOr_Build_02 + * @tc.desc: LogicalOrModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrTest, SUB_AI_NNRt_Func_North_LogicalOr_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalOrModel2 logicalOrModel; + OHNNGraphArgs graphArgs = logicalOrModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalOr_Build_03 + * @tc.desc: LogicalOrModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrTest, SUB_AI_NNRt_Func_North_LogicalOr_Build_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalOrModel2 logicalOrModel; + OHNNGraphArgs graphArgs = logicalOrModel.graphArgs; + graphArgs.operands = {logicalOrModel.input0, logicalOrModel.input1, logicalOrModel.input1, logicalOrModel.output}; + graphArgs.inputIndices = {0, 1, 2}; + graphArgs.outputIndices = {3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalOr_Build_04 + * @tc.desc: LogicalOrModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrTest, SUB_AI_NNRt_Func_North_LogicalOr_Build_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalOrModel2 logicalOrModel; + OHNNGraphArgs graphArgs = logicalOrModel.graphArgs; + graphArgs.operands = {logicalOrModel.input0, logicalOrModel.input1, logicalOrModel.output, logicalOrModel.output}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2, 3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalOr_Build_05 + * @tc.desc: LogicalOrModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrTest, SUB_AI_NNRt_Func_North_LogicalOr_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalOrModel2 logicalOrModel; + OHNNGraphArgs graphArgs = logicalOrModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {logicalOrModel.input0, logicalOrModel.input1, logicalOrModel.output, activation}; + graphArgs.paramIndices = {3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalOr_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ 
+HWTEST_F(LogicalOrTest, SUB_AI_NNRt_Func_North_LogicalOr_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalOr_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrTest, SUB_AI_NNRt_Func_North_LogicalOr_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalOrModel1 logicalOrModel; + OHNNGraphArgs graphArgs = logicalOrModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalOr_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrTest, SUB_AI_NNRt_Func_North_LogicalOr_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalOrModel1 logicalOrModel; + OHNNGraphArgs graphArgs = logicalOrModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalOr_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrTest, SUB_AI_NNRt_Func_North_LogicalOr_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalOrModel1 logicalOrModel; + OHNNGraphArgs graphArgs = logicalOrModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalOr_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrTest, SUB_AI_NNRt_Func_North_LogicalOr_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalOrModel1 logicalOrModel; + OHNNGraphArgs graphArgs = logicalOrModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, 
OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalOr_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrTest, SUB_AI_NNRt_Func_North_LogicalOr_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalOrModel1 logicalOrModel; + OHNNGraphArgs graphArgs = logicalOrModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalOr_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrTest, SUB_AI_NNRt_Func_North_LogicalOr_Model_SpecifyInputsAndOutputs_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalOrModel1 logicalOrModel; + OHNNGraphArgs graphArgs = logicalOrModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalOr_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrTest, SUB_AI_NNRt_Func_North_LogicalOr_Model_SpecifyInputsAndOutputs_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalOrModel1 logicalOrModel; + OHNNGraphArgs graphArgs = logicalOrModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalOr_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrTest, 
SUB_AI_NNRt_Func_North_LogicalOr_Model_SpecifyInputsAndOutputs_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalOrModel1 logicalOrModel; + OHNNGraphArgs graphArgs = logicalOrModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalOr_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrTest, SUB_AI_NNRt_Func_North_LogicalOr_Model_SpecifyInputsAndOutputs_04, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalOrModel1 logicalOrModel; + OHNNGraphArgs graphArgs = logicalOrModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalOr_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrTest, SUB_AI_NNRt_Func_North_LogicalOr_Model_SpecifyInputsAndOutputs_05, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalOrModel1 logicalOrModel; + OHNNGraphArgs graphArgs = logicalOrModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalOr_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrTest, SUB_AI_NNRt_Func_North_LogicalOr_Model_SpecifyInputsAndOutputs_06, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalOrModel1 logicalOrModel; + OHNNGraphArgs graphArgs = logicalOrModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalOr_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrTest, 
SUB_AI_NNRt_Func_North_LogicalOr_Model_SpecifyInputsAndOutputs_07, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalOrModel1 logicalOrModel; + OHNNGraphArgs graphArgs = logicalOrModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalOr_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrTest, SUB_AI_NNRt_Func_North_LogicalOr_Model_SpecifyInputsAndOutputs_08, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalOrModel1 logicalOrModel; + OHNNGraphArgs graphArgs = logicalOrModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalOr_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrTest, SUB_AI_NNRt_Func_North_LogicalOr_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalOrModel1 logicalOrModel; + OHNNGraphArgs graphArgs = logicalOrModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalOr_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrTest, SUB_AI_NNRt_Func_North_LogicalOr_Model_AddOperation_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalOrModel1 logicalOrModel; + OHNNGraphArgs graphArgs = logicalOrModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : 
SUB_AI_NNRt_Func_North_LogicalOr_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrTest, SUB_AI_NNRt_Func_North_LogicalOr_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalOrModel1 logicalOrModel; + OHNNGraphArgs graphArgs = logicalOrModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalOr_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrTest, SUB_AI_NNRt_Func_North_LogicalOr_Model_AddOperation_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalOrModel1 logicalOrModel; + OHNNGraphArgs graphArgs = logicalOrModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalOr_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrTest, SUB_AI_NNRt_Func_North_LogicalOr_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalOrModel1 logicalOrModel; + OHNNGraphArgs graphArgs = logicalOrModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalOr_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrTest, SUB_AI_NNRt_Func_North_LogicalOr_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalOrModel1 logicalOrModel; + OHNNGraphArgs graphArgs = logicalOrModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = 
TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalOr_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrTest, SUB_AI_NNRt_Func_North_LogicalOr_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalOrModel1 logicalOrModel; + OHNNGraphArgs graphArgs = logicalOrModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalOr_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrTest, SUB_AI_NNRt_Func_North_LogicalOr_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalOrModel1 logicalOrModel; + OHNNGraphArgs graphArgs = logicalOrModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LogicalOr_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LogicalOrTest, SUB_AI_NNRt_Func_North_LogicalOr_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LogicalOrModel1 logicalOrModel; + OHNNGraphArgs graphArgs = logicalOrModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/lrn_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/lrn_test.cpp new file mode 100644 index 0000000..d7c5b15 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/lrn_test.cpp @@ -0,0 +1,730 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class LrnTest : public testing::Test {}; + +struct LrnModel1 { + const std::vector tensor_shape = {1, 3, 2, 2}; + + int64_t depthRadiusValue[1] = {1}; + float alphaValue[1] = {0.0001}; + float betaValue[1] = {0.75}; + float biasValue[1] = {2}; + int32_t normRegionValue[1] = {0}; + float inputValue[1][3][2][2] = {{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}, {{9, 10}, {11, 12}}}}; + float outputValue[1][3][2][2] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 12*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 12*sizeof(float)}; + OHNNOperandTest depthRadius = {OH_NN_INT64, OH_NN_LRN_DEPTH_RADIUS, {1}, depthRadiusValue, sizeof(int64_t)}; + OHNNOperandTest alpha = {OH_NN_FLOAT32, OH_NN_LRN_ALPHA, {1}, alphaValue, sizeof(float)}; + OHNNOperandTest beta = {OH_NN_FLOAT32, OH_NN_LRN_BETA, {1}, betaValue, sizeof(float)}; + OHNNOperandTest bias = {OH_NN_FLOAT32, OH_NN_LRN_BIAS, {1}, biasValue, sizeof(float)}; + OHNNOperandTest normRegion = {OH_NN_INT32, OH_NN_LRN_NORM_REGION, {1}, normRegionValue, sizeof(int32_t)}; + + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_LRN, + .operands = {input, output, depthRadius, alpha, beta, bias, normRegion}, + .paramIndices = {2, 3, 4, 5, 6}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Lrn_Build_01 + * @tc.desc: LrnModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(LrnTest, SUB_AI_NNRt_Func_North_Lrn_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LrnModel1 lrnModel; + OHNNGraphArgs graphArgs = lrnModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Lrn_Build_02 + * @tc.desc: LrnModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(LrnTest, SUB_AI_NNRt_Func_North_Lrn_Build_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LrnModel1 lrnModel; + OHNNGraphArgs graphArgs = lrnModel.graphArgs; + graphArgs.operands = {lrnModel.input, lrnModel.input, lrnModel.output, lrnModel.depthRadius, lrnModel.alpha, + lrnModel.beta, lrnModel.bias, lrnModel.normRegion}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2}; + graphArgs.paramIndices = {3, 4, 5, 6, 7}; + 
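+ // Descriptive note: duplicating the input operand gives LRN one more input than its expected operand layout, so building the single-operator graph is expected to fail with OH_NN_INVALID_PARAMETER.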
EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Lrn_Build_03 + * @tc.desc: LrnModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(LrnTest, SUB_AI_NNRt_Func_North_Lrn_Build_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LrnModel1 lrnModel; + OHNNGraphArgs graphArgs = lrnModel.graphArgs; + graphArgs.operands = {lrnModel.input, lrnModel.output, lrnModel.output, lrnModel.depthRadius, lrnModel.alpha, + lrnModel.beta, lrnModel.bias, lrnModel.normRegion}; + graphArgs.inputIndices = {0}; + graphArgs.outputIndices = {1, 2}; + graphArgs.paramIndices = {3, 4, 5, 6, 7}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Lrn_Build_04 + * @tc.desc: LrnModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(LrnTest, SUB_AI_NNRt_Func_North_Lrn_Build_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LrnModel1 lrnModel; + OHNNGraphArgs graphArgs = lrnModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {lrnModel.input, lrnModel.output, activation, lrnModel.depthRadius, lrnModel.alpha, + lrnModel.beta, lrnModel.bias, lrnModel.normRegion}; + graphArgs.paramIndices = {2, 3, 4, 5, 6, 7}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Lrn_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(LrnTest, SUB_AI_NNRt_Func_North_Lrn_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Lrn_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(LrnTest, SUB_AI_NNRt_Func_North_Lrn_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LrnModel1 lrnModel; + OHNNGraphArgs graphArgs = lrnModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Lrn_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(LrnTest, SUB_AI_NNRt_Func_North_Lrn_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LrnModel1 lrnModel; + OHNNGraphArgs graphArgs = lrnModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Lrn_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(LrnTest, SUB_AI_NNRt_Func_North_Lrn_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LrnModel1 lrnModel; + OHNNGraphArgs graphArgs = lrnModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; 
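+ // Descriptive note: each operand is first registered as a tensor; parameter data is then written with a non-existent tensor index (1000 + i), and every such OH_NNModel_SetTensorData call must be rejected with OH_NN_INVALID_PARAMETER.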
+ std::vector<NN_TensorDesc*> tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Lrn_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buffer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LrnTest, SUB_AI_NNRt_Func_North_Lrn_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LrnModel1 lrnModel; + OHNNGraphArgs graphArgs = lrnModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector<NN_TensorDesc*> tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Lrn_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(LrnTest, SUB_AI_NNRt_Func_North_Lrn_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LrnModel1 lrnModel; + OHNNGraphArgs graphArgs = lrnModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector<NN_TensorDesc*> tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Lrn_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LrnTest, SUB_AI_NNRt_Func_North_Lrn_Model_SpecifyInputsAndOutputs_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = 
OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LrnModel1 lrnModel; + OHNNGraphArgs graphArgs = lrnModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Lrn_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LrnTest, SUB_AI_NNRt_Func_North_Lrn_Model_SpecifyInputsAndOutputs_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LrnModel1 lrnModel; + OHNNGraphArgs graphArgs = lrnModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Lrn_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LrnTest, SUB_AI_NNRt_Func_North_Lrn_Model_SpecifyInputsAndOutputs_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LrnModel1 lrnModel; + OHNNGraphArgs graphArgs = lrnModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Lrn_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LrnTest, SUB_AI_NNRt_Func_North_Lrn_Model_SpecifyInputsAndOutputs_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LrnModel1 lrnModel; + OHNNGraphArgs graphArgs = lrnModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Lrn_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LrnTest, SUB_AI_NNRt_Func_North_Lrn_Model_SpecifyInputsAndOutputs_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LrnModel1 lrnModel; + OHNNGraphArgs graphArgs = lrnModel.graphArgs; + graphArgs.specifyIO = 
false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Lrn_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LrnTest, SUB_AI_NNRt_Func_North_Lrn_Model_SpecifyInputsAndOutputs_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LrnModel1 lrnModel; + OHNNGraphArgs graphArgs = lrnModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Lrn_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LrnTest, SUB_AI_NNRt_Func_North_Lrn_Model_SpecifyInputsAndOutputs_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LrnModel1 lrnModel; + OHNNGraphArgs graphArgs = lrnModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Lrn_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LrnTest, SUB_AI_NNRt_Func_North_Lrn_Model_SpecifyInputsAndOutputs_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LrnModel1 lrnModel; + OHNNGraphArgs graphArgs = lrnModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Lrn_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LrnTest, SUB_AI_NNRt_Func_North_Lrn_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LrnModel1 lrnModel; + OHNNGraphArgs graphArgs = lrnModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + 
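// Only the tensors were added above (addOperation/specifyIO/build are disabled), so AddOperation with a null paramIndices must be rejected. +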
auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Lrn_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LrnTest, SUB_AI_NNRt_Func_North_Lrn_Model_AddOperation_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LrnModel1 lrnModel; + OHNNGraphArgs graphArgs = lrnModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Lrn_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LrnTest, SUB_AI_NNRt_Func_North_Lrn_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LrnModel1 lrnModel; + OHNNGraphArgs graphArgs = lrnModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Lrn_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LrnTest, SUB_AI_NNRt_Func_North_Lrn_Model_AddOperation_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LrnModel1 lrnModel; + OHNNGraphArgs graphArgs = lrnModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Lrn_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LrnTest, SUB_AI_NNRt_Func_North_Lrn_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LrnModel1 lrnModel; + 
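// Reuse the LRN single-op graph description; only tensors get added, so the AddOperation call below can be exercised with a null inputIndices. +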
OHNNGraphArgs graphArgs = lrnModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Lrn_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LrnTest, SUB_AI_NNRt_Func_North_Lrn_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LrnModel1 lrnModel; + OHNNGraphArgs graphArgs = lrnModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Lrn_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LrnTest, SUB_AI_NNRt_Func_North_Lrn_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LrnModel1 lrnModel; + OHNNGraphArgs graphArgs = lrnModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Lrn_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LrnTest, SUB_AI_NNRt_Func_North_Lrn_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LrnModel1 lrnModel; + OHNNGraphArgs graphArgs = lrnModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Lrn_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ 
+HWTEST_F(LrnTest, SUB_AI_NNRt_Func_North_Lrn_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LrnModel1 lrnModel; + OHNNGraphArgs graphArgs = lrnModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/lstm_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/lstm_test.cpp new file mode 100644 index 0000000..fc8ead2 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/lstm_test.cpp @@ -0,0 +1,751 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class LSTMTest : public testing::Test {}; + +struct LSTMModel1 { + const std::vector input_shape = {5, 2, 10}; + const std::vector wih_shape = {8, 10}; + const std::vector whh_shape = {8, 2}; + const std::vector bias_shape = {16}; + const std::vector hx_shape = {2, 2}; + const std::vector output_shape = {5, 2}; + + float inputValue[5][2][10] = {1}; + float wihValue[8][10] = {1}; + float whhValue[8][2] = {1}; + float biasValue[16] = {1}; + float hxValue[2][2] = {1}; + float cxValue[2][2] = {1}; + + int64_t inputSizeValue[1] = {10}; + int64_t hiddenSizeValue[1] = {2}; + int64_t numLayersValue[1] = {1}; + + float outputValue[5][2] = {0}; + float hyValue[2][2] = {0}; + float cyValue[2][2] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 100*sizeof(float)}; + OHNNOperandTest wih = {OH_NN_FLOAT32, OH_NN_TENSOR, wih_shape, wihValue, 80*sizeof(float)}; + OHNNOperandTest whh = {OH_NN_FLOAT32, OH_NN_TENSOR, whh_shape, whhValue, 16*sizeof(float)}; + OHNNOperandTest bias = {OH_NN_FLOAT32, OH_NN_TENSOR, bias_shape, biasValue, 16*sizeof(float)}; + OHNNOperandTest hx = {OH_NN_FLOAT32, OH_NN_TENSOR, hx_shape, hxValue, 4*sizeof(float)}; + OHNNOperandTest cx = {OH_NN_FLOAT32, OH_NN_TENSOR, hx_shape, cxValue, 4*sizeof(float)}; + + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 10*sizeof(float)}; + OHNNOperandTest hy = {OH_NN_FLOAT32, OH_NN_TENSOR, hx_shape, hyValue, 4*sizeof(float)}; + OHNNOperandTest cy = {OH_NN_FLOAT32, OH_NN_TENSOR, hx_shape, cyValue, 4*sizeof(float)}; + + OHNNOperandTest inputSize = {OH_NN_INT64, OH_NN_LSTM_INPUT_SIZE, {1}, inputSizeValue, sizeof(int64_t)}; + OHNNOperandTest hiddenSize = {OH_NN_INT64, OH_NN_LSTM_HIDDEN_SIZE, {1}, hiddenSizeValue, sizeof(int64_t)}; + OHNNOperandTest 
numLayers = {OH_NN_INT64, OH_NN_LSTM_NUM_LAYERS, {1}, numLayersValue, sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_LSTM, + .operands = {input, wih, whh, bias, hx, cx, output, hy, + cy, inputSize, hiddenSize, numLayers}, + .paramIndices = {9, 10, 11}, + .inputIndices = {0, 1, 2, 3, 4, 5}, + .outputIndices = {6, 7, 8}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LSTM_Build_01 + * @tc.desc: LSTMModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(LSTMTest, SUB_AI_NNRt_Func_North_LSTM_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LSTMModel1 lSTMModel; + OHNNGraphArgs graphArgs = lSTMModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LSTM_Build_02 + * @tc.desc: LSTMModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(LSTMTest, SUB_AI_NNRt_Func_North_LSTM_Build_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LSTMModel1 lSTMModel; + OHNNGraphArgs graphArgs = lSTMModel.graphArgs; + graphArgs.operands = {lSTMModel.input, lSTMModel.input, lSTMModel.wih, lSTMModel.whh, lSTMModel.bias, lSTMModel.hx, + lSTMModel.cx, lSTMModel.output, lSTMModel.hy, lSTMModel.cy, lSTMModel.inputSize, + lSTMModel.hiddenSize, lSTMModel.numLayers}; + graphArgs.inputIndices = {0, 1, 2, 3, 4, 5, 6}; + graphArgs.outputIndices = {7, 8, 9}; + graphArgs.paramIndices = {10, 11, 12}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LSTM_Build_03 + * @tc.desc: LSTMModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(LSTMTest, SUB_AI_NNRt_Func_North_LSTM_Build_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LSTMModel1 lSTMModel; + OHNNGraphArgs graphArgs = lSTMModel.graphArgs; + graphArgs.operands = {lSTMModel.input, lSTMModel.wih, lSTMModel.whh, lSTMModel.bias, lSTMModel.hx, lSTMModel.cx, + lSTMModel.output, lSTMModel.output, lSTMModel.hy, lSTMModel.cy, lSTMModel.inputSize, + lSTMModel.hiddenSize, lSTMModel.numLayers}; + graphArgs.inputIndices = {0, 1, 2, 3, 4, 5}; + graphArgs.outputIndices = {6, 7, 8, 9}; + graphArgs.paramIndices = {10, 11, 12}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LSTM_Build_04 + * @tc.desc: LSTMModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(LSTMTest, SUB_AI_NNRt_Func_North_LSTM_Build_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LSTMModel1 lSTMModel; + OHNNGraphArgs graphArgs = lSTMModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {lSTMModel.input, lSTMModel.wih, lSTMModel.whh, 
lSTMModel.bias, lSTMModel.hx, + lSTMModel.cx, lSTMModel.output, lSTMModel.hy, lSTMModel.cy, lSTMModel.inputSize, + lSTMModel.hiddenSize, lSTMModel.numLayers, activation}; + graphArgs.paramIndices = {9, 10, 11, 12}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LSTM_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(LSTMTest, SUB_AI_NNRt_Func_North_LSTM_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LSTM_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(LSTMTest, SUB_AI_NNRt_Func_North_LSTM_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LSTMModel1 lSTMModel; + OHNNGraphArgs graphArgs = lSTMModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LSTM_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(LSTMTest, SUB_AI_NNRt_Func_North_LSTM_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LSTMModel1 lSTMModel; + OHNNGraphArgs graphArgs = lSTMModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LSTM_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(LSTMTest, SUB_AI_NNRt_Func_North_LSTM_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LSTMModel1 lSTMModel; + OHNNGraphArgs graphArgs = lSTMModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LSTM_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LSTMTest, SUB_AI_NNRt_Func_North_LSTM_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LSTMModel1 lSTMModel; + OHNNGraphArgs graphArgs = lSTMModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest 
&operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LSTM_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(LSTMTest, SUB_AI_NNRt_Func_North_LSTM_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LSTMModel1 lSTMModel; + OHNNGraphArgs graphArgs = lSTMModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LSTM_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LSTMTest, SUB_AI_NNRt_Func_North_LSTM_Model_SpecifyInputsAndOutputs_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LSTMModel1 lSTMModel; + OHNNGraphArgs graphArgs = lSTMModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LSTM_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LSTMTest, SUB_AI_NNRt_Func_North_LSTM_Model_SpecifyInputsAndOutputs_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LSTMModel1 lSTMModel; + OHNNGraphArgs graphArgs = lSTMModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); 
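+    // No compilation or executor exists at this point, so only the model handle needs to be released.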
+ + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LSTM_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LSTMTest, SUB_AI_NNRt_Func_North_LSTM_Model_SpecifyInputsAndOutputs_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LSTMModel1 lSTMModel; + OHNNGraphArgs graphArgs = lSTMModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LSTM_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LSTMTest, SUB_AI_NNRt_Func_North_LSTM_Model_SpecifyInputsAndOutputs_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LSTMModel1 lSTMModel; + OHNNGraphArgs graphArgs = lSTMModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LSTM_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LSTMTest, SUB_AI_NNRt_Func_North_LSTM_Model_SpecifyInputsAndOutputs_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LSTMModel1 lSTMModel; + OHNNGraphArgs graphArgs = lSTMModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LSTM_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LSTMTest, SUB_AI_NNRt_Func_North_LSTM_Model_SpecifyInputsAndOutputs_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LSTMModel1 lSTMModel; + OHNNGraphArgs graphArgs = lSTMModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LSTM_Model_SpecifyInputsAndOutputs_07 + * 
@tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LSTMTest, SUB_AI_NNRt_Func_North_LSTM_Model_SpecifyInputsAndOutputs_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LSTMModel1 lSTMModel; + OHNNGraphArgs graphArgs = lSTMModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LSTM_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LSTMTest, SUB_AI_NNRt_Func_North_LSTM_Model_SpecifyInputsAndOutputs_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LSTMModel1 lSTMModel; + OHNNGraphArgs graphArgs = lSTMModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LSTM_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LSTMTest, SUB_AI_NNRt_Func_North_LSTM_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LSTMModel1 lSTMModel; + OHNNGraphArgs graphArgs = lSTMModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LSTM_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LSTMTest, SUB_AI_NNRt_Func_North_LSTM_Model_AddOperation_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LSTMModel1 lSTMModel; + OHNNGraphArgs graphArgs = lSTMModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : 
SUB_AI_NNRt_Func_North_LSTM_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LSTMTest, SUB_AI_NNRt_Func_North_LSTM_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LSTMModel1 lSTMModel; + OHNNGraphArgs graphArgs = lSTMModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LSTM_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LSTMTest, SUB_AI_NNRt_Func_North_LSTM_Model_AddOperation_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LSTMModel1 lSTMModel; + OHNNGraphArgs graphArgs = lSTMModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LSTM_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LSTMTest, SUB_AI_NNRt_Func_North_LSTM_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LSTMModel1 lSTMModel; + OHNNGraphArgs graphArgs = lSTMModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LSTM_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LSTMTest, SUB_AI_NNRt_Func_North_LSTM_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LSTMModel1 lSTMModel; + OHNNGraphArgs graphArgs = lSTMModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + 
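// Corrupt the input index list by nulling its data pointer; OH_NNModel_AddOperation must return OH_NN_INVALID_PARAMETER. +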
inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LSTM_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(LSTMTest, SUB_AI_NNRt_Func_North_LSTM_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LSTMModel1 lSTMModel; + OHNNGraphArgs graphArgs = lSTMModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LSTM_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(LSTMTest, SUB_AI_NNRt_Func_North_LSTM_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LSTMModel1 lSTMModel; + OHNNGraphArgs graphArgs = lSTMModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_LSTM_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(LSTMTest, SUB_AI_NNRt_Func_North_LSTM_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + LSTMModel1 lSTMModel; + OHNNGraphArgs graphArgs = lSTMModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + &paramIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/minimum_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/minimum_test.cpp new file mode 100644 index 0000000..b4e3538 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/minimum_test.cpp @@ -0,0 +1,805 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class MinimumTest : public testing::Test {}; + +struct MinimumModel1 { + const std::vector tensor_shape = {3}; + float input0Value[3] = {1, 3, 5}; + float input1Value[3] = {2, 2, 4}; + float outputValue[3] = {0}; + + OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input0Value, 3*sizeof(float)}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input1Value, 3*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_MINIMUM, + .operands = {input0, input1, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct MinimumModel2 { + const std::vector tensor_shape = {3}; + bool input0Value[3] = {true, false, true}; + bool input1Value[1] = {false}; + bool outputValue[3] = {false}; + + OHNNOperandTest input0 = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, input0Value, 3*sizeof(bool)}; + OHNNOperandTest input1 = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, input1Value, sizeof(bool)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_MINIMUM, + .operands = {input0, input1, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct MinimumModel3 { + const std::vector tensor_shape = {}; + float input0Value[3] = {1, 3, 5}; + bool input1Value[1] = {false}; + float* outputValue = {}; + + OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input0Value, 3*sizeof(float)}; + OHNNOperandTest input1 = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, input1Value, sizeof(bool)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 0*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_MINIMUM, + .operands = {input0, input1, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Minimum_Build_01 + * @tc.desc: MinimumModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(MinimumTest, SUB_AI_NNRt_Func_North_Minimum_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + MinimumModel1 minimumModel; + OHNNGraphArgs graphArgs = minimumModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Minimum_Build_02 + * @tc.desc: 
MinimumModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(MinimumTest, SUB_AI_NNRt_Func_North_Minimum_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + MinimumModel2 minimumModel; + OHNNGraphArgs graphArgs = minimumModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Minimum_Build_03 + * @tc.desc: MinimumModel3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(MinimumTest, SUB_AI_NNRt_Func_North_Minimum_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + MinimumModel3 minimumModel; + OHNNGraphArgs graphArgs = minimumModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Minimum_Build_04 + * @tc.desc: MinimumModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(MinimumTest, SUB_AI_NNRt_Func_North_Minimum_Build_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + MinimumModel1 minimumModel; + OHNNGraphArgs graphArgs = minimumModel.graphArgs; + graphArgs.operands = {minimumModel.input0, minimumModel.input0, minimumModel.input1, minimumModel.output}; + graphArgs.inputIndices = {0, 1, 2}; + graphArgs.outputIndices = {3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Minimum_Build_05 + * @tc.desc: MinimumModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(MinimumTest, SUB_AI_NNRt_Func_North_Minimum_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + MinimumModel1 minimumModel; + OHNNGraphArgs graphArgs = minimumModel.graphArgs; + graphArgs.operands = {minimumModel.input0, minimumModel.input1, minimumModel.output, minimumModel.output}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2, 3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Minimum_Build_06 + * @tc.desc: MinimumModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(MinimumTest, SUB_AI_NNRt_Func_North_Minimum_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + MinimumModel1 minimumModel; + OHNNGraphArgs graphArgs = minimumModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = 
{OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {minimumModel.input0, minimumModel.input1, minimumModel.output, activation}; + graphArgs.paramIndices = {3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Minimum_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(MinimumTest, SUB_AI_NNRt_Func_North_Minimum_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Minimum_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(MinimumTest, SUB_AI_NNRt_Func_North_Minimum_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + MinimumModel1 minimumModel; + OHNNGraphArgs graphArgs = minimumModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Minimum_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(MinimumTest, SUB_AI_NNRt_Func_North_Minimum_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + MinimumModel1 minimumModel; + OHNNGraphArgs graphArgs = minimumModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Minimum_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(MinimumTest, SUB_AI_NNRt_Func_North_Minimum_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + MinimumModel1 minimumModel; + OHNNGraphArgs graphArgs = minimumModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Minimum_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(MinimumTest, SUB_AI_NNRt_Func_North_Minimum_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + MinimumModel1 minimumModel; + OHNNGraphArgs graphArgs = minimumModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for 
(size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Minimum_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(MinimumTest, SUB_AI_NNRt_Func_North_Minimum_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + MinimumModel1 minimumModel; + OHNNGraphArgs graphArgs = minimumModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Minimum_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(MinimumTest, SUB_AI_NNRt_Func_North_Minimum_Model_SpecifyInputsAndOutputs_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + MinimumModel1 minimumModel; + OHNNGraphArgs graphArgs = minimumModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Minimum_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(MinimumTest, SUB_AI_NNRt_Func_North_Minimum_Model_SpecifyInputsAndOutputs_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + MinimumModel1 minimumModel; + OHNNGraphArgs graphArgs = minimumModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + 
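// Null the data pointer inside inputIndices before the call; OH_NNModel_SpecifyInputsAndOutputs is expected to reject it with OH_NN_INVALID_PARAMETER. +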
inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Minimum_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(MinimumTest, SUB_AI_NNRt_Func_North_Minimum_Model_SpecifyInputsAndOutputs_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + MinimumModel1 minimumModel; + OHNNGraphArgs graphArgs = minimumModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Minimum_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(MinimumTest, SUB_AI_NNRt_Func_North_Minimum_Model_SpecifyInputsAndOutputs_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + MinimumModel1 minimumModel; + OHNNGraphArgs graphArgs = minimumModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Minimum_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(MinimumTest, SUB_AI_NNRt_Func_North_Minimum_Model_SpecifyInputsAndOutputs_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + MinimumModel1 minimumModel; + OHNNGraphArgs graphArgs = minimumModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Minimum_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(MinimumTest, SUB_AI_NNRt_Func_North_Minimum_Model_SpecifyInputsAndOutputs_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + MinimumModel1 minimumModel; + OHNNGraphArgs graphArgs = minimumModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = 
nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Minimum_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(MinimumTest, SUB_AI_NNRt_Func_North_Minimum_Model_SpecifyInputsAndOutputs_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + MinimumModel1 minimumModel; + OHNNGraphArgs graphArgs = minimumModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Minimum_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(MinimumTest, SUB_AI_NNRt_Func_North_Minimum_Model_SpecifyInputsAndOutputs_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + MinimumModel1 minimumModel; + OHNNGraphArgs graphArgs = minimumModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Minimum_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(MinimumTest, SUB_AI_NNRt_Func_North_Minimum_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + MinimumModel1 minimumModel; + OHNNGraphArgs graphArgs = minimumModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Minimum_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(MinimumTest, SUB_AI_NNRt_Func_North_Minimum_Model_AddOperation_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + MinimumModel1 minimumModel; + OHNNGraphArgs graphArgs = minimumModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = 
TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Minimum_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(MinimumTest, SUB_AI_NNRt_Func_North_Minimum_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + MinimumModel1 minimumModel; + OHNNGraphArgs graphArgs = minimumModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Minimum_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(MinimumTest, SUB_AI_NNRt_Func_North_Minimum_Model_AddOperation_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + MinimumModel1 minimumModel; + OHNNGraphArgs graphArgs = minimumModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Minimum_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(MinimumTest, SUB_AI_NNRt_Func_North_Minimum_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + MinimumModel1 minimumModel; + OHNNGraphArgs graphArgs = minimumModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Minimum_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(MinimumTest, SUB_AI_NNRt_Func_North_Minimum_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + MinimumModel1 minimumModel; + OHNNGraphArgs graphArgs = minimumModel.graphArgs; + graphArgs.addOperation = false; + 
graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Minimum_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(MinimumTest, SUB_AI_NNRt_Func_North_Minimum_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + MinimumModel1 minimumModel; + OHNNGraphArgs graphArgs = minimumModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Minimum_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(MinimumTest, SUB_AI_NNRt_Func_North_Minimum_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + MinimumModel1 minimumModel; + OHNNGraphArgs graphArgs = minimumModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Minimum_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(MinimumTest, SUB_AI_NNRt_Func_North_Minimum_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + MinimumModel1 minimumModel; + OHNNGraphArgs graphArgs = minimumModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/mock_idevice.cpp b/test/nnrt_xts_acts/nncore/opstest/src/mock_idevice.cpp new 
file mode 100644 index 0000000..fbde26c --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/mock_idevice.cpp @@ -0,0 +1,371 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include + +#include "nncore_const.h" +#include "mock_idevice.h" +#include "hdi_device_v2_1.h" +#include "hdi_returncode_utils_v2_1.h" +#include "log.h" +#include "utils.h" +#include "nnbackend.h" +#include "backend_registrar.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +void PrintRetLog(int32_t ret, int32_t nnrtSuccess, const std::string& makeName) +{ + if (ret < nnrtSuccess) { + LOGW("%s failed. An error occurred in HDI, errorcode is %{public}d.", makeName.c_str(), ret); + } else { + OHOS::HDI::Nnrt::V2_1::NNRT_ReturnCode nnrtRet = static_cast(ret); + LOGW("%s failed. Errorcode is %{public}s.", makeName.c_str(), ConverterRetToString(nnrtRet).c_str()); + } +} + +std::shared_ptr HDIDeviceV2_1Creator() +{ + std::string deviceName; + std::string vendorName; + std::string version; + + // only one device from HDI now. + OHOS::sptr iDevice = V2_1::INnrtDevice::Get(); + if (iDevice == nullptr) { + LOGW("Get HDI device failed."); + return nullptr; + } + + auto ret = iDevice->GetDeviceName(deviceName); + int32_t nnrtSuccess = static_cast(V2_1::NNRT_ReturnCode::NNRT_SUCCESS); + if (ret != nnrtSuccess) { + std::string makeName = "Get device name"; + PrintRetLog(ret, nnrtSuccess, makeName); + return nullptr; + } + + ret = iDevice->GetVendorName(vendorName); + if (ret != nnrtSuccess) { + std::string makeName = "Get vendor name"; + PrintRetLog(ret, nnrtSuccess, makeName); + return nullptr; + } + + std::pair hdiVersion; + ret = iDevice->GetVersion(hdiVersion.first, hdiVersion.second); + if (ret != nnrtSuccess) { + std::string makeName = "Get version"; + PrintRetLog(ret, nnrtSuccess, makeName); + return nullptr; + } + version = 'v' + std::to_string(hdiVersion.first) + '_' + std::to_string(hdiVersion.second); + const std::string& backendName = GenUniqueName(deviceName, vendorName, version); + + std::shared_ptr device = CreateSharedPtr(iDevice); + if (device == nullptr) { + LOGW("Failed to create device, because fail to create device instance."); + return nullptr; + } + + std::shared_ptr backend = std::make_shared(device, std::hash{}(backendName)); + if (backend == nullptr) { + LOGW("Failed to register backend, because fail to create backend."); + } + return backend; +} + +REGISTER_BACKEND(HDIDeviceV2_1, HDIDeviceV2_1Creator) +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +namespace OHOS { +namespace HDI { +namespace Nnrt { +namespace V2_1 { + +sptr INnrtDevice::Get(bool isStub) +{ + return INnrtDevice::Get("mock_device_service", isStub); +} + +sptr INnrtDevice::Get(const std::string &serviceName, bool isStub) +{ + if (isStub) { + return nullptr; + } + sptr mockIDevice = sptr(MockIDevice::GetInstance()); + return mockIDevice; +} + +MockIDevice::~MockIDevice() +{ + for (auto fd : m_fds) { + close(fd); + } +} + 
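A minimal usage sketch (editor's addition, not part of the patch): because the Get() overrides above hand back the MockIDevice singleton, a test can pre-set the capabilities the runtime will observe before it compiles a graph. The helper name below is hypothetical; every call it makes is declared in mock_idevice.h and exercised elsewhere in this file.

void ExampleTuneMockCapabilities()  // hypothetical helper, for illustration only
{
    MockIDevice *mock = MockIDevice::GetInstance();
    mock->SetFP16Supported(true);          // report float16 support to the compiler
    mock->SetModelCacheSupported(true);    // allow model-cache paths to be exercised
    mock->SetOperationsSupported({true});  // mark the single op of a one-op graph as supported

    // INnrtDevice::Get() resolves to the same singleton, so the runtime sees these values.
    sptr<INnrtDevice> device = INnrtDevice::Get(false);
    bool fp16 = false;
    device->IsFloat16PrecisionSupported(fp16);  // fp16 is now true
}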
+MockIDevice::MockIDevice()
+{
+    m_bufferFd = 0;
+}
+
+MockIPreparedModel::~MockIPreparedModel()
+{
+    for (auto fd : m_fds) {
+        close(fd);
+    }
+}
+
+MockIDevice *MockIDevice::GetInstance()
+{
+    static MockIDevice iDevice;
+    return &iDevice;
+}
+
+void MockIDevice::SetFP16Supported(bool isSupported)
+{
+    m_fp16 = isSupported;
+}
+
+void MockIDevice::SetPerformanceSupported(bool isSupported)
+{
+    m_performance = isSupported;
+}
+
+void MockIDevice::SetPrioritySupported(bool isSupported)
+{
+    m_priority = isSupported;
+}
+
+void MockIDevice::SetModelCacheSupported(bool isSupported)
+{
+    m_cache = isSupported;
+}
+
+void MockIDevice::SetOperationsSupported(std::vector<bool> isSupported)
+{
+    m_operations = isSupported;
+}
+
+void MockIDevice::SetDynamicInputSupported(bool isSupported)
+{
+    m_dynamic = isSupported;
+}
+
+int32_t MockIDevice::GetDeviceName(std::string& name)
+{
+    name = "Device-CPU";
+    return HDF_SUCCESS;
+}
+
+int32_t MockIDevice::GetVendorName(std::string& name)
+{
+    name = "TestVendor";
+    return HDF_SUCCESS;
+}
+
+int32_t MockIDevice::GetDeviceType(DeviceType& deviceType)
+{
+    deviceType = DeviceType::CPU;
+    return HDF_SUCCESS;
+}
+
+int32_t MockIDevice::GetDeviceStatus(DeviceStatus& status)
+{
+    status = DeviceStatus::AVAILABLE;
+    return HDF_SUCCESS;
+}
+
+int32_t MockIDevice::GetVersion(uint32_t &majorVersion, uint32_t &minorVersion)
+{
+    uint32_t twoNum = 2;
+    majorVersion = twoNum;
+    minorVersion = 1;
+    return HDF_SUCCESS;
+}
+
+int32_t MockIDevice::GetSupportedOperation(const Model& model, std::vector<bool>& ops)
+{
+    ops = m_operations;
+    return HDF_SUCCESS;
+}
+
+int32_t MockIDevice::IsFloat16PrecisionSupported(bool& isSupported)
+{
+    isSupported = m_fp16;
+    return HDF_SUCCESS;
+}
+
+int32_t MockIDevice::IsPerformanceModeSupported(bool& isSupported)
+{
+    isSupported = m_performance;
+    return HDF_SUCCESS;
+}
+
+int32_t MockIDevice::IsPrioritySupported(bool& isSupported)
+{
+    isSupported = m_priority;
+    return HDF_SUCCESS;
+}
+
+int32_t MockIDevice::IsDynamicInputSupported(bool& isSupported)
+{
+    isSupported = m_dynamic;
+    return HDF_SUCCESS;
+}
+
+int32_t MockIDevice::IsModelCacheSupported(bool& isSupported)
+{
+    isSupported = m_cache;
+    return HDF_SUCCESS;
+}
+
+int32_t MockIDevice::AllocateBuffer(uint32_t length, SharedBuffer &buffer)
+{
+    std::lock_guard<std::mutex> lock(m_mtx);
+    buffer.fd = AshmemCreate("allocateBuffer", length);
+    buffer.bufferSize = AshmemGetSize(buffer.fd);
+    buffer.offset = 0;
+    buffer.dataSize = length;
+
+    AshmemSetProt(buffer.fd, PROT_READ | PROT_WRITE);
+    m_fds.emplace(buffer.fd);
+    m_bufferFd = buffer.fd;
+    return HDF_SUCCESS;
+}
+
+int32_t MockIDevice::ReleaseBuffer(const SharedBuffer &buffer)
+{
+    if (m_fds.find(buffer.fd) == m_fds.end()) {
+        LOGE("[mock_idevice] ReleaseBuffer: buffer fd is invalid. fd = %d.", buffer.fd);
+        return HDF_FAILURE;
+    }
+    if (close(buffer.fd) != 0) {
+        LOGE("[mock_idevice] ReleaseBuffer: Close memory fd failed. 
fd=%d", buffer.fd); + return HDF_FAILURE; + } + return HDF_SUCCESS; +} + +int32_t MockIDevice::MemoryCopy(float* data, uint32_t length) +{ + std::lock_guard lock(m_mtx); + void* mapData = mmap(nullptr, length, PROT_READ | PROT_WRITE, MAP_SHARED, m_bufferFd, 0); + if (mapData == MAP_FAILED) { + LOGE("[mock_idevice] Map fd to address failed : %{public}s.", strerror(errno)); + return HDF_FAILURE; + } + + auto memRet = memcpy_s(mapData, length, data, length); + auto unmapResult = munmap(mapData, length); + if (unmapResult != 0) { + LOGE("[mock_idevice] ExportModelCache failed, please try again."); + return HDF_FAILURE; + } + + if (memRet != EOK) { + LOGE("[mock_idevice] ExportModelCache failed, failed to memcpy_s datat type."); + return HDF_FAILURE; + } + return HDF_SUCCESS; +} + +int32_t MockIDevice::PrepareModel(const Model& model, const ModelConfig& config, sptr& preparedModel) +{ + preparedModel = new (std::nothrow) V2_1::MockIPreparedModel(); + return HDF_SUCCESS; +} + +int32_t MockIDevice::PrepareOfflineModel(const std::vector& offlineModels, const ModelConfig& config, + sptr& preparedModel) +{ + preparedModel = new (std::nothrow) V2_1::MockIPreparedModel(); + return V2_1::NNRT_ReturnCode::NNRT_SUCCESS; +} + +int32_t MockIDevice::PrepareModelFromModelCache(const std::vector& modelCache, const ModelConfig& config, + sptr& preparedModel) +{ + preparedModel = new (std::nothrow) V2_1::MockIPreparedModel(); + return HDF_SUCCESS; +} + +int32_t MockIPreparedModel::ExportModelCache(std::vector& modelCache) +{ + if (!modelCache.empty()) { + LOGE("[NNRtTest] The parameters of ExportModelCache should be an empty vector."); + return HDF_ERR_INVALID_PARAM; + } + + uint8_t bufferData[4] = {0, 1, 2, 3}; + uint32_t size = sizeof(bufferData); + SharedBuffer buffer; + buffer.fd = AshmemCreate("cache", size); + buffer.bufferSize = AshmemGetSize(buffer.fd); + buffer.offset = 0; + buffer.dataSize = size; + + AshmemSetProt(buffer.fd, PROT_READ | PROT_WRITE); + + void* data = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, buffer.fd, 0); + if (data == MAP_FAILED) { + LOGE("[mock_idevice]::ExportModelCache failed, Map fd to address failed:%{public}s.", strerror(errno)); + return HDF_FAILURE; + } + + auto memRet = memcpy_s(data, size, bufferData, size); + auto unmapResult = munmap(data, size); + if (unmapResult != 0) { + LOGE("[NNRtTest] ExportModelCache failed, failed to memcpy_s data type."); + return HDF_FAILURE; + } + + if (memRet != EOK) { + return HDF_FAILURE; + } + + m_fds.emplace(buffer.fd); + modelCache.emplace_back(buffer); + return HDF_SUCCESS; +} + +int32_t MockIPreparedModel::GetVersion(uint32_t &majorVersion, uint32_t &minorVersion) +{ + majorVersion = 1; + minorVersion = 0; + return HDF_SUCCESS; +} + +int32_t MockIPreparedModel::Run(const std::vector& inputs, const std::vector& outputs, + std::vector>& outputsDims) +{ + outputsDims = {{2, 2, 2, 2}}; + return HDF_SUCCESS; +} + +int32_t MockIPreparedModel::GetInputDimRanges(std::vector>& minInputDims, + std::vector>& maxInputDims) +{ + minInputDims = {{2, 2, 2, 2}, {2, 2, 2, 2}}; + maxInputDims = {{2, 100, 100, 10}, {2, 100, 100, 10}}; + + return HDF_SUCCESS; +} + +} // namespace V2_1 +} // namespace Nnrt +} // namespace HDI +} // namespace OHOS \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/mod_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/mod_test.cpp new file mode 100644 index 0000000..d1eb1fe --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/mod_test.cpp @@ -0,0 +1,804 @@ +/* + * Copyright (c) 
2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class ModTest : public testing::Test {}; + +struct ModModel1 { + const std::vector tensor_shape = {3}; + int64_t input0Value[3] = {10, 20, 30}; + int64_t input1Value[1] = {3}; + int64_t outputValue[3] = {0}; + + OHNNOperandTest input0 = {OH_NN_INT64, OH_NN_TENSOR, tensor_shape, input0Value, 3*sizeof(int64_t)}; + OHNNOperandTest input1 = {OH_NN_INT64, OH_NN_TENSOR, tensor_shape, input1Value, 3*sizeof(int64_t)}; + OHNNOperandTest output = {OH_NN_INT64, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_MOD, + .operands = {input0, input1, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct ModModel2 { + const std::vector tensor_shape = {3}; + bool input0Value[1] = {true}; + int64_t input1Value[3] = {2, 3, 4}; + int64_t outputValue[3] = {0}; + + OHNNOperandTest input0 = {OH_NN_BOOL, OH_NN_TENSOR, {1}, input0Value, sizeof(int64_t)}; + OHNNOperandTest input1 = {OH_NN_INT64, OH_NN_TENSOR, tensor_shape, input1Value, 3*sizeof(int64_t)}; + OHNNOperandTest output = {OH_NN_INT64, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_MOD, + .operands = {input0, input1, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct ModModel3 { + const std::vector tensor_shape = {1}; + bool input0Value[1] = {true}; + bool input1Value[1] = {true}; + int64_t* outputValue = {}; + + OHNNOperandTest input0 = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, input0Value, sizeof(int64_t)}; + OHNNOperandTest input1 = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, input1Value, sizeof(int64_t)}; + OHNNOperandTest output = {OH_NN_INT64, OH_NN_TENSOR, tensor_shape, outputValue, sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_MOD, + .operands = {input0, input1, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Mod_Build_01 + * @tc.desc: ModModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ModTest, SUB_AI_NNRt_Func_North_Mod_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ModModel1 modModel; + OHNNGraphArgs graphArgs = modModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, 
executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Mod_Build_02 + * @tc.desc: ModModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ModTest, SUB_AI_NNRt_Func_North_Mod_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ModModel2 modModel; + OHNNGraphArgs graphArgs = modModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Mod_Build_03 + * @tc.desc: ModModel3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ModTest, SUB_AI_NNRt_Func_North_Mod_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ModModel3 modModel; + OHNNGraphArgs graphArgs = modModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Mod_Build_04 + * @tc.desc: ModModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ModTest, SUB_AI_NNRt_Func_North_Mod_Build_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ModModel2 modModel; + OHNNGraphArgs graphArgs = modModel.graphArgs; + graphArgs.operands = {modModel.input0, modModel.input1, modModel.input1, modModel.output}; + graphArgs.inputIndices = {0, 1, 2}; + graphArgs.outputIndices = {3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Mod_Build_05 + * @tc.desc: ModModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ModTest, SUB_AI_NNRt_Func_North_Mod_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ModModel2 modModel; + OHNNGraphArgs graphArgs = modModel.graphArgs; + graphArgs.operands = {modModel.input0, modModel.input1, modModel.output, modModel.output}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2, 3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Mod_Build_06 + * @tc.desc: ModModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ModTest, SUB_AI_NNRt_Func_North_Mod_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ModModel2 modModel; + OHNNGraphArgs graphArgs = modModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + 
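+    // The Mod graph above declares no parameter tensors, so appending this activation operand
+    // and listing it in paramIndices is expected to make BuildSingleOpGraph fail with
+    // OH_NN_INVALID_PARAMETER.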
graphArgs.operands = {modModel.input0, modModel.input1, modModel.output, activation}; + graphArgs.paramIndices = {3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Mod_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(ModTest, SUB_AI_NNRt_Func_North_Mod_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Mod_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(ModTest, SUB_AI_NNRt_Func_North_Mod_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ModModel1 modModel; + OHNNGraphArgs graphArgs = modModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Mod_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(ModTest, SUB_AI_NNRt_Func_North_Mod_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ModModel1 modModel; + OHNNGraphArgs graphArgs = modModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Mod_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(ModTest, SUB_AI_NNRt_Func_North_Mod_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ModModel1 modModel; + OHNNGraphArgs graphArgs = modModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Mod_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ModTest, SUB_AI_NNRt_Func_North_Mod_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ModModel1 modModel; + OHNNGraphArgs graphArgs = modModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) 
operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Mod_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(ModTest, SUB_AI_NNRt_Func_North_Mod_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ModModel1 modModel; + OHNNGraphArgs graphArgs = modModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Mod_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ModTest, SUB_AI_NNRt_Func_North_Mod_Model_SpecifyInputsAndOutputs_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ModModel1 modModel; + OHNNGraphArgs graphArgs = modModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Mod_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ModTest, SUB_AI_NNRt_Func_North_Mod_Model_SpecifyInputsAndOutputs_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ModModel1 modModel; + OHNNGraphArgs graphArgs = modModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Mod_Model_SpecifyInputsAndOutputs_03 + * 
@tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ModTest, SUB_AI_NNRt_Func_North_Mod_Model_SpecifyInputsAndOutputs_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ModModel1 modModel; + OHNNGraphArgs graphArgs = modModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Mod_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ModTest, SUB_AI_NNRt_Func_North_Mod_Model_SpecifyInputsAndOutputs_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ModModel1 modModel; + OHNNGraphArgs graphArgs = modModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Mod_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ModTest, SUB_AI_NNRt_Func_North_Mod_Model_SpecifyInputsAndOutputs_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ModModel1 modModel; + OHNNGraphArgs graphArgs = modModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Mod_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ModTest, SUB_AI_NNRt_Func_North_Mod_Model_SpecifyInputsAndOutputs_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ModModel1 modModel; + OHNNGraphArgs graphArgs = modModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Mod_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ModTest, SUB_AI_NNRt_Func_North_Mod_Model_SpecifyInputsAndOutputs_07, 
Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ModModel1 modModel; + OHNNGraphArgs graphArgs = modModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Mod_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ModTest, SUB_AI_NNRt_Func_North_Mod_Model_SpecifyInputsAndOutputs_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ModModel1 modModel; + OHNNGraphArgs graphArgs = modModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Mod_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ModTest, SUB_AI_NNRt_Func_North_Mod_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ModModel1 modModel; + OHNNGraphArgs graphArgs = modModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Mod_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ModTest, SUB_AI_NNRt_Func_North_Mod_Model_AddOperation_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ModModel1 modModel; + OHNNGraphArgs graphArgs = modModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Mod_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ModTest, SUB_AI_NNRt_Func_North_Mod_Model_AddOperation_03, Function | MediumTest | Level2) +{ + 
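+    // paramIndices is redirected to tensor index 100000, which was never added to the model,
+    // so OH_NNModel_AddOperation is expected to return OH_NN_INVALID_PARAMETER.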
OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ModModel1 modModel; + OHNNGraphArgs graphArgs = modModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Mod_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ModTest, SUB_AI_NNRt_Func_North_Mod_Model_AddOperation_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ModModel1 modModel; + OHNNGraphArgs graphArgs = modModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Mod_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ModTest, SUB_AI_NNRt_Func_North_Mod_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ModModel1 modModel; + OHNNGraphArgs graphArgs = modModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Mod_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ModTest, SUB_AI_NNRt_Func_North_Mod_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ModModel1 modModel; + OHNNGraphArgs graphArgs = modModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : 
SUB_AI_NNRt_Func_North_Mod_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ModTest, SUB_AI_NNRt_Func_North_Mod_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ModModel1 modModel; + OHNNGraphArgs graphArgs = modModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Mod_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ModTest, SUB_AI_NNRt_Func_North_Mod_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ModModel1 modModel; + OHNNGraphArgs graphArgs = modModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Mod_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ModTest, SUB_AI_NNRt_Func_North_Mod_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ModModel1 modModel; + OHNNGraphArgs graphArgs = modModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/neg_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/neg_test.cpp new file mode 100644 index 0000000..4cbfa14 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/neg_test.cpp @@ -0,0 +1,755 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class NegTest : public testing::Test {}; + +struct NegModel1 { + const std::vector tensor_shape = {1}; + bool inputValue[1] = {true}; + bool outputValue[1] = {false}; + + OHNNOperandTest input = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, inputValue, sizeof(bool)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_NEG, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct NegModel2 { + const std::vector tensor_shape = {1}; + int64_t inputValue[3] = {0, 1, 2}; + int64_t outputValue[3] = {0}; + + OHNNOperandTest input = {OH_NN_INT64, OH_NN_TENSOR, tensor_shape, inputValue, 3*sizeof(int64_t)}; + OHNNOperandTest output = {OH_NN_INT64, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_NEG, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Neg_Build_01 + * @tc.desc: NegModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(NegTest, SUB_AI_NNRt_Func_North_Neg_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NegModel1 negModel; + OHNNGraphArgs graphArgs = negModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Neg_Build_02 + * @tc.desc: NegModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(NegTest, SUB_AI_NNRt_Func_North_Neg_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NegModel2 negModel; + OHNNGraphArgs graphArgs = negModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Neg_Build_03 + * @tc.desc: NegModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(NegTest, SUB_AI_NNRt_Func_North_Neg_Build_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NegModel2 negModel; + OHNNGraphArgs graphArgs = negModel.graphArgs; + graphArgs.operands = {negModel.input, negModel.input, negModel.output}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, 
BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Neg_Build_04 + * @tc.desc: NegModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(NegTest, SUB_AI_NNRt_Func_North_Neg_Build_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NegModel2 negModel; + OHNNGraphArgs graphArgs = negModel.graphArgs; + graphArgs.operands = {negModel.input, negModel.output, negModel.output}; + graphArgs.inputIndices = {0}; + graphArgs.outputIndices = {1, 2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Neg_Build_05 + * @tc.desc: NegModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(NegTest, SUB_AI_NNRt_Func_North_Neg_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NegModel2 negModel; + OHNNGraphArgs graphArgs = negModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {negModel.input, negModel.output, activation}; + graphArgs.paramIndices = {2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Neg_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(NegTest, SUB_AI_NNRt_Func_North_Neg_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Neg_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(NegTest, SUB_AI_NNRt_Func_North_Neg_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NegModel1 negModel; + OHNNGraphArgs graphArgs = negModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Neg_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(NegTest, SUB_AI_NNRt_Func_North_Neg_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NegModel1 negModel; + OHNNGraphArgs graphArgs = negModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Neg_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(NegTest, SUB_AI_NNRt_Func_North_Neg_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NegModel1 negModel; + OHNNGraphArgs graphArgs = negModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, 
operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Neg_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(NegTest, SUB_AI_NNRt_Func_North_Neg_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NegModel1 negModel; + OHNNGraphArgs graphArgs = negModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Neg_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(NegTest, SUB_AI_NNRt_Func_North_Neg_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NegModel1 negModel; + OHNNGraphArgs graphArgs = negModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Neg_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(NegTest, SUB_AI_NNRt_Func_North_Neg_Model_SpecifyInputsAndOutputs_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NegModel1 negModel; + OHNNGraphArgs graphArgs = negModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = 
TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Neg_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(NegTest, SUB_AI_NNRt_Func_North_Neg_Model_SpecifyInputsAndOutputs_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NegModel1 negModel; + OHNNGraphArgs graphArgs = negModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Neg_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(NegTest, SUB_AI_NNRt_Func_North_Neg_Model_SpecifyInputsAndOutputs_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NegModel1 negModel; + OHNNGraphArgs graphArgs = negModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Neg_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(NegTest, SUB_AI_NNRt_Func_North_Neg_Model_SpecifyInputsAndOutputs_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NegModel1 negModel; + OHNNGraphArgs graphArgs = negModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Neg_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(NegTest, SUB_AI_NNRt_Func_North_Neg_Model_SpecifyInputsAndOutputs_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NegModel1 negModel; + OHNNGraphArgs graphArgs = negModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, 
OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Neg_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(NegTest, SUB_AI_NNRt_Func_North_Neg_Model_SpecifyInputsAndOutputs_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NegModel1 negModel; + OHNNGraphArgs graphArgs = negModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Neg_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(NegTest, SUB_AI_NNRt_Func_North_Neg_Model_SpecifyInputsAndOutputs_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NegModel1 negModel; + OHNNGraphArgs graphArgs = negModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Neg_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(NegTest, SUB_AI_NNRt_Func_North_Neg_Model_SpecifyInputsAndOutputs_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NegModel1 negModel; + OHNNGraphArgs graphArgs = negModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Neg_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(NegTest, SUB_AI_NNRt_Func_North_Neg_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NegModel1 negModel; + OHNNGraphArgs graphArgs = negModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + 
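The build cases above construct boolean and int64 Neg graphs through BuildSingleOpGraph and CompileGraphMock. As a reading aid, here is a minimal sketch of how the same OHNNOperandTest/OHNNGraphArgs pattern would extend to a float32 variant, assuming only the helpers declared in nncore_utils.h (BuildSingleOpGraph, CompileGraphMock, Free); FloatNegModelSketch and its values are illustrative and not part of the patch.

// Illustrative only: a float32 variant of the single-op Neg model, mirroring NegModel1/NegModel2.
struct FloatNegModelSketch {                              // hypothetical name, not in the patch
    const std::vector<int32_t> tensorShape = {3};
    float inputValue[3] = {1.0f, -2.0f, 3.0f};
    float outputValue[3] = {0.0f};

    OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensorShape, inputValue, 3 * sizeof(float)};
    OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensorShape, outputValue, 3 * sizeof(float)};
    OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_NEG,
                               .operands = {input, output},
                               .paramIndices = {},
                               .inputIndices = {0},
                               .outputIndices = {1}};
};

// Sketch of the build/compile/free flow the cases above exercise with gtest assertions.
void BuildFloatNegSketch()
{
    OH_NNModel *model = OH_NNModel_Construct();
    FloatNegModelSketch negModel;
    if (BuildSingleOpGraph(model, negModel.graphArgs) != OH_NN_SUCCESS) {
        Free(model, nullptr, nullptr);
        return;
    }

    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
    OHNNCompileParam compileParam{
        .performanceMode = OH_NN_PERFORMANCE_HIGH,
        .priority = OH_NN_PRIORITY_HIGH,
    };
    if (CompileGraphMock(compilation, compileParam) != OH_NN_SUCCESS) {
        Free(model, compilation, nullptr);
        return;
    }

    OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation);
    Free(model, compilation, executor);
}

The only substantive difference from the test cases above is the operand data type and length; the graph wiring (one input index, one output index, no parameter indices) is identical.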
+/** + * @tc.number : SUB_AI_NNRt_Func_North_Neg_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(NegTest, SUB_AI_NNRt_Func_North_Neg_Model_AddOperation_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NegModel1 negModel; + OHNNGraphArgs graphArgs = negModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Neg_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(NegTest, SUB_AI_NNRt_Func_North_Neg_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NegModel1 negModel; + OHNNGraphArgs graphArgs = negModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Neg_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(NegTest, SUB_AI_NNRt_Func_North_Neg_Model_AddOperation_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NegModel1 negModel; + OHNNGraphArgs graphArgs = negModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Neg_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(NegTest, SUB_AI_NNRt_Func_North_Neg_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NegModel1 negModel; + OHNNGraphArgs graphArgs = negModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = 
TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Neg_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(NegTest, SUB_AI_NNRt_Func_North_Neg_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NegModel1 negModel; + OHNNGraphArgs graphArgs = negModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Neg_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(NegTest, SUB_AI_NNRt_Func_North_Neg_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NegModel1 negModel; + OHNNGraphArgs graphArgs = negModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Neg_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(NegTest, SUB_AI_NNRt_Func_North_Neg_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NegModel1 negModel; + OHNNGraphArgs graphArgs = negModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Neg_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(NegTest, SUB_AI_NNRt_Func_North_Neg_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NegModel1 negModel; + OHNNGraphArgs graphArgs = negModel.graphArgs; + graphArgs.addOperation = false; + 
graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/nncore_utils.cpp b/test/nnrt_xts_acts/nncore/opstest/src/nncore_utils.cpp new file mode 100644 index 0000000..e4b0c4e --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/nncore_utils.cpp @@ -0,0 +1,239 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include + +#include "nncore_utils.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Test { +OH_NN_UInt32Array TransformUInt32Array(const std::vector& vector) +{ + uint32_t* data = (vector.empty()) ? nullptr : const_cast(vector.data()); + return {data, vector.size()}; +} + +NN_TensorDesc* createTensorDesc(const int32_t* shape, size_t shapeNum, OH_NN_DataType dataType, OH_NN_Format format) +{ + NN_TensorDesc* tensorDescTmp = OH_NNTensorDesc_Create(); + if (tensorDescTmp == nullptr) { + LOGE("[NNRtTest]OH_NNTensorDesc_Create failed!"); + return nullptr; + } + + OH_NN_ReturnCode ret = OH_NNTensorDesc_SetDataType(tensorDescTmp, dataType); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest]OH_NNTensorDesc_SetDataType failed!ret = %d\n", ret); + return nullptr; + } + + if (shape != nullptr) { + ret = OH_NNTensorDesc_SetShape(tensorDescTmp, shape, shapeNum); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest]OH_NNTensorDesc_SetShape failed!ret = %d\n", ret); + return nullptr; + } + } + + ret = OH_NNTensorDesc_SetFormat(tensorDescTmp, format); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest]OH_NNTensorDesc_SetShape failed!ret = %d\n", ret); + return nullptr; + } + + return tensorDescTmp; +} + +int SingleModelBuildEndStep(OH_NNModel *model, const OHNNGraphArgs &graphArgs) +{ + int ret = 0; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + + if (graphArgs.addOperation) { + ret = OH_NNModel_AddOperation(model, graphArgs.operationType, ¶mIndices, &inputIndices, + &outputIndices); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_AddOperation failed! ret=%{public}d\n", ret); + return ret; + } + } + + if (graphArgs.specifyIO) { + ret = OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_SpecifyInputsAndOutputs failed! ret=%{public}d\n", ret); + return ret; + } + } + + if (graphArgs.build) { + ret = OH_NNModel_Finish(model); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_Finish failed! 
ret=%d\n", ret); + return ret; + } + } + return ret; +} + +int BuildSingleOpGraph(OH_NNModel *model, const OHNNGraphArgs &graphArgs) +{ + int ret = 0; + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + NN_TensorDesc* tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + + ret = OH_NNModel_AddTensorToModel(model, tensorDesc); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_AddTensor failed! ret=%d\n", ret); + return ret; + } + + ret = OH_NNModel_SetTensorType(model, i, operandTem.type); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNBackend_SetModelTensorType failed! ret=%d\n", ret); + return ret; + } + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + ret = OH_NNModel_SetTensorData(model, i, operandTem.data, operandTem.length); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNModel_SetTensorData failed! ret=%{public}d\n", ret); + return ret; + } + } + OH_NNTensorDesc_Destroy(&tensorDesc); + } + ret = SingleModelBuildEndStep(model, graphArgs); + return ret; +} + +OH_NN_ReturnCode GetDeviceID(size_t *deviceId) +{ + OH_NN_ReturnCode ret = OH_NN_FAILED; + const size_t *devicesID{nullptr}; + uint32_t devicesCount{0}; + + ret = OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNDevice_GetAllDevicesID failed! ret=%d\n", ret); + return ret; + } + + if (devicesCount <= NO_DEVICE_COUNT) { + LOGE("[NNRtTest] devicesCount <= 0 devicesCount=%d\n", devicesCount); + return OH_NN_FAILED; + } + + const char *name = nullptr; + std::string deviceName{"Device-CPU_TestVendor_v2_1"}; + for (uint32_t i = 0; i < devicesCount; i++) { + name = nullptr; + ret = OH_NNDevice_GetName(devicesID[i], &name); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNDevice_GetName failed! ret=%d\n", ret); + return ret; + } + + std::string sName(name); + if (deviceName == sName) { + *deviceId = devicesID[i]; + return OH_NN_SUCCESS; + } + } + return OH_NN_FAILED; +} + +int CompileGraphMock(OH_NNCompilation *compilation, const OHNNCompileParam &compileParam) +{ + int ret = 0; + + // set cache + if (!compileParam.cacheDir.empty()) { + ret = OH_NNCompilation_SetCache(compilation, compileParam.cacheDir.c_str(), + compileParam.cacheVersion); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNCompilation_SetCache failed! ret=%d\n", ret); + return ret; + } + } + + // set performance + if (compileParam.performanceMode != OH_NN_PERFORMANCE_NONE) { + ret = OH_NNCompilation_SetPerformanceMode(compilation, compileParam.performanceMode); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNCompilation_SetPerformanceMode failed! ret=%d\n", ret); + return ret; + } + } + + // set priority + if (compileParam.priority != OH_NN_PRIORITY_NONE) { + ret = OH_NNCompilation_SetPriority(compilation, compileParam.priority); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNCompilation_SetPriority failed! ret=%d\n", ret); + return ret; + } + } + + // enable fp16 + if (compileParam.enableFp16) { + ret = OH_NNCompilation_EnableFloat16(compilation, compileParam.enableFp16); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNRtTest] OH_NNCompilation_EnableFloat16 failed! 
ret=%d\n", ret); + return ret; + } + } + + // build + ret = OH_NNCompilation_Build(compilation); + return ret; +} + +void Free(OH_NNModel *model, OH_NNCompilation *compilation, OH_NNExecutor *executor) +{ + if (model != nullptr) { + OH_NNModel_Destroy(&model); + ASSERT_EQ(nullptr, model); + } + + if (compilation != nullptr) { + OH_NNCompilation_Destroy(&compilation); + ASSERT_EQ(nullptr, compilation); + } + + if (executor != nullptr) { + OH_NNExecutor_Destroy(&executor); + ASSERT_EQ(nullptr, executor); + } +} + +void FreeTensorDescVec(std::vector tensorDescVec) +{ + if (!tensorDescVec.empty()) { + for (auto tensorDesc : tensorDescVec) { + OH_NNTensorDesc_Destroy(&tensorDesc); + ASSERT_EQ(nullptr, tensorDesc); + } + } +} +} // namespace Test +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/not_equal_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/not_equal_test.cpp new file mode 100644 index 0000000..85c735a --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/not_equal_test.cpp @@ -0,0 +1,858 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class NotEqualTest : public testing::Test {}; + +struct NotEqualModel1 { + const std::vector tensor_shape = {3}; + float input0Value[3] = {1, 2, 3}; + float input1Value[3] = {4, 5, 6}; + bool outputValue[3] = {0}; + + OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input0Value, 3*sizeof(float)}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input1Value, 3*sizeof(float)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_NOT_EQUAL, + .operands = {input0, input1, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct NotEqualModel2 { + const std::vector tensor_shape = {}; + float input0Value[1] = {1}; + float input1Value[1] = {1}; + bool outputValue[1] = {0}; + + OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input0Value, sizeof(float)}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input1Value, sizeof(float)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_NOT_EQUAL, + .operands = {input0, input1, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct NotEqualModel3 { + const std::vector tensor_shape = {3}; + const std::vector input1_shape = {4}; + float input0Value[3] = {1, 2, 3}; + float input1Value[4] = {4, 5, 6, 7}; + bool outputValue[3] = {0}; + + OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input0Value, 3*sizeof(float)}; + OHNNOperandTest input1 = 
{OH_NN_FLOAT32, OH_NN_TENSOR, input1_shape, input1Value, 4*sizeof(float)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_NOT_EQUAL, + .operands = {input0, input1, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct NotEqualModel4 { + const std::vector tensor_shape = {}; + float* input0Value = {}; + float* input1Value = {}; + bool* outputValue = {}; + + OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input0Value, 0*sizeof(float)}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input1Value, 0*sizeof(float)}; + OHNNOperandTest output = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, outputValue, 0*sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_NOT_EQUAL, + .operands = {input0, input1, output}, + .paramIndices = {}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_NotEqual_Build_01 + * @tc.desc: NotEqualModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(NotEqualTest, SUB_AI_NNRt_Func_North_NotEqual_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NotEqualModel1 notEqualModel; + OHNNGraphArgs graphArgs = notEqualModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_NotEqual_Build_02 + * @tc.desc: NotEqualModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(NotEqualTest, SUB_AI_NNRt_Func_North_NotEqual_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NotEqualModel2 notEqualModel; + OHNNGraphArgs graphArgs = notEqualModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_NotEqual_Build_03 + * @tc.desc: NotEqualModel3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(NotEqualTest, SUB_AI_NNRt_Func_North_NotEqual_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NotEqualModel3 notEqualModel; + OHNNGraphArgs graphArgs = notEqualModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + 
OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_NotEqual_Build_04 + * @tc.desc: NotEqualModel4模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(NotEqualTest, SUB_AI_NNRt_Func_North_NotEqual_Build_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NotEqualModel4 notEqualModel; + OHNNGraphArgs graphArgs = notEqualModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_NotEqual_Build_05 + * @tc.desc: NotEqualModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(NotEqualTest, SUB_AI_NNRt_Func_North_NotEqual_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NotEqualModel2 notEqualModel; + OHNNGraphArgs graphArgs = notEqualModel.graphArgs; + graphArgs.operands = {notEqualModel.input0, notEqualModel.input1, notEqualModel.input1, notEqualModel.output}; + graphArgs.inputIndices = {0, 1, 2}; + graphArgs.outputIndices = {3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_NotEqual_Build_06 + * @tc.desc: NotEqualModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(NotEqualTest, SUB_AI_NNRt_Func_North_NotEqual_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NotEqualModel2 notEqualModel; + OHNNGraphArgs graphArgs = notEqualModel.graphArgs; + graphArgs.operands = {notEqualModel.input0, notEqualModel.input1, notEqualModel.output, notEqualModel.output}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2, 3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_NotEqual_Build_07 + * @tc.desc: NotEqualModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(NotEqualTest, SUB_AI_NNRt_Func_North_NotEqual_Build_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NotEqualModel2 notEqualModel; + OHNNGraphArgs graphArgs = notEqualModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {notEqualModel.input0, notEqualModel.input1, notEqualModel.output, activation}; + graphArgs.paramIndices = {3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_NotEqual_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(NotEqualTest, SUB_AI_NNRt_Func_North_NotEqual_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + 
OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_NotEqual_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(NotEqualTest, SUB_AI_NNRt_Func_North_NotEqual_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NotEqualModel1 notEqualModel; + OHNNGraphArgs graphArgs = notEqualModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_NotEqual_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(NotEqualTest, SUB_AI_NNRt_Func_North_NotEqual_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NotEqualModel1 notEqualModel; + OHNNGraphArgs graphArgs = notEqualModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_NotEqual_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(NotEqualTest, SUB_AI_NNRt_Func_North_NotEqual_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NotEqualModel1 notEqualModel; + OHNNGraphArgs graphArgs = notEqualModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_NotEqual_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(NotEqualTest, SUB_AI_NNRt_Func_North_NotEqual_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NotEqualModel1 notEqualModel; + OHNNGraphArgs graphArgs = notEqualModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, 
OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_NotEqual_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(NotEqualTest, SUB_AI_NNRt_Func_North_NotEqual_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NotEqualModel1 notEqualModel; + OHNNGraphArgs graphArgs = notEqualModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_NotEqual_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(NotEqualTest, SUB_AI_NNRt_Func_North_NotEqual_Model_SpecifyInputsAndOutputs_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NotEqualModel1 notEqualModel; + OHNNGraphArgs graphArgs = notEqualModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_NotEqual_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(NotEqualTest, SUB_AI_NNRt_Func_North_NotEqual_Model_SpecifyInputsAndOutputs_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NotEqualModel1 notEqualModel; + OHNNGraphArgs graphArgs = notEqualModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_NotEqual_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(NotEqualTest, SUB_AI_NNRt_Func_North_NotEqual_Model_SpecifyInputsAndOutputs_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NotEqualModel1 notEqualModel; + OHNNGraphArgs 
graphArgs = notEqualModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_NotEqual_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(NotEqualTest, SUB_AI_NNRt_Func_North_NotEqual_Model_SpecifyInputsAndOutputs_04, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NotEqualModel1 notEqualModel; + OHNNGraphArgs graphArgs = notEqualModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_NotEqual_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(NotEqualTest, SUB_AI_NNRt_Func_North_NotEqual_Model_SpecifyInputsAndOutputs_05, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NotEqualModel1 notEqualModel; + OHNNGraphArgs graphArgs = notEqualModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_NotEqual_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(NotEqualTest, SUB_AI_NNRt_Func_North_NotEqual_Model_SpecifyInputsAndOutputs_06, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NotEqualModel1 notEqualModel; + OHNNGraphArgs graphArgs = notEqualModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_NotEqual_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(NotEqualTest, SUB_AI_NNRt_Func_North_NotEqual_Model_SpecifyInputsAndOutputs_07, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NotEqualModel1 notEqualModel; + OHNNGraphArgs graphArgs = 
notEqualModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_NotEqual_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(NotEqualTest, SUB_AI_NNRt_Func_North_NotEqual_Model_SpecifyInputsAndOutputs_08, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NotEqualModel1 notEqualModel; + OHNNGraphArgs graphArgs = notEqualModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_NotEqual_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(NotEqualTest, SUB_AI_NNRt_Func_North_NotEqual_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NotEqualModel1 notEqualModel; + OHNNGraphArgs graphArgs = notEqualModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_NotEqual_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(NotEqualTest, SUB_AI_NNRt_Func_North_NotEqual_Model_AddOperation_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NotEqualModel1 notEqualModel; + OHNNGraphArgs graphArgs = notEqualModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_NotEqual_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(NotEqualTest, SUB_AI_NNRt_Func_North_NotEqual_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + 
EXPECT_NE(nullptr, model); + + NotEqualModel1 notEqualModel; + OHNNGraphArgs graphArgs = notEqualModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_NotEqual_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(NotEqualTest, SUB_AI_NNRt_Func_North_NotEqual_Model_AddOperation_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NotEqualModel1 notEqualModel; + OHNNGraphArgs graphArgs = notEqualModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_NotEqual_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(NotEqualTest, SUB_AI_NNRt_Func_North_NotEqual_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NotEqualModel1 notEqualModel; + OHNNGraphArgs graphArgs = notEqualModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_NotEqual_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(NotEqualTest, SUB_AI_NNRt_Func_North_NotEqual_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NotEqualModel1 notEqualModel; + OHNNGraphArgs graphArgs = notEqualModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + 
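The SetOperandValue and SpecifyInputsAndOutputs cases in these files all repeat the same tensor-registration loop before exercising a single invalid argument. For clarity, here is a minimal sketch of that loop factored into one helper, assuming only the utilities already defined in nncore_utils.h/.cpp (createTensorDesc, FreeTensorDescVec) and the gtest macros used throughout; AddOperandsSketch is a hypothetical name, not part of the patch.

// Illustrative only: the common tensor-registration loop from the negative-path cases above.
static void AddOperandsSketch(OH_NNModel *model, const OHNNGraphArgs &graphArgs)
{
    std::vector<NN_TensorDesc*> tensorDescVec;
    for (size_t i = 0; i < graphArgs.operands.size(); i++) {
        const OHNNOperandTest &operand = graphArgs.operands[i];
        NN_TensorDesc *tensorDesc = createTensorDesc(operand.shape.data(),
                                                     (uint32_t) operand.shape.size(),
                                                     operand.dataType, operand.format);
        tensorDescVec.emplace_back(tensorDesc);
        EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc));
        EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operand.type));

        // Parameter operands additionally get their constant data attached.
        if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) !=
            graphArgs.paramIndices.end()) {
            EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorData(model, i, operand.data, operand.length));
        }
    }
    FreeTensorDescVec(tensorDescVec);
}

With this shape in mind, the negative cases differ only in which argument they corrupt: an out-of-range tensor index, a null data pointer, a zero length, or a null/empty index array passed to OH_NNModel_SpecifyInputsAndOutputs or OH_NNModel_AddOperation.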
+/** + * @tc.number : SUB_AI_NNRt_Func_North_NotEqual_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(NotEqualTest, SUB_AI_NNRt_Func_North_NotEqual_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NotEqualModel1 notEqualModel; + OHNNGraphArgs graphArgs = notEqualModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_NotEqual_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(NotEqualTest, SUB_AI_NNRt_Func_North_NotEqual_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NotEqualModel1 notEqualModel; + OHNNGraphArgs graphArgs = notEqualModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_NotEqual_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(NotEqualTest, SUB_AI_NNRt_Func_North_NotEqual_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + NotEqualModel1 notEqualModel; + OHNNGraphArgs graphArgs = notEqualModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/pow_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/pow_test.cpp new file mode 100644 index 0000000..48422c8 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/pow_test.cpp @@ -0,0 +1,822 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class PowTest : public testing::Test {}; + +struct PowModel1 { + const std::vector tensor_shape = {2}; + float scaleValue = 2; + float shiftValue = 1; + float input0Value[2] = {1, 2}; + float input1Value[2] = {2, 3}; + float outputValue[2] = {0}; + + OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input0Value, 2*sizeof(float)}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input1Value, 2*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 2*sizeof(float)}; + OHNNOperandTest scale = {OH_NN_FLOAT32, OH_NN_POW_SCALE, {1}, &scaleValue, sizeof(float)}; + OHNNOperandTest shift = {OH_NN_FLOAT32, OH_NN_POW_SHIFT, {1}, &shiftValue, sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_POW, + .operands = {input0, input1, output, scale, shift}, + .paramIndices = {3, 4}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct PowModel2 { + const std::vector tensor_shape = {1}; + float scaleValue = 1; + float shiftValue = 0; + float input0Value[1] = {2}; + float input1Value[1] = {3}; + float outputValue[1] = {0}; + + OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input0Value, sizeof(float)}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input1Value, sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, sizeof(float)}; + OHNNOperandTest scale = {OH_NN_FLOAT32, OH_NN_POW_SCALE, {1}, &scaleValue, sizeof(float)}; + OHNNOperandTest shift = {OH_NN_FLOAT32, OH_NN_POW_SHIFT, {1}, &shiftValue, sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_POW, + .operands = {input0, input1, output, scale, shift}, + .paramIndices = {3, 4}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct PowModel3 { + const std::vector input0_shape = {2}; + const std::vector input1_shape = {1}; + float scaleValue = 0.5; + float shiftValue = 2; + float input0Value[2] = {0, 1}; + float input1Value[1] = {2}; + float outputValue[2] = {0}; + + OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, input0_shape, input0Value, 2*sizeof(float)}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, input1_shape, input1Value, sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, input0_shape, outputValue, 2*sizeof(float)}; + OHNNOperandTest scale = {OH_NN_FLOAT32, OH_NN_POW_SCALE, {1}, &scaleValue, sizeof(float)}; + OHNNOperandTest shift = {OH_NN_FLOAT32, OH_NN_POW_SHIFT, {1}, &shiftValue, sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_POW, + .operands = {input0, input1, output, scale, shift}, + .paramIndices = {3, 4}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Pow_Build_01 + * @tc.desc: PowModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(PowTest, SUB_AI_NNRt_Func_North_Pow_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel 
*model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + PowModel1 powModel; + OHNNGraphArgs graphArgs = powModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Pow_Build_02 + * @tc.desc: PowModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(PowTest, SUB_AI_NNRt_Func_North_Pow_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + PowModel2 powModel; + OHNNGraphArgs graphArgs = powModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Pow_Build_03 + * @tc.desc: PowModel3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(PowTest, SUB_AI_NNRt_Func_North_Pow_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + PowModel3 powModel; + OHNNGraphArgs graphArgs = powModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Pow_Build_04 + * @tc.desc: PowModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(PowTest, SUB_AI_NNRt_Func_North_Pow_Build_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + PowModel1 powModel; + OHNNGraphArgs graphArgs = powModel.graphArgs; + graphArgs.operands = {powModel.input0, powModel.input1, powModel.input1, powModel.output, + powModel.scale, powModel.shift}; + graphArgs.inputIndices = {0, 1, 2}; + graphArgs.outputIndices = {3}; + graphArgs.paramIndices = {4, 5}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Pow_Build_05 + * @tc.desc: PowModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(PowTest, SUB_AI_NNRt_Func_North_Pow_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + PowModel1 powModel; + OHNNGraphArgs graphArgs = powModel.graphArgs; + graphArgs.operands = {powModel.input0, powModel.input1, powModel.output, powModel.output, + 
powModel.scale, powModel.shift}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2, 3}; + graphArgs.paramIndices = {4, 5}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Pow_Build_06 + * @tc.desc: PowModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(PowTest, SUB_AI_NNRt_Func_North_Pow_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + PowModel1 powModel; + OHNNGraphArgs graphArgs = powModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {powModel.input0, powModel.input1, powModel.output, + powModel.scale, powModel.shift, activation}; + graphArgs.paramIndices = {3, 4, 5}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Pow_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(PowTest, SUB_AI_NNRt_Func_North_Pow_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Pow_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(PowTest, SUB_AI_NNRt_Func_North_Pow_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + PowModel1 powModel; + OHNNGraphArgs graphArgs = powModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Pow_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(PowTest, SUB_AI_NNRt_Func_North_Pow_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + PowModel1 powModel; + OHNNGraphArgs graphArgs = powModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Pow_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(PowTest, SUB_AI_NNRt_Func_North_Pow_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + PowModel1 powModel; + OHNNGraphArgs graphArgs = powModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, 
OH_NNModel_SetTensorData(
+                model, 1000+i, operandTem.data, operandTem.length));
+        }
+    }
+
+    FreeTensorDescVec(tensorDescVec);
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Pow_Model_SetOperandValue_02
+ * @tc.desc: Set operand value, buffer is nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(PowTest, SUB_AI_NNRt_Func_North_Pow_Model_SetOperandValue_02, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    PowModel1 powModel;
+    OHNNGraphArgs graphArgs = powModel.graphArgs;
+
+    NN_TensorDesc* tensorDesc = nullptr;
+    std::vector<NN_TensorDesc*> tensorDescVec;
+
+    for (size_t i = 0; i < graphArgs.operands.size(); i++) {
+        const OHNNOperandTest &operandTem = graphArgs.operands[i];
+        tensorDesc = createTensorDesc(operandTem.shape.data(),
+                                      (uint32_t) operandTem.shape.size(),
+                                      operandTem.dataType, operandTem.format);
+        tensorDescVec.emplace_back(tensorDesc);
+        EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc));
+        EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type));
+
+        if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) !=
+            graphArgs.paramIndices.end()) {
+            EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length));
+        }
+    }
+
+    FreeTensorDescVec(tensorDescVec);
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Pow_Model_SetOperandValue_03
+ * @tc.desc: Set operand value, length is 0
+ * @tc.type: FUNC
+ */
+HWTEST_F(PowTest, SUB_AI_NNRt_Func_North_Pow_Model_SetOperandValue_03, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    PowModel1 powModel;
+    OHNNGraphArgs graphArgs = powModel.graphArgs;
+
+    NN_TensorDesc* tensorDesc = nullptr;
+    std::vector<NN_TensorDesc*> tensorDescVec;
+
+    for (size_t i = 0; i < graphArgs.operands.size(); i++) {
+        const OHNNOperandTest &operandTem = graphArgs.operands[i];
+        tensorDesc = createTensorDesc(operandTem.shape.data(),
+                                      (uint32_t) operandTem.shape.size(),
+                                      operandTem.dataType, operandTem.format);
+        tensorDescVec.emplace_back(tensorDesc);
+        EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc));
+        EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type));
+
+        if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) !=
+            graphArgs.paramIndices.end()) {
+            EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0));
+        }
+    }
+
+    FreeTensorDescVec(tensorDescVec);
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Pow_Model_SpecifyInputsAndOutputs_01
+ * @tc.desc: Specify inputs and outputs, inputIndices is nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(PowTest, SUB_AI_NNRt_Func_North_Pow_Model_SpecifyInputsAndOutputs_01, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    PowModel1 powModel;
+    OHNNGraphArgs graphArgs = powModel.graphArgs;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    auto inputIndices = TransformUInt32Array(graphArgs.inputIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Pow_Model_SpecifyInputsAndOutputs_02
+ * @tc.desc: Specify inputs and outputs, inputIndices data is nullptr
+ *
@tc.type: FUNC + */ +HWTEST_F(PowTest, SUB_AI_NNRt_Func_North_Pow_Model_SpecifyInputsAndOutputs_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + PowModel1 powModel; + OHNNGraphArgs graphArgs = powModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Pow_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(PowTest, SUB_AI_NNRt_Func_North_Pow_Model_SpecifyInputsAndOutputs_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + PowModel1 powModel; + OHNNGraphArgs graphArgs = powModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Pow_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(PowTest, SUB_AI_NNRt_Func_North_Pow_Model_SpecifyInputsAndOutputs_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + PowModel1 powModel; + OHNNGraphArgs graphArgs = powModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Pow_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(PowTest, SUB_AI_NNRt_Func_North_Pow_Model_SpecifyInputsAndOutputs_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + PowModel1 powModel; + OHNNGraphArgs graphArgs = powModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Pow_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(PowTest, SUB_AI_NNRt_Func_North_Pow_Model_SpecifyInputsAndOutputs_06, Function | MediumTest | Level2) +{ + OH_NNModel *model 
= OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + PowModel1 powModel; + OHNNGraphArgs graphArgs = powModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Pow_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(PowTest, SUB_AI_NNRt_Func_North_Pow_Model_SpecifyInputsAndOutputs_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + PowModel1 powModel; + OHNNGraphArgs graphArgs = powModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Pow_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(PowTest, SUB_AI_NNRt_Func_North_Pow_Model_SpecifyInputsAndOutputs_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + PowModel1 powModel; + OHNNGraphArgs graphArgs = powModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Pow_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(PowTest, SUB_AI_NNRt_Func_North_Pow_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + PowModel1 powModel; + OHNNGraphArgs graphArgs = powModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Pow_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(PowTest, SUB_AI_NNRt_Func_North_Pow_Model_AddOperation_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + PowModel1 powModel; + OHNNGraphArgs graphArgs = powModel.graphArgs; + 
graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto inputIndices = TransformUInt32Array(graphArgs.inputIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    paramIndices.data = nullptr;
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        &paramIndices, &inputIndices, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Pow_Model_AddOperation_03
+ * @tc.desc: Add operation, paramIndices data refers to a nonexistent index
+ * @tc.type: FUNC
+ */
+HWTEST_F(PowTest, SUB_AI_NNRt_Func_North_Pow_Model_AddOperation_03, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    PowModel1 powModel;
+    OHNNGraphArgs graphArgs = powModel.graphArgs;
+    graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    graphArgs.paramIndices = {100000};
+    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto inputIndices = TransformUInt32Array(graphArgs.inputIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        &paramIndices, &inputIndices, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Pow_Model_AddOperation_04
+ * @tc.desc: Add operation, paramIndices size is 0
+ * @tc.type: FUNC
+ */
+HWTEST_F(PowTest, SUB_AI_NNRt_Func_North_Pow_Model_AddOperation_04, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    PowModel1 powModel;
+    OHNNGraphArgs graphArgs = powModel.graphArgs;
+    graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto inputIndices = TransformUInt32Array(graphArgs.inputIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    paramIndices.size = 0;
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        &paramIndices, &inputIndices, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Pow_Model_AddOperation_05
+ * @tc.desc: Add operation, inputIndices is nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(PowTest, SUB_AI_NNRt_Func_North_Pow_Model_AddOperation_05, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    PowModel1 powModel;
+    OHNNGraphArgs graphArgs = powModel.graphArgs;
+    graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        &paramIndices, nullptr, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Pow_Model_AddOperation_06
+ * @tc.desc: Add operation, inputIndices data is nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(PowTest,
SUB_AI_NNRt_Func_North_Pow_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + PowModel1 powModel; + OHNNGraphArgs graphArgs = powModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Pow_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(PowTest, SUB_AI_NNRt_Func_North_Pow_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + PowModel1 powModel; + OHNNGraphArgs graphArgs = powModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Pow_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(PowTest, SUB_AI_NNRt_Func_North_Pow_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + PowModel1 powModel; + OHNNGraphArgs graphArgs = powModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Pow_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(PowTest, SUB_AI_NNRt_Func_North_Pow_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + PowModel1 powModel; + OHNNGraphArgs graphArgs = powModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + 
+ Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/quant_dtype_cast_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/quant_dtype_cast_test.cpp new file mode 100644 index 0000000..522ba20 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/quant_dtype_cast_test.cpp @@ -0,0 +1,841 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class QuantDTypeCastTest : public testing::Test {}; + +struct QuantDTypeCastModel1 { + const std::vector tensor_shape = {4}; + std::vector axisValue = {0}; + std::vector srcValue = {1}; + std::vector dstValue = {1}; + int32_t inputValue[4] = {0.5, 1.0, 1.5, 2.0}; + int32_t outputValue[4] = {0}; + + OHNNOperandTest input = {OH_NN_INT32, OH_NN_TENSOR, tensor_shape, inputValue, 4*sizeof(int32_t)}; + OHNNOperandTest output = {OH_NN_INT32, OH_NN_TENSOR, tensor_shape, outputValue, 4*sizeof(int32_t)}; + OHNNOperandTest axis = {OH_NN_INT64, OH_NN_QUANT_DTYPE_CAST_AXIS, {1}, &axisValue, sizeof(int64_t)}; + OHNNOperandTest src = {OH_NN_INT64, OH_NN_QUANT_DTYPE_CAST_SRC_T, {1}, &srcValue, sizeof(int64_t)}; + OHNNOperandTest dst = {OH_NN_INT64, OH_NN_QUANT_DTYPE_CAST_DST_T, {1}, &dstValue, sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_QUANT_DTYPE_CAST, + .operands = {input, output, axis, src, dst}, + .paramIndices = {2, 3, 4}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct QuantDTypeCastModel2 { + const std::vector tensor_shape = {4}; + std::vector axisValue = {0}; + std::vector srcValue = {1}; + std::vector dstValue = {1}; + int32_t inputValue[4] = {0.5, 1.0, 1.5, 2.0}; + int32_t outputValue[4] = {0}; + + OHNNOperandTest input = {OH_NN_INT32, OH_NN_TENSOR, tensor_shape, inputValue, 4*sizeof(int32_t)}; + OHNNOperandTest output = {OH_NN_INT32, OH_NN_TENSOR, tensor_shape, outputValue, 4*sizeof(int32_t)}; + OHNNOperandTest axis = {OH_NN_INT64, OH_NN_QUANT_DTYPE_CAST_AXIS, {1}, &axisValue, sizeof(int64_t)}; + OHNNOperandTest src = {OH_NN_INT64, OH_NN_QUANT_DTYPE_CAST_SRC_T, {1}, &srcValue, sizeof(int64_t)}; + OHNNOperandTest dst = {OH_NN_INT64, OH_NN_QUANT_DTYPE_CAST_DST_T, {1}, &dstValue, sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_QUANT_DTYPE_CAST, + .operands = {input, output, axis, src, dst}, + .paramIndices = {2, 3, 4}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct QuantDTypeCastModel3 { + const std::vector tensor_shape = {2, 3}; + std::vector axisValue = {0}; + std::vector srcValue = {1}; + std::vector dstValue = {1}; + int32_t inputValue[2][3] = {{1.5, 2.5, 3.5}, {4.5, 5.5, 6.5}}; + int32_t outputValue[2][3] = {0}; + + OHNNOperandTest input = {OH_NN_INT32, OH_NN_TENSOR, tensor_shape, inputValue, 6*sizeof(int32_t)}; + OHNNOperandTest output = {OH_NN_INT32, OH_NN_TENSOR, tensor_shape, outputValue, 6*sizeof(int32_t)}; + 
OHNNOperandTest axis = {OH_NN_INT64, OH_NN_QUANT_DTYPE_CAST_AXIS, {1}, &axisValue, sizeof(int64_t)}; + OHNNOperandTest src = {OH_NN_INT64, OH_NN_QUANT_DTYPE_CAST_SRC_T, {1}, &srcValue, sizeof(int64_t)}; + OHNNOperandTest dst = {OH_NN_INT64, OH_NN_QUANT_DTYPE_CAST_DST_T, {1}, &dstValue, sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_QUANT_DTYPE_CAST, + .operands = {input, output, axis, src, dst}, + .paramIndices = {2, 3, 4}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_QuantDTypeCast_Build_01 + * @tc.desc: QuantDTypeCastModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastTest, SUB_AI_NNRt_Func_North_QuantDTypeCast_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + QuantDTypeCastModel1 quantDTypeCastModel; + OHNNGraphArgs graphArgs = quantDTypeCastModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_QuantDTypeCast_Build_02 + * @tc.desc: QuantDTypeCastModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastTest, SUB_AI_NNRt_Func_North_QuantDTypeCast_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + QuantDTypeCastModel2 quantDTypeCastModel; + OHNNGraphArgs graphArgs = quantDTypeCastModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_QuantDTypeCast_Build_03 + * @tc.desc: QuantDTypeCastModel3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastTest, SUB_AI_NNRt_Func_North_QuantDTypeCast_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + QuantDTypeCastModel3 quantDTypeCastModel; + OHNNGraphArgs graphArgs = quantDTypeCastModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_QuantDTypeCast_Build_06 + * @tc.desc: QuantDTypeCastModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastTest, 
SUB_AI_NNRt_Func_North_QuantDTypeCast_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + QuantDTypeCastModel1 quantDTypeCastModel; + OHNNGraphArgs graphArgs = quantDTypeCastModel.graphArgs; + graphArgs.operands = {quantDTypeCastModel.input, quantDTypeCastModel.input, quantDTypeCastModel.output, + quantDTypeCastModel.axis, quantDTypeCastModel.src, quantDTypeCastModel.dst}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2}; + graphArgs.paramIndices = {3, 4, 5}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_QuantDTypeCast_Build_07 + * @tc.desc: QuantDTypeCastModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastTest, SUB_AI_NNRt_Func_North_QuantDTypeCast_Build_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + QuantDTypeCastModel1 quantDTypeCastModel; + OHNNGraphArgs graphArgs = quantDTypeCastModel.graphArgs; + graphArgs.operands = {quantDTypeCastModel.input, quantDTypeCastModel.output, quantDTypeCastModel.output, + quantDTypeCastModel.axis, quantDTypeCastModel.src, quantDTypeCastModel.dst}; + graphArgs.inputIndices = {0}; + graphArgs.outputIndices = {1, 2}; + graphArgs.paramIndices = {3, 4, 5}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_QuantDTypeCast_Build_08 + * @tc.desc: QuantDTypeCastModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastTest, SUB_AI_NNRt_Func_North_QuantDTypeCast_Build_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + QuantDTypeCastModel1 quantDTypeCastModel; + OHNNGraphArgs graphArgs = quantDTypeCastModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {quantDTypeCastModel.input, quantDTypeCastModel.output, quantDTypeCastModel.dst, + quantDTypeCastModel.axis, quantDTypeCastModel.src, activation}; + graphArgs.paramIndices = {2, 3, 4, 5}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastTest, SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastTest, SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + QuantDTypeCastModel1 quantDTypeCastModel; + OHNNGraphArgs graphArgs = quantDTypeCastModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : 
SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastTest, SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + QuantDTypeCastModel1 quantDTypeCastModel; + OHNNGraphArgs graphArgs = quantDTypeCastModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastTest, SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_SetOperandValue_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + QuantDTypeCastModel1 quantDTypeCastModel; + OHNNGraphArgs graphArgs = quantDTypeCastModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastTest, SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_SetOperandValue_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + QuantDTypeCastModel1 quantDTypeCastModel; + OHNNGraphArgs graphArgs = quantDTypeCastModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastTest, SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_SetOperandValue_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + QuantDTypeCastModel1 quantDTypeCastModel; + OHNNGraphArgs graphArgs = quantDTypeCastModel.graphArgs; 
+ + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastTest, SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_SpecifyInputsAndOutputs_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + QuantDTypeCastModel1 quantDTypeCastModel; + OHNNGraphArgs graphArgs = quantDTypeCastModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastTest, SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_SpecifyInputsAndOutputs_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + QuantDTypeCastModel1 quantDTypeCastModel; + OHNNGraphArgs graphArgs = quantDTypeCastModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastTest, SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_SpecifyInputsAndOutputs_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + QuantDTypeCastModel1 quantDTypeCastModel; + OHNNGraphArgs graphArgs = quantDTypeCastModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + 
Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastTest, SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_SpecifyInputsAndOutputs_04, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + QuantDTypeCastModel1 quantDTypeCastModel; + OHNNGraphArgs graphArgs = quantDTypeCastModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastTest, SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_SpecifyInputsAndOutputs_05, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + QuantDTypeCastModel1 quantDTypeCastModel; + OHNNGraphArgs graphArgs = quantDTypeCastModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastTest, SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_SpecifyInputsAndOutputs_06, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + QuantDTypeCastModel1 quantDTypeCastModel; + OHNNGraphArgs graphArgs = quantDTypeCastModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastTest, SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_SpecifyInputsAndOutputs_07, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + QuantDTypeCastModel1 quantDTypeCastModel; + OHNNGraphArgs graphArgs = quantDTypeCastModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = 
TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastTest, SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_SpecifyInputsAndOutputs_08, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + QuantDTypeCastModel1 quantDTypeCastModel; + OHNNGraphArgs graphArgs = quantDTypeCastModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastTest, SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_AddOperation_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + QuantDTypeCastModel1 quantDTypeCastModel; + OHNNGraphArgs graphArgs = quantDTypeCastModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastTest, SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_AddOperation_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + QuantDTypeCastModel1 quantDTypeCastModel; + OHNNGraphArgs graphArgs = quantDTypeCastModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastTest, SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_AddOperation_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + QuantDTypeCastModel1 quantDTypeCastModel; + OHNNGraphArgs graphArgs = quantDTypeCastModel.graphArgs; + 
graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastTest, SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_AddOperation_04, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + QuantDTypeCastModel1 quantDTypeCastModel; + OHNNGraphArgs graphArgs = quantDTypeCastModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastTest, SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_AddOperation_05, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + QuantDTypeCastModel1 quantDTypeCastModel; + OHNNGraphArgs graphArgs = quantDTypeCastModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastTest, SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_AddOperation_06, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + QuantDTypeCastModel1 quantDTypeCastModel; + OHNNGraphArgs graphArgs = quantDTypeCastModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, 
nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastTest, SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_AddOperation_07, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + QuantDTypeCastModel1 quantDTypeCastModel; + OHNNGraphArgs graphArgs = quantDTypeCastModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastTest, SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_AddOperation_08, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + QuantDTypeCastModel1 quantDTypeCastModel; + OHNNGraphArgs graphArgs = quantDTypeCastModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(QuantDTypeCastTest, SUB_AI_NNRt_Func_North_QuantDTypeCast_Model_AddOperation_09, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + QuantDTypeCastModel1 quantDTypeCastModel; + OHNNGraphArgs graphArgs = quantDTypeCastModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/range_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/range_test.cpp new file mode 100644 index 0000000..65e01f5 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/range_test.cpp @@ -0,0 +1,919 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class RangeTest : public testing::Test {}; + +struct RangeModel1 { + const std::vector tensor_shape = {3}; + std::vector startValue = {-1}; + std::vector limitValue = {10}; + std::vector deltaValue = {4}; + int32_t inputValue[3] = {0}; + int32_t outputValue[3] = {0}; + + OHNNOperandTest input = {OH_NN_INT32, OH_NN_TENSOR, tensor_shape, inputValue, 3*sizeof(int32_t)}; + OHNNOperandTest output = {OH_NN_INT32, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(int32_t)}; + OHNNOperandTest start = {OH_NN_INT64, OH_NN_RANGE_START, {1}, &startValue, sizeof(int64_t)}; + OHNNOperandTest limit = {OH_NN_INT64, OH_NN_RANGE_LIMIT, {1}, &limitValue, sizeof(int64_t)}; + OHNNOperandTest delta = {OH_NN_INT64, OH_NN_RANGE_DELTA, {1}, &deltaValue, sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_RANGE, + .operands = {input, output, start, limit, delta}, + .paramIndices = {2, 3, 4}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct RangeModel2 { + const std::vector tensor_shape = {1}; + std::vector startValue = {1}; + std::vector limitValue = {1}; + std::vector deltaValue = {4}; + int32_t inputValue[1] = {0}; + int32_t outputValue[1] = {0}; + + OHNNOperandTest input = {OH_NN_INT32, OH_NN_TENSOR, tensor_shape, inputValue, sizeof(int32_t)}; + OHNNOperandTest output = {OH_NN_INT32, OH_NN_TENSOR, tensor_shape, outputValue, sizeof(int32_t)}; + OHNNOperandTest start = {OH_NN_INT64, OH_NN_RANGE_START, {1}, &startValue, sizeof(int64_t)}; + OHNNOperandTest limit = {OH_NN_INT64, OH_NN_RANGE_LIMIT, {1}, &limitValue, sizeof(int64_t)}; + OHNNOperandTest delta = {OH_NN_INT64, OH_NN_RANGE_DELTA, {1}, &deltaValue, sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_RANGE, + .operands = {input, output, start, limit, delta}, + .paramIndices = {2, 3, 4}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct RangeModel3 { + const std::vector tensor_shape = {1}; + std::vector startValue = {2}; + std::vector limitValue = {1}; + std::vector deltaValue = {4}; + int32_t inputValue[1] = {0}; + int32_t outputValue[1] = {0}; + + OHNNOperandTest input = {OH_NN_INT32, OH_NN_TENSOR, tensor_shape, inputValue, sizeof(int32_t)}; + OHNNOperandTest output = {OH_NN_INT32, OH_NN_TENSOR, tensor_shape, outputValue, sizeof(int32_t)}; + OHNNOperandTest start = {OH_NN_INT64, OH_NN_RANGE_START, {1}, &startValue, sizeof(int64_t)}; + OHNNOperandTest limit = {OH_NN_INT64, OH_NN_RANGE_LIMIT, {1}, &limitValue, sizeof(int64_t)}; + OHNNOperandTest delta = {OH_NN_INT64, OH_NN_RANGE_DELTA, {1}, &deltaValue, sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_RANGE, + .operands = {input, output, start, limit, delta}, + .paramIndices = {2, 3, 4}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct RangeModel4 { + const std::vector tensor_shape = {2}; + std::vector startValue = {2.4}; + std::vector limitValue = {10}; + std::vector deltaValue = {4}; + float inputValue[2] = {0}; + float outputValue[2] = 
{0}; + + OHNNOperandTest input = {OH_NN_FLOAT16, OH_NN_TENSOR, tensor_shape, inputValue, 2*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT16, OH_NN_TENSOR, tensor_shape, outputValue, 2*sizeof(float)}; + OHNNOperandTest start = {OH_NN_INT64, OH_NN_RANGE_START, {1}, &startValue, sizeof(int64_t)}; + OHNNOperandTest limit = {OH_NN_INT64, OH_NN_RANGE_LIMIT, {1}, &limitValue, sizeof(int64_t)}; + OHNNOperandTest delta = {OH_NN_INT64, OH_NN_RANGE_DELTA, {1}, &deltaValue, sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_RANGE, + .operands = {input, output, start, limit, delta}, + .paramIndices = {2, 3, 4}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct RangeModel5 { + const std::vector<int32_t> tensor_shape = {2}; + std::vector<float> startValue = {2.4}; + std::vector<float> limitValue = {10}; + std::vector<float> deltaValue = {4}; + int32_t inputValue[2] = {0}; + int32_t outputValue[2] = {0}; + + OHNNOperandTest input = {OH_NN_INT32, OH_NN_TENSOR, tensor_shape, inputValue, 2*sizeof(int32_t)}; + OHNNOperandTest output = {OH_NN_INT32, OH_NN_TENSOR, tensor_shape, outputValue, 2*sizeof(int32_t)}; + OHNNOperandTest start = {OH_NN_INT64, OH_NN_RANGE_START, {1}, &startValue, sizeof(int64_t)}; + OHNNOperandTest limit = {OH_NN_INT64, OH_NN_RANGE_LIMIT, {1}, &limitValue, sizeof(int64_t)}; + OHNNOperandTest delta = {OH_NN_INT64, OH_NN_RANGE_DELTA, {1}, &deltaValue, sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_RANGE, + .operands = {input, output, start, limit, delta}, + .paramIndices = {2, 3, 4}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Range_Build_01 + * @tc.desc: Build test for RangeModel1 + * @tc.type: FUNC + */ +HWTEST_F(RangeTest, SUB_AI_NNRt_Func_North_Range_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RangeModel1 rangeModel; + OHNNGraphArgs graphArgs = rangeModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Range_Build_02 + * @tc.desc: Build test for RangeModel2 + * @tc.type: FUNC + */ +HWTEST_F(RangeTest, SUB_AI_NNRt_Func_North_Range_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RangeModel2 rangeModel; + OHNNGraphArgs graphArgs = rangeModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Range_Build_03 + * @tc.desc: Build test for RangeModel3 + * @tc.type: FUNC + */ +HWTEST_F(RangeTest, SUB_AI_NNRt_Func_North_Range_Build_03, Function |
MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RangeModel3 rangeModel; + OHNNGraphArgs graphArgs = rangeModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Range_Build_04 + * @tc.desc: RangeModel4模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(RangeTest, SUB_AI_NNRt_Func_North_Range_Build_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RangeModel4 rangeModel; + OHNNGraphArgs graphArgs = rangeModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Range_Build_05 + * @tc.desc: RangeModel5模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(RangeTest, SUB_AI_NNRt_Func_North_Range_Build_05, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RangeModel5 rangeModel; + OHNNGraphArgs graphArgs = rangeModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Range_Build_06 + * @tc.desc: RangeModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(RangeTest, SUB_AI_NNRt_Func_North_Range_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RangeModel1 rangeModel; + OHNNGraphArgs graphArgs = rangeModel.graphArgs; + graphArgs.operands = {rangeModel.input, rangeModel.input, rangeModel.output, + rangeModel.start, rangeModel.limit, rangeModel.delta}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2}; + graphArgs.paramIndices = {3, 4, 5}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Range_Build_07 + * @tc.desc: RangeModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(RangeTest, SUB_AI_NNRt_Func_North_Range_Build_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RangeModel1 rangeModel; + OHNNGraphArgs graphArgs = 
rangeModel.graphArgs; + graphArgs.operands = {rangeModel.input, rangeModel.output, rangeModel.output, + rangeModel.start, rangeModel.limit, rangeModel.delta}; + graphArgs.inputIndices = {0}; + graphArgs.outputIndices = {1, 2}; + graphArgs.paramIndices = {3, 4, 5}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Range_Build_08 + * @tc.desc: RangeModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(RangeTest, SUB_AI_NNRt_Func_North_Range_Build_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RangeModel1 rangeModel; + OHNNGraphArgs graphArgs = rangeModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {rangeModel.input, rangeModel.output, rangeModel.start, + rangeModel.limit, rangeModel.delta, activation}; + graphArgs.paramIndices = {2, 3, 4, 5}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Range_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(RangeTest, SUB_AI_NNRt_Func_North_Range_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Range_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(RangeTest, SUB_AI_NNRt_Func_North_Range_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RangeModel1 rangeModel; + OHNNGraphArgs graphArgs = rangeModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Range_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(RangeTest, SUB_AI_NNRt_Func_North_Range_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RangeModel1 rangeModel; + OHNNGraphArgs graphArgs = rangeModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Range_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(RangeTest, SUB_AI_NNRt_Func_North_Range_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RangeModel1 rangeModel; + OHNNGraphArgs graphArgs = rangeModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, 
operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Range_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(RangeTest, SUB_AI_NNRt_Func_North_Range_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RangeModel1 rangeModel; + OHNNGraphArgs graphArgs = rangeModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Range_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(RangeTest, SUB_AI_NNRt_Func_North_Range_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RangeModel1 rangeModel; + OHNNGraphArgs graphArgs = rangeModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Range_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(RangeTest, SUB_AI_NNRt_Func_North_Range_Model_SpecifyInputsAndOutputs_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RangeModel1 rangeModel; + OHNNGraphArgs graphArgs = rangeModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, 
OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Range_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(RangeTest, SUB_AI_NNRt_Func_North_Range_Model_SpecifyInputsAndOutputs_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RangeModel1 rangeModel; + OHNNGraphArgs graphArgs = rangeModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Range_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(RangeTest, SUB_AI_NNRt_Func_North_Range_Model_SpecifyInputsAndOutputs_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RangeModel1 rangeModel; + OHNNGraphArgs graphArgs = rangeModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Range_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(RangeTest, SUB_AI_NNRt_Func_North_Range_Model_SpecifyInputsAndOutputs_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RangeModel1 rangeModel; + OHNNGraphArgs graphArgs = rangeModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Range_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(RangeTest, SUB_AI_NNRt_Func_North_Range_Model_SpecifyInputsAndOutputs_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RangeModel1 rangeModel; + OHNNGraphArgs graphArgs = rangeModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} 
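+
+/*
+ * Note on the Range models defined at the top of this file: assuming the usual
+ * Range semantics (the operator emits start, start + delta, start + 2*delta, ...
+ * strictly below limit), RangeModel1 (start = -1, limit = 10, delta = 4) would be
+ * expected to produce the three values {-1, 3, 7}, which matches its declared
+ * output shape {3}. A minimal sketch of that expectation; ExpectedRange is a
+ * hypothetical helper, not part of nncore_utils:
+ *
+ *   std::vector<int64_t> ExpectedRange(int64_t start, int64_t limit, int64_t delta)
+ *   {
+ *       std::vector<int64_t> values;
+ *       for (int64_t v = start; v < limit; v += delta) { // assumes delta > 0
+ *           values.push_back(v);
+ *       }
+ *       return values; // {-1, 3, 7} for RangeModel1
+ *   }
+ */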
+ +/** + * @tc.number : SUB_AI_NNRt_Func_North_Range_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(RangeTest, SUB_AI_NNRt_Func_North_Range_Model_SpecifyInputsAndOutputs_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RangeModel1 rangeModel; + OHNNGraphArgs graphArgs = rangeModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Range_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(RangeTest, SUB_AI_NNRt_Func_North_Range_Model_SpecifyInputsAndOutputs_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RangeModel1 rangeModel; + OHNNGraphArgs graphArgs = rangeModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Range_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(RangeTest, SUB_AI_NNRt_Func_North_Range_Model_SpecifyInputsAndOutputs_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RangeModel1 rangeModel; + OHNNGraphArgs graphArgs = rangeModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Range_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(RangeTest, SUB_AI_NNRt_Func_North_Range_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RangeModel1 rangeModel; + OHNNGraphArgs graphArgs = rangeModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : 
SUB_AI_NNRt_Func_North_Range_Model_AddOperation_02 + * @tc.desc: Add operation: data in paramIndices is nullptr + * @tc.type: FUNC + */ +HWTEST_F(RangeTest, SUB_AI_NNRt_Func_North_Range_Model_AddOperation_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RangeModel1 rangeModel; + OHNNGraphArgs graphArgs = rangeModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Range_Model_AddOperation_03 + * @tc.desc: Add operation: an index in the paramIndices data does not exist + * @tc.type: FUNC + */ +HWTEST_F(RangeTest, SUB_AI_NNRt_Func_North_Range_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RangeModel1 rangeModel; + OHNNGraphArgs graphArgs = rangeModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Range_Model_AddOperation_04 + * @tc.desc: Add operation: the size of paramIndices is 0 + * @tc.type: FUNC + */ +HWTEST_F(RangeTest, SUB_AI_NNRt_Func_North_Range_Model_AddOperation_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RangeModel1 rangeModel; + OHNNGraphArgs graphArgs = rangeModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Range_Model_AddOperation_05 + * @tc.desc: Add operation: inputIndices is nullptr + * @tc.type: FUNC + */ +HWTEST_F(RangeTest, SUB_AI_NNRt_Func_North_Range_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RangeModel1 rangeModel; + OHNNGraphArgs graphArgs = rangeModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices
= TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Range_Model_AddOperation_06 + * @tc.desc: Add operation: data in inputIndices is nullptr + * @tc.type: FUNC + */ +HWTEST_F(RangeTest, SUB_AI_NNRt_Func_North_Range_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RangeModel1 rangeModel; + OHNNGraphArgs graphArgs = rangeModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Range_Model_AddOperation_07 + * @tc.desc: Add operation: an index in the inputIndices data does not exist + * @tc.type: FUNC + */ +HWTEST_F(RangeTest, SUB_AI_NNRt_Func_North_Range_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RangeModel1 rangeModel; + OHNNGraphArgs graphArgs = rangeModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Range_Model_AddOperation_08 + * @tc.desc: Add operation: the size of inputIndices is 0 + * @tc.type: FUNC + */ +HWTEST_F(RangeTest, SUB_AI_NNRt_Func_North_Range_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RangeModel1 rangeModel; + OHNNGraphArgs graphArgs = rangeModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Range_Model_AddOperation_09 + * @tc.desc: Add operation: outputIndices is nullptr + * @tc.type: FUNC + */ +HWTEST_F(RangeTest, SUB_AI_NNRt_Func_North_Range_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RangeModel1 rangeModel; + OHNNGraphArgs graphArgs = rangeModel.graphArgs; +
graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + &paramIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/rank_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/rank_test.cpp new file mode 100644 index 0000000..bc886ce --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/rank_test.cpp @@ -0,0 +1,756 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <gtest/gtest.h> +#include <vector> +#include <algorithm> +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class RankTest : public testing::Test {}; + +struct RankModel1 { + const std::vector<int32_t> tensor_shape = {5}; + float inputValue[5] = {1}; + float outputValue[1] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 5*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, {1}, outputValue, sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_RANK, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct RankModel2 { + const std::vector<int32_t> tensor_shape = {1, 2, 3, 4, 5}; + float inputValue[1][2][3][4][5] = {1}; + float outputValue[1] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 120*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, {1}, outputValue, sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_RANK, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Rank_Build_01 + * @tc.desc: Build test for RankModel1 + * @tc.type: FUNC + */ +HWTEST_F(RankTest, SUB_AI_NNRt_Func_North_Rank_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RankModel1 rankModel; + OHNNGraphArgs graphArgs = rankModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Rank_Build_02 + * @tc.desc: Build test for RankModel2 + * @tc.type: FUNC + */ +HWTEST_F(RankTest,
SUB_AI_NNRt_Func_North_Rank_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RankModel2 rankModel; + OHNNGraphArgs graphArgs = rankModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Rank_Build_03 + * @tc.desc: RankModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(RankTest, SUB_AI_NNRt_Func_North_Rank_Build_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RankModel1 rankModel; + OHNNGraphArgs graphArgs = rankModel.graphArgs; + graphArgs.operands = {rankModel.input, rankModel.input, rankModel.output}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Rank_Build_04 + * @tc.desc: RankModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(RankTest, SUB_AI_NNRt_Func_North_Rank_Build_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RankModel1 rankModel; + OHNNGraphArgs graphArgs = rankModel.graphArgs; + graphArgs.operands = {rankModel.input, rankModel.output, rankModel.output}; + graphArgs.inputIndices = {0}; + graphArgs.outputIndices = {1, 2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Rank_Build_05 + * @tc.desc: RankModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(RankTest, SUB_AI_NNRt_Func_North_Rank_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RankModel1 rankModel; + OHNNGraphArgs graphArgs = rankModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {rankModel.input, rankModel.output, activation}; + graphArgs.paramIndices = {2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Rank_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(RankTest, SUB_AI_NNRt_Func_North_Rank_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Rank_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(RankTest, SUB_AI_NNRt_Func_North_Rank_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RankModel1 rankModel; + OHNNGraphArgs graphArgs = rankModel.graphArgs; + 
graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Rank_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(RankTest, SUB_AI_NNRt_Func_North_Rank_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RankModel1 rankModel; + OHNNGraphArgs graphArgs = rankModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Rank_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(RankTest, SUB_AI_NNRt_Func_North_Rank_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RankModel1 rankModel; + OHNNGraphArgs graphArgs = rankModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Rank_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(RankTest, SUB_AI_NNRt_Func_North_Rank_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RankModel1 rankModel; + OHNNGraphArgs graphArgs = rankModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Rank_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(RankTest, SUB_AI_NNRt_Func_North_Rank_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RankModel1 rankModel; + OHNNGraphArgs graphArgs = rankModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + 
for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Rank_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(RankTest, SUB_AI_NNRt_Func_North_Rank_Model_SpecifyInputsAndOutputs_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RankModel1 rankModel; + OHNNGraphArgs graphArgs = rankModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Rank_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(RankTest, SUB_AI_NNRt_Func_North_Rank_Model_SpecifyInputsAndOutputs_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RankModel1 rankModel; + OHNNGraphArgs graphArgs = rankModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Rank_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(RankTest, SUB_AI_NNRt_Func_North_Rank_Model_SpecifyInputsAndOutputs_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RankModel1 rankModel; + OHNNGraphArgs graphArgs = rankModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Rank_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(RankTest, SUB_AI_NNRt_Func_North_Rank_Model_SpecifyInputsAndOutputs_04, 
Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RankModel1 rankModel; + OHNNGraphArgs graphArgs = rankModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Rank_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(RankTest, SUB_AI_NNRt_Func_North_Rank_Model_SpecifyInputsAndOutputs_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RankModel1 rankModel; + OHNNGraphArgs graphArgs = rankModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Rank_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(RankTest, SUB_AI_NNRt_Func_North_Rank_Model_SpecifyInputsAndOutputs_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RankModel1 rankModel; + OHNNGraphArgs graphArgs = rankModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Rank_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(RankTest, SUB_AI_NNRt_Func_North_Rank_Model_SpecifyInputsAndOutputs_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RankModel1 rankModel; + OHNNGraphArgs graphArgs = rankModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Rank_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(RankTest, SUB_AI_NNRt_Func_North_Rank_Model_SpecifyInputsAndOutputs_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + 
EXPECT_NE(nullptr, model); + + RankModel1 rankModel; + OHNNGraphArgs graphArgs = rankModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Rank_Model_AddOperation_01 + * @tc.desc: Add operation: paramIndices is nullptr + * @tc.type: FUNC + */ +HWTEST_F(RankTest, SUB_AI_NNRt_Func_North_Rank_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RankModel1 rankModel; + OHNNGraphArgs graphArgs = rankModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Rank_Model_AddOperation_02 + * @tc.desc: Add operation: data in paramIndices is nullptr + * @tc.type: FUNC + */ +HWTEST_F(RankTest, SUB_AI_NNRt_Func_North_Rank_Model_AddOperation_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RankModel1 rankModel; + OHNNGraphArgs graphArgs = rankModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Rank_Model_AddOperation_03 + * @tc.desc: Add operation: an index in the paramIndices data does not exist + * @tc.type: FUNC + */ +HWTEST_F(RankTest, SUB_AI_NNRt_Func_North_Rank_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RankModel1 rankModel; + OHNNGraphArgs graphArgs = rankModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Rank_Model_AddOperation_04 + * @tc.desc: Add operation: the size of paramIndices is 0 + * @tc.type: FUNC + */ +HWTEST_F(RankTest,
SUB_AI_NNRt_Func_North_Rank_Model_AddOperation_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RankModel1 rankModel; + OHNNGraphArgs graphArgs = rankModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Rank_Model_AddOperation_05 + * @tc.desc: Add operation: inputIndices is nullptr + * @tc.type: FUNC + */ +HWTEST_F(RankTest, SUB_AI_NNRt_Func_North_Rank_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RankModel1 rankModel; + OHNNGraphArgs graphArgs = rankModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Rank_Model_AddOperation_06 + * @tc.desc: Add operation: data in inputIndices is nullptr + * @tc.type: FUNC + */ +HWTEST_F(RankTest, SUB_AI_NNRt_Func_North_Rank_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RankModel1 rankModel; + OHNNGraphArgs graphArgs = rankModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Rank_Model_AddOperation_07 + * @tc.desc: Add operation: an index in the inputIndices data does not exist + * @tc.type: FUNC + */ +HWTEST_F(RankTest, SUB_AI_NNRt_Func_North_Rank_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RankModel1 rankModel; + OHNNGraphArgs graphArgs = rankModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices,
&outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Rank_Model_AddOperation_08 + * @tc.desc: Add operation: the size of inputIndices is 0 + * @tc.type: FUNC + */ +HWTEST_F(RankTest, SUB_AI_NNRt_Func_North_Rank_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RankModel1 rankModel; + OHNNGraphArgs graphArgs = rankModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Rank_Model_AddOperation_09 + * @tc.desc: Add operation: outputIndices is nullptr + * @tc.type: FUNC + */ +HWTEST_F(RankTest, SUB_AI_NNRt_Func_North_Rank_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RankModel1 rankModel; + OHNNGraphArgs graphArgs = rankModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + &paramIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/reciprocal_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/reciprocal_test.cpp new file mode 100644 index 0000000..b9364c0 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/reciprocal_test.cpp @@ -0,0 +1,806 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#include <gtest/gtest.h> +#include <vector> +#include <algorithm> +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class ReciprocalTest : public testing::Test {}; + +struct ReciprocalModel1 { + const std::vector<int32_t> tensor_shape = {2, 2}; + float inputValue[2][2] = {{1, 2}, {3, 4}}; + float outputValue[2][2] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 4*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 4*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_RECIPROCAL, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct ReciprocalModel2 { + const std::vector<int32_t> tensor_shape = {3}; + float inputValue[3] = {-1, -2, -4}; + float outputValue[3] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 3*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_RECIPROCAL, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct ReciprocalModel3 { + const std::vector<int32_t> tensor_shape = {1}; + bool inputValue[1] = {false}; + float outputValue[1] = {0}; + + OHNNOperandTest input = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, inputValue, sizeof(bool)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_RECIPROCAL, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Reciprocal_Build_01 + * @tc.desc: Build test for ReciprocalModel1 + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalTest, SUB_AI_NNRt_Func_North_Reciprocal_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReciprocalModel1 reciprocalModel; + OHNNGraphArgs graphArgs = reciprocalModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Reciprocal_Build_02 + * @tc.desc: Build test for ReciprocalModel2 + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalTest, SUB_AI_NNRt_Func_North_Reciprocal_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReciprocalModel2 reciprocalModel; + OHNNGraphArgs graphArgs = reciprocalModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation,
executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Reciprocal_Build_03 + * @tc.desc: ReciprocalModel3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalTest, SUB_AI_NNRt_Func_North_Reciprocal_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReciprocalModel3 reciprocalModel; + OHNNGraphArgs graphArgs = reciprocalModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Reciprocal_Build_04 + * @tc.desc: ReciprocalModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalTest, SUB_AI_NNRt_Func_North_Reciprocal_Build_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReciprocalModel1 reciprocalModel; + OHNNGraphArgs graphArgs = reciprocalModel.graphArgs; + graphArgs.operands = {reciprocalModel.input, reciprocalModel.input, reciprocalModel.output}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Reciprocal_Build_05 + * @tc.desc: ReciprocalModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalTest, SUB_AI_NNRt_Func_North_Reciprocal_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReciprocalModel1 reciprocalModel; + OHNNGraphArgs graphArgs = reciprocalModel.graphArgs; + graphArgs.operands = {reciprocalModel.input, reciprocalModel.output, reciprocalModel.output}; + graphArgs.inputIndices = {0}; + graphArgs.outputIndices = {1, 2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Reciprocal_Build_06 + * @tc.desc: ReciprocalModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalTest, SUB_AI_NNRt_Func_North_Reciprocal_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReciprocalModel1 reciprocalModel; + OHNNGraphArgs graphArgs = reciprocalModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {reciprocalModel.input, reciprocalModel.output, activation}; + graphArgs.paramIndices = {2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Reciprocal_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalTest, SUB_AI_NNRt_Func_North_Reciprocal_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, 
nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Reciprocal_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalTest, SUB_AI_NNRt_Func_North_Reciprocal_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReciprocalModel1 reciprocalModel; + OHNNGraphArgs graphArgs = reciprocalModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Reciprocal_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalTest, SUB_AI_NNRt_Func_North_Reciprocal_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReciprocalModel1 reciprocalModel; + OHNNGraphArgs graphArgs = reciprocalModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Reciprocal_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalTest, SUB_AI_NNRt_Func_North_Reciprocal_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReciprocalModel1 reciprocalModel; + OHNNGraphArgs graphArgs = reciprocalModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector<NN_TensorDesc*> tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Reciprocal_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buffer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalTest, SUB_AI_NNRt_Func_North_Reciprocal_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReciprocalModel1 reciprocalModel; + OHNNGraphArgs graphArgs = reciprocalModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector<NN_TensorDesc*> tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + 
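+    // Release the NN_TensorDesc handles created above, then destroy the model.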
FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Reciprocal_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalTest, SUB_AI_NNRt_Func_North_Reciprocal_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReciprocalModel1 reciprocalModel; + OHNNGraphArgs graphArgs = reciprocalModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Reciprocal_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalTest, SUB_AI_NNRt_Func_North_Reciprocal_Model_SpecifyInputsAndOutputs_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReciprocalModel1 reciprocalModel; + OHNNGraphArgs graphArgs = reciprocalModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Reciprocal_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalTest, SUB_AI_NNRt_Func_North_Reciprocal_Model_SpecifyInputsAndOutputs_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReciprocalModel1 reciprocalModel; + OHNNGraphArgs graphArgs = reciprocalModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Reciprocal_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalTest, SUB_AI_NNRt_Func_North_Reciprocal_Model_SpecifyInputsAndOutputs_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReciprocalModel1 reciprocalModel; + OHNNGraphArgs graphArgs = 
reciprocalModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Reciprocal_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalTest, SUB_AI_NNRt_Func_North_Reciprocal_Model_SpecifyInputsAndOutputs_04, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReciprocalModel1 reciprocalModel; + OHNNGraphArgs graphArgs = reciprocalModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Reciprocal_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalTest, SUB_AI_NNRt_Func_North_Reciprocal_Model_SpecifyInputsAndOutputs_05, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReciprocalModel1 reciprocalModel; + OHNNGraphArgs graphArgs = reciprocalModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Reciprocal_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalTest, SUB_AI_NNRt_Func_North_Reciprocal_Model_SpecifyInputsAndOutputs_06, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReciprocalModel1 reciprocalModel; + OHNNGraphArgs graphArgs = reciprocalModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Reciprocal_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalTest, SUB_AI_NNRt_Func_North_Reciprocal_Model_SpecifyInputsAndOutputs_07, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReciprocalModel1 reciprocalModel; + 
OHNNGraphArgs graphArgs = reciprocalModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Reciprocal_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalTest, SUB_AI_NNRt_Func_North_Reciprocal_Model_SpecifyInputsAndOutputs_08, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReciprocalModel1 reciprocalModel; + OHNNGraphArgs graphArgs = reciprocalModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Reciprocal_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalTest, SUB_AI_NNRt_Func_North_Reciprocal_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReciprocalModel1 reciprocalModel; + OHNNGraphArgs graphArgs = reciprocalModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Reciprocal_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalTest, SUB_AI_NNRt_Func_North_Reciprocal_Model_AddOperation_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReciprocalModel1 reciprocalModel; + OHNNGraphArgs graphArgs = reciprocalModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Reciprocal_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalTest, SUB_AI_NNRt_Func_North_Reciprocal_Model_AddOperation_03, Function | 
MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReciprocalModel1 reciprocalModel; + OHNNGraphArgs graphArgs = reciprocalModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Reciprocal_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalTest, SUB_AI_NNRt_Func_North_Reciprocal_Model_AddOperation_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReciprocalModel1 reciprocalModel; + OHNNGraphArgs graphArgs = reciprocalModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Reciprocal_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalTest, SUB_AI_NNRt_Func_North_Reciprocal_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReciprocalModel1 reciprocalModel; + OHNNGraphArgs graphArgs = reciprocalModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Reciprocal_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalTest, SUB_AI_NNRt_Func_North_Reciprocal_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReciprocalModel1 reciprocalModel; + OHNNGraphArgs graphArgs = reciprocalModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, 
OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Reciprocal_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalTest, SUB_AI_NNRt_Func_North_Reciprocal_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReciprocalModel1 reciprocalModel; + OHNNGraphArgs graphArgs = reciprocalModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Reciprocal_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalTest, SUB_AI_NNRt_Func_North_Reciprocal_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReciprocalModel1 reciprocalModel; + OHNNGraphArgs graphArgs = reciprocalModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Reciprocal_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReciprocalTest, SUB_AI_NNRt_Func_North_Reciprocal_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReciprocalModel1 reciprocalModel; + OHNNGraphArgs graphArgs = reciprocalModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + &paramIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/reducel2_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/reducel2_test.cpp new file mode 100644 index 0000000..a3253e5 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/reducel2_test.cpp @@ -0,0 +1,948 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class ReduceL2Test : public testing::Test {}; + +struct ReduceL2Model1 { + const std::vector input_shape = {2, 2}; + const std::vector output_shape = {2, 2}; + + bool keepDimsValue[1] = {false}; + bool reduceToEndValue[1] = {false}; + float coeffValue[1] = {1}; + float inputValue[2][2] = {{1, 2}, {3, 4}}; + float axisValue[1] = {0}; + float outputValue[2][2] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 4*sizeof(float)}; + OHNNOperandTest axis = {OH_NN_FLOAT32, OH_NN_TENSOR, {1}, axisValue, sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 4*sizeof(float)}; + OHNNOperandTest keepDims = {OH_NN_BOOL, OH_NN_REDUCE_L2_KEEP_DIMS, {1}, keepDimsValue, sizeof(bool)}; + OHNNOperandTest reduceToEnd = {OH_NN_BOOL, OH_NN_REDUCE_L2_REDUCE_TO_END, {1}, reduceToEndValue, sizeof(bool)}; + OHNNOperandTest coeff = {OH_NN_FLOAT32, OH_NN_REDUCE_L2_COEFF, {1}, coeffValue, sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_REDUCE_L2, + .operands = {input, axis, output, keepDims, reduceToEnd, coeff}, + .paramIndices = {3, 4, 5}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct ReduceL2Model2 { + const std::vector input_shape = {2, 2}; + const std::vector output_shape = {1, 2}; + + bool keepDimsValue[1] = {true}; + bool reduceToEndValue[1] = {false}; + float coeffValue[1] = {1}; + float inputValue[2][2] = {{1, 2}, {3, 4}}; + float axisValue[1] = {1}; + float outputValue[1][2] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 4*sizeof(float)}; + OHNNOperandTest axis = {OH_NN_FLOAT32, OH_NN_TENSOR, {1}, axisValue, sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 2*sizeof(float)}; + OHNNOperandTest keepDims = {OH_NN_BOOL, OH_NN_REDUCE_L2_KEEP_DIMS, {1}, keepDimsValue, sizeof(bool)}; + OHNNOperandTest reduceToEnd = {OH_NN_BOOL, OH_NN_REDUCE_L2_REDUCE_TO_END, {1}, reduceToEndValue, sizeof(bool)}; + OHNNOperandTest coeff = {OH_NN_FLOAT32, OH_NN_REDUCE_L2_COEFF, {1}, coeffValue, sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_REDUCE_L2, + .operands = {input, axis, output, keepDims, reduceToEnd, coeff}, + .paramIndices = {3, 4, 5}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct ReduceL2Model3 { + const std::vector input_shape = {2, 2, 2}; + const std::vector output_shape = {2, 2}; + + bool keepDimsValue[1] = {false}; + bool reduceToEndValue[1] = {false}; + float coeffValue[1] = {1}; + float inputValue[2][2][2] = {{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}; + float axisValue[2] = {0, 1}; + float outputValue[2][2] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 8*sizeof(float)}; + OHNNOperandTest axis = {OH_NN_FLOAT32, OH_NN_TENSOR, {2}, axisValue, 2*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 4*sizeof(float)}; 
+ OHNNOperandTest keepDims = {OH_NN_BOOL, OH_NN_REDUCE_L2_KEEP_DIMS, {1}, keepDimsValue, sizeof(bool)}; + OHNNOperandTest reduceToEnd = {OH_NN_BOOL, OH_NN_REDUCE_L2_REDUCE_TO_END, {1}, reduceToEndValue, sizeof(bool)}; + OHNNOperandTest coeff = {OH_NN_FLOAT32, OH_NN_REDUCE_L2_COEFF, {1}, coeffValue, sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_REDUCE_L2, + .operands = {input, axis, output, keepDims, reduceToEnd, coeff}, + .paramIndices = {3, 4, 5}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct ReduceL2Model4 { + const std::vector input_shape = {2, 2, 2}; + const std::vector output_shape = {2, 2}; + + bool keepDimsValue[1] = {true}; + bool reduceToEndValue[1] = {true}; + float coeffValue[1] = {1}; + float inputValue[2][2][2] = {{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}; + float axisValue[1] = {0}; + float outputValue[1][2] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 8*sizeof(float)}; + OHNNOperandTest axis = {OH_NN_FLOAT32, OH_NN_TENSOR, {1}, axisValue, sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 2*sizeof(float)}; + OHNNOperandTest keepDims = {OH_NN_BOOL, OH_NN_REDUCE_L2_KEEP_DIMS, {1}, keepDimsValue, sizeof(bool)}; + OHNNOperandTest reduceToEnd = {OH_NN_BOOL, OH_NN_REDUCE_L2_REDUCE_TO_END, {1}, reduceToEndValue, sizeof(bool)}; + OHNNOperandTest coeff = {OH_NN_FLOAT32, OH_NN_REDUCE_L2_COEFF, {1}, coeffValue, sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_REDUCE_L2, + .operands = {input, axis, output, keepDims, reduceToEnd, coeff}, + .paramIndices = {3, 4, 5}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct ReduceL2Model5 { + const std::vector input_shape = {2, 2, 2}; + const std::vector output_shape = {2, 2}; + + bool keepDimsValue[1] = {false}; + bool reduceToEndValue[1] = {true}; + float coeffValue[1] = {2}; + float inputValue[2][2][2] = {{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}; + float axisValue[2] = {0, 1}; + float outputValue[2] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 8*sizeof(float)}; + OHNNOperandTest axis = {OH_NN_FLOAT32, OH_NN_TENSOR, {2}, axisValue, 2*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 2*sizeof(float)}; + OHNNOperandTest keepDims = {OH_NN_BOOL, OH_NN_REDUCE_L2_KEEP_DIMS, {1}, keepDimsValue, sizeof(bool)}; + OHNNOperandTest reduceToEnd = {OH_NN_BOOL, OH_NN_REDUCE_L2_REDUCE_TO_END, {1}, reduceToEndValue, sizeof(bool)}; + OHNNOperandTest coeff = {OH_NN_FLOAT32, OH_NN_REDUCE_L2_COEFF, {1}, coeffValue, sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_REDUCE_L2, + .operands = {input, axis, output, keepDims, reduceToEnd, coeff}, + .paramIndices = {3, 4, 5}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceL2_Build_01 + * @tc.desc: ReduceL2Model1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceL2Test, SUB_AI_NNRt_Func_North_ReduceL2_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceL2Model1 reduceL2Model; + OHNNGraphArgs graphArgs = reduceL2Model.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, 
+ }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceL2_Build_02 + * @tc.desc: ReduceL2Model2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceL2Test, SUB_AI_NNRt_Func_North_ReduceL2_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceL2Model2 reduceL2Model; + OHNNGraphArgs graphArgs = reduceL2Model.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceL2_Build_03 + * @tc.desc: ReduceL2Model3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceL2Test, SUB_AI_NNRt_Func_North_ReduceL2_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceL2Model3 reduceL2Model; + OHNNGraphArgs graphArgs = reduceL2Model.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceL2_Build_04 + * @tc.desc: ReduceL2Model4模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceL2Test, SUB_AI_NNRt_Func_North_ReduceL2_Build_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceL2Model4 reduceL2Model; + OHNNGraphArgs graphArgs = reduceL2Model.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceL2_Build_05 + * @tc.desc: ReduceL2Model5模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceL2Test, SUB_AI_NNRt_Func_North_ReduceL2_Build_05, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceL2Model5 reduceL2Model; + OHNNGraphArgs graphArgs = reduceL2Model.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = 
OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceL2_Build_06 + * @tc.desc: ReduceL2Model1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceL2Test, SUB_AI_NNRt_Func_North_ReduceL2_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceL2Model1 reduceL2Model; + OHNNGraphArgs graphArgs = reduceL2Model.graphArgs; + graphArgs.operands = {reduceL2Model.input, reduceL2Model.input, reduceL2Model.axis, reduceL2Model.output, + reduceL2Model.keepDims, reduceL2Model.reduceToEnd, reduceL2Model.coeff}; + graphArgs.inputIndices = {0, 1, 2}; + graphArgs.outputIndices = {3}; + graphArgs.paramIndices = {4, 5, 6}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceL2_Build_07 + * @tc.desc: ReduceL2Model1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceL2Test, SUB_AI_NNRt_Func_North_ReduceL2_Build_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceL2Model1 reduceL2Model; + OHNNGraphArgs graphArgs = reduceL2Model.graphArgs; + graphArgs.operands = {reduceL2Model.input, reduceL2Model.axis, reduceL2Model.output, reduceL2Model.output, + reduceL2Model.keepDims, reduceL2Model.reduceToEnd, reduceL2Model.coeff}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2, 3}; + graphArgs.paramIndices = {4, 5, 6}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceL2_Build_08 + * @tc.desc: ReduceL2Model1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceL2Test, SUB_AI_NNRt_Func_North_ReduceL2_Build_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceL2Model1 reduceL2Model; + OHNNGraphArgs graphArgs = reduceL2Model.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {reduceL2Model.input, reduceL2Model.axis, reduceL2Model.output, activation, + reduceL2Model.keepDims, reduceL2Model.reduceToEnd, reduceL2Model.coeff}; + graphArgs.paramIndices = {3, 4, 5, 6}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceL2_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(ReduceL2Test, SUB_AI_NNRt_Func_North_ReduceL2_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceL2_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(ReduceL2Test, SUB_AI_NNRt_Func_North_ReduceL2_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + 
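+    // specifyIO is disabled below, so finishing the model without declared inputs/outputs is expected to return OH_NN_OPERATION_FORBIDDEN.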
ReduceL2Model1 reduceL2Model; + OHNNGraphArgs graphArgs = reduceL2Model.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceL2_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(ReduceL2Test, SUB_AI_NNRt_Func_North_ReduceL2_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceL2Model1 reduceL2Model; + OHNNGraphArgs graphArgs = reduceL2Model.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceL2_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(ReduceL2Test, SUB_AI_NNRt_Func_North_ReduceL2_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceL2Model1 reduceL2Model; + OHNNGraphArgs graphArgs = reduceL2Model.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceL2_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceL2Test, SUB_AI_NNRt_Func_North_ReduceL2_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceL2Model1 reduceL2Model; + OHNNGraphArgs graphArgs = reduceL2Model.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceL2_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(ReduceL2Test, SUB_AI_NNRt_Func_North_ReduceL2_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + 
EXPECT_NE(nullptr, model); + + ReduceL2Model1 reduceL2Model; + OHNNGraphArgs graphArgs = reduceL2Model.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceL2_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceL2Test, SUB_AI_NNRt_Func_North_ReduceL2_Model_SpecifyInputsAndOutputs_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceL2Model1 reduceL2Model; + OHNNGraphArgs graphArgs = reduceL2Model.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceL2_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceL2Test, SUB_AI_NNRt_Func_North_ReduceL2_Model_SpecifyInputsAndOutputs_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceL2Model1 reduceL2Model; + OHNNGraphArgs graphArgs = reduceL2Model.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceL2_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ReduceL2Test, SUB_AI_NNRt_Func_North_ReduceL2_Model_SpecifyInputsAndOutputs_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceL2Model1 reduceL2Model; + OHNNGraphArgs graphArgs = reduceL2Model.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + 
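+    // Index 100000 does not refer to any tensor added to the model, so the call above must be rejected.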
Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceL2_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ReduceL2Test, SUB_AI_NNRt_Func_North_ReduceL2_Model_SpecifyInputsAndOutputs_04, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceL2Model1 reduceL2Model; + OHNNGraphArgs graphArgs = reduceL2Model.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceL2_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceL2Test, SUB_AI_NNRt_Func_North_ReduceL2_Model_SpecifyInputsAndOutputs_05, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceL2Model1 reduceL2Model; + OHNNGraphArgs graphArgs = reduceL2Model.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceL2_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceL2Test, SUB_AI_NNRt_Func_North_ReduceL2_Model_SpecifyInputsAndOutputs_06, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceL2Model1 reduceL2Model; + OHNNGraphArgs graphArgs = reduceL2Model.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceL2_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ReduceL2Test, SUB_AI_NNRt_Func_North_ReduceL2_Model_SpecifyInputsAndOutputs_07, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceL2Model1 reduceL2Model; + OHNNGraphArgs graphArgs = reduceL2Model.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, 
&outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceL2_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ReduceL2Test, SUB_AI_NNRt_Func_North_ReduceL2_Model_SpecifyInputsAndOutputs_08, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceL2Model1 reduceL2Model; + OHNNGraphArgs graphArgs = reduceL2Model.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceL2_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceL2Test, SUB_AI_NNRt_Func_North_ReduceL2_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceL2Model1 reduceL2Model; + OHNNGraphArgs graphArgs = reduceL2Model.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceL2_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceL2Test, SUB_AI_NNRt_Func_North_ReduceL2_Model_AddOperation_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceL2Model1 reduceL2Model; + OHNNGraphArgs graphArgs = reduceL2Model.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceL2_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ReduceL2Test, SUB_AI_NNRt_Func_North_ReduceL2_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceL2Model1 reduceL2Model; + OHNNGraphArgs graphArgs = reduceL2Model.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = 
TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceL2_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ReduceL2Test, SUB_AI_NNRt_Func_North_ReduceL2_Model_AddOperation_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceL2Model1 reduceL2Model; + OHNNGraphArgs graphArgs = reduceL2Model.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceL2_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceL2Test, SUB_AI_NNRt_Func_North_ReduceL2_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceL2Model1 reduceL2Model; + OHNNGraphArgs graphArgs = reduceL2Model.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceL2_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceL2Test, SUB_AI_NNRt_Func_North_ReduceL2_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceL2Model1 reduceL2Model; + OHNNGraphArgs graphArgs = reduceL2Model.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceL2_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ReduceL2Test, SUB_AI_NNRt_Func_North_ReduceL2_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceL2Model1 reduceL2Model; + OHNNGraphArgs 
graphArgs = reduceL2Model.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceL2_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ReduceL2Test, SUB_AI_NNRt_Func_North_ReduceL2_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceL2Model1 reduceL2Model; + OHNNGraphArgs graphArgs = reduceL2Model.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceL2_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceL2Test, SUB_AI_NNRt_Func_North_ReduceL2_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceL2Model1 reduceL2Model; + OHNNGraphArgs graphArgs = reduceL2Model.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/reducemax_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/reducemax_test.cpp new file mode 100644 index 0000000..7f480c5 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/reducemax_test.cpp @@ -0,0 +1,928 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class ReduceMaxTest : public testing::Test {}; + +struct ReduceMaxModel1 { + const std::vector input_shape = {2, 3}; + const std::vector output_shape = {3}; + + bool keepDimsValue[1] = {false}; + float inputValue[2][3] = {{1, 2, 3}, {4, 0, 6}}; + float axisValue[1] = {0}; + float outputValue[3] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 6*sizeof(float)}; + OHNNOperandTest axis = {OH_NN_FLOAT32, OH_NN_TENSOR, {1}, axisValue, sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 3*sizeof(float)}; + OHNNOperandTest keepDims = {OH_NN_BOOL, OH_NN_REDUCE_MIN_KEEP_DIMS, {1}, keepDimsValue, sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_REDUCE_MIN, + .operands = {input, axis, output, keepDims}, + .paramIndices = {3}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct ReduceMaxModel2 { + const std::vector input_shape = {2, 3}; + const std::vector output_shape = {2, 1}; + + bool keepDimsValue[1] = {true}; + float inputValue[2][3] = {{1, 2, 3}, {4, 0, 6}}; + float axisValue[1] = {1}; + float outputValue[2][1] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 6*sizeof(float)}; + OHNNOperandTest axis = {OH_NN_FLOAT32, OH_NN_TENSOR, {1}, axisValue, sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 2*sizeof(float)}; + OHNNOperandTest keepDims = {OH_NN_BOOL, OH_NN_REDUCE_MIN_KEEP_DIMS, {1}, keepDimsValue, sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_REDUCE_MIN, + .operands = {input, axis, output, keepDims}, + .paramIndices = {3}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct ReduceMaxModel3 { + const std::vector input_shape = {3, 2}; + const std::vector output_shape = {3}; + + bool keepDimsValue[1] = {false}; + float inputValue[3][2] = {{7, 8}, {3, 4}, {5, 1}}; + float axisValue[1] = {-1}; + float outputValue[3] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 6*sizeof(float)}; + OHNNOperandTest axis = {OH_NN_FLOAT32, OH_NN_TENSOR, {1}, axisValue, sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 3*sizeof(float)}; + OHNNOperandTest keepDims = {OH_NN_BOOL, OH_NN_REDUCE_MIN_KEEP_DIMS, {1}, keepDimsValue, sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_REDUCE_MIN, + .operands = {input, axis, output, keepDims}, + .paramIndices = {3}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct ReduceMaxModel4 { + const std::vector input_shape = {2, 3}; + const std::vector output_shape = {1, 1}; + + bool keepDimsValue[1] = {true}; + float inputValue[2][3] = {{1, 2, 3}, {4, 0, 6}}; + float axisValue[2] = {0, 1}; + float outputValue[1][1] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 6*sizeof(float)}; + OHNNOperandTest axis = {OH_NN_FLOAT32, OH_NN_TENSOR, {2}, axisValue, 2*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, sizeof(float)}; + OHNNOperandTest keepDims = {OH_NN_BOOL, OH_NN_REDUCE_MIN_KEEP_DIMS, {1}, keepDimsValue, sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_REDUCE_MIN, + .operands = {input, axis, output, keepDims}, + .paramIndices = {3}, + .inputIndices = {0, 
1}, + .outputIndices = {2}}; +}; + +struct ReduceMaxModel5 { + const std::vector input_shape = {2, 2}; + const std::vector output_shape = {1}; + + bool keepDimsValue[1] = {false}; + float inputValue[2][2] = {{2, 3}, {6, 1}}; + float axisValue[1] = {1}; + float outputValue[1] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 6*sizeof(float)}; + OHNNOperandTest axis = {OH_NN_FLOAT32, OH_NN_TENSOR, {1}, axisValue, sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 2*sizeof(float)}; + OHNNOperandTest keepDims = {OH_NN_BOOL, OH_NN_REDUCE_MIN_KEEP_DIMS, {1}, keepDimsValue, sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_REDUCE_MIN, + .operands = {input, axis, output, keepDims}, + .paramIndices = {3}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMax_Build_01 + * @tc.desc: ReduceMaxModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxTest, SUB_AI_NNRt_Func_North_ReduceMax_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMaxModel1 reduceMaxModel; + OHNNGraphArgs graphArgs = reduceMaxModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMax_Build_02 + * @tc.desc: ReduceMaxModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxTest, SUB_AI_NNRt_Func_North_ReduceMax_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMaxModel2 reduceMaxModel; + OHNNGraphArgs graphArgs = reduceMaxModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMax_Build_03 + * @tc.desc: ReduceMaxModel3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxTest, SUB_AI_NNRt_Func_North_ReduceMax_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMaxModel3 reduceMaxModel; + OHNNGraphArgs graphArgs = reduceMaxModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + 
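// Constructing the executor simply confirms that the compiled ReduceMax graph is usable end to end; Free() below releases the model, compilation and executor handles together. +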
Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMax_Build_04 + * @tc.desc: ReduceMaxModel4模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxTest, SUB_AI_NNRt_Func_North_ReduceMax_Build_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMaxModel4 reduceMaxModel; + OHNNGraphArgs graphArgs = reduceMaxModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMax_Build_05 + * @tc.desc: ReduceMaxModel5模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxTest, SUB_AI_NNRt_Func_North_ReduceMax_Build_05, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMaxModel5 reduceMaxModel; + OHNNGraphArgs graphArgs = reduceMaxModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMax_Build_06 + * @tc.desc: ReduceMaxModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxTest, SUB_AI_NNRt_Func_North_ReduceMax_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMaxModel1 reduceMaxModel; + OHNNGraphArgs graphArgs = reduceMaxModel.graphArgs; + graphArgs.operands = {reduceMaxModel.input, reduceMaxModel.input, reduceMaxModel.axis, reduceMaxModel.output, + reduceMaxModel.keepDims}; + graphArgs.inputIndices = {0, 1, 2}; + graphArgs.outputIndices = {3}; + graphArgs.paramIndices = {4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMax_Build_07 + * @tc.desc: ReduceMaxModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxTest, SUB_AI_NNRt_Func_North_ReduceMax_Build_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMaxModel1 reduceMaxModel; + OHNNGraphArgs graphArgs = reduceMaxModel.graphArgs; + graphArgs.operands = {reduceMaxModel.input, reduceMaxModel.axis, reduceMaxModel.output, reduceMaxModel.output, + reduceMaxModel.keepDims}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2, 3}; + graphArgs.paramIndices = {4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMax_Build_08 + * @tc.desc: ReduceMaxModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxTest, 
SUB_AI_NNRt_Func_North_ReduceMax_Build_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMaxModel1 reduceMaxModel; + OHNNGraphArgs graphArgs = reduceMaxModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {reduceMaxModel.input, reduceMaxModel.axis, reduceMaxModel.output, activation, + reduceMaxModel.keepDims}; + graphArgs.paramIndices = {3, 4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMax_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxTest, SUB_AI_NNRt_Func_North_ReduceMax_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMax_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxTest, SUB_AI_NNRt_Func_North_ReduceMax_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMaxModel1 reduceMaxModel; + OHNNGraphArgs graphArgs = reduceMaxModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMax_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxTest, SUB_AI_NNRt_Func_North_ReduceMax_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMaxModel1 reduceMaxModel; + OHNNGraphArgs graphArgs = reduceMaxModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMax_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxTest, SUB_AI_NNRt_Func_North_ReduceMax_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMaxModel1 reduceMaxModel; + OHNNGraphArgs graphArgs = reduceMaxModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMax_Model_SetOperandValue_02 + * @tc.desc: 
设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxTest, SUB_AI_NNRt_Func_North_ReduceMax_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMaxModel1 reduceMaxModel; + OHNNGraphArgs graphArgs = reduceMaxModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMax_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxTest, SUB_AI_NNRt_Func_North_ReduceMax_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMaxModel1 reduceMaxModel; + OHNNGraphArgs graphArgs = reduceMaxModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMax_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxTest, SUB_AI_NNRt_Func_North_ReduceMax_Model_SpecifyInputsAndOutputs_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMaxModel1 reduceMaxModel; + OHNNGraphArgs graphArgs = reduceMaxModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMax_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxTest, SUB_AI_NNRt_Func_North_ReduceMax_Model_SpecifyInputsAndOutputs_02, + Function | MediumTest | 
Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMaxModel1 reduceMaxModel; + OHNNGraphArgs graphArgs = reduceMaxModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMax_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxTest, SUB_AI_NNRt_Func_North_ReduceMax_Model_SpecifyInputsAndOutputs_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMaxModel1 reduceMaxModel; + OHNNGraphArgs graphArgs = reduceMaxModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMax_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxTest, SUB_AI_NNRt_Func_North_ReduceMax_Model_SpecifyInputsAndOutputs_04, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMaxModel1 reduceMaxModel; + OHNNGraphArgs graphArgs = reduceMaxModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMax_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxTest, SUB_AI_NNRt_Func_North_ReduceMax_Model_SpecifyInputsAndOutputs_05, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMaxModel1 reduceMaxModel; + OHNNGraphArgs graphArgs = reduceMaxModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMax_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxTest, SUB_AI_NNRt_Func_North_ReduceMax_Model_SpecifyInputsAndOutputs_06, + Function | MediumTest | 
Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMaxModel1 reduceMaxModel; + OHNNGraphArgs graphArgs = reduceMaxModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMax_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxTest, SUB_AI_NNRt_Func_North_ReduceMax_Model_SpecifyInputsAndOutputs_07, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMaxModel1 reduceMaxModel; + OHNNGraphArgs graphArgs = reduceMaxModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMax_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxTest, SUB_AI_NNRt_Func_North_ReduceMax_Model_SpecifyInputsAndOutputs_08, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMaxModel1 reduceMaxModel; + OHNNGraphArgs graphArgs = reduceMaxModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMax_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxTest, SUB_AI_NNRt_Func_North_ReduceMax_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMaxModel1 reduceMaxModel; + OHNNGraphArgs graphArgs = reduceMaxModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMax_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxTest, 
SUB_AI_NNRt_Func_North_ReduceMax_Model_AddOperation_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMaxModel1 reduceMaxModel; + OHNNGraphArgs graphArgs = reduceMaxModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMax_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxTest, SUB_AI_NNRt_Func_North_ReduceMax_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMaxModel1 reduceMaxModel; + OHNNGraphArgs graphArgs = reduceMaxModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMax_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxTest, SUB_AI_NNRt_Func_North_ReduceMax_Model_AddOperation_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMaxModel1 reduceMaxModel; + OHNNGraphArgs graphArgs = reduceMaxModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMax_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxTest, SUB_AI_NNRt_Func_North_ReduceMax_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMaxModel1 reduceMaxModel; + OHNNGraphArgs graphArgs = reduceMaxModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + 
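// TransformUInt32Array (from nncore_utils.h) is assumed to wrap a std::vector<uint32_t> into an OH_NN_UInt32Array, roughly OH_NN_UInt32Array{vec.data(), static_cast<uint32_t>(vec.size())} (sketch only, not the actual helper). + // The AddOperation cases either drop one of these arrays or tamper with its data/size fields; here inputIndices is replaced by nullptr, which must be rejected with OH_NN_INVALID_PARAMETER. +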
EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMax_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxTest, SUB_AI_NNRt_Func_North_ReduceMax_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMaxModel1 reduceMaxModel; + OHNNGraphArgs graphArgs = reduceMaxModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMax_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxTest, SUB_AI_NNRt_Func_North_ReduceMax_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMaxModel1 reduceMaxModel; + OHNNGraphArgs graphArgs = reduceMaxModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMax_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxTest, SUB_AI_NNRt_Func_North_ReduceMax_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMaxModel1 reduceMaxModel; + OHNNGraphArgs graphArgs = reduceMaxModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMax_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceMaxTest, SUB_AI_NNRt_Func_North_ReduceMax_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMaxModel1 reduceMaxModel; + 
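// This case passes nullptr for both the model handle and outputIndices in the OH_NNModel_AddOperation call below; OH_NN_INVALID_PARAMETER is still the expected return code. +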
OHNNGraphArgs graphArgs = reduceMaxModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/reducemin_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/reducemin_test.cpp new file mode 100644 index 0000000..c33c0ed --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/reducemin_test.cpp @@ -0,0 +1,928 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class ReduceMinTest : public testing::Test {}; + +struct ReduceMinModel1 { + const std::vector input_shape = {2, 3}; + const std::vector output_shape = {3}; + + bool keepDimsValue[1] = {false}; + float inputValue[2][3] = {{1, 2, 3}, {4, 0, 6}}; + float axisValue[1] = {0}; + float outputValue[3] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 6*sizeof(float)}; + OHNNOperandTest axis = {OH_NN_FLOAT32, OH_NN_TENSOR, {1}, axisValue, sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 3*sizeof(float)}; + OHNNOperandTest keepDims = {OH_NN_BOOL, OH_NN_REDUCE_MIN_KEEP_DIMS, {1}, keepDimsValue, sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_REDUCE_MIN, + .operands = {input, axis, output, keepDims}, + .paramIndices = {3}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct ReduceMinModel2 { + const std::vector input_shape = {2, 3}; + const std::vector output_shape = {2, 1}; + + bool keepDimsValue[1] = {true}; + float inputValue[2][3] = {{1, 2, 3}, {4, 0, 6}}; + float axisValue[1] = {1}; + float outputValue[2][1] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 6*sizeof(float)}; + OHNNOperandTest axis = {OH_NN_FLOAT32, OH_NN_TENSOR, {1}, axisValue, sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 2*sizeof(float)}; + OHNNOperandTest keepDims = {OH_NN_BOOL, OH_NN_REDUCE_MIN_KEEP_DIMS, {1}, keepDimsValue, sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_REDUCE_MIN, + .operands = {input, axis, output, keepDims}, + .paramIndices = {3}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct ReduceMinModel3 { + const std::vector input_shape = {3, 2}; + const std::vector output_shape = {3}; + + bool keepDimsValue[1] = {false}; + float inputValue[3][2] = {{7, 8}, {3, 4}, {5, 1}}; + float axisValue[1] = {-1}; + float 
outputValue[3] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 6*sizeof(float)}; + OHNNOperandTest axis = {OH_NN_FLOAT32, OH_NN_TENSOR, {1}, axisValue, sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 3*sizeof(float)}; + OHNNOperandTest keepDims = {OH_NN_BOOL, OH_NN_REDUCE_MIN_KEEP_DIMS, {1}, keepDimsValue, sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_REDUCE_MIN, + .operands = {input, axis, output, keepDims}, + .paramIndices = {3}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct ReduceMinModel4 { + const std::vector input_shape = {2, 3}; + const std::vector output_shape = {1, 1}; + + bool keepDimsValue[1] = {true}; + float inputValue[2][3] = {{1, 2, 3}, {4, 0, 6}}; + float axisValue[2] = {0, 1}; + float outputValue[1][1] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 6*sizeof(float)}; + OHNNOperandTest axis = {OH_NN_FLOAT32, OH_NN_TENSOR, {2}, axisValue, 2*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, sizeof(float)}; + OHNNOperandTest keepDims = {OH_NN_BOOL, OH_NN_REDUCE_MIN_KEEP_DIMS, {1}, keepDimsValue, sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_REDUCE_MIN, + .operands = {input, axis, output, keepDims}, + .paramIndices = {3}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct ReduceMinModel5 { + const std::vector input_shape = {2, 2}; + const std::vector output_shape = {1}; + + bool keepDimsValue[1] = {false}; + float inputValue[2][2] = {{2, 3}, {6, 1}}; + float axisValue[1] = {1}; + float outputValue[1] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 6*sizeof(float)}; + OHNNOperandTest axis = {OH_NN_FLOAT32, OH_NN_TENSOR, {1}, axisValue, sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 2*sizeof(float)}; + OHNNOperandTest keepDims = {OH_NN_BOOL, OH_NN_REDUCE_MIN_KEEP_DIMS, {1}, keepDimsValue, sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_REDUCE_MIN, + .operands = {input, axis, output, keepDims}, + .paramIndices = {3}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMin_Build_01 + * @tc.desc: ReduceMinModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinTest, SUB_AI_NNRt_Func_North_ReduceMin_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMinModel1 reduceMinModel; + OHNNGraphArgs graphArgs = reduceMinModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMin_Build_02 + * @tc.desc: ReduceMinModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinTest, SUB_AI_NNRt_Func_North_ReduceMin_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMinModel2 reduceMinModel; + 
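// ReduceMinModel2 reduces along axis 1 with keepDims = true, so the output tensor keeps rank 2 and has shape {2, 1}. +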
OHNNGraphArgs graphArgs = reduceMinModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMin_Build_03 + * @tc.desc: ReduceMinModel3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinTest, SUB_AI_NNRt_Func_North_ReduceMin_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMinModel3 reduceMinModel; + OHNNGraphArgs graphArgs = reduceMinModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMin_Build_04 + * @tc.desc: ReduceMinModel4模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinTest, SUB_AI_NNRt_Func_North_ReduceMin_Build_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMinModel4 reduceMinModel; + OHNNGraphArgs graphArgs = reduceMinModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMin_Build_05 + * @tc.desc: ReduceMinModel5模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinTest, SUB_AI_NNRt_Func_North_ReduceMin_Build_05, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMinModel5 reduceMinModel; + OHNNGraphArgs graphArgs = reduceMinModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMin_Build_06 + * @tc.desc: ReduceMinModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinTest, SUB_AI_NNRt_Func_North_ReduceMin_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = 
OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMinModel1 reduceMinModel; + OHNNGraphArgs graphArgs = reduceMinModel.graphArgs; + graphArgs.operands = {reduceMinModel.input, reduceMinModel.input, reduceMinModel.axis, reduceMinModel.output, + reduceMinModel.keepDims}; + graphArgs.inputIndices = {0, 1, 2}; + graphArgs.outputIndices = {3}; + graphArgs.paramIndices = {4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMin_Build_07 + * @tc.desc: ReduceMinModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinTest, SUB_AI_NNRt_Func_North_ReduceMin_Build_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMinModel1 reduceMinModel; + OHNNGraphArgs graphArgs = reduceMinModel.graphArgs; + graphArgs.operands = {reduceMinModel.input, reduceMinModel.axis, reduceMinModel.output, reduceMinModel.output, + reduceMinModel.keepDims}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2, 3}; + graphArgs.paramIndices = {4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMin_Build_08 + * @tc.desc: ReduceMinModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinTest, SUB_AI_NNRt_Func_North_ReduceMin_Build_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMinModel1 reduceMinModel; + OHNNGraphArgs graphArgs = reduceMinModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {reduceMinModel.input, reduceMinModel.axis, reduceMinModel.output, activation, + reduceMinModel.keepDims}; + graphArgs.paramIndices = {3, 4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMin_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinTest, SUB_AI_NNRt_Func_North_ReduceMin_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMin_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinTest, SUB_AI_NNRt_Func_North_ReduceMin_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMinModel1 reduceMinModel; + OHNNGraphArgs graphArgs = reduceMinModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMin_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinTest, SUB_AI_NNRt_Func_North_ReduceMin_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMinModel1 reduceMinModel; + OHNNGraphArgs graphArgs = reduceMinModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, 
BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMin_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinTest, SUB_AI_NNRt_Func_North_ReduceMin_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMinModel1 reduceMinModel; + OHNNGraphArgs graphArgs = reduceMinModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector<NN_TensorDesc*> tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMin_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buffer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinTest, SUB_AI_NNRt_Func_North_ReduceMin_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMinModel1 reduceMinModel; + OHNNGraphArgs graphArgs = reduceMinModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector<NN_TensorDesc*> tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMin_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinTest, SUB_AI_NNRt_Func_North_ReduceMin_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMinModel1 reduceMinModel; + OHNNGraphArgs graphArgs = reduceMinModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector<NN_TensorDesc*> tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + 
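// Only the tensors listed in paramIndices carry constant data; for those, this case calls OH_NNModel_SetTensorData with length 0 (and an out-of-range index) and expects OH_NN_INVALID_PARAMETER.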
+ if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMin_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinTest, SUB_AI_NNRt_Func_North_ReduceMin_Model_SpecifyInputsAndOutputs_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMinModel1 reduceMinModel; + OHNNGraphArgs graphArgs = reduceMinModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMin_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinTest, SUB_AI_NNRt_Func_North_ReduceMin_Model_SpecifyInputsAndOutputs_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMinModel1 reduceMinModel; + OHNNGraphArgs graphArgs = reduceMinModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMin_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinTest, SUB_AI_NNRt_Func_North_ReduceMin_Model_SpecifyInputsAndOutputs_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMinModel1 reduceMinModel; + OHNNGraphArgs graphArgs = reduceMinModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMin_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinTest, SUB_AI_NNRt_Func_North_ReduceMin_Model_SpecifyInputsAndOutputs_04, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMinModel1 reduceMinModel; + OHNNGraphArgs graphArgs = reduceMinModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto 
inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMin_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinTest, SUB_AI_NNRt_Func_North_ReduceMin_Model_SpecifyInputsAndOutputs_05, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMinModel1 reduceMinModel; + OHNNGraphArgs graphArgs = reduceMinModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMin_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinTest, SUB_AI_NNRt_Func_North_ReduceMin_Model_SpecifyInputsAndOutputs_06, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMinModel1 reduceMinModel; + OHNNGraphArgs graphArgs = reduceMinModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMin_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinTest, SUB_AI_NNRt_Func_North_ReduceMin_Model_SpecifyInputsAndOutputs_07, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMinModel1 reduceMinModel; + OHNNGraphArgs graphArgs = reduceMinModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMin_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinTest, SUB_AI_NNRt_Func_North_ReduceMin_Model_SpecifyInputsAndOutputs_08, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMinModel1 reduceMinModel; + OHNNGraphArgs graphArgs = reduceMinModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, 
BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMin_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinTest, SUB_AI_NNRt_Func_North_ReduceMin_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMinModel1 reduceMinModel; + OHNNGraphArgs graphArgs = reduceMinModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMin_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinTest, SUB_AI_NNRt_Func_North_ReduceMin_Model_AddOperation_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMinModel1 reduceMinModel; + OHNNGraphArgs graphArgs = reduceMinModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMin_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinTest, SUB_AI_NNRt_Func_North_ReduceMin_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMinModel1 reduceMinModel; + OHNNGraphArgs graphArgs = reduceMinModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMin_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinTest, SUB_AI_NNRt_Func_North_ReduceMin_Model_AddOperation_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = 
OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMinModel1 reduceMinModel; + OHNNGraphArgs graphArgs = reduceMinModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMin_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinTest, SUB_AI_NNRt_Func_North_ReduceMin_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMinModel1 reduceMinModel; + OHNNGraphArgs graphArgs = reduceMinModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMin_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinTest, SUB_AI_NNRt_Func_North_ReduceMin_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMinModel1 reduceMinModel; + OHNNGraphArgs graphArgs = reduceMinModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMin_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinTest, SUB_AI_NNRt_Func_North_ReduceMin_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMinModel1 reduceMinModel; + OHNNGraphArgs graphArgs = reduceMinModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, 
&inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMin_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinTest, SUB_AI_NNRt_Func_North_ReduceMin_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMinModel1 reduceMinModel; + OHNNGraphArgs graphArgs = reduceMinModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceMin_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceMinTest, SUB_AI_NNRt_Func_North_ReduceMin_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceMinModel1 reduceMinModel; + OHNNGraphArgs graphArgs = reduceMinModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/reducesum_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/reducesum_test.cpp new file mode 100644 index 0000000..f39a739 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/reducesum_test.cpp @@ -0,0 +1,938 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include <algorithm>
+#include <vector>
+#include <gtest/gtest.h>
+#include "nncore_utils.h"
+
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Test;
+class ReduceSumTest : public testing::Test {};
+
+struct ReduceSumModel1 {
+    const std::vector<int32_t> input_shape = {2, 3};
+    const std::vector<int32_t> output_shape = {1, 3};
+
+    bool keepDimsValue[1] = {true};
+    bool reduceToEndValue[1] = {false};
+    float inputValue[2][3] = {{1, 2, 3}, {4, 5, 6}};
+    float axisValue[1] = {0};
+    float outputValue[1][3] = {0};
+
+    OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 6*sizeof(float)};
+    OHNNOperandTest axis = {OH_NN_FLOAT32, OH_NN_TENSOR, {1}, axisValue, sizeof(float)};
+    OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 3*sizeof(float)};
+    OHNNOperandTest keepDims = {OH_NN_BOOL, OH_NN_REDUCE_SUM_KEEP_DIMS, {1}, keepDimsValue, sizeof(bool)};
+    OHNNOperandTest reduceToEnd = {OH_NN_BOOL, OH_NN_REDUCE_SUM_REDUCE_TO_END, {1}, reduceToEndValue, sizeof(bool)};
+    OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_REDUCE_SUM,
+        .operands = {input, axis, output, keepDims, reduceToEnd},
+        .paramIndices = {3, 4},
+        .inputIndices = {0, 1},
+        .outputIndices = {2}};
+};
+
+struct ReduceSumModel2 {
+    const std::vector<int32_t> input_shape = {2, 3};
+    const std::vector<int32_t> output_shape = {2};
+
+    bool keepDimsValue[1] = {false};
+    bool reduceToEndValue[1] = {false};
+    float inputValue[2][3] = {{1, 2, 3}, {4, 5, 6}};
+    float axisValue[1] = {1};
+    float outputValue[2] = {0};
+
+    OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 6*sizeof(float)};
+    OHNNOperandTest axis = {OH_NN_FLOAT32, OH_NN_TENSOR, {1}, axisValue, sizeof(float)};
+    OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 2*sizeof(float)};
+    OHNNOperandTest keepDims = {OH_NN_BOOL, OH_NN_REDUCE_SUM_KEEP_DIMS, {1}, keepDimsValue, sizeof(bool)};
+    OHNNOperandTest reduceToEnd = {OH_NN_BOOL, OH_NN_REDUCE_SUM_REDUCE_TO_END, {1}, reduceToEndValue, sizeof(bool)};
+    OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_REDUCE_SUM,
+        .operands = {input, axis, output, keepDims, reduceToEnd},
+        .paramIndices = {3, 4},
+        .inputIndices = {0, 1},
+        .outputIndices = {2}};
+};
+
+struct ReduceSumModel3 {
+    const std::vector<int32_t> input_shape = {2, 3};
+    const std::vector<int32_t> output_shape = {2, 3};
+
+    bool keepDimsValue[1] = {false};
+    bool reduceToEndValue[1] = {true};
+    float inputValue[2][3] = {{1, 2, 3}, {4, 5, 6}};
+    float axisValue = {};
+    float outputValue[2][3] = {0};
+
+    OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 6*sizeof(float)};
+    OHNNOperandTest axis = {OH_NN_FLOAT32, OH_NN_TENSOR, {}, &axisValue, 0*sizeof(float)};
+    OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 6*sizeof(float)};
+    OHNNOperandTest keepDims = {OH_NN_BOOL, OH_NN_REDUCE_SUM_KEEP_DIMS, {1}, keepDimsValue, sizeof(bool)};
+    OHNNOperandTest reduceToEnd = {OH_NN_BOOL, OH_NN_REDUCE_SUM_REDUCE_TO_END, {1}, reduceToEndValue, sizeof(bool)};
+    OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_REDUCE_SUM,
+        .operands = {input, axis, output, keepDims, reduceToEnd},
+        .paramIndices = {3, 4},
+        .inputIndices = {0, 1},
+        .outputIndices = {2}};
+};
+
+struct ReduceSumModel4 {
+    const std::vector<int32_t> input_shape = {2, 3};
+    const std::vector<int32_t> output_shape = {2, 1};
+
+    bool keepDimsValue[1] = {true};
+    bool reduceToEndValue[1] = {false};
+    float inputValue[2][3] = {{1, 2, 3}, {4, 5, 6}};
+    float axisValue[1] = {1};
+    float outputValue[2][1] = {0};
+
+
OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 6*sizeof(float)}; + OHNNOperandTest axis = {OH_NN_FLOAT32, OH_NN_TENSOR, {1}, &axisValue, sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 2*sizeof(float)}; + OHNNOperandTest keepDims = {OH_NN_BOOL, OH_NN_REDUCE_SUM_KEEP_DIMS, {1}, keepDimsValue, sizeof(bool)}; + OHNNOperandTest reduceToEnd = {OH_NN_BOOL, OH_NN_REDUCE_SUM_REDUCE_TO_END, {1}, reduceToEndValue, sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_REDUCE_SUM, + .operands = {input, axis, output, keepDims, reduceToEnd}, + .paramIndices = {3, 4}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +struct ReduceSumModel5 { + const std::vector input_shape = {2, 3}; + const std::vector output_shape = {1}; + + bool keepDimsValue[1] = {true}; + bool reduceToEndValue[1] = {false}; + float inputValue[2][3] = {{1, 2, 3}, {4, 5, 6}}; + float axisValue[1] = {1}; + float outputValue[1] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 6*sizeof(float)}; + OHNNOperandTest axis = {OH_NN_FLOAT32, OH_NN_TENSOR, {1}, axisValue, sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, sizeof(float)}; + OHNNOperandTest keepDims = {OH_NN_BOOL, OH_NN_REDUCE_SUM_KEEP_DIMS, {1}, keepDimsValue, sizeof(bool)}; + OHNNOperandTest reduceToEnd = {OH_NN_BOOL, OH_NN_REDUCE_SUM_REDUCE_TO_END, {1}, reduceToEndValue, sizeof(bool)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_REDUCE_SUM, + .operands = {input, axis, output, keepDims, reduceToEnd}, + .paramIndices = {3, 4}, + .inputIndices = {0, 1}, + .outputIndices = {2}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceSum_Build_01 + * @tc.desc: ReduceSumModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumTest, SUB_AI_NNRt_Func_North_ReduceSum_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceSumModel1 reduceSumModel; + OHNNGraphArgs graphArgs = reduceSumModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceSum_Build_02 + * @tc.desc: ReduceSumModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumTest, SUB_AI_NNRt_Func_North_ReduceSum_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceSumModel2 reduceSumModel; + OHNNGraphArgs graphArgs = reduceSumModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * 
@tc.number : SUB_AI_NNRt_Func_North_ReduceSum_Build_03 + * @tc.desc: ReduceSumModel3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumTest, SUB_AI_NNRt_Func_North_ReduceSum_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceSumModel3 reduceSumModel; + OHNNGraphArgs graphArgs = reduceSumModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceSum_Build_04 + * @tc.desc: ReduceSumModel4模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumTest, SUB_AI_NNRt_Func_North_ReduceSum_Build_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceSumModel4 reduceSumModel; + OHNNGraphArgs graphArgs = reduceSumModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceSum_Build_05 + * @tc.desc: ReduceSumModel5模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumTest, SUB_AI_NNRt_Func_North_ReduceSum_Build_05, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceSumModel5 reduceSumModel; + OHNNGraphArgs graphArgs = reduceSumModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceSum_Build_06 + * @tc.desc: ReduceSumModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumTest, SUB_AI_NNRt_Func_North_ReduceSum_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceSumModel1 reduceSumModel; + OHNNGraphArgs graphArgs = reduceSumModel.graphArgs; + graphArgs.operands = {reduceSumModel.input, reduceSumModel.input, reduceSumModel.axis, reduceSumModel.output, + reduceSumModel.keepDims, reduceSumModel.reduceToEnd}; + graphArgs.inputIndices = {0, 1, 2}; + graphArgs.outputIndices = {3}; + graphArgs.paramIndices = {4, 5}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : 
SUB_AI_NNRt_Func_North_ReduceSum_Build_07 + * @tc.desc: ReduceSumModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumTest, SUB_AI_NNRt_Func_North_ReduceSum_Build_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceSumModel1 reduceSumModel; + OHNNGraphArgs graphArgs = reduceSumModel.graphArgs; + graphArgs.operands = {reduceSumModel.input, reduceSumModel.axis, reduceSumModel.output, reduceSumModel.output, + reduceSumModel.keepDims, reduceSumModel.reduceToEnd}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2, 3}; + graphArgs.paramIndices = {4, 5}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceSum_Build_08 + * @tc.desc: ReduceSumModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumTest, SUB_AI_NNRt_Func_North_ReduceSum_Build_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceSumModel1 reduceSumModel; + OHNNGraphArgs graphArgs = reduceSumModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {reduceSumModel.input, reduceSumModel.axis, reduceSumModel.output, activation, + reduceSumModel.keepDims, reduceSumModel.reduceToEnd}; + graphArgs.paramIndices = {3, 4, 5}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceSum_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumTest, SUB_AI_NNRt_Func_North_ReduceSum_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceSum_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumTest, SUB_AI_NNRt_Func_North_ReduceSum_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceSumModel1 reduceSumModel; + OHNNGraphArgs graphArgs = reduceSumModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceSum_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumTest, SUB_AI_NNRt_Func_North_ReduceSum_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceSumModel1 reduceSumModel; + OHNNGraphArgs graphArgs = reduceSumModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceSum_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumTest, SUB_AI_NNRt_Func_North_ReduceSum_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceSumModel1 reduceSumModel; + OHNNGraphArgs graphArgs = 
reduceSumModel.graphArgs;
+
+    NN_TensorDesc* tensorDesc = nullptr;
+    std::vector<NN_TensorDesc*> tensorDescVec;
+
+    for (size_t i = 0; i < graphArgs.operands.size(); i++) {
+        const OHNNOperandTest &operandTem = graphArgs.operands[i];
+        tensorDesc = createTensorDesc(operandTem.shape.data(),
+                                      (uint32_t) operandTem.shape.size(),
+                                      operandTem.dataType, operandTem.format);
+        tensorDescVec.emplace_back(tensorDesc);
+        EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc));
+        EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type));
+
+        if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) !=
+            graphArgs.paramIndices.end()) {
+            EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(
+                model, 1000+i, operandTem.data, operandTem.length));
+        }
+    }
+
+    FreeTensorDescVec(tensorDescVec);
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_ReduceSum_Model_SetOperandValue_02
+ * @tc.desc: 设置操作数值,buffer为nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(ReduceSumTest, SUB_AI_NNRt_Func_North_ReduceSum_Model_SetOperandValue_02, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    ReduceSumModel1 reduceSumModel;
+    OHNNGraphArgs graphArgs = reduceSumModel.graphArgs;
+
+    NN_TensorDesc* tensorDesc = nullptr;
+    std::vector<NN_TensorDesc*> tensorDescVec;
+
+    for (size_t i = 0; i < graphArgs.operands.size(); i++) {
+        const OHNNOperandTest &operandTem = graphArgs.operands[i];
+        tensorDesc = createTensorDesc(operandTem.shape.data(),
+                                      (uint32_t) operandTem.shape.size(),
+                                      operandTem.dataType, operandTem.format);
+        tensorDescVec.emplace_back(tensorDesc);
+        EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc));
+        EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type));
+
+        if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) !=
+            graphArgs.paramIndices.end()) {
+            EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length));
+        }
+    }
+
+    FreeTensorDescVec(tensorDescVec);
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_ReduceSum_Model_SetOperandValue_03
+ * @tc.desc: 设置操作数值,length为0
+ * @tc.type: FUNC
+ */
+HWTEST_F(ReduceSumTest, SUB_AI_NNRt_Func_North_ReduceSum_Model_SetOperandValue_03, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    ReduceSumModel1 reduceSumModel;
+    OHNNGraphArgs graphArgs = reduceSumModel.graphArgs;
+
+    NN_TensorDesc* tensorDesc = nullptr;
+    std::vector<NN_TensorDesc*> tensorDescVec;
+
+    for (size_t i = 0; i < graphArgs.operands.size(); i++) {
+        const OHNNOperandTest &operandTem = graphArgs.operands[i];
+        tensorDesc = createTensorDesc(operandTem.shape.data(),
+                                      (uint32_t) operandTem.shape.size(),
+                                      operandTem.dataType, operandTem.format);
+        tensorDescVec.emplace_back(tensorDesc);
+        EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc));
+        EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type));
+
+        if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) !=
+            graphArgs.paramIndices.end()) {
+            EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0));
+        }
+    }
+
+    FreeTensorDescVec(tensorDescVec);
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_ReduceSum_Model_SpecifyInputsAndOutputs_01
+ * @tc.desc: 设置输入输出,inputIndices为nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(ReduceSumTest, SUB_AI_NNRt_Func_North_ReduceSum_Model_SpecifyInputsAndOutputs_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceSumModel1 reduceSumModel; + OHNNGraphArgs graphArgs = reduceSumModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceSum_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumTest, SUB_AI_NNRt_Func_North_ReduceSum_Model_SpecifyInputsAndOutputs_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceSumModel1 reduceSumModel; + OHNNGraphArgs graphArgs = reduceSumModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceSum_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumTest, SUB_AI_NNRt_Func_North_ReduceSum_Model_SpecifyInputsAndOutputs_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceSumModel1 reduceSumModel; + OHNNGraphArgs graphArgs = reduceSumModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceSum_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumTest, SUB_AI_NNRt_Func_North_ReduceSum_Model_SpecifyInputsAndOutputs_04, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceSumModel1 reduceSumModel; + OHNNGraphArgs graphArgs = reduceSumModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceSum_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ 
+HWTEST_F(ReduceSumTest, SUB_AI_NNRt_Func_North_ReduceSum_Model_SpecifyInputsAndOutputs_05, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceSumModel1 reduceSumModel; + OHNNGraphArgs graphArgs = reduceSumModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceSum_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumTest, SUB_AI_NNRt_Func_North_ReduceSum_Model_SpecifyInputsAndOutputs_06, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceSumModel1 reduceSumModel; + OHNNGraphArgs graphArgs = reduceSumModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceSum_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumTest, SUB_AI_NNRt_Func_North_ReduceSum_Model_SpecifyInputsAndOutputs_07, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceSumModel1 reduceSumModel; + OHNNGraphArgs graphArgs = reduceSumModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceSum_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumTest, SUB_AI_NNRt_Func_North_ReduceSum_Model_SpecifyInputsAndOutputs_08, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceSumModel1 reduceSumModel; + OHNNGraphArgs graphArgs = reduceSumModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceSum_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + 
* @tc.type: FUNC + */ +HWTEST_F(ReduceSumTest, SUB_AI_NNRt_Func_North_ReduceSum_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceSumModel1 reduceSumModel; + OHNNGraphArgs graphArgs = reduceSumModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceSum_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumTest, SUB_AI_NNRt_Func_North_ReduceSum_Model_AddOperation_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceSumModel1 reduceSumModel; + OHNNGraphArgs graphArgs = reduceSumModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceSum_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumTest, SUB_AI_NNRt_Func_North_ReduceSum_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceSumModel1 reduceSumModel; + OHNNGraphArgs graphArgs = reduceSumModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceSum_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumTest, SUB_AI_NNRt_Func_North_ReduceSum_Model_AddOperation_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceSumModel1 reduceSumModel; + OHNNGraphArgs graphArgs = reduceSumModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = 
TransformUInt32Array(graphArgs.outputIndices);
+    paramIndices.size = 0;
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        &paramIndices, &inputIndices, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_ReduceSum_Model_AddOperation_05
+ * @tc.desc: 添加算子,inputindices为nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(ReduceSumTest, SUB_AI_NNRt_Func_North_ReduceSum_Model_AddOperation_05, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    ReduceSumModel1 reduceSumModel;
+    OHNNGraphArgs graphArgs = reduceSumModel.graphArgs;
+    graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        &paramIndices, nullptr, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_ReduceSum_Model_AddOperation_06
+ * @tc.desc: 添加算子,inputindices中data为nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(ReduceSumTest, SUB_AI_NNRt_Func_North_ReduceSum_Model_AddOperation_06, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    ReduceSumModel1 reduceSumModel;
+    OHNNGraphArgs graphArgs = reduceSumModel.graphArgs;
+    graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto inputIndices = TransformUInt32Array(graphArgs.inputIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    inputIndices.data = nullptr;
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        &paramIndices, &inputIndices, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_ReduceSum_Model_AddOperation_07
+ * @tc.desc: 添加算子,inputindices中data对应序号不存在
+ * @tc.type: FUNC
+ */
+HWTEST_F(ReduceSumTest, SUB_AI_NNRt_Func_North_ReduceSum_Model_AddOperation_07, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    ReduceSumModel1 reduceSumModel;
+    OHNNGraphArgs graphArgs = reduceSumModel.graphArgs;
+    graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    graphArgs.inputIndices = {100000};
+    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto inputIndices = TransformUInt32Array(graphArgs.inputIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        &paramIndices, &inputIndices, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_ReduceSum_Model_AddOperation_08
+ * @tc.desc: 添加算子,inputindices中size为0
+ * @tc.type: FUNC
+ */
+HWTEST_F(ReduceSumTest, SUB_AI_NNRt_Func_North_ReduceSum_Model_AddOperation_08, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    ReduceSumModel1 reduceSumModel;
+    OHNNGraphArgs graphArgs = 
reduceSumModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ReduceSum_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ReduceSumTest, SUB_AI_NNRt_Func_North_ReduceSum_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ReduceSumModel1 reduceSumModel; + OHNNGraphArgs graphArgs = reduceSumModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/round_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/round_test.cpp new file mode 100644 index 0000000..ba593cb --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/round_test.cpp @@ -0,0 +1,757 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include <algorithm>
+#include <vector>
+#include <gtest/gtest.h>
+#include "nncore_utils.h"
+
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Test;
+class RoundTest : public testing::Test {};
+
+struct RoundModel1 {
+    const std::vector<int32_t> tensor_shape = {6};
+    float inputValue[6] = {-2.5, -1.5, -0.5, 0.5, 1.5, 2.5};
+    float outputValue[6] = {0};
+
+    OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 6*sizeof(float)};
+    OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 6*sizeof(float)};
+    OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_ROUND,
+        .operands = {input, output},
+        .paramIndices = {},
+        .inputIndices = {0},
+        .outputIndices = {1}};
+};
+
+
+struct RoundModel2 {
+    const std::vector<int32_t> tensor_shape = {4};
+    float inputValue[4] = {-1.7, -3.3, 0, 5.5};
+    float outputValue[4] = {0};
+
+    OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 4*sizeof(float)};
+    OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 4*sizeof(float)};
+    OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_ROUND,
+        .operands = {input, output},
+        .paramIndices = {},
+        .inputIndices = {0},
+        .outputIndices = {1}};
+};
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Round_Build_01
+ * @tc.desc: RoundModel1模型build测试
+ * @tc.type: FUNC
+ */
+HWTEST_F(RoundTest, SUB_AI_NNRt_Func_North_Round_Build_01, Function | MediumTest | Level1)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    RoundModel1 roundModel;
+    OHNNGraphArgs graphArgs = roundModel.graphArgs;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    EXPECT_NE(nullptr, compilation);
+
+    OHNNCompileParam compileParam{
+        .performanceMode = OH_NN_PERFORMANCE_HIGH,
+        .priority = OH_NN_PRIORITY_HIGH,
+    };
+    EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam));
+
+    OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation);
+    EXPECT_NE(nullptr, executor);
+
+    Free(model, compilation, executor);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Round_Build_02
+ * @tc.desc: RoundModel2模型build测试
+ * @tc.type: FUNC
+ */
+HWTEST_F(RoundTest, SUB_AI_NNRt_Func_North_Round_Build_02, Function | MediumTest | Level1)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    RoundModel2 roundModel;
+    OHNNGraphArgs graphArgs = roundModel.graphArgs;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
+    EXPECT_NE(nullptr, compilation);
+
+    OHNNCompileParam compileParam{
+        .performanceMode = OH_NN_PERFORMANCE_HIGH,
+        .priority = OH_NN_PRIORITY_HIGH,
+    };
+    EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam));
+
+    OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation);
+    EXPECT_NE(nullptr, executor);
+
+    Free(model, compilation, executor);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Round_Build_03
+ * @tc.desc: RoundModel1模型输入Tensor+1进行build测试
+ * @tc.type: FUNC
+ */
+HWTEST_F(RoundTest, SUB_AI_NNRt_Func_North_Round_Build_03, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    RoundModel1 roundModel;
+    OHNNGraphArgs graphArgs = roundModel.graphArgs;
+    graphArgs.operands = {roundModel.input, roundModel.input, roundModel.output};
+    graphArgs.inputIndices = {0, 1};
+    graphArgs.outputIndices = {2};
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER,
BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Round_Build_04 + * @tc.desc: RoundModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(RoundTest, SUB_AI_NNRt_Func_North_Round_Build_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RoundModel1 roundModel; + OHNNGraphArgs graphArgs = roundModel.graphArgs; + graphArgs.operands = {roundModel.input, roundModel.output, roundModel.output}; + graphArgs.inputIndices = {0}; + graphArgs.outputIndices = {1, 2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Round_Build_05 + * @tc.desc: RoundModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(RoundTest, SUB_AI_NNRt_Func_North_Round_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RoundModel1 roundModel; + OHNNGraphArgs graphArgs = roundModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {roundModel.input, roundModel.output, activation}; + graphArgs.paramIndices = {2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Round_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(RoundTest, SUB_AI_NNRt_Func_North_Round_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Round_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(RoundTest, SUB_AI_NNRt_Func_North_Round_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RoundModel1 roundModel; + OHNNGraphArgs graphArgs = roundModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Round_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(RoundTest, SUB_AI_NNRt_Func_North_Round_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RoundModel1 roundModel; + OHNNGraphArgs graphArgs = roundModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Round_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(RoundTest, SUB_AI_NNRt_Func_North_Round_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RoundModel1 roundModel; + OHNNGraphArgs graphArgs = roundModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = 
createTensorDesc(operandTem.shape.data(),
+                                      (uint32_t) operandTem.shape.size(),
+                                      operandTem.dataType, operandTem.format);
+        tensorDescVec.emplace_back(tensorDesc);
+        EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc));
+        EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type));
+
+        if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) !=
+            graphArgs.paramIndices.end()) {
+            EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(
+                model, 1000+i, operandTem.data, operandTem.length));
+        }
+    }
+
+    FreeTensorDescVec(tensorDescVec);
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Round_Model_SetOperandValue_02
+ * @tc.desc: 设置操作数值,buffer为nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(RoundTest, SUB_AI_NNRt_Func_North_Round_Model_SetOperandValue_02, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    RoundModel1 roundModel;
+    OHNNGraphArgs graphArgs = roundModel.graphArgs;
+
+    NN_TensorDesc* tensorDesc = nullptr;
+    std::vector<NN_TensorDesc*> tensorDescVec;
+
+    for (size_t i = 0; i < graphArgs.operands.size(); i++) {
+        const OHNNOperandTest &operandTem = graphArgs.operands[i];
+        tensorDesc = createTensorDesc(operandTem.shape.data(),
+                                      (uint32_t) operandTem.shape.size(),
+                                      operandTem.dataType, operandTem.format);
+        tensorDescVec.emplace_back(tensorDesc);
+        EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc));
+        EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type));
+
+        if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) !=
+            graphArgs.paramIndices.end()) {
+            EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length));
+        }
+    }
+
+    FreeTensorDescVec(tensorDescVec);
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Round_Model_SetOperandValue_03
+ * @tc.desc: 设置操作数值,length为0
+ * @tc.type: FUNC
+ */
+HWTEST_F(RoundTest, SUB_AI_NNRt_Func_North_Round_Model_SetOperandValue_03, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    RoundModel1 roundModel;
+    OHNNGraphArgs graphArgs = roundModel.graphArgs;
+
+    NN_TensorDesc* tensorDesc = nullptr;
+    std::vector<NN_TensorDesc*> tensorDescVec;
+
+    for (size_t i = 0; i < graphArgs.operands.size(); i++) {
+        const OHNNOperandTest &operandTem = graphArgs.operands[i];
+        tensorDesc = createTensorDesc(operandTem.shape.data(),
+                                      (uint32_t) operandTem.shape.size(),
+                                      operandTem.dataType, operandTem.format);
+        tensorDescVec.emplace_back(tensorDesc);
+        EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc));
+        EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type));
+
+        if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) !=
+            graphArgs.paramIndices.end()) {
+            EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0));
+        }
+    }
+
+    FreeTensorDescVec(tensorDescVec);
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Round_Model_SpecifyInputsAndOutputs_01
+ * @tc.desc: 设置输入输出,inputIndices为nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(RoundTest, SUB_AI_NNRt_Func_North_Round_Model_SpecifyInputsAndOutputs_01, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    RoundModel1 roundModel;
+    OHNNGraphArgs graphArgs = roundModel.graphArgs;
+    graphArgs.specifyIO =
false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Round_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(RoundTest, SUB_AI_NNRt_Func_North_Round_Model_SpecifyInputsAndOutputs_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RoundModel1 roundModel; + OHNNGraphArgs graphArgs = roundModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Round_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(RoundTest, SUB_AI_NNRt_Func_North_Round_Model_SpecifyInputsAndOutputs_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RoundModel1 roundModel; + OHNNGraphArgs graphArgs = roundModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Round_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(RoundTest, SUB_AI_NNRt_Func_North_Round_Model_SpecifyInputsAndOutputs_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RoundModel1 roundModel; + OHNNGraphArgs graphArgs = roundModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Round_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(RoundTest, SUB_AI_NNRt_Func_North_Round_Model_SpecifyInputsAndOutputs_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RoundModel1 roundModel; + OHNNGraphArgs graphArgs = roundModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + 
auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Round_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(RoundTest, SUB_AI_NNRt_Func_North_Round_Model_SpecifyInputsAndOutputs_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RoundModel1 roundModel; + OHNNGraphArgs graphArgs = roundModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Round_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(RoundTest, SUB_AI_NNRt_Func_North_Round_Model_SpecifyInputsAndOutputs_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RoundModel1 roundModel; + OHNNGraphArgs graphArgs = roundModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Round_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(RoundTest, SUB_AI_NNRt_Func_North_Round_Model_SpecifyInputsAndOutputs_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RoundModel1 roundModel; + OHNNGraphArgs graphArgs = roundModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Round_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(RoundTest, SUB_AI_NNRt_Func_North_Round_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RoundModel1 roundModel; + OHNNGraphArgs graphArgs = roundModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = 
TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Round_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(RoundTest, SUB_AI_NNRt_Func_North_Round_Model_AddOperation_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RoundModel1 roundModel; + OHNNGraphArgs graphArgs = roundModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Round_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(RoundTest, SUB_AI_NNRt_Func_North_Round_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RoundModel1 roundModel; + OHNNGraphArgs graphArgs = roundModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Round_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(RoundTest, SUB_AI_NNRt_Func_North_Round_Model_AddOperation_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RoundModel1 roundModel; + OHNNGraphArgs graphArgs = roundModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Round_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(RoundTest, SUB_AI_NNRt_Func_North_Round_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RoundModel1 roundModel; + 
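// The add-operation, IO and build steps are switched off below so that OH_NNModel_AddOperation can be exercised directly with a null inputIndices. +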
OHNNGraphArgs graphArgs = roundModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Round_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(RoundTest, SUB_AI_NNRt_Func_North_Round_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RoundModel1 roundModel; + OHNNGraphArgs graphArgs = roundModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Round_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(RoundTest, SUB_AI_NNRt_Func_North_Round_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RoundModel1 roundModel; + OHNNGraphArgs graphArgs = roundModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Round_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(RoundTest, SUB_AI_NNRt_Func_North_Round_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RoundModel1 roundModel; + OHNNGraphArgs graphArgs = roundModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Round_Model_AddOperation_09 + * @tc.desc: 
添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(RoundTest, SUB_AI_NNRt_Func_North_Round_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + RoundModel1 roundModel; + OHNNGraphArgs graphArgs = roundModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/scatter_nd_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/scatter_nd_test.cpp new file mode 100644 index 0000000..8964156 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/scatter_nd_test.cpp @@ -0,0 +1,880 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class ScatterNdTest : public testing::Test {}; + +struct ScatterNdModel1 { + const std::vector indices_shape = {2, 1}; + const std::vector updates_shape = {2}; + const std::vector shape_shape = {1}; + const std::vector output_shape = {3}; + float indicesValue[2][1] = {{0}, {2}}; + float updatesValue[2] = {1, 3}; + float shapeValue[1] = {3}; + float outputValue[3] = {0}; + + OHNNOperandTest indices = {OH_NN_FLOAT32, OH_NN_TENSOR, indices_shape, indicesValue, 4*sizeof(float)}; + OHNNOperandTest updates = {OH_NN_FLOAT32, OH_NN_TENSOR, updates_shape, updatesValue, 2*sizeof(float)}; + OHNNOperandTest shape = {OH_NN_FLOAT32, OH_NN_TENSOR, shape_shape, shapeValue, sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 3*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_SCATTER_ND, + .operands = {indices, updates, shape, output}, + .paramIndices = {}, + .inputIndices = {0, 1, 2}, + .outputIndices = {3}}; +}; + +struct ScatterNdModel2 { + const std::vector indices_shape = {2, 2}; + const std::vector updates_shape = {2}; + const std::vector shape_shape = {2}; + const std::vector output_shape = {3}; + float indicesValue[2][2] = {{1, 0}, {0, 1}}; + float updatesValue[2] = {4, 5}; + float shapeValue[2] = {2, 2}; + float outputValue[2][2] = {0}; + + OHNNOperandTest indices = {OH_NN_FLOAT32, OH_NN_TENSOR, indices_shape, indicesValue, 4*sizeof(float)}; + OHNNOperandTest updates = {OH_NN_FLOAT32, OH_NN_TENSOR, updates_shape, updatesValue, 2*sizeof(float)}; + OHNNOperandTest shape = {OH_NN_FLOAT32, OH_NN_TENSOR, shape_shape, shapeValue, 2*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 
4*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_SCATTER_ND, + .operands = {indices, updates, shape, output}, + .paramIndices = {}, + .inputIndices = {0, 1, 2}, + .outputIndices = {3}}; +}; + +struct ScatterNdModel3 { + const std::vector indices_shape = {2, 2}; + const std::vector updates_shape = {2, 2}; + const std::vector shape_shape = {2}; + const std::vector output_shape = {2, 2}; + float indicesValue[2][2] = {{0, 1}, {1, 0}}; + float updatesValue[2][2] = {{6, 7}, {8, 9}}; + float shapeValue[2] = {2, 2}; + float outputValue[2][2] = {0}; + + OHNNOperandTest indices = {OH_NN_FLOAT32, OH_NN_TENSOR, indices_shape, indicesValue, 4*sizeof(float)}; + OHNNOperandTest updates = {OH_NN_FLOAT32, OH_NN_TENSOR, updates_shape, updatesValue, 4*sizeof(float)}; + OHNNOperandTest shape = {OH_NN_FLOAT32, OH_NN_TENSOR, shape_shape, shapeValue, 2*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 4*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_SCATTER_ND, + .operands = {indices, updates, shape, output}, + .paramIndices = {}, + .inputIndices = {0, 1, 2}, + .outputIndices = {3}}; +}; + +struct ScatterNdModel4 { + const std::vector indices_shape = {2, 3}; + const std::vector updates_shape = {2}; + const std::vector shape_shape = {3}; + const std::vector output_shape = {2, 2, 2}; + float indicesValue[2][3] = {{0, 0, 1}, {1, 1, 0}}; + float updatesValue[2] = {10, 11}; + float shapeValue[3] = {2, 2, 2}; + float outputValue[2][2][2] = {0}; + + OHNNOperandTest indices = {OH_NN_FLOAT32, OH_NN_TENSOR, indices_shape, indicesValue, 6*sizeof(float)}; + OHNNOperandTest updates = {OH_NN_FLOAT32, OH_NN_TENSOR, updates_shape, updatesValue, 2*sizeof(float)}; + OHNNOperandTest shape = {OH_NN_FLOAT32, OH_NN_TENSOR, shape_shape, shapeValue, 3*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 8*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_SCATTER_ND, + .operands = {indices, updates, shape, output}, + .paramIndices = {}, + .inputIndices = {0, 1, 2}, + .outputIndices = {3}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ScatterNd_Build_01 + * @tc.desc: ScatterNdModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ScatterNdTest, SUB_AI_NNRt_Func_North_ScatterNd_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ScatterNdModel1 scatterNdModel; + OHNNGraphArgs graphArgs = scatterNdModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ScatterNd_Build_02 + * @tc.desc: ScatterNdModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ScatterNdTest, SUB_AI_NNRt_Func_North_ScatterNd_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ScatterNdModel2 scatterNdModel; + OHNNGraphArgs graphArgs = scatterNdModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = 
OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ScatterNd_Build_03 + * @tc.desc: ScatterNdModel3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ScatterNdTest, SUB_AI_NNRt_Func_North_ScatterNd_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ScatterNdModel3 scatterNdModel; + OHNNGraphArgs graphArgs = scatterNdModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ScatterNd_Build_04 + * @tc.desc: ScatterNdModel4模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(ScatterNdTest, SUB_AI_NNRt_Func_North_ScatterNd_Build_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ScatterNdModel4 scatterNdModel; + OHNNGraphArgs graphArgs = scatterNdModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ScatterNd_Build_05 + * @tc.desc: ScatterNdModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ScatterNdTest, SUB_AI_NNRt_Func_North_ScatterNd_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ScatterNdModel1 scatterNdModel; + OHNNGraphArgs graphArgs = scatterNdModel.graphArgs; + graphArgs.operands = {scatterNdModel.indices, scatterNdModel.indices, scatterNdModel.updates, + scatterNdModel.shape, scatterNdModel.output}; + graphArgs.inputIndices = {0, 1, 2, 3}; + graphArgs.outputIndices = {4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ScatterNd_Build_06 + * @tc.desc: ScatterNdModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ScatterNdTest, SUB_AI_NNRt_Func_North_ScatterNd_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ScatterNdModel1 scatterNdModel; + OHNNGraphArgs graphArgs = scatterNdModel.graphArgs; + graphArgs.operands = {scatterNdModel.indices, scatterNdModel.updates, scatterNdModel.shape, + scatterNdModel.output, scatterNdModel.output}; + graphArgs.inputIndices = {0, 1, 2}; + 
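// Declaring a second output index for the single-output ScatterND graph is expected to make the build fail. +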
graphArgs.outputIndices = {3, 4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ScatterNd_Build_07 + * @tc.desc: ScatterNdModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(ScatterNdTest, SUB_AI_NNRt_Func_North_ScatterNd_Build_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ScatterNdModel1 scatterNdModel; + OHNNGraphArgs graphArgs = scatterNdModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {scatterNdModel.indices, scatterNdModel.updates, scatterNdModel.shape, + scatterNdModel.output, activation}; + graphArgs.paramIndices = {4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ScatterNd_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(ScatterNdTest, SUB_AI_NNRt_Func_North_ScatterNd_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ScatterNd_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(ScatterNdTest, SUB_AI_NNRt_Func_North_ScatterNd_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ScatterNdModel1 scatterNdModel; + OHNNGraphArgs graphArgs = scatterNdModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ScatterNd_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(ScatterNdTest, SUB_AI_NNRt_Func_North_ScatterNd_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ScatterNdModel1 scatterNdModel; + OHNNGraphArgs graphArgs = scatterNdModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ScatterNd_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(ScatterNdTest, SUB_AI_NNRt_Func_North_ScatterNd_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ScatterNdModel1 scatterNdModel; + OHNNGraphArgs graphArgs = scatterNdModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + 
graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ScatterNd_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ScatterNdTest, SUB_AI_NNRt_Func_North_ScatterNd_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ScatterNdModel1 scatterNdModel; + OHNNGraphArgs graphArgs = scatterNdModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ScatterNd_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(ScatterNdTest, SUB_AI_NNRt_Func_North_ScatterNd_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ScatterNdModel1 scatterNdModel; + OHNNGraphArgs graphArgs = scatterNdModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ScatterNd_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ScatterNdTest, SUB_AI_NNRt_Func_North_ScatterNd_Model_SpecifyInputsAndOutputs_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ScatterNdModel1 scatterNdModel; + OHNNGraphArgs graphArgs = scatterNdModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, 
&outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ScatterNd_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ScatterNdTest, SUB_AI_NNRt_Func_North_ScatterNd_Model_SpecifyInputsAndOutputs_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ScatterNdModel1 scatterNdModel; + OHNNGraphArgs graphArgs = scatterNdModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ScatterNd_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ScatterNdTest, SUB_AI_NNRt_Func_North_ScatterNd_Model_SpecifyInputsAndOutputs_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ScatterNdModel1 scatterNdModel; + OHNNGraphArgs graphArgs = scatterNdModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ScatterNd_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ScatterNdTest, SUB_AI_NNRt_Func_North_ScatterNd_Model_SpecifyInputsAndOutputs_04, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ScatterNdModel1 scatterNdModel; + OHNNGraphArgs graphArgs = scatterNdModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ScatterNd_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ScatterNdTest, SUB_AI_NNRt_Func_North_ScatterNd_Model_SpecifyInputsAndOutputs_05, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ScatterNdModel1 scatterNdModel; + OHNNGraphArgs graphArgs = scatterNdModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, 
&inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ScatterNd_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ScatterNdTest, SUB_AI_NNRt_Func_North_ScatterNd_Model_SpecifyInputsAndOutputs_06, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ScatterNdModel1 scatterNdModel; + OHNNGraphArgs graphArgs = scatterNdModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ScatterNd_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ScatterNdTest, SUB_AI_NNRt_Func_North_ScatterNd_Model_SpecifyInputsAndOutputs_07, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ScatterNdModel1 scatterNdModel; + OHNNGraphArgs graphArgs = scatterNdModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ScatterNd_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ScatterNdTest, SUB_AI_NNRt_Func_North_ScatterNd_Model_SpecifyInputsAndOutputs_08, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ScatterNdModel1 scatterNdModel; + OHNNGraphArgs graphArgs = scatterNdModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ScatterNd_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ScatterNdTest, SUB_AI_NNRt_Func_North_ScatterNd_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ScatterNdModel1 scatterNdModel; + OHNNGraphArgs graphArgs = scatterNdModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, 
graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ScatterNd_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ScatterNdTest, SUB_AI_NNRt_Func_North_ScatterNd_Model_AddOperation_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ScatterNdModel1 scatterNdModel; + OHNNGraphArgs graphArgs = scatterNdModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ScatterNd_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ScatterNdTest, SUB_AI_NNRt_Func_North_ScatterNd_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ScatterNdModel1 scatterNdModel; + OHNNGraphArgs graphArgs = scatterNdModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ScatterNd_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ScatterNdTest, SUB_AI_NNRt_Func_North_ScatterNd_Model_AddOperation_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ScatterNdModel1 scatterNdModel; + OHNNGraphArgs graphArgs = scatterNdModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ScatterNd_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ScatterNdTest, SUB_AI_NNRt_Func_North_ScatterNd_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ScatterNdModel1 scatterNdModel; + OHNNGraphArgs graphArgs = scatterNdModel.graphArgs; + graphArgs.addOperation = false; + 
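// The remaining graph-construction steps stay disabled so the OH_NNModel_AddOperation call below can be checked with a null inputIndices. +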
graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ScatterNd_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(ScatterNdTest, SUB_AI_NNRt_Func_North_ScatterNd_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ScatterNdModel1 scatterNdModel; + OHNNGraphArgs graphArgs = scatterNdModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ScatterNd_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(ScatterNdTest, SUB_AI_NNRt_Func_North_ScatterNd_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ScatterNdModel1 scatterNdModel; + OHNNGraphArgs graphArgs = scatterNdModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ScatterNd_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(ScatterNdTest, SUB_AI_NNRt_Func_North_ScatterNd_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ScatterNdModel1 scatterNdModel; + OHNNGraphArgs graphArgs = scatterNdModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + ¶mIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_ScatterNd_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr 
+ * @tc.type: FUNC + */ +HWTEST_F(ScatterNdTest, SUB_AI_NNRt_Func_North_ScatterNd_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + ScatterNdModel1 scatterNdModel; + OHNNGraphArgs graphArgs = scatterNdModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/select_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/select_test.cpp new file mode 100644 index 0000000..472d6dc --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/select_test.cpp @@ -0,0 +1,907 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class SelectTest : public testing::Test {}; + +struct SelectModel1 { + const std::vector tensor_shape = {1}; + bool input0Value[1] = {true}; + float input1Value[1] = {1}; + float input2Value[1] = {4}; + float outputValue[1] = {0}; + + OHNNOperandTest input0 = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, input0Value, sizeof(bool)}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input1Value, sizeof(float)}; + OHNNOperandTest input2 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input2Value, sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_SELECT, + .operands = {input0, input1, input2, output}, + .paramIndices = {}, + .inputIndices = {0, 1, 2}, + .outputIndices = {3}}; +}; + +struct SelectModel2 { + const std::vector tensor_shape = {3}; + bool input0Value[3] = {true, false, false}; + float input1Value[3] = {1, 2, 3}; + float input2Value[3] = {4, 5, 6}; + float outputValue[3] = {0}; + + OHNNOperandTest input0 = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, input0Value, 3*sizeof(bool)}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input1Value, 3*sizeof(float)}; + OHNNOperandTest input2 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input2Value, 3*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_SELECT, + .operands = {input0, input1, input2, output}, + .paramIndices = {}, + .inputIndices = {0, 1, 2}, + .outputIndices = {3}}; +}; + +struct SelectModel3 { + const std::vector input0_shape = {2}; + const std::vector input_shape = 
{3}; + bool input0Value[2] = {true, false}; + float input1Value[3] = {1, 2, 3}; + float input2Value[3] = {4, 5, 6}; + float outputValue[3] = {0}; + + OHNNOperandTest input0 = {OH_NN_BOOL, OH_NN_TENSOR, input0_shape, input0Value, 2*sizeof(bool)}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, input1Value, 3*sizeof(float)}; + OHNNOperandTest input2 = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, input2Value, 3*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, outputValue, 3*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_SELECT, + .operands = {input0, input1, input2, output}, + .paramIndices = {}, + .inputIndices = {0, 1, 2}, + .outputIndices = {3}}; +}; + +struct SelectModel4 { + const std::vector input0_shape = {2}; + const std::vector input1_shape = {3}; + const std::vector input2_shape = {4}; + bool input0Value[2] = {true, false}; + float input1Value[3] = {1, 2, 3}; + float input2Value[4] = {4, 5, 6, 7}; + float outputValue[3] = {0}; + + OHNNOperandTest input0 = {OH_NN_BOOL, OH_NN_TENSOR, input0_shape, input0Value, 2*sizeof(bool)}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, input1_shape, input1Value, 3*sizeof(float)}; + OHNNOperandTest input2 = {OH_NN_FLOAT32, OH_NN_TENSOR, input2_shape, input2Value, 4*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, input1_shape, outputValue, 3*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_SELECT, + .operands = {input0, input1, input2, output}, + .paramIndices = {}, + .inputIndices = {0, 1, 2}, + .outputIndices = {3}}; +}; + +struct SelectModel5 { + const std::vector tensor_shape = {1}; + bool* input0Value = {}; + float* input1Value = {}; + float* outputValue = {}; + + OHNNOperandTest input0 = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, input0Value, sizeof(bool)}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input1Value, sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_SELECT, + .operands = {input0, input1, input1, output}, + .paramIndices = {}, + .inputIndices = {0, 1, 2}, + .outputIndices = {3}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Select_Build_01 + * @tc.desc: SelectModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(SelectTest, SUB_AI_NNRt_Func_North_Select_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SelectModel1 selectModel; + OHNNGraphArgs graphArgs = selectModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Select_Build_02 + * @tc.desc: SelectModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(SelectTest, SUB_AI_NNRt_Func_North_Select_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SelectModel2 selectModel; + OHNNGraphArgs graphArgs = selectModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, 
BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Select_Build_03 + * @tc.desc: SelectModel3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(SelectTest, SUB_AI_NNRt_Func_North_Select_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SelectModel3 selectModel; + OHNNGraphArgs graphArgs = selectModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Select_Build_04 + * @tc.desc: SelectModel4模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(SelectTest, SUB_AI_NNRt_Func_North_Select_Build_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SelectModel4 selectModel; + OHNNGraphArgs graphArgs = selectModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Select_Build_05 + * @tc.desc: SelectModel5模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(SelectTest, SUB_AI_NNRt_Func_North_Select_Build_05, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SelectModel5 selectModel; + OHNNGraphArgs graphArgs = selectModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Select_Build_06 + * @tc.desc: SelectModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(SelectTest, SUB_AI_NNRt_Func_North_Select_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SelectModel1 selectModel; + OHNNGraphArgs graphArgs = selectModel.graphArgs; + graphArgs.operands = 
{selectModel.input0, selectModel.input1, selectModel.input2, + selectModel.input2, selectModel.output}; + graphArgs.inputIndices = {0, 1, 2, 3}; + graphArgs.outputIndices = {4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Select_Build_07 + * @tc.desc: SelectModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(SelectTest, SUB_AI_NNRt_Func_North_Select_Build_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SelectModel1 selectModel; + OHNNGraphArgs graphArgs = selectModel.graphArgs; + graphArgs.operands = {selectModel.input0, selectModel.input1, selectModel.input2, + selectModel.output, selectModel.output}; + graphArgs.inputIndices = {0, 1, 2}; + graphArgs.outputIndices = {3, 4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Select_Build_08 + * @tc.desc: SelectModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(SelectTest, SUB_AI_NNRt_Func_North_Select_Build_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SelectModel1 selectModel; + OHNNGraphArgs graphArgs = selectModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {selectModel.input0, selectModel.input1, selectModel.input2, selectModel.output, activation}; + graphArgs.paramIndices = {4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Select_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(SelectTest, SUB_AI_NNRt_Func_North_Select_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Select_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(SelectTest, SUB_AI_NNRt_Func_North_Select_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SelectModel1 selectModel; + OHNNGraphArgs graphArgs = selectModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Select_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(SelectTest, SUB_AI_NNRt_Func_North_Select_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SelectModel1 selectModel; + OHNNGraphArgs graphArgs = selectModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Select_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(SelectTest, SUB_AI_NNRt_Func_North_Select_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + 
EXPECT_NE(nullptr, model); + + SelectModel1 selectModel; + OHNNGraphArgs graphArgs = selectModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Select_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SelectTest, SUB_AI_NNRt_Func_North_Select_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SelectModel1 selectModel; + OHNNGraphArgs graphArgs = selectModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Select_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(SelectTest, SUB_AI_NNRt_Func_North_Select_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SelectModel1 selectModel; + OHNNGraphArgs graphArgs = selectModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Select_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 
设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SelectTest, SUB_AI_NNRt_Func_North_Select_Model_SpecifyInputsAndOutputs_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SelectModel1 selectModel; + OHNNGraphArgs graphArgs = selectModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Select_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SelectTest, SUB_AI_NNRt_Func_North_Select_Model_SpecifyInputsAndOutputs_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SelectModel1 selectModel; + OHNNGraphArgs graphArgs = selectModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Select_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(SelectTest, SUB_AI_NNRt_Func_North_Select_Model_SpecifyInputsAndOutputs_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SelectModel1 selectModel; + OHNNGraphArgs graphArgs = selectModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Select_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(SelectTest, SUB_AI_NNRt_Func_North_Select_Model_SpecifyInputsAndOutputs_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SelectModel1 selectModel; + OHNNGraphArgs graphArgs = selectModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Select_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SelectTest, 
SUB_AI_NNRt_Func_North_Select_Model_SpecifyInputsAndOutputs_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SelectModel1 selectModel; + OHNNGraphArgs graphArgs = selectModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Select_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SelectTest, SUB_AI_NNRt_Func_North_Select_Model_SpecifyInputsAndOutputs_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SelectModel1 selectModel; + OHNNGraphArgs graphArgs = selectModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Select_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(SelectTest, SUB_AI_NNRt_Func_North_Select_Model_SpecifyInputsAndOutputs_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SelectModel1 selectModel; + OHNNGraphArgs graphArgs = selectModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Select_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(SelectTest, SUB_AI_NNRt_Func_North_Select_Model_SpecifyInputsAndOutputs_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SelectModel1 selectModel; + OHNNGraphArgs graphArgs = selectModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Select_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SelectTest, SUB_AI_NNRt_Func_North_Select_Model_AddOperation_01, Function | MediumTest | Level2) +{ + 
OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SelectModel1 selectModel; + OHNNGraphArgs graphArgs = selectModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Select_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SelectTest, SUB_AI_NNRt_Func_North_Select_Model_AddOperation_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SelectModel1 selectModel; + OHNNGraphArgs graphArgs = selectModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Select_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(SelectTest, SUB_AI_NNRt_Func_North_Select_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SelectModel1 selectModel; + OHNNGraphArgs graphArgs = selectModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Select_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(SelectTest, SUB_AI_NNRt_Func_North_Select_Model_AddOperation_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SelectModel1 selectModel; + OHNNGraphArgs graphArgs = selectModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +}
+ +/** + * @tc.number : SUB_AI_NNRt_Func_North_Select_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SelectTest, SUB_AI_NNRt_Func_North_Select_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SelectModel1 selectModel; + OHNNGraphArgs graphArgs = selectModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Select_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SelectTest, SUB_AI_NNRt_Func_North_Select_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SelectModel1 selectModel; + OHNNGraphArgs graphArgs = selectModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Select_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(SelectTest, SUB_AI_NNRt_Func_North_Select_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SelectModel1 selectModel; + OHNNGraphArgs graphArgs = selectModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Select_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(SelectTest, SUB_AI_NNRt_Func_North_Select_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SelectModel1 selectModel; + OHNNGraphArgs graphArgs = selectModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); +
auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Select_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SelectTest, SUB_AI_NNRt_Func_North_Select_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SelectModel1 selectModel; + OHNNGraphArgs graphArgs = selectModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + &paramIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/sin_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/sin_test.cpp new file mode 100644 index 0000000..c97c723 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/sin_test.cpp @@ -0,0 +1,756 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class SinTest : public testing::Test {}; + +struct SinModel1 { + const std::vector tensor_shape = {3}; + float inputValue[3] = {0, 1, 2}; + float outputValue[3] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 3*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_SIN, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + + +struct SinModel2 { + const std::vector tensor_shape = {2}; + bool inputValue[2] = {false, true}; + float outputValue[2] = {0}; + + OHNNOperandTest input = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, inputValue, 2*sizeof(bool)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 2*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_SIN, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Sin_Build_01 + * @tc.desc: SinModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(SinTest, SUB_AI_NNRt_Func_North_Sin_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SinModel1 sinModel; + OHNNGraphArgs graphArgs = sinModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Sin_Build_02 + * @tc.desc: SinModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(SinTest, SUB_AI_NNRt_Func_North_Sin_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SinModel2 sinModel; + OHNNGraphArgs graphArgs = sinModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Sin_Build_03 + * @tc.desc: SinModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(SinTest, SUB_AI_NNRt_Func_North_Sin_Build_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SinModel1 sinModel; + OHNNGraphArgs graphArgs = sinModel.graphArgs; + graphArgs.operands = {sinModel.input, sinModel.input, sinModel.output}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * 
@tc.number : SUB_AI_NNRt_Func_North_Sin_Build_04 + * @tc.desc: SinModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(SinTest, SUB_AI_NNRt_Func_North_Sin_Build_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SinModel1 sinModel; + OHNNGraphArgs graphArgs = sinModel.graphArgs; + graphArgs.operands = {sinModel.input, sinModel.output, sinModel.output}; + graphArgs.inputIndices = {0}; + graphArgs.outputIndices = {1, 2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Sin_Build_05 + * @tc.desc: SinModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(SinTest, SUB_AI_NNRt_Func_North_Sin_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SinModel1 sinModel; + OHNNGraphArgs graphArgs = sinModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {sinModel.input, sinModel.output, activation}; + graphArgs.paramIndices = {2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Sin_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(SinTest, SUB_AI_NNRt_Func_North_Sin_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Sin_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(SinTest, SUB_AI_NNRt_Func_North_Sin_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SinModel1 sinModel; + OHNNGraphArgs graphArgs = sinModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Sin_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(SinTest, SUB_AI_NNRt_Func_North_Sin_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SinModel1 sinModel; + OHNNGraphArgs graphArgs = sinModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Sin_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(SinTest, SUB_AI_NNRt_Func_North_Sin_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SinModel1 sinModel; + OHNNGraphArgs graphArgs = sinModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, 
OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Sin_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SinTest, SUB_AI_NNRt_Func_North_Sin_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SinModel1 sinModel; + OHNNGraphArgs graphArgs = sinModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Sin_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(SinTest, SUB_AI_NNRt_Func_North_Sin_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SinModel1 sinModel; + OHNNGraphArgs graphArgs = sinModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Sin_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SinTest, SUB_AI_NNRt_Func_North_Sin_Model_SpecifyInputsAndOutputs_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SinModel1 sinModel; + OHNNGraphArgs graphArgs = sinModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = 
TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Sin_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SinTest, SUB_AI_NNRt_Func_North_Sin_Model_SpecifyInputsAndOutputs_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SinModel1 sinModel; + OHNNGraphArgs graphArgs = sinModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Sin_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(SinTest, SUB_AI_NNRt_Func_North_Sin_Model_SpecifyInputsAndOutputs_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SinModel1 sinModel; + OHNNGraphArgs graphArgs = sinModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Sin_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(SinTest, SUB_AI_NNRt_Func_North_Sin_Model_SpecifyInputsAndOutputs_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SinModel1 sinModel; + OHNNGraphArgs graphArgs = sinModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Sin_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SinTest, SUB_AI_NNRt_Func_North_Sin_Model_SpecifyInputsAndOutputs_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SinModel1 sinModel; + OHNNGraphArgs graphArgs = sinModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + 
+ Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Sin_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SinTest, SUB_AI_NNRt_Func_North_Sin_Model_SpecifyInputsAndOutputs_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SinModel1 sinModel; + OHNNGraphArgs graphArgs = sinModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Sin_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(SinTest, SUB_AI_NNRt_Func_North_Sin_Model_SpecifyInputsAndOutputs_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SinModel1 sinModel; + OHNNGraphArgs graphArgs = sinModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Sin_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(SinTest, SUB_AI_NNRt_Func_North_Sin_Model_SpecifyInputsAndOutputs_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SinModel1 sinModel; + OHNNGraphArgs graphArgs = sinModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Sin_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SinTest, SUB_AI_NNRt_Func_North_Sin_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SinModel1 sinModel; + OHNNGraphArgs graphArgs = sinModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Sin_Model_AddOperation_02 
+ * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SinTest, SUB_AI_NNRt_Func_North_Sin_Model_AddOperation_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SinModel1 sinModel; + OHNNGraphArgs graphArgs = sinModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Sin_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(SinTest, SUB_AI_NNRt_Func_North_Sin_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SinModel1 sinModel; + OHNNGraphArgs graphArgs = sinModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Sin_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(SinTest, SUB_AI_NNRt_Func_North_Sin_Model_AddOperation_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SinModel1 sinModel; + OHNNGraphArgs graphArgs = sinModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Sin_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SinTest, SUB_AI_NNRt_Func_North_Sin_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SinModel1 sinModel; + OHNNGraphArgs graphArgs = sinModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model,
graphArgs.operationType, + &paramIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Sin_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SinTest, SUB_AI_NNRt_Func_North_Sin_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SinModel1 sinModel; + OHNNGraphArgs graphArgs = sinModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Sin_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(SinTest, SUB_AI_NNRt_Func_North_Sin_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SinModel1 sinModel; + OHNNGraphArgs graphArgs = sinModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Sin_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(SinTest, SUB_AI_NNRt_Func_North_Sin_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SinModel1 sinModel; + OHNNGraphArgs graphArgs = sinModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Sin_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SinTest, SUB_AI_NNRt_Func_North_Sin_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SinModel1 sinModel; + OHNNGraphArgs graphArgs = sinModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto
paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + &paramIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/space_to_depth_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/space_to_depth_test.cpp new file mode 100644 index 0000000..6218400 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/space_to_depth_test.cpp @@ -0,0 +1,869 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class SpaceToDepthTest : public testing::Test {}; + +struct SpaceToDepthModel1 { + const std::vector input_shape = {1, 1, 4, 4}; + const std::vector output_shape = {1, 4, 2, 2}; + std::vector blockSizeValue = {2}; + float inputValue[1][1][4][4] = {1}; + float outputValue[1][4][2][2] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 16*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 16*sizeof(float)}; + OHNNOperandTest blockSize = {OH_NN_INT64, OH_NN_SPACE_TO_DEPTH_BLOCK_SIZE, {1}, &blockSizeValue, sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_SPACE_TO_DEPTH, + .operands = {input, output, blockSize}, + .paramIndices = {2}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct SpaceToDepthModel2 { + const std::vector input_shape = {2, 3, 8, 8}; + const std::vector output_shape = {2, 48, 2, 2}; + std::vector blockSizeValue = {4}; + float inputValue[2][3][8][8] = {1}; + float outputValue[2][48][2][2] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 384*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 384*sizeof(float)}; + OHNNOperandTest blockSize = {OH_NN_INT64, OH_NN_SPACE_TO_DEPTH_BLOCK_SIZE, {1}, &blockSizeValue, sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_SPACE_TO_DEPTH, + .operands = {input, output, blockSize}, + .paramIndices = {2}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct SpaceToDepthModel3 { + const std::vector input_shape = {1, 2, 6, 6}; + const std::vector output_shape = {1, 18, 2, 2}; + std::vector blockSizeValue = {3}; + float inputValue[1][2][6][6] = {1}; + float outputValue[1][18][2][2] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 72*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 72*sizeof(float)}; + OHNNOperandTest blockSize = {OH_NN_INT64, OH_NN_SPACE_TO_DEPTH_BLOCK_SIZE, {1}, &blockSizeValue, sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType =
OH_NN_OPS_SPACE_TO_DEPTH, + .operands = {input, output, blockSize}, + .paramIndices = {2}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct SpaceToDepthModel4 { + const std::vector input_shape = {1, 4, 2, 2}; + const std::vector output_shape = {1, 16, 1, 1}; + std::vector blockSizeValue = {2}; + float inputValue[1][4][2][2] = {1}; + float outputValue[1][16][1][1] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 16*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 16*sizeof(float)}; + OHNNOperandTest blockSize = {OH_NN_INT64, OH_NN_SPACE_TO_DEPTH_BLOCK_SIZE, {1}, &blockSizeValue, sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_SPACE_TO_DEPTH, + .operands = {input, output, blockSize}, + .paramIndices = {2}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SpaceToDepth_Build_01 + * @tc.desc: SpaceToDepthModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthTest, SUB_AI_NNRt_Func_North_SpaceToDepth_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SpaceToDepthModel1 spaceToDepthModel; + OHNNGraphArgs graphArgs = spaceToDepthModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SpaceToDepth_Build_02 + * @tc.desc: SpaceToDepthModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthTest, SUB_AI_NNRt_Func_North_SpaceToDepth_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SpaceToDepthModel2 spaceToDepthModel; + OHNNGraphArgs graphArgs = spaceToDepthModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SpaceToDepth_Build_03 + * @tc.desc: SpaceToDepthModel3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthTest, SUB_AI_NNRt_Func_North_SpaceToDepth_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SpaceToDepthModel3 spaceToDepthModel; + OHNNGraphArgs graphArgs = spaceToDepthModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor 
*executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SpaceToDepth_Build_04 + * @tc.desc: SpaceToDepthModel4模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthTest, SUB_AI_NNRt_Func_North_SpaceToDepth_Build_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SpaceToDepthModel4 spaceToDepthModel; + OHNNGraphArgs graphArgs = spaceToDepthModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SpaceToDepth_Build_05 + * @tc.desc: SpaceToDepthModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthTest, SUB_AI_NNRt_Func_North_SpaceToDepth_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SpaceToDepthModel1 spaceToDepthModel; + OHNNGraphArgs graphArgs = spaceToDepthModel.graphArgs; + graphArgs.operands = {spaceToDepthModel.input, spaceToDepthModel.input, + spaceToDepthModel.output, spaceToDepthModel.blockSize}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2}; + graphArgs.paramIndices = {3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SpaceToDepth_Build_06 + * @tc.desc: SpaceToDepthModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthTest, SUB_AI_NNRt_Func_North_SpaceToDepth_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SpaceToDepthModel1 spaceToDepthModel; + OHNNGraphArgs graphArgs = spaceToDepthModel.graphArgs; + graphArgs.operands = {spaceToDepthModel.input, spaceToDepthModel.output, + spaceToDepthModel.output, spaceToDepthModel.blockSize}; + graphArgs.inputIndices = {0}; + graphArgs.outputIndices = {1, 2}; + graphArgs.paramIndices = {3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SpaceToDepth_Build_07 + * @tc.desc: SpaceToDepthModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthTest, SUB_AI_NNRt_Func_North_SpaceToDepth_Build_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SpaceToDepthModel1 spaceToDepthModel; + OHNNGraphArgs graphArgs = spaceToDepthModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {spaceToDepthModel.input, spaceToDepthModel.output, + spaceToDepthModel.blockSize, activation}; + graphArgs.paramIndices = {2, 3}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SpaceToDepth_Model_Finish_01 + * @tc.desc: 
模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthTest, SUB_AI_NNRt_Func_North_SpaceToDepth_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SpaceToDepth_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthTest, SUB_AI_NNRt_Func_North_SpaceToDepth_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SpaceToDepthModel1 spaceToDepthModel; + OHNNGraphArgs graphArgs = spaceToDepthModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SpaceToDepth_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthTest, SUB_AI_NNRt_Func_North_SpaceToDepth_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SpaceToDepthModel1 spaceToDepthModel; + OHNNGraphArgs graphArgs = spaceToDepthModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SpaceToDepth_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthTest, SUB_AI_NNRt_Func_North_SpaceToDepth_Model_SetOperandValue_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SpaceToDepthModel1 spaceToDepthModel; + OHNNGraphArgs graphArgs = spaceToDepthModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SpaceToDepth_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthTest, SUB_AI_NNRt_Func_North_SpaceToDepth_Model_SetOperandValue_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SpaceToDepthModel1 spaceToDepthModel; + OHNNGraphArgs graphArgs = spaceToDepthModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + 
tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SpaceToDepth_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthTest, SUB_AI_NNRt_Func_North_SpaceToDepth_Model_SetOperandValue_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SpaceToDepthModel1 spaceToDepthModel; + OHNNGraphArgs graphArgs = spaceToDepthModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SpaceToDepth_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthTest, SUB_AI_NNRt_Func_North_SpaceToDepth_Model_SpecifyInputsAndOutputs_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SpaceToDepthModel1 spaceToDepthModel; + OHNNGraphArgs graphArgs = spaceToDepthModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SpaceToDepth_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthTest, SUB_AI_NNRt_Func_North_SpaceToDepth_Model_SpecifyInputsAndOutputs_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SpaceToDepthModel1 spaceToDepthModel; + OHNNGraphArgs graphArgs = spaceToDepthModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, 
nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SpaceToDepth_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthTest, SUB_AI_NNRt_Func_North_SpaceToDepth_Model_SpecifyInputsAndOutputs_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SpaceToDepthModel1 spaceToDepthModel; + OHNNGraphArgs graphArgs = spaceToDepthModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SpaceToDepth_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthTest, SUB_AI_NNRt_Func_North_SpaceToDepth_Model_SpecifyInputsAndOutputs_04, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SpaceToDepthModel1 spaceToDepthModel; + OHNNGraphArgs graphArgs = spaceToDepthModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SpaceToDepth_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthTest, SUB_AI_NNRt_Func_North_SpaceToDepth_Model_SpecifyInputsAndOutputs_05, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SpaceToDepthModel1 spaceToDepthModel; + OHNNGraphArgs graphArgs = spaceToDepthModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SpaceToDepth_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthTest, SUB_AI_NNRt_Func_North_SpaceToDepth_Model_SpecifyInputsAndOutputs_06, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SpaceToDepthModel1 spaceToDepthModel; + OHNNGraphArgs graphArgs = spaceToDepthModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, 
OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SpaceToDepth_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthTest, SUB_AI_NNRt_Func_North_SpaceToDepth_Model_SpecifyInputsAndOutputs_07, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SpaceToDepthModel1 spaceToDepthModel; + OHNNGraphArgs graphArgs = spaceToDepthModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SpaceToDepth_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthTest, SUB_AI_NNRt_Func_North_SpaceToDepth_Model_SpecifyInputsAndOutputs_08, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SpaceToDepthModel1 spaceToDepthModel; + OHNNGraphArgs graphArgs = spaceToDepthModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SpaceToDepth_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthTest, SUB_AI_NNRt_Func_North_SpaceToDepth_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SpaceToDepthModel1 spaceToDepthModel; + OHNNGraphArgs graphArgs = spaceToDepthModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SpaceToDepth_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthTest, SUB_AI_NNRt_Func_North_SpaceToDepth_Model_AddOperation_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SpaceToDepthModel1 spaceToDepthModel; + OHNNGraphArgs graphArgs = spaceToDepthModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto 
inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SpaceToDepth_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthTest, SUB_AI_NNRt_Func_North_SpaceToDepth_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SpaceToDepthModel1 spaceToDepthModel; + OHNNGraphArgs graphArgs = spaceToDepthModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SpaceToDepth_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthTest, SUB_AI_NNRt_Func_North_SpaceToDepth_Model_AddOperation_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SpaceToDepthModel1 spaceToDepthModel; + OHNNGraphArgs graphArgs = spaceToDepthModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SpaceToDepth_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthTest, SUB_AI_NNRt_Func_North_SpaceToDepth_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SpaceToDepthModel1 spaceToDepthModel; + OHNNGraphArgs graphArgs = spaceToDepthModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SpaceToDepth_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthTest, SUB_AI_NNRt_Func_North_SpaceToDepth_Model_AddOperation_06, Function | MediumTest | Level2) 
+{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SpaceToDepthModel1 spaceToDepthModel; + OHNNGraphArgs graphArgs = spaceToDepthModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SpaceToDepth_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthTest, SUB_AI_NNRt_Func_North_SpaceToDepth_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SpaceToDepthModel1 spaceToDepthModel; + OHNNGraphArgs graphArgs = spaceToDepthModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SpaceToDepth_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthTest, SUB_AI_NNRt_Func_North_SpaceToDepth_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SpaceToDepthModel1 spaceToDepthModel; + OHNNGraphArgs graphArgs = spaceToDepthModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SpaceToDepth_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SpaceToDepthTest, SUB_AI_NNRt_Func_North_SpaceToDepth_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SpaceToDepthModel1 spaceToDepthModel; + OHNNGraphArgs graphArgs = spaceToDepthModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + 
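+ // Negative case: the call below passes nullptr for both the model handle and the output-index array, and OH_NNModel_AddOperation is expected to return OH_NN_INVALID_PARAMETER.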
EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + &paramIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/sparse_to_dense_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/sparse_to_dense_test.cpp new file mode 100644 index 0000000..b0e6c1c --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/sparse_to_dense_test.cpp @@ -0,0 +1,986 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class SparseToDenseTest : public testing::Test {}; + +struct SparseToDenseModel1 { + const std::vector indices_shape = {2, 2}; + const std::vector value_shape = {2}; + const std::vector output_shape = {2, 3}; + + float indicesValue[2][2] = {{0, 0}, {1, 2}}; + float valueValue[2] = {1, 2}; + float sparseShapeValue[2] = {2, 3}; + float outputValue[2][3] = {0}; + + OHNNOperandTest indices = {OH_NN_FLOAT32, OH_NN_TENSOR, indices_shape, indicesValue, 4*sizeof(bool)}; + OHNNOperandTest value = {OH_NN_FLOAT32, OH_NN_TENSOR, value_shape, valueValue, 2*sizeof(float)}; + OHNNOperandTest sparseDense = {OH_NN_FLOAT32, OH_NN_TENSOR, value_shape, sparseShapeValue, 2*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 6*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_SPARSE_TO_DENSE, + .operands = {indices, value, sparseDense, output}, + .paramIndices = {}, + .inputIndices = {0, 1, 2}, + .outputIndices = {3}}; +}; + +struct SparseToDenseModel2 { + const std::vector indices_shape = {3, 2}; + const std::vector value_shape = {3}; + const std::vector sparse_dense_shape = {2}; + const std::vector output_shape = {2, 3}; + + float indicesValue[3][2] = {{0, 1}, {1, 1}, {1, 2}}; + float valueValue[3] = {3, 4, 5}; + float sparseShapeValue[3] = {2, 3}; + float outputValue[2][3] = {0}; + + OHNNOperandTest indices = {OH_NN_FLOAT32, OH_NN_TENSOR, indices_shape, indicesValue, 6*sizeof(bool)}; + OHNNOperandTest value = {OH_NN_FLOAT32, OH_NN_TENSOR, value_shape, valueValue, 3*sizeof(float)}; + OHNNOperandTest sparseDense = {OH_NN_FLOAT32, OH_NN_TENSOR, sparse_dense_shape, sparseShapeValue, 2*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 6*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_SPARSE_TO_DENSE, + .operands = {indices, value, sparseDense, output}, + .paramIndices = {}, + .inputIndices = {0, 1, 2}, + .outputIndices = {3}}; +}; + +struct SparseToDenseModel3 { + const std::vector indices_shape = {2, 2}; + const std::vector value_shape = {2}; + const std::vector output_shape = {3, 4}; + + float indicesValue[2][2] = {{1, 0}, {0, 3}}; + float valueValue[2] = {9, 10}; + float sparseShapeValue[2] = {3, 4}; + float outputValue[3][4] = {0}; + 
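+ // Operand descriptors for the 3 x 4 dense-output case: two non-zero elements at positions {1, 0} and {0, 3} with values 9 and 10.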
OHNNOperandTest indices = {OH_NN_FLOAT32, OH_NN_TENSOR, indices_shape, indicesValue, 4*sizeof(bool)}; + OHNNOperandTest value = {OH_NN_FLOAT32, OH_NN_TENSOR, value_shape, valueValue, 2*sizeof(float)}; + OHNNOperandTest sparseDense = {OH_NN_FLOAT32, OH_NN_TENSOR, value_shape, sparseShapeValue, 2*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 12*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_SPARSE_TO_DENSE, + .operands = {indices, value, sparseDense, output}, + .paramIndices = {}, + .inputIndices = {0, 1, 2}, + .outputIndices = {3}}; +}; + +struct SparseToDenseModel4 { + const std::vector indices_shape = {4}; + const std::vector value_shape = {2}; + const std::vector output_shape = {2, 3}; + + float indicesValue[4] = {0, 0, 1, 2}; + float valueValue[2] = {1, 2}; + float sparseShapeValue[2] = {2, 3}; + float outputValue[2][3] = {0}; + + OHNNOperandTest indices = {OH_NN_FLOAT32, OH_NN_TENSOR, indices_shape, indicesValue, 4*sizeof(bool)}; + OHNNOperandTest value = {OH_NN_FLOAT32, OH_NN_TENSOR, value_shape, valueValue, 2*sizeof(float)}; + OHNNOperandTest sparseDense = {OH_NN_FLOAT32, OH_NN_TENSOR, value_shape, sparseShapeValue, 2*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 6*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_SPARSE_TO_DENSE, + .operands = {indices, value, sparseDense, output}, + .paramIndices = {}, + .inputIndices = {0, 1, 2}, + .outputIndices = {3}}; +}; + +struct SparseToDenseModel5 { + const std::vector indices_shape = {2, 2}; + const std::vector value_shape = {1, 2}; + const std::vector sparse_dense_shape = {2}; + const std::vector output_shape = {3, 4}; + + float indicesValue[2][2] = {{0, 0}, {1, 2}}; + float valueValue[1][2] = {{1, 2}}; + float sparseShapeValue[2] = {2, 3}; + float outputValue[2][3] = {0}; + + OHNNOperandTest indices = {OH_NN_FLOAT32, OH_NN_TENSOR, indices_shape, indicesValue, 4*sizeof(bool)}; + OHNNOperandTest value = {OH_NN_FLOAT32, OH_NN_TENSOR, value_shape, valueValue, 2*sizeof(float)}; + OHNNOperandTest sparseDense = {OH_NN_FLOAT32, OH_NN_TENSOR, sparse_dense_shape, sparseShapeValue, 2*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 6*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_SPARSE_TO_DENSE, + .operands = {indices, value, sparseDense, output}, + .paramIndices = {}, + .inputIndices = {0, 1, 2}, + .outputIndices = {3}}; +}; + +struct SparseToDenseModel6 { + const std::vector indices_shape = {2, 2}; + const std::vector value_shape = {2}; + const std::vector sparse_dense_shape = {3}; + const std::vector output_shape = {3, 4}; + + float indicesValue[2][2] = {{0, 0}, {1, 2}}; + float valueValue[2] = {1, 2}; + float sparseShapeValue[3] = {2, 3, 1}; + float outputValue[2][3][1] = {0}; + + OHNNOperandTest indices = {OH_NN_FLOAT32, OH_NN_TENSOR, indices_shape, indicesValue, 4*sizeof(bool)}; + OHNNOperandTest value = {OH_NN_FLOAT32, OH_NN_TENSOR, value_shape, valueValue, 2*sizeof(float)}; + OHNNOperandTest sparseDense = {OH_NN_FLOAT32, OH_NN_TENSOR, sparse_dense_shape, sparseShapeValue, 3*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 6*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_SPARSE_TO_DENSE, + .operands = {indices, value, sparseDense, output}, + .paramIndices = {}, + .inputIndices = {0, 1, 2}, + .outputIndices = {3}}; +}; + +/** + * @tc.number 
: SUB_AI_NNRt_Func_North_SparseToDense_Build_01 + * @tc.desc: SparseToDenseModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SparseToDenseModel1 sparseToDenseModel; + OHNNGraphArgs graphArgs = sparseToDenseModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SparseToDense_Build_02 + * @tc.desc: SparseToDenseModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SparseToDenseModel2 sparseToDenseModel; + OHNNGraphArgs graphArgs = sparseToDenseModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SparseToDense_Build_03 + * @tc.desc: SparseToDenseModel3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SparseToDenseModel3 sparseToDenseModel; + OHNNGraphArgs graphArgs = sparseToDenseModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SparseToDense_Build_04 + * @tc.desc: SparseToDenseModel4模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Build_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SparseToDenseModel4 sparseToDenseModel; + OHNNGraphArgs graphArgs = sparseToDenseModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, 
compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SparseToDense_Build_05 + * @tc.desc: SparseToDenseModel5模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Build_05, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SparseToDenseModel5 sparseToDenseModel; + OHNNGraphArgs graphArgs = sparseToDenseModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SparseToDense_Build_06 + * @tc.desc: SparseToDenseModel6模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Build_06, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SparseToDenseModel6 sparseToDenseModel; + OHNNGraphArgs graphArgs = sparseToDenseModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SparseToDense_Build_07 + * @tc.desc: SparseToDenseModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Build_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SparseToDenseModel1 sparseToDenseModel; + OHNNGraphArgs graphArgs = sparseToDenseModel.graphArgs; + graphArgs.operands = {sparseToDenseModel.indices, sparseToDenseModel.value, sparseToDenseModel.sparseDense, + sparseToDenseModel.value, sparseToDenseModel.output}; + graphArgs.inputIndices = {0, 1, 2, 3}; + graphArgs.outputIndices = {4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SparseToDense_Build_08 + * @tc.desc: SparseToDenseModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Build_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SparseToDenseModel1 sparseToDenseModel; + OHNNGraphArgs graphArgs = sparseToDenseModel.graphArgs; + graphArgs.operands = {sparseToDenseModel.indices, sparseToDenseModel.value, sparseToDenseModel.sparseDense, + sparseToDenseModel.output, sparseToDenseModel.output}; + graphArgs.inputIndices = {0, 1, 2}; + graphArgs.outputIndices = {3, 4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, 
BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SparseToDense_Build_09 + * @tc.desc: SparseToDenseModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Build_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SparseToDenseModel1 sparseToDenseModel; + OHNNGraphArgs graphArgs = sparseToDenseModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {sparseToDenseModel.indices, sparseToDenseModel.value, sparseToDenseModel.sparseDense, + sparseToDenseModel.output, activation}; + graphArgs.paramIndices = {4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SparseToDense_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SparseToDense_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SparseToDenseModel1 sparseToDenseModel; + OHNNGraphArgs graphArgs = sparseToDenseModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SparseToDense_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SparseToDenseModel1 sparseToDenseModel; + OHNNGraphArgs graphArgs = sparseToDenseModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SparseToDense_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Model_SetOperandValue_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SparseToDenseModel1 sparseToDenseModel; + OHNNGraphArgs graphArgs = sparseToDenseModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if 
(std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SparseToDense_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Model_SetOperandValue_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SparseToDenseModel1 sparseToDenseModel; + OHNNGraphArgs graphArgs = sparseToDenseModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SparseToDense_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Model_SetOperandValue_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SparseToDenseModel1 sparseToDenseModel; + OHNNGraphArgs graphArgs = sparseToDenseModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SparseToDense_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Model_SpecifyInputsAndOutputs_01, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SparseToDenseModel1 sparseToDenseModel; + OHNNGraphArgs graphArgs = sparseToDenseModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto 
outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SparseToDense_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Model_SpecifyInputsAndOutputs_02, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SparseToDenseModel1 sparseToDenseModel; + OHNNGraphArgs graphArgs = sparseToDenseModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SparseToDense_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Model_SpecifyInputsAndOutputs_03, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SparseToDenseModel1 sparseToDenseModel; + OHNNGraphArgs graphArgs = sparseToDenseModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SparseToDense_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Model_SpecifyInputsAndOutputs_04, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SparseToDenseModel1 sparseToDenseModel; + OHNNGraphArgs graphArgs = sparseToDenseModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SparseToDense_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Model_SpecifyInputsAndOutputs_05, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SparseToDenseModel1 sparseToDenseModel; + OHNNGraphArgs graphArgs = sparseToDenseModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, 
BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SparseToDense_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Model_SpecifyInputsAndOutputs_06, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SparseToDenseModel1 sparseToDenseModel; + OHNNGraphArgs graphArgs = sparseToDenseModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SparseToDense_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Model_SpecifyInputsAndOutputs_07, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SparseToDenseModel1 sparseToDenseModel; + OHNNGraphArgs graphArgs = sparseToDenseModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SparseToDense_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Model_SpecifyInputsAndOutputs_08, + Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SparseToDenseModel1 sparseToDenseModel; + OHNNGraphArgs graphArgs = sparseToDenseModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SparseToDense_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SparseToDenseModel1 sparseToDenseModel; + OHNNGraphArgs graphArgs = sparseToDenseModel.graphArgs; + 
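+ // The next steps disable addOperation/specifyIO/build so BuildSingleOpGraph only registers the tensors, letting OH_NNModel_AddOperation be exercised directly with a nullptr paramIndices argument.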
graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SparseToDense_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Model_AddOperation_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SparseToDenseModel1 sparseToDenseModel; + OHNNGraphArgs graphArgs = sparseToDenseModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SparseToDense_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SparseToDenseModel1 sparseToDenseModel; + OHNNGraphArgs graphArgs = sparseToDenseModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SparseToDense_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Model_AddOperation_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SparseToDenseModel1 sparseToDenseModel; + OHNNGraphArgs graphArgs = sparseToDenseModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : 
SUB_AI_NNRt_Func_North_SparseToDense_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SparseToDenseModel1 sparseToDenseModel; + OHNNGraphArgs graphArgs = sparseToDenseModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SparseToDense_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SparseToDenseModel1 sparseToDenseModel; + OHNNGraphArgs graphArgs = sparseToDenseModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SparseToDense_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SparseToDenseModel1 sparseToDenseModel; + OHNNGraphArgs graphArgs = sparseToDenseModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SparseToDense_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SparseToDenseModel1 sparseToDenseModel; + OHNNGraphArgs graphArgs = sparseToDenseModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, 
graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_SparseToDense_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SparseToDenseTest, SUB_AI_NNRt_Func_North_SparseToDense_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SparseToDenseModel1 sparseToDenseModel; + OHNNGraphArgs graphArgs = sparseToDenseModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + &paramIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/square_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/square_test.cpp new file mode 100644 index 0000000..09f533d --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/square_test.cpp @@ -0,0 +1,756 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class SquareTest : public testing::Test {}; + +struct SquareModel1 { + const std::vector tensor_shape = {5}; + float inputValue[5] = {1, 0, 3, -4, -5}; + float outputValue[5] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 5*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 5*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_SQUARE, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + + +struct SquareModel2 { + const std::vector tensor_shape = {}; + float* inputValue = {}; + float* outputValue = {}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 0*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 0*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_SQUARE, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Square_Build_01 + * @tc.desc: SquareModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(SquareTest, SUB_AI_NNRt_Func_North_Square_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SquareModel1 squareModel; + OHNNGraphArgs graphArgs = squareModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Square_Build_02 + * @tc.desc: SquareModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(SquareTest, SUB_AI_NNRt_Func_North_Square_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SquareModel2 squareModel; + OHNNGraphArgs graphArgs = squareModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Square_Build_03 + * @tc.desc: SquareModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(SquareTest, SUB_AI_NNRt_Func_North_Square_Build_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SquareModel1 squareModel; + OHNNGraphArgs graphArgs = squareModel.graphArgs; + graphArgs.operands = {squareModel.input, squareModel.input, squareModel.output}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, 
BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Square_Build_04 + * @tc.desc: SquareModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(SquareTest, SUB_AI_NNRt_Func_North_Square_Build_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SquareModel1 squareModel; + OHNNGraphArgs graphArgs = squareModel.graphArgs; + graphArgs.operands = {squareModel.input, squareModel.output, squareModel.output}; + graphArgs.inputIndices = {0}; + graphArgs.outputIndices = {1, 2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Square_Build_05 + * @tc.desc: SquareModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(SquareTest, SUB_AI_NNRt_Func_North_Square_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SquareModel1 squareModel; + OHNNGraphArgs graphArgs = squareModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {squareModel.input, squareModel.output, activation}; + graphArgs.paramIndices = {2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Square_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(SquareTest, SUB_AI_NNRt_Func_North_Square_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Square_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(SquareTest, SUB_AI_NNRt_Func_North_Square_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SquareModel1 squareModel; + OHNNGraphArgs graphArgs = squareModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Square_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(SquareTest, SUB_AI_NNRt_Func_North_Square_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SquareModel1 squareModel; + OHNNGraphArgs graphArgs = squareModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Square_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(SquareTest, SUB_AI_NNRt_Func_North_Square_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SquareModel1 squareModel; + OHNNGraphArgs graphArgs = squareModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + 
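+ // Create a tensor descriptor for each operand and attach it to the model; the negative check later in this loop targets a tensor index (1000 + i) that does not exist.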
tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Square_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SquareTest, SUB_AI_NNRt_Func_North_Square_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SquareModel1 squareModel; + OHNNGraphArgs graphArgs = squareModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Square_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(SquareTest, SUB_AI_NNRt_Func_North_Square_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SquareModel1 squareModel; + OHNNGraphArgs graphArgs = squareModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Square_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SquareTest, SUB_AI_NNRt_Func_North_Square_Model_SpecifyInputsAndOutputs_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SquareModel1 squareModel; + OHNNGraphArgs graphArgs = 
squareModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Square_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SquareTest, SUB_AI_NNRt_Func_North_Square_Model_SpecifyInputsAndOutputs_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SquareModel1 squareModel; + OHNNGraphArgs graphArgs = squareModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Square_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(SquareTest, SUB_AI_NNRt_Func_North_Square_Model_SpecifyInputsAndOutputs_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SquareModel1 squareModel; + OHNNGraphArgs graphArgs = squareModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Square_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(SquareTest, SUB_AI_NNRt_Func_North_Square_Model_SpecifyInputsAndOutputs_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SquareModel1 squareModel; + OHNNGraphArgs graphArgs = squareModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Square_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SquareTest, SUB_AI_NNRt_Func_North_Square_Model_SpecifyInputsAndOutputs_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SquareModel1 squareModel; + OHNNGraphArgs graphArgs = squareModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + 
EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Square_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SquareTest, SUB_AI_NNRt_Func_North_Square_Model_SpecifyInputsAndOutputs_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SquareModel1 squareModel; + OHNNGraphArgs graphArgs = squareModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Square_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(SquareTest, SUB_AI_NNRt_Func_North_Square_Model_SpecifyInputsAndOutputs_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SquareModel1 squareModel; + OHNNGraphArgs graphArgs = squareModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Square_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(SquareTest, SUB_AI_NNRt_Func_North_Square_Model_SpecifyInputsAndOutputs_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SquareModel1 squareModel; + OHNNGraphArgs graphArgs = squareModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Square_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SquareTest, SUB_AI_NNRt_Func_North_Square_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SquareModel1 squareModel; + OHNNGraphArgs graphArgs = squareModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + 
auto inputIndices = TransformUInt32Array(graphArgs.inputIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        nullptr, &inputIndices, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Square_Model_AddOperation_02
+ * @tc.desc: Add operation when the data pointer in paramIndices is nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(SquareTest, SUB_AI_NNRt_Func_North_Square_Model_AddOperation_02, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    SquareModel1 squareModel;
+    OHNNGraphArgs graphArgs = squareModel.graphArgs;
+    graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto inputIndices = TransformUInt32Array(graphArgs.inputIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    paramIndices.data = nullptr;
+    EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        &paramIndices, &inputIndices, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Square_Model_AddOperation_03
+ * @tc.desc: Add operation when paramIndices refers to a nonexistent tensor index
+ * @tc.type: FUNC
+ */
+HWTEST_F(SquareTest, SUB_AI_NNRt_Func_North_Square_Model_AddOperation_03, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    SquareModel1 squareModel;
+    OHNNGraphArgs graphArgs = squareModel.graphArgs;
+    graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    graphArgs.paramIndices = {100000};
+    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto inputIndices = TransformUInt32Array(graphArgs.inputIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        &paramIndices, &inputIndices, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Square_Model_AddOperation_04
+ * @tc.desc: Add operation when the size of paramIndices is 0
+ * @tc.type: FUNC
+ */
+HWTEST_F(SquareTest, SUB_AI_NNRt_Func_North_Square_Model_AddOperation_04, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    SquareModel1 squareModel;
+    OHNNGraphArgs graphArgs = squareModel.graphArgs;
+    graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto inputIndices = TransformUInt32Array(graphArgs.inputIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    paramIndices.size = 0;
+    EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        &paramIndices, &inputIndices, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Square_Model_AddOperation_05
+ * @tc.desc: Add operation when inputIndices is nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(SquareTest, SUB_AI_NNRt_Func_North_Square_Model_AddOperation_05, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    SquareModel1 squareModel;
+    OHNNGraphArgs graphArgs = squareModel.graphArgs;
+    graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        &paramIndices, nullptr, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Square_Model_AddOperation_06
+ * @tc.desc: Add operation when the data pointer in inputIndices is nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(SquareTest, SUB_AI_NNRt_Func_North_Square_Model_AddOperation_06, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    SquareModel1 squareModel;
+    OHNNGraphArgs graphArgs = squareModel.graphArgs;
+    graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto inputIndices = TransformUInt32Array(graphArgs.inputIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    inputIndices.data = nullptr;
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        &paramIndices, &inputIndices, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Square_Model_AddOperation_07
+ * @tc.desc: Add operation when inputIndices refers to a nonexistent tensor index
+ * @tc.type: FUNC
+ */
+HWTEST_F(SquareTest, SUB_AI_NNRt_Func_North_Square_Model_AddOperation_07, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    SquareModel1 squareModel;
+    OHNNGraphArgs graphArgs = squareModel.graphArgs;
+    graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    graphArgs.inputIndices = {100000};
+    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto inputIndices = TransformUInt32Array(graphArgs.inputIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        &paramIndices, &inputIndices, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Square_Model_AddOperation_08
+ * @tc.desc: Add operation when the size of inputIndices is 0
+ * @tc.type: FUNC
+ */
+HWTEST_F(SquareTest, SUB_AI_NNRt_Func_North_Square_Model_AddOperation_08, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    SquareModel1 squareModel;
+    OHNNGraphArgs graphArgs = squareModel.graphArgs;
+    graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto inputIndices = TransformUInt32Array(graphArgs.inputIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    inputIndices.size = 0;
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        &paramIndices, &inputIndices, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : 
SUB_AI_NNRt_Func_North_Square_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SquareTest, SUB_AI_NNRt_Func_North_Square_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SquareModel1 squareModel; + OHNNGraphArgs graphArgs = squareModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/swish_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/swish_test.cpp new file mode 100644 index 0000000..49eecee --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/swish_test.cpp @@ -0,0 +1,799 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class SwishTest : public testing::Test {}; + +struct SwishModel1 { + const std::vector tensor_shape = {3, 3}; + float inputValue[3][3] = {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}; + float outputValue[3][3] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 9*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 9*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_SWISH, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct SwishModel2 { + const std::vector tensor_shape = {2, 3}; + float inputValue[2][3] = {{-1, 0, 1}, {2, -2, 3}}; + float outputValue[2][3] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 6*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 6*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_SWISH, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +struct SwishModel3 { + const std::vector tensor_shape = {2, 3}; + float inputValue[2][2] = {{0, 0}, {0, 0}}; + float outputValue[2][2] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 4*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 4*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_SWISH, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +/** + * @tc.number : 
SUB_AI_NNRt_Func_North_Swish_Build_01 + * @tc.desc: SwishModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(SwishTest, SUB_AI_NNRt_Func_North_Swish_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SwishModel1 swishModel; + OHNNGraphArgs graphArgs = swishModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Swish_Build_02 + * @tc.desc: SwishModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(SwishTest, SUB_AI_NNRt_Func_North_Swish_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SwishModel2 swishModel; + OHNNGraphArgs graphArgs = swishModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Swish_Build_03 + * @tc.desc: SwishModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(SwishTest, SUB_AI_NNRt_Func_North_Swish_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SwishModel3 swishModel; + OHNNGraphArgs graphArgs = swishModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Swish_Build_04 + * @tc.desc: SwishModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(SwishTest, SUB_AI_NNRt_Func_North_Swish_Build_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SwishModel1 swishModel; + OHNNGraphArgs graphArgs = swishModel.graphArgs; + graphArgs.operands = {swishModel.input, swishModel.input, swishModel.output}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Swish_Build_05 + * @tc.desc: SwishModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(SwishTest, SUB_AI_NNRt_Func_North_Swish_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + 
+ SwishModel1 swishModel; + OHNNGraphArgs graphArgs = swishModel.graphArgs; + graphArgs.operands = {swishModel.input, swishModel.output, swishModel.output}; + graphArgs.inputIndices = {0}; + graphArgs.outputIndices = {1, 2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Swish_Build_06 + * @tc.desc: SwishModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(SwishTest, SUB_AI_NNRt_Func_North_Swish_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SwishModel1 swishModel; + OHNNGraphArgs graphArgs = swishModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {swishModel.input, swishModel.output, activation}; + graphArgs.paramIndices = {2}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Swish_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(SwishTest, SUB_AI_NNRt_Func_North_Swish_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Swish_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(SwishTest, SUB_AI_NNRt_Func_North_Swish_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SwishModel1 swishModel; + OHNNGraphArgs graphArgs = swishModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Swish_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(SwishTest, SUB_AI_NNRt_Func_North_Swish_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SwishModel1 swishModel; + OHNNGraphArgs graphArgs = swishModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Swish_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(SwishTest, SUB_AI_NNRt_Func_North_Swish_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SwishModel1 swishModel; + OHNNGraphArgs graphArgs = swishModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + 
graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Swish_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SwishTest, SUB_AI_NNRt_Func_North_Swish_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SwishModel1 swishModel; + OHNNGraphArgs graphArgs = swishModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Swish_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(SwishTest, SUB_AI_NNRt_Func_North_Swish_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SwishModel1 swishModel; + OHNNGraphArgs graphArgs = swishModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Swish_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SwishTest, SUB_AI_NNRt_Func_North_Swish_Model_SpecifyInputsAndOutputs_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SwishModel1 swishModel; + OHNNGraphArgs graphArgs = swishModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : 
SUB_AI_NNRt_Func_North_Swish_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SwishTest, SUB_AI_NNRt_Func_North_Swish_Model_SpecifyInputsAndOutputs_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SwishModel1 swishModel; + OHNNGraphArgs graphArgs = swishModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Swish_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(SwishTest, SUB_AI_NNRt_Func_North_Swish_Model_SpecifyInputsAndOutputs_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SwishModel1 swishModel; + OHNNGraphArgs graphArgs = swishModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Swish_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(SwishTest, SUB_AI_NNRt_Func_North_Swish_Model_SpecifyInputsAndOutputs_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SwishModel1 swishModel; + OHNNGraphArgs graphArgs = swishModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Swish_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SwishTest, SUB_AI_NNRt_Func_North_Swish_Model_SpecifyInputsAndOutputs_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SwishModel1 swishModel; + OHNNGraphArgs graphArgs = swishModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Swish_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 
设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SwishTest, SUB_AI_NNRt_Func_North_Swish_Model_SpecifyInputsAndOutputs_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SwishModel1 swishModel; + OHNNGraphArgs graphArgs = swishModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Swish_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(SwishTest, SUB_AI_NNRt_Func_North_Swish_Model_SpecifyInputsAndOutputs_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SwishModel1 swishModel; + OHNNGraphArgs graphArgs = swishModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Swish_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(SwishTest, SUB_AI_NNRt_Func_North_Swish_Model_SpecifyInputsAndOutputs_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SwishModel1 swishModel; + OHNNGraphArgs graphArgs = swishModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Swish_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(SwishTest, SUB_AI_NNRt_Func_North_Swish_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + SwishModel1 swishModel; + OHNNGraphArgs graphArgs = swishModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Swish_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * 
@tc.type: FUNC
+ */
+HWTEST_F(SwishTest, SUB_AI_NNRt_Func_North_Swish_Model_AddOperation_02, Function | MediumTest | Level1)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    SwishModel1 swishModel;
+    OHNNGraphArgs graphArgs = swishModel.graphArgs;
+    graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto inputIndices = TransformUInt32Array(graphArgs.inputIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    paramIndices.data = nullptr;
+    EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        &paramIndices, &inputIndices, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Swish_Model_AddOperation_03
+ * @tc.desc: Add operation when paramIndices refers to a nonexistent tensor index
+ * @tc.type: FUNC
+ */
+HWTEST_F(SwishTest, SUB_AI_NNRt_Func_North_Swish_Model_AddOperation_03, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    SwishModel1 swishModel;
+    OHNNGraphArgs graphArgs = swishModel.graphArgs;
+    graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    graphArgs.paramIndices = {100000};
+    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto inputIndices = TransformUInt32Array(graphArgs.inputIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        &paramIndices, &inputIndices, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Swish_Model_AddOperation_04
+ * @tc.desc: Add operation when the size of paramIndices is 0
+ * @tc.type: FUNC
+ */
+HWTEST_F(SwishTest, SUB_AI_NNRt_Func_North_Swish_Model_AddOperation_04, Function | MediumTest | Level1)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    SwishModel1 swishModel;
+    OHNNGraphArgs graphArgs = swishModel.graphArgs;
+    graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto inputIndices = TransformUInt32Array(graphArgs.inputIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    paramIndices.size = 0;
+    EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        &paramIndices, &inputIndices, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Swish_Model_AddOperation_05
+ * @tc.desc: Add operation when inputIndices is nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(SwishTest, SUB_AI_NNRt_Func_North_Swish_Model_AddOperation_05, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    SwishModel1 swishModel;
+    OHNNGraphArgs graphArgs = swishModel.graphArgs;
+    graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        &paramIndices, nullptr, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Swish_Model_AddOperation_06
+ * @tc.desc: Add operation when the data pointer in inputIndices is nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(SwishTest, SUB_AI_NNRt_Func_North_Swish_Model_AddOperation_06, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    SwishModel1 swishModel;
+    OHNNGraphArgs graphArgs = swishModel.graphArgs;
+    graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto inputIndices = TransformUInt32Array(graphArgs.inputIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    inputIndices.data = nullptr;
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        &paramIndices, &inputIndices, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Swish_Model_AddOperation_07
+ * @tc.desc: Add operation when inputIndices refers to a nonexistent tensor index
+ * @tc.type: FUNC
+ */
+HWTEST_F(SwishTest, SUB_AI_NNRt_Func_North_Swish_Model_AddOperation_07, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    SwishModel1 swishModel;
+    OHNNGraphArgs graphArgs = swishModel.graphArgs;
+    graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    graphArgs.inputIndices = {100000};
+    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto inputIndices = TransformUInt32Array(graphArgs.inputIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        &paramIndices, &inputIndices, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Swish_Model_AddOperation_08
+ * @tc.desc: Add operation when the size of inputIndices is 0
+ * @tc.type: FUNC
+ */
+HWTEST_F(SwishTest, SUB_AI_NNRt_Func_North_Swish_Model_AddOperation_08, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    SwishModel1 swishModel;
+    OHNNGraphArgs graphArgs = swishModel.graphArgs;
+    graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs));
+
+    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
+    auto inputIndices = TransformUInt32Array(graphArgs.inputIndices);
+    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
+    inputIndices.size = 0;
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType,
+        &paramIndices, &inputIndices, &outputIndices));
+
+    Free(model, nullptr, nullptr);
+}
+
+/**
+ * @tc.number : SUB_AI_NNRt_Func_North_Swish_Model_AddOperation_09
+ * @tc.desc: Add operation when outputIndices is nullptr
+ * @tc.type: FUNC
+ */
+HWTEST_F(SwishTest, SUB_AI_NNRt_Func_North_Swish_Model_AddOperation_09, Function | MediumTest | Level2)
+{
+    OH_NNModel *model = OH_NNModel_Construct();
+    EXPECT_NE(nullptr, model);
+
+    SwishModel1 swishModel;
+    OHNNGraphArgs graphArgs = swishModel.graphArgs;
+    graphArgs.addOperation = false;
+    graphArgs.specifyIO = false;
+    graphArgs.build = false;
+    EXPECT_EQ(OH_NN_SUCCESS, 
BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + ¶mIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/unstack_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/unstack_test.cpp new file mode 100644 index 0000000..372a548 --- /dev/null +++ b/test/nnrt_xts_acts/nncore/opstest/src/unstack_test.cpp @@ -0,0 +1,880 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class UnstackTest : public testing::Test {}; + +struct UnstackModel1 { + const std::vector input_shape = {3, 2}; + const std::vector output_shape = {3, 1}; + int64_t axisValue = -1; + float inputValue[3][2] = {{1, 2}, {3, 4}, {5, 6}}; + float outputValue[3][1] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 6*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 3*sizeof(float)}; + OHNNOperandTest axis = {OH_NN_INT64, OH_NN_UNSTACK_AXIS, {1}, &axisValue, sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_UNSTACK, + .operands = {input, output, output, axis}, + .paramIndices = {3}, + .inputIndices = {0}, + .outputIndices = {1, 2}}; +}; + +struct UnstackModel2 { + const std::vector input_shape = {3}; + const std::vector output_shape = {1}; + int64_t axisValue = 0; + float inputValue[3] = {1, 5, 6}; + float outputValue[1] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 3*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, sizeof(float)}; + OHNNOperandTest axis = {OH_NN_INT64, OH_NN_UNSTACK_AXIS, {1}, &axisValue, sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_UNSTACK, + .operands = {input, output, output, output, axis}, + .paramIndices = {4}, + .inputIndices = {0}, + .outputIndices = {1, 2, 3}}; +}; + +struct UnstackModel3 { + const std::vector input_shape = {3, 2}; + const std::vector output_shape = {3, 1}; + int64_t axisValue = 1; + float inputValue[3][2] = {{1, 2}, {3, 4}, {5, 6}}; + float outputValue[3][1] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 6*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 3*sizeof(float)}; + OHNNOperandTest axis = {OH_NN_INT64, OH_NN_UNSTACK_AXIS, {1}, &axisValue, sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_UNSTACK, + .operands = {input, output, output, axis}, + .paramIndices = {3}, + .inputIndices = {0}, + .outputIndices = {1, 2}}; +}; + 
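+// Reference behaviour for the Unstack fixtures in this file: OH_NN_OPS_UNSTACK is expected to
+// split the input tensor into separate output tensors along the axis given by the
+// OH_NN_UNSTACK_AXIS parameter. For example, with UnstackModel1's {3, 2} input
+// {{1, 2}, {3, 4}, {5, 6}} and axis = -1 (the last axis), the two outputs should carry the
+// column data {1, 3, 5} and {2, 4, 6}; with UnstackModel2's axis = 0, the {3} input {1, 5, 6}
+// should be split into three single-element outputs. The exact output shapes reported at
+// runtime depend on the backend in use.
+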
+struct UnstackModel4 { + const std::vector input_shape = {3, 2}; + const std::vector output_shape = {1, 2}; + int64_t axisValue = 0; + float inputValue[3][2] = {{1, 2}, {3, 4}, {5, 6}}; + float outputValue[3][1] = {0}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, input_shape, inputValue, 6*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, output_shape, outputValue, 2*sizeof(float)}; + OHNNOperandTest axis = {OH_NN_INT64, OH_NN_UNSTACK_AXIS, {1}, &axisValue, sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_UNSTACK, + .operands = {input, output, output, output, axis}, + .paramIndices = {4}, + .inputIndices = {0}, + .outputIndices = {1, 2, 3}}; +}; + +struct UnstackModel5 { + const std::vector tensor_shape = {}; + int64_t axisValue = 0; + float* inputValue = {}; + float* outputValue = {}; + + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, inputValue, 0*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 0*sizeof(float)}; + OHNNOperandTest axis = {OH_NN_INT64, OH_NN_UNSTACK_AXIS, {1}, &axisValue, sizeof(int64_t)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_UNSTACK, + .operands = {input, output, axis}, + .paramIndices = {2}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Unstack_Build_01 + * @tc.desc: UnstackModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(UnstackTest, SUB_AI_NNRt_Func_North_Unstack_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + UnstackModel1 unstackModel; + OHNNGraphArgs graphArgs = unstackModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Unstack_Build_02 + * @tc.desc: UnstackModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(UnstackTest, SUB_AI_NNRt_Func_North_Unstack_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + UnstackModel2 unstackModel; + OHNNGraphArgs graphArgs = unstackModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Unstack_Build_03 + * @tc.desc: UnstackModel3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(UnstackTest, SUB_AI_NNRt_Func_North_Unstack_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + UnstackModel3 unstackModel; + OHNNGraphArgs graphArgs = unstackModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, 
graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Unstack_Build_04 + * @tc.desc: UnstackModel4模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(UnstackTest, SUB_AI_NNRt_Func_North_Unstack_Build_04, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + UnstackModel4 unstackModel; + OHNNGraphArgs graphArgs = unstackModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Unstack_Build_05 + * @tc.desc: UnstackModel5模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(UnstackTest, SUB_AI_NNRt_Func_North_Unstack_Build_05, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + UnstackModel5 unstackModel; + OHNNGraphArgs graphArgs = unstackModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Unstack_Build_06 + * @tc.desc: UnstackModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(UnstackTest, SUB_AI_NNRt_Func_North_Unstack_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + UnstackModel1 unstackModel; + OHNNGraphArgs graphArgs = unstackModel.graphArgs; + graphArgs.operands = {unstackModel.input, unstackModel.input, unstackModel.output, + unstackModel.output, unstackModel.axis}; + graphArgs.inputIndices = {0, 1}; + graphArgs.outputIndices = {2, 3}; + graphArgs.paramIndices = {4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Unstack_Build_07 + * @tc.desc: UnstackModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(UnstackTest, SUB_AI_NNRt_Func_North_Unstack_Build_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + UnstackModel1 unstackModel; + OHNNGraphArgs graphArgs = unstackModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = 
{unstackModel.input, unstackModel.output, unstackModel.output, unstackModel.axis, activation}; + graphArgs.paramIndices = {3, 4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Unstack_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(UnstackTest, SUB_AI_NNRt_Func_North_Unstack_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Unstack_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(UnstackTest, SUB_AI_NNRt_Func_North_Unstack_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + UnstackModel1 unstackModel; + OHNNGraphArgs graphArgs = unstackModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Unstack_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(UnstackTest, SUB_AI_NNRt_Func_North_Unstack_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + UnstackModel1 unstackModel; + OHNNGraphArgs graphArgs = unstackModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Unstack_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(UnstackTest, SUB_AI_NNRt_Func_North_Unstack_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + UnstackModel1 unstackModel; + OHNNGraphArgs graphArgs = unstackModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Unstack_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buufer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(UnstackTest, SUB_AI_NNRt_Func_North_Unstack_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + UnstackModel1 unstackModel; + OHNNGraphArgs graphArgs = unstackModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest 
&operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Unstack_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(UnstackTest, SUB_AI_NNRt_Func_North_Unstack_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + UnstackModel1 unstackModel; + OHNNGraphArgs graphArgs = unstackModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Unstack_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(UnstackTest, SUB_AI_NNRt_Func_North_Unstack_Model_SpecifyInputsAndOutputs_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + UnstackModel1 unstackModel; + OHNNGraphArgs graphArgs = unstackModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Unstack_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(UnstackTest, SUB_AI_NNRt_Func_North_Unstack_Model_SpecifyInputsAndOutputs_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + UnstackModel1 unstackModel; + OHNNGraphArgs graphArgs = unstackModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, 
OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Unstack_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(UnstackTest, SUB_AI_NNRt_Func_North_Unstack_Model_SpecifyInputsAndOutputs_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + UnstackModel1 unstackModel; + OHNNGraphArgs graphArgs = unstackModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Unstack_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(UnstackTest, SUB_AI_NNRt_Func_North_Unstack_Model_SpecifyInputsAndOutputs_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + UnstackModel1 unstackModel; + OHNNGraphArgs graphArgs = unstackModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Unstack_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(UnstackTest, SUB_AI_NNRt_Func_North_Unstack_Model_SpecifyInputsAndOutputs_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + UnstackModel1 unstackModel; + OHNNGraphArgs graphArgs = unstackModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Unstack_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(UnstackTest, SUB_AI_NNRt_Func_North_Unstack_Model_SpecifyInputsAndOutputs_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + UnstackModel1 unstackModel; + OHNNGraphArgs graphArgs = unstackModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, 
&outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Unstack_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(UnstackTest, SUB_AI_NNRt_Func_North_Unstack_Model_SpecifyInputsAndOutputs_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + UnstackModel1 unstackModel; + OHNNGraphArgs graphArgs = unstackModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Unstack_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(UnstackTest, SUB_AI_NNRt_Func_North_Unstack_Model_SpecifyInputsAndOutputs_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + UnstackModel1 unstackModel; + OHNNGraphArgs graphArgs = unstackModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Unstack_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(UnstackTest, SUB_AI_NNRt_Func_North_Unstack_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + UnstackModel1 unstackModel; + OHNNGraphArgs graphArgs = unstackModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Unstack_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(UnstackTest, SUB_AI_NNRt_Func_North_Unstack_Model_AddOperation_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + UnstackModel1 unstackModel; + OHNNGraphArgs graphArgs = unstackModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + 
EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Unstack_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(UnstackTest, SUB_AI_NNRt_Func_North_Unstack_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + UnstackModel1 unstackModel; + OHNNGraphArgs graphArgs = unstackModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Unstack_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(UnstackTest, SUB_AI_NNRt_Func_North_Unstack_Model_AddOperation_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + UnstackModel1 unstackModel; + OHNNGraphArgs graphArgs = unstackModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Unstack_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(UnstackTest, SUB_AI_NNRt_Func_North_Unstack_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + UnstackModel1 unstackModel; + OHNNGraphArgs graphArgs = unstackModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Unstack_Model_AddOperation_06 + * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(UnstackTest, SUB_AI_NNRt_Func_North_Unstack_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + UnstackModel1 unstackModel; + OHNNGraphArgs graphArgs = unstackModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + 
EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Unstack_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(UnstackTest, SUB_AI_NNRt_Func_North_Unstack_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + UnstackModel1 unstackModel; + OHNNGraphArgs graphArgs = unstackModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Unstack_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(UnstackTest, SUB_AI_NNRt_Func_North_Unstack_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + UnstackModel1 unstackModel; + OHNNGraphArgs graphArgs = unstackModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Unstack_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(UnstackTest, SUB_AI_NNRt_Func_North_Unstack_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + UnstackModel1 unstackModel; + OHNNGraphArgs graphArgs = unstackModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + &paramIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file diff --git a/test/nnrt_xts_acts/nncore/opstest/src/where_test.cpp b/test/nnrt_xts_acts/nncore/opstest/src/where_test.cpp new file mode 100644 index 0000000..e1ec341 --- /dev/null +++ 
b/test/nnrt_xts_acts/nncore/opstest/src/where_test.cpp @@ -0,0 +1,812 @@ +/* + * Copyright (c) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <gtest/gtest.h> +#include <vector> +#include <algorithm> +#include "nncore_utils.h" + +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Test; +class WhereTest : public testing::Test {}; + +struct WhereModel1 { + const std::vector<int32_t> tensor_shape = {3}; + bool input0Value[3] = {true, false, true}; + float input1Value[3] = {1, 2, 3}; + float input2Value[3] = {4, 5, 6}; + float outputValue[3] = {0}; + + OHNNOperandTest input0 = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, input0Value, 3*sizeof(bool)}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input1Value, 3*sizeof(float)}; + OHNNOperandTest input2 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input2Value, 3*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_WHERE, + .operands = {input0, input1, input2, output}, + .paramIndices = {}, + .inputIndices = {0, 1, 2}, + .outputIndices = {3}}; +}; + +struct WhereModel2 { + const std::vector<int32_t> tensor_shape = {3}; + bool input0Value[3] = {false, false, true}; + float input1Value[1] = {5}; + float input2Value[3] = {7, 8, 9}; + float outputValue[3] = {0}; + + OHNNOperandTest input0 = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, input0Value, 3*sizeof(bool)}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, {1}, input1Value, sizeof(float)}; + OHNNOperandTest input2 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input2Value, 3*sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_WHERE, + .operands = {input0, input1, input2, output}, + .paramIndices = {}, + .inputIndices = {0, 1, 2}, + .outputIndices = {3}}; +}; + +struct WhereModel3 { + const std::vector<int32_t> tensor_shape = {3}; + bool input0Value[3] = {true, true, false}; + float input1Value[3] = {10, 11, 12}; + float input2Value[1] = {3}; + float outputValue[3] = {0}; + + OHNNOperandTest input0 = {OH_NN_BOOL, OH_NN_TENSOR, tensor_shape, input0Value, 3*sizeof(bool)}; + OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, input1Value, 3*sizeof(float)}; + OHNNOperandTest input2 = {OH_NN_FLOAT32, OH_NN_TENSOR, {1}, input2Value, sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, tensor_shape, outputValue, 3*sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_WHERE, + .operands = {input0, input1, input2, output}, + .paramIndices = {}, + .inputIndices = {0, 1, 2}, + .outputIndices = {3}}; +}; + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Where_Build_01 + * @tc.desc: WhereModel1模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(WhereTest, SUB_AI_NNRt_Func_North_Where_Build_01, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, 
model); + + WhereModel1 whereModel; + OHNNGraphArgs graphArgs = whereModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Where_Build_02 + * @tc.desc: WhereModel2模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(WhereTest, SUB_AI_NNRt_Func_North_Where_Build_02, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + WhereModel2 whereModel; + OHNNGraphArgs graphArgs = whereModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Where_Build_03 + * @tc.desc: WhereModel3模型build测试 + * @tc.type: FUNC + */ +HWTEST_F(WhereTest, SUB_AI_NNRt_Func_North_Where_Build_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + WhereModel3 whereModel; + OHNNGraphArgs graphArgs = whereModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + OH_NNCompilation *compilation = OH_NNCompilation_Construct(model); + EXPECT_NE(nullptr, compilation); + + OHNNCompileParam compileParam{ + .performanceMode = OH_NN_PERFORMANCE_HIGH, + .priority = OH_NN_PRIORITY_HIGH, + }; + EXPECT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam)); + + OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation); + EXPECT_NE(nullptr, executor); + + Free(model, compilation, executor); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Where_Build_04 + * @tc.desc: WhereModel1模型输入Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(WhereTest, SUB_AI_NNRt_Func_North_Where_Build_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + WhereModel1 whereModel; + OHNNGraphArgs graphArgs = whereModel.graphArgs; + graphArgs.operands = {whereModel.input0, whereModel.input1, whereModel.input2, + whereModel.input2, whereModel.output}; + graphArgs.inputIndices = {0, 1, 2, 3}; + graphArgs.outputIndices = {4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Where_Build_05 + * @tc.desc: WhereModel1模型输出Tensor+1进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(WhereTest, SUB_AI_NNRt_Func_North_Where_Build_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + WhereModel1 whereModel; + OHNNGraphArgs graphArgs = whereModel.graphArgs; + graphArgs.operands = {whereModel.input0, whereModel.input1, whereModel.input2, + whereModel.output, whereModel.output}; + 
graphArgs.inputIndices = {0, 1, 2}; + graphArgs.outputIndices = {3, 4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Where_Build_06 + * @tc.desc: WhereModel1模型传入非法参数进行build测试 + * @tc.type: FUNC + */ +HWTEST_F(WhereTest, SUB_AI_NNRt_Func_North_Where_Build_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + WhereModel1 whereModel; + OHNNGraphArgs graphArgs = whereModel.graphArgs; + + int8_t activationValue = OH_NN_FUSED_NONE; + OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)}; + graphArgs.operands = {whereModel.input0, whereModel.input1, whereModel.input2, whereModel.output, activation}; + graphArgs.paramIndices = {4}; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Where_Model_Finish_01 + * @tc.desc: 模型构图,未添加操作数 + * @tc.type: FUNC + */ +HWTEST_F(WhereTest, SUB_AI_NNRt_Func_North_Where_Model_Finish_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + OHNNGraphArgs graphArgs; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, SingleModelBuildEndStep(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Where_Model_Finish_02 + * @tc.desc: 模型构图,未设置输入输出 + * @tc.type: FUNC + */ +HWTEST_F(WhereTest, SUB_AI_NNRt_Func_North_Where_Model_Finish_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + WhereModel1 whereModel; + OHNNGraphArgs graphArgs = whereModel.graphArgs; + graphArgs.specifyIO = false; + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Where_Model_Finish_03 + * @tc.desc: 模型构图,设置输入输出,构图成功 + * @tc.type: FUNC + */ +HWTEST_F(WhereTest, SUB_AI_NNRt_Func_North_Where_Model_Finish_03, Function | MediumTest | Level1) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + WhereModel1 whereModel; + OHNNGraphArgs graphArgs = whereModel.graphArgs; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Where_Model_SetOperandValue_01 + * @tc.desc: 设置操作数值,操作数不存在 + * @tc.type: FUNC + */ +HWTEST_F(WhereTest, SUB_AI_NNRt_Func_North_Where_Model_SetOperandValue_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + WhereModel1 whereModel; + OHNNGraphArgs graphArgs = whereModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector<NN_TensorDesc*> tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData( + model, 1000+i, 
operandTem.data, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Where_Model_SetOperandValue_02 + * @tc.desc: 设置操作数值,buffer为nullptr + * @tc.type: FUNC + */ +HWTEST_F(WhereTest, SUB_AI_NNRt_Func_North_Where_Model_SetOperandValue_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + WhereModel1 whereModel; + OHNNGraphArgs graphArgs = whereModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector<NN_TensorDesc*> tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, i, nullptr, operandTem.length)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Where_Model_SetOperandValue_03 + * @tc.desc: 设置操作数值,length为0 + * @tc.type: FUNC + */ +HWTEST_F(WhereTest, SUB_AI_NNRt_Func_North_Where_Model_SetOperandValue_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + WhereModel1 whereModel; + OHNNGraphArgs graphArgs = whereModel.graphArgs; + + NN_TensorDesc* tensorDesc = nullptr; + std::vector<NN_TensorDesc*> tensorDescVec; + + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + tensorDescVec.emplace_back(tensorDesc); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensorToModel(model, tensorDesc)); + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorType(model, i, operandTem.type)); + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SetTensorData(model, 1000+i, operandTem.data, 0)); + } + } + + FreeTensorDescVec(tensorDescVec); + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Where_Model_SpecifyInputsAndOutputs_01 + * @tc.desc: 设置输入输出,inputIndices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(WhereTest, SUB_AI_NNRt_Func_North_Where_Model_SpecifyInputsAndOutputs_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + WhereModel1 whereModel; + OHNNGraphArgs graphArgs = whereModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Where_Model_SpecifyInputsAndOutputs_02 + * @tc.desc: 设置输入输出,inputindices中data为nullptr + * 
@tc.type: FUNC + */ +HWTEST_F(WhereTest, SUB_AI_NNRt_Func_North_Where_Model_SpecifyInputsAndOutputs_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + WhereModel1 whereModel; + OHNNGraphArgs graphArgs = whereModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Where_Model_SpecifyInputsAndOutputs_03 + * @tc.desc: 设置输入输出,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(WhereTest, SUB_AI_NNRt_Func_North_Where_Model_SpecifyInputsAndOutputs_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + WhereModel1 whereModel; + OHNNGraphArgs graphArgs = whereModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Where_Model_SpecifyInputsAndOutputs_04 + * @tc.desc: 设置输入输出,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(WhereTest, SUB_AI_NNRt_Func_North_Where_Model_SpecifyInputsAndOutputs_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + WhereModel1 whereModel; + OHNNGraphArgs graphArgs = whereModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Where_Model_SpecifyInputsAndOutputs_05 + * @tc.desc: 设置输入输出,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(WhereTest, SUB_AI_NNRt_Func_North_Where_Model_SpecifyInputsAndOutputs_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + WhereModel1 whereModel; + OHNNGraphArgs graphArgs = whereModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Where_Model_SpecifyInputsAndOutputs_06 + * @tc.desc: 设置输入输出,outputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(WhereTest, SUB_AI_NNRt_Func_North_Where_Model_SpecifyInputsAndOutputs_06, 
Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + WhereModel1 whereModel; + OHNNGraphArgs graphArgs = whereModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Where_Model_SpecifyInputsAndOutputs_07 + * @tc.desc: 设置输入输出,outputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(WhereTest, SUB_AI_NNRt_Func_North_Where_Model_SpecifyInputsAndOutputs_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + WhereModel1 whereModel; + OHNNGraphArgs graphArgs = whereModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.outputIndices = {100000}; + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Where_Model_SpecifyInputsAndOutputs_08 + * @tc.desc: 设置输入输出,outputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(WhereTest, SUB_AI_NNRt_Func_North_Where_Model_SpecifyInputsAndOutputs_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + WhereModel1 whereModel; + OHNNGraphArgs graphArgs = whereModel.graphArgs; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + outputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Where_Model_AddOperation_01 + * @tc.desc: 添加算子,paramindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(WhereTest, SUB_AI_NNRt_Func_North_Where_Model_AddOperation_01, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + WhereModel1 whereModel; + OHNNGraphArgs graphArgs = whereModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + nullptr, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Where_Model_AddOperation_02 + * @tc.desc: 添加算子,paramindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(WhereTest, SUB_AI_NNRt_Func_North_Where_Model_AddOperation_02, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + 
EXPECT_NE(nullptr, model); + + WhereModel1 whereModel; + OHNNGraphArgs graphArgs = whereModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.data = nullptr; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Where_Model_AddOperation_03 + * @tc.desc: 添加算子,paramindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(WhereTest, SUB_AI_NNRt_Func_North_Where_Model_AddOperation_03, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + WhereModel1 whereModel; + OHNNGraphArgs graphArgs = whereModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.paramIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Where_Model_AddOperation_04 + * @tc.desc: 添加算子,paramindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(WhereTest, SUB_AI_NNRt_Func_North_Where_Model_AddOperation_04, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + WhereModel1 whereModel; + OHNNGraphArgs graphArgs = whereModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + paramIndices.size = 0; + EXPECT_EQ(OH_NN_SUCCESS, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Where_Model_AddOperation_05 + * @tc.desc: 添加算子,inputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(WhereTest, SUB_AI_NNRt_Func_North_Where_Model_AddOperation_05, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + WhereModel1 whereModel; + OHNNGraphArgs graphArgs = whereModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, nullptr, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Where_Model_AddOperation_06 
+ * @tc.desc: 添加算子,inputindices中data为nullptr + * @tc.type: FUNC + */ +HWTEST_F(WhereTest, SUB_AI_NNRt_Func_North_Where_Model_AddOperation_06, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + WhereModel1 whereModel; + OHNNGraphArgs graphArgs = whereModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.data = nullptr; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Where_Model_AddOperation_07 + * @tc.desc: 添加算子,inputindices中data对应序号不存在 + * @tc.type: FUNC + */ +HWTEST_F(WhereTest, SUB_AI_NNRt_Func_North_Where_Model_AddOperation_07, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + WhereModel1 whereModel; + OHNNGraphArgs graphArgs = whereModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + graphArgs.inputIndices = {100000}; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Where_Model_AddOperation_08 + * @tc.desc: 添加算子,inputindices中size为0 + * @tc.type: FUNC + */ +HWTEST_F(WhereTest, SUB_AI_NNRt_Func_North_Where_Model_AddOperation_08, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + WhereModel1 whereModel; + OHNNGraphArgs graphArgs = whereModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + inputIndices.size = 0; + EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(model, graphArgs.operationType, + &paramIndices, &inputIndices, &outputIndices)); + + Free(model, nullptr, nullptr); +} + +/** + * @tc.number : SUB_AI_NNRt_Func_North_Where_Model_AddOperation_09 + * @tc.desc: 添加算子,outputindices为nullptr + * @tc.type: FUNC + */ +HWTEST_F(WhereTest, SUB_AI_NNRt_Func_North_Where_Model_AddOperation_09, Function | MediumTest | Level2) +{ + OH_NNModel *model = OH_NNModel_Construct(); + EXPECT_NE(nullptr, model); + + WhereModel1 whereModel; + OHNNGraphArgs graphArgs = whereModel.graphArgs; + graphArgs.addOperation = false; + graphArgs.specifyIO = false; + graphArgs.build = false; + EXPECT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model, graphArgs)); + + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + 
EXPECT_EQ(OH_NN_INVALID_PARAMETER, OH_NNModel_AddOperation(nullptr, graphArgs.operationType, + &paramIndices, &inputIndices, nullptr)); + + Free(model, nullptr, nullptr); +} \ No newline at end of file -- Gitee From 620090a92bd522b9d98e61532a4183a7b720b5fd Mon Sep 17 00:00:00 2001 From: w30052974 Date: Thu, 17 Apr 2025 14:44:28 +0800 Subject: [PATCH 2/2] =?UTF-8?q?nnrt=20=E7=94=A8=E4=BE=8B=E5=BD=92=E6=A1=A3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: w30052974 --- .../neural_network_runtime/v2_0/common/mock_idevice.cpp | 2 +- .../neural_network_runtime/v2_0/interface/src/ExecutorTest.cpp | 3 ++- .../neural_network_runtime/v2_0/interface/src/MemoryTest.cpp | 3 ++- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/test/nnrt_xts_acts/neural_network_runtime/v2_0/common/mock_idevice.cpp b/test/nnrt_xts_acts/neural_network_runtime/v2_0/common/mock_idevice.cpp index 536c6ef..5c02b61 100644 --- a/test/nnrt_xts_acts/neural_network_runtime/v2_0/common/mock_idevice.cpp +++ b/test/nnrt_xts_acts/neural_network_runtime/v2_0/common/mock_idevice.cpp @@ -122,7 +122,7 @@ MockIDevice::~MockIDevice() MockIDevice::MockIDevice() { - m_bufferFd = 0; + m_bufferFd = 0; } MockIPreparedModel::~MockIPreparedModel() diff --git a/test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/src/ExecutorTest.cpp b/test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/src/ExecutorTest.cpp index 8766aec..f8a8a58 100644 --- a/test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/src/ExecutorTest.cpp +++ b/test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/src/ExecutorTest.cpp @@ -1240,7 +1240,8 @@ HWTEST_F(ExecutorTest, SUB_AI_NNRt_Func_North_Executor_Combine_0400, Function | ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_SetInputWithMemory(executor, inputIndex, &operand, inputMemory)); - ASSERT_EQ(EOK, memcpy_s(inputMemory->data, operandTem.length, static_cast<void*>(operandTem.data), operandTem.length)); + ASSERT_EQ(EOK, memcpy_s(inputMemory->data, + operandTem.length, static_cast<void*>(operandTem.data), operandTem.length)); OHNNMemory[inputIndex] = inputMemory; inputIndex += 1; } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) != diff --git a/test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/src/MemoryTest.cpp b/test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/src/MemoryTest.cpp index dfdfb97..1093da5 100644 --- a/test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/src/MemoryTest.cpp +++ b/test/nnrt_xts_acts/neural_network_runtime/v2_0/interface/src/MemoryTest.cpp @@ -862,7 +862,8 @@ HWTEST_F(MemoryTest, SUB_AI_NNRt_Func_North_Executor_Memory_Run_0200, Function | OH_NN_Memory *inputMemory = OH_NNExecutor_AllocateInputMemory(executor, inputIndex, operandTem.length); ASSERT_NE(nullptr, inputMemory); ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_SetInputWithMemory(executor, inputIndex, &operand, inputMemory)); - ASSERT_EQ(EOK, memcpy_s(inputMemory->data, operandTem.length, static_cast<void*>(operandTem.data), operandTem.length)); + ASSERT_EQ(EOK, memcpy_s(inputMemory->data, + operandTem.length, static_cast<void*>(operandTem.data), operandTem.length)); OH_NNExecutor_DestroyInputMemory(executor, inputIndex, &inputMemory); ASSERT_EQ(nullptr, inputMemory); } -- Gitee