From dad76cc8dab41a11e48d8d899ded43fb49e2ac57 Mon Sep 17 00:00:00 2001 From: wangchuanxia Date: Mon, 13 May 2024 19:23:03 +0800 Subject: [PATCH 01/10] add fuzz test Signed-off-by: wangchuanxia --- test/fuzztest/BUILD.gn | 3 +- .../neural_network_core_fuzzer/BUILD.gn | 45 ++++++++ .../neural_network_core_fuzzer/corpus/init | 16 +++ .../nncore_fuzzer.cpp | 105 ++++++++++++++++++ .../nncore_fuzzer.h | 21 ++++ .../neural_network_core_fuzzer/project.xml | 25 +++++ 6 files changed, 214 insertions(+), 1 deletion(-) create mode 100644 test/fuzztest/neural_network_core_fuzzer/BUILD.gn create mode 100644 test/fuzztest/neural_network_core_fuzzer/corpus/init create mode 100644 test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp create mode 100644 test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.h create mode 100644 test/fuzztest/neural_network_core_fuzzer/project.xml diff --git a/test/fuzztest/BUILD.gn b/test/fuzztest/BUILD.gn index 3835be8..528069b 100644 --- a/test/fuzztest/BUILD.gn +++ b/test/fuzztest/BUILD.gn @@ -9,7 +9,7 @@ # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and -# limitations under the License. +# limitations under the License. import("//build/test.gni") group("fuzztest") { @@ -18,4 +18,5 @@ group("fuzztest") { deps += [ "hdinnrtdevice_fuzzer:HdiNnrtDeviceFuzzTest" ] deps += [ "hdinnrtpreparedmodel_fuzzer:HdiNnrtPreparedModelFuzzTest" ] + deps += [ "neural_network_core_fuzzer:NNCoreFuzzTest" ] } diff --git a/test/fuzztest/neural_network_core_fuzzer/BUILD.gn b/test/fuzztest/neural_network_core_fuzzer/BUILD.gn new file mode 100644 index 0000000..5ec0f6f --- /dev/null +++ b/test/fuzztest/neural_network_core_fuzzer/BUILD.gn @@ -0,0 +1,45 @@ +# Copyright (C) 2024 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +#####################hydra-fuzz################### +import("//build/config/features.gni") +import("//build/ohos.gni") +import("//build/test.gni") +module_output_path = "neural_network_runtime/" + +##############################fuzztest########################################## +ohos_fuzztest("NNCoreFuzzTest") { + module_out_path = module_output_path + fuzz_config_file = "../neural_network_core_fuzzer" + + include_dirs = [ "../../../interfaces/kits/c/neural_network_runtime" ] + + cflags = [ + "-g", + "-O0", + "-Wno-unused-variable", + "-fno-omit-frame-pointer", + ] + + sources = [ "nncore_fuzzer.cpp" ] + + deps = [ + "../../../frameworks/native/neural_network_core:libneural_network_core", + "../../../frameworks/native/neural_network_runtime:libneural_network_runtime", + ] + + external_deps = [ + "c_utils:utils", + "hilog:libhilog", + ] +} diff --git a/test/fuzztest/neural_network_core_fuzzer/corpus/init b/test/fuzztest/neural_network_core_fuzzer/corpus/init new file mode 100644 index 0000000..51f4e77 --- /dev/null +++ b/test/fuzztest/neural_network_core_fuzzer/corpus/init @@ -0,0 +1,16 @@ +/* + * Copyright (C) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +FUZZ \ No newline at end of file diff --git a/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp b/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp new file mode 100644 index 0000000..bd7b9dd --- /dev/null +++ b/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp @@ -0,0 +1,105 @@ +/* + * Copyright (C) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "nncore_fuzzer.h" +#include "../data.h" +#include "../../../common/log.h" +#include "neural_network_core.h" +#include + +namespace OHOS { +namespace NeuralNetworkRuntime { +bool NNCoreDeviceFuzzTest() +{ + auto ret = OH_NNDevice_GetAllDevicesID(nullptr, nullptr); + if (ret != OH_NN_INVALID_PARAMETER) { + LOGE("[NNCoreFuzzTest]OH_NNDevice_GetAllDevicesID should return OH_NN_INVALID_PARAMETER."); + return false; + } + + const size_t* allDevicesID = new size_t[1]; + ret = OH_NNDevice_GetAllDevicesID(&allDevicesID, nullptr); + if (ret != OH_NN_INVALID_PARAMETER) { + LOGE("[NNCoreFuzzTest]OH_NNDevice_GetAllDevicesID with allDevicesID should return OH_NN_INVALID_PARAMETER."); + delete[] allDevicesID; + return false; + } + delete[] allDevicesID; + + const size_t *allDevicesIDNull = nullptr; + ret = OH_NNDevice_GetAllDevicesID(&allDevicesIDNull, nullptr); + if (ret != OH_NN_INVALID_PARAMETER) { + LOGE("[NNCoreFuzzTest]OH_NNDevice_GetAllDevicesID with null deviceCount should return OH_NN_INVALID_PARAMETER."); + return false; + } + + uint32_t deviceCount = 0; + ret = OH_NNDevice_GetAllDevicesID(&allDevicesIDNull, &deviceCount); + + ret = OH_NNDevice_GetName(0, nullptr); + if (ret != OH_NN_INVALID_PARAMETER) { + LOGE("[NNCoreFuzzTest]OH_NNDevice_GetName with null name should return OH_NN_INVALID_PARAMETER."); + return false; + } + + std::string name = "test"; + const char* nameC = name.c_str(); + ret = OH_NNDevice_GetName(0, &nameC); + if (ret != OH_NN_INVALID_PARAMETER) { + LOGE("[NNCoreFuzzTest]OH_NNDevice_GetName with invalid name should return OH_NN_INVALID_PARAMETER."); + return false; + } + + const char* nnameNullC = nullptr; + ret = OH_NNDevice_GetName(0, &nnameNullC); + if (ret != OH_NN_FAILED) { + LOGE("[NNCoreFuzzTest]OH_NNDevice_GetName with invalid deviceid should return OH_NN_FAILED."); + return false; + } + + ret = OH_NNDevice_GetType(0, nullptr); + if (ret != OH_NN_INVALID_PARAMETER) { + LOGE("[NNCoreFuzzTest]OH_NNDevice_GetType with invalid device id should return OH_NN_INVALID_PARAMETER."); + return false; + } + return true; +} + +bool NNCoreCompilationConstructTest() +{ + auto ret = OH_NNCompilation_Construct(nullptr); + if (ret != nullptr) { + LOGE("[NNCoreFuzzTest]OH_NNCompilation_Construct with nullptr should return nullptr."); + return false; + } + return true; +} + +bool NNCoreFuzzTest(const uint8_t* data, size_t size) +{ + if (!NNCoreDeviceFuzzTest()) { + return false; + } + + return true; +} +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +/* Fuzzer entry point */ +extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) +{ + OHOS::NeuralNetworkRuntime::NNCoreFuzzTest(data, size); + return 0; +} \ No newline at end of file diff --git a/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.h b/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.h new file mode 100644 index 0000000..f532782 --- /dev/null +++ b/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.h @@ -0,0 +1,21 @@ +/* + * Copyright (C) 2024 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NNCORE_FUZZER_H +#define NNCORE_FUZZER_H + +#define FUZZ_PROJECT_NAME "nncore_fuzzer" + +#endif \ No newline at end of file diff --git a/test/fuzztest/neural_network_core_fuzzer/project.xml b/test/fuzztest/neural_network_core_fuzzer/project.xml new file mode 100644 index 0000000..4d483f4 --- /dev/null +++ b/test/fuzztest/neural_network_core_fuzzer/project.xml @@ -0,0 +1,25 @@ + + + + + + 1000 + + 300 + + 4096 + + -- Gitee From 1d1941f248057e6a0cffe75ed6a7d8cc0cef97a1 Mon Sep 17 00:00:00 2001 From: wangchuanxia Date: Sat, 18 May 2024 11:01:09 +0800 Subject: [PATCH 02/10] add fuzz test Signed-off-by: wangchuanxia --- test/fuzztest/data.h | 9 + .../neural_network_core_fuzzer/BUILD.gn | 9 +- .../nncore_fuzzer.cpp | 291 +++++++++++++++--- 3 files changed, 260 insertions(+), 49 deletions(-) diff --git a/test/fuzztest/data.h b/test/fuzztest/data.h index 8ac8442..5be2e46 100644 --- a/test/fuzztest/data.h +++ b/test/fuzztest/data.h @@ -55,6 +55,15 @@ public: { return dataSize - dataPos; } + + const uint8_t* GetSpecificData(size_t startPos, size_t& size) const + { + if ((startPos + size) > dataSize) { + size = dataSize - startPos; + } + return dataFuzz + startPos; + } + private: const uint8_t* dataFuzz {nullptr}; size_t dataSize {0}; diff --git a/test/fuzztest/neural_network_core_fuzzer/BUILD.gn b/test/fuzztest/neural_network_core_fuzzer/BUILD.gn index 5ec0f6f..6a49eeb 100644 --- a/test/fuzztest/neural_network_core_fuzzer/BUILD.gn +++ b/test/fuzztest/neural_network_core_fuzzer/BUILD.gn @@ -22,7 +22,14 @@ ohos_fuzztest("NNCoreFuzzTest") { module_out_path = module_output_path fuzz_config_file = "../neural_network_core_fuzzer" - include_dirs = [ "../../../interfaces/kits/c/neural_network_runtime" ] + include_dirs = [ + "../../..", + "../../../frameworks/native", + "../../../frameworks/native/neural_network_core", + "../../../frameworks/native/neural_network_runtime", + "../../../interfaces/kits/c/neural_network_runtime", + "//third_party/mindspore/mindspore-src/source/mindspore/lite/mindir/include", + ] cflags = [ "-g", diff --git a/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp b/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp index bd7b9dd..abd6368 100644 --- a/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp +++ b/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp @@ -15,84 +15,280 @@ #include "nncore_fuzzer.h" #include "../data.h" #include "../../../common/log.h" +#include "compilation.h" +#include "inner_model.h" #include "neural_network_core.h" #include namespace OHOS { namespace NeuralNetworkRuntime { -bool NNCoreDeviceFuzzTest() +const size_t SIZE_ONE = 1; +const size_t CACHE_VERSION = 1; +const size_t BUFFER_SIZE = 32; + +// 返回值检查宏 +#define CHECKNEQ(realRet, expectRet, retValue, ...) \ + do { \ + if ((realRet) != (expectRet)) { \ + printf(__VA_ARGS__); \ + return (retValue); \ + } \ + } while (0) + +#define CHECKEQ(realRet, expectRet, retValue, ...) 
\ + do { \ + if ((realRet) == (expectRet)) { \ + printf(__VA_ARGS__); \ + return (retValue); \ + } \ + } while (0) + +OH_NN_ReturnCode BuildModel(OH_NNModel** pmodel) { - auto ret = OH_NNDevice_GetAllDevicesID(nullptr, nullptr); - if (ret != OH_NN_INVALID_PARAMETER) { - LOGE("[NNCoreFuzzTest]OH_NNDevice_GetAllDevicesID should return OH_NN_INVALID_PARAMETER."); - return false; - } + // 创建模型实例model,进行模型构造 + OH_NNModel* model = OH_NNModel_Construct(); + CHECKEQ(model, nullptr, OH_NN_NULL_PTR, "Create model failed."); - const size_t* allDevicesID = new size_t[1]; - ret = OH_NNDevice_GetAllDevicesID(&allDevicesID, nullptr); - if (ret != OH_NN_INVALID_PARAMETER) { - LOGE("[NNCoreFuzzTest]OH_NNDevice_GetAllDevicesID with allDevicesID should return OH_NN_INVALID_PARAMETER."); - delete[] allDevicesID; - return false; - } + // 添加Add算子的第一个输入张量,类型为float32,张量形状为[1, 2, 2, 3] + NN_TensorDesc* tensorDesc = OH_NNTensorDesc_Create(); + CHECKEQ(tensorDesc, nullptr, OH_NN_NULL_PTR, "Create TensorDesc failed."); + + int32_t inputDims[4] = {1, 2, 2, 3}; + auto returnCode = OH_NNTensorDesc_SetShape(tensorDesc, inputDims, 4); + CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set TensorDesc shape failed."); + + returnCode = OH_NNTensorDesc_SetDataType(tensorDesc, OH_NN_FLOAT32); + CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set TensorDesc data type failed."); + + returnCode = OH_NNTensorDesc_SetFormat(tensorDesc, OH_NN_FORMAT_NONE); + CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set TensorDesc format failed."); + + returnCode = OH_NNModel_AddTensorToModel(model, tensorDesc); + CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Add first TensorDesc to model failed."); + + returnCode = OH_NNModel_SetTensorType(model, 0, OH_NN_TENSOR); + CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set model tensor type failed."); + + // 添加Add算子的第二个输入张量,类型为float32,张量形状为[1, 2, 2, 3] + tensorDesc = OH_NNTensorDesc_Create(); + CHECKEQ(tensorDesc, nullptr, OH_NN_NULL_PTR, "Create TensorDesc failed."); + + returnCode = OH_NNTensorDesc_SetShape(tensorDesc, inputDims, 4); + CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set TensorDesc shape failed."); + + returnCode = OH_NNTensorDesc_SetDataType(tensorDesc, OH_NN_FLOAT32); + CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set TensorDesc data type failed."); + + returnCode = OH_NNTensorDesc_SetFormat(tensorDesc, OH_NN_FORMAT_NONE); + CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set TensorDesc format failed."); + + returnCode = OH_NNModel_AddTensorToModel(model, tensorDesc); + CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Add second TensorDesc to model failed."); + + returnCode = OH_NNModel_SetTensorType(model, 1, OH_NN_TENSOR); + CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set model tensor type failed."); + + // 添加Add算子的参数张量,该参数张量用于指定激活函数的类型,张量的数据类型为int8。 + tensorDesc = OH_NNTensorDesc_Create(); + CHECKEQ(tensorDesc, nullptr, OH_NN_NULL_PTR, "Create TensorDesc failed."); + + int32_t activationDims = 1; + returnCode = OH_NNTensorDesc_SetShape(tensorDesc, &activationDims, 1); + CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set TensorDesc shape failed."); + + returnCode = OH_NNTensorDesc_SetDataType(tensorDesc, OH_NN_INT8); + CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set TensorDesc data type failed."); + + returnCode = OH_NNTensorDesc_SetFormat(tensorDesc, OH_NN_FORMAT_NONE); + CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set TensorDesc format failed."); + + returnCode = OH_NNModel_AddTensorToModel(model, tensorDesc); + CHECKNEQ(returnCode, 
OH_NN_SUCCESS, returnCode, "Add second TensorDesc to model failed."); + + returnCode = OH_NNModel_SetTensorType(model, 2, OH_NN_ADD_ACTIVATIONTYPE); + CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set model tensor type failed."); + + // 将激活函数类型设置为OH_NNBACKEND_FUSED_NONE,表示该算子不添加激活函数。 + int8_t activationValue = OH_NN_FUSED_NONE; + returnCode = OH_NNModel_SetTensorData(model, 2, &activationValue, sizeof(int8_t)); + CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set model tensor data failed."); + + // 设置Add算子的输出张量,类型为float32,张量形状为[1, 2, 2, 3] + tensorDesc = OH_NNTensorDesc_Create(); + CHECKEQ(tensorDesc, nullptr, OH_NN_NULL_PTR, "Create TensorDesc failed."); + + returnCode = OH_NNTensorDesc_SetShape(tensorDesc, inputDims, 4); + CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set TensorDesc shape failed."); + + returnCode = OH_NNTensorDesc_SetDataType(tensorDesc, OH_NN_FLOAT32); + CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set TensorDesc data type failed."); + + returnCode = OH_NNTensorDesc_SetFormat(tensorDesc, OH_NN_FORMAT_NONE); + CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set TensorDesc format failed."); + + returnCode = OH_NNModel_AddTensorToModel(model, tensorDesc); + CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Add forth TensorDesc to model failed."); + + returnCode = OH_NNModel_SetTensorType(model, 3, OH_NN_TENSOR); + CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set model tensor type failed."); + + // 指定Add算子的输入张量、参数张量和输出张量的索引 + uint32_t inputIndicesValues[2] = {0, 1}; + uint32_t paramIndicesValues = 2; + uint32_t outputIndicesValues = 3; + OH_NN_UInt32Array paramIndices = {¶mIndicesValues, 1 * 4}; + OH_NN_UInt32Array inputIndices = {inputIndicesValues, 2 * 4}; + OH_NN_UInt32Array outputIndices = {&outputIndicesValues, 1 * 4}; + + // 向模型实例添加Add算子 + returnCode = OH_NNModel_AddOperation(model, OH_NN_OPS_ADD, ¶mIndices, &inputIndices, &outputIndices); + CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Add operation to model failed."); + + // 设置模型实例的输入张量、输出张量的索引 + returnCode = OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices); + CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Specify model inputs and outputs failed."); + + // 完成模型实例的构建 + returnCode = OH_NNModel_Finish(model); + CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Build model failed."); + + // 返回模型实例 + *pmodel = model; + return OH_NN_SUCCESS; +} + +void NNCoreDeviceFuzzTest(const uint8_t* data, size_t size) +{ + OH_NNDevice_GetAllDevicesID(nullptr, nullptr); + + const size_t* allDevicesID = new size_t[SIZE_ONE]; + OH_NNDevice_GetAllDevicesID(&allDevicesID, nullptr); delete[] allDevicesID; const size_t *allDevicesIDNull = nullptr; - ret = OH_NNDevice_GetAllDevicesID(&allDevicesIDNull, nullptr); - if (ret != OH_NN_INVALID_PARAMETER) { - LOGE("[NNCoreFuzzTest]OH_NNDevice_GetAllDevicesID with null deviceCount should return OH_NN_INVALID_PARAMETER."); - return false; - } + OH_NNDevice_GetAllDevicesID(&allDevicesIDNull, nullptr); uint32_t deviceCount = 0; - ret = OH_NNDevice_GetAllDevicesID(&allDevicesIDNull, &deviceCount); + OH_NNDevice_GetAllDevicesID(&allDevicesIDNull, &deviceCount); - ret = OH_NNDevice_GetName(0, nullptr); - if (ret != OH_NN_INVALID_PARAMETER) { - LOGE("[NNCoreFuzzTest]OH_NNDevice_GetName with null name should return OH_NN_INVALID_PARAMETER."); - return false; - } + Data dataFuzz(data, size); + size_t deviceid = dataFuzz.GetData(); + const char* name = nullptr; + OH_NNDevice_GetName(deviceid, &name); + OH_NN_DeviceType deviceType; + OH_NNDevice_GetType(deviceid, 
&deviceType); +} - std::string name = "test"; - const char* nameC = name.c_str(); - ret = OH_NNDevice_GetName(0, &nameC); - if (ret != OH_NN_INVALID_PARAMETER) { - LOGE("[NNCoreFuzzTest]OH_NNDevice_GetName with invalid name should return OH_NN_INVALID_PARAMETER."); - return false; - } +bool NNCoreCompilationConstructTest(const uint8_t* data, size_t size) +{ + Data dataFuzz(data, size); + InnerModel model = dataFuzz.GetData(); + OH_NNCompilation_Construct(reinterpret_cast(&model)); + + size_t bufferSize = BUFFER_SIZE; + auto bufferAddr = dataFuzz.GetSpecificData(0, bufferSize); + std::string path((char*)bufferAddr, (char*)bufferAddr + bufferSize); + OH_NNCompilation_ConstructWithOfflineModelFile(path.c_str()); + + OH_NNCompilation_ConstructWithOfflineModelBuffer(bufferAddr, bufferSize); + + Compilation compilation = dataFuzz.GetData(); + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + size_t modelSize = 0; + char buffer[SIZE_ONE]; + OH_NNCompilation_ExportCacheToBuffer(nnCompilation, buffer, SIZE_ONE, &modelSize); + + OH_NNCompilation_ImportCacheFromBuffer(nnCompilation, buffer, SIZE_ONE); + + OH_NNCompilation_Build(nnCompilation); + + OH_NNCompilation_Destroy(&nnCompilation); - const char* nnameNullC = nullptr; - ret = OH_NNDevice_GetName(0, &nnameNullC); - if (ret != OH_NN_FAILED) { - LOGE("[NNCoreFuzzTest]OH_NNDevice_GetName with invalid deviceid should return OH_NN_FAILED."); + OH_NNModel* validModel; + if (BuildModel(&validModel) != OH_NN_SUCCESS) { + LOGE("NNCoreCompilationConstructTest failed, build model failed."); return false; } - - ret = OH_NNDevice_GetType(0, nullptr); - if (ret != OH_NN_INVALID_PARAMETER) { - LOGE("[NNCoreFuzzTest]OH_NNDevice_GetType with invalid device id should return OH_NN_INVALID_PARAMETER."); + OH_NNCompilation* validCompilation = OH_NNCompilation_Construct(validModel); + OH_NNModel_Destroy(&validModel); + if (validCompilation == nullptr) { + LOGE("NNCoreCompilationConstructTest failed, construct valid compilation failed."); return false; } + OH_NNCompilation_AddExtensionConfig(validCompilation, "test", bufferAddr, bufferSize); + + size_t deviceid = dataFuzz.GetData(); + OH_NNCompilation_SetDevice(validCompilation, deviceid); + + OH_NNCompilation_SetCache(validCompilation, path.c_str(), CACHE_VERSION); + + OH_NN_PerformanceMode perf = dataFuzz.GetData(); + OH_NNCompilation_SetPerformanceMode(validCompilation, perf); + + OH_NN_Priority priority = dataFuzz.GetData(); + OH_NNCompilation_SetPriority(validCompilation, priority); + + bool enableFloat16 = dataFuzz.GetData(); + OH_NNCompilation_EnableFloat16(validCompilation, enableFloat16); + OH_NNCompilation_Destroy(&validCompilation); + return true; } -bool NNCoreCompilationConstructTest() +bool NNCoreTensorDescFuzzTest(const uint8_t* data, size_t size) { - auto ret = OH_NNCompilation_Construct(nullptr); - if (ret != nullptr) { - LOGE("[NNCoreFuzzTest]OH_NNCompilation_Construct with nullptr should return nullptr."); + NN_TensorDesc* tensorDesc = OH_NNTensorDesc_Create(); + if (tensorDesc == nullptr) { + LOGE("NNCoreTensorDescFuzzTest failed, create tensor desc failed."); return false; } + + Data dataFuzz(data, size); + size_t bufferSize = BUFFER_SIZE; + auto bufferAddr = dataFuzz.GetSpecificData(0, bufferSize); + std::string path((char*)bufferAddr, (char*)bufferAddr + bufferSize); + OH_NNTensorDesc_SetName(tensorDesc, path.c_str()); + const char* name = nullptr; + OH_NNTensorDesc_GetName(tensorDesc, &name); + + OH_NN_DataType dataType = dataFuzz.GetData(); + 
OH_NNTensorDesc_SetDataType(tensorDesc, dataType); + OH_NN_DataType dataTypeOut; + OH_NNTensorDesc_GetDataType(tensorDesc, &dataTypeOut); + + int32_t dim[SIZE_ONE] = {dataFuzz.GetData()}; + OH_NNTensorDesc_SetShape(tensorDesc, dim, SIZE_ONE); + int32_t* shape = nullptr; + size_t shapeLength = 0; + OH_NNTensorDesc_GetShape(tensorDesc, &shape, &shapeLength); + + OH_NN_Format format = dataFuzz.GetData(); + OH_NNTensorDesc_SetFormat(tensorDesc, format); + OH_NN_Format formatOut; + OH_NNTensorDesc_GetFormat(tensorDesc, &formatOut); + + size_t elementSize = 0; + OH_NNTensorDesc_GetElementCount(tensorDesc, &elementSize); + size_t byteSize = 0; + OH_NNTensorDesc_GetByteSize(tensorDesc, &byteSize); + + OH_NNTensorDesc_Destroy(&tensorDesc); + return true; } bool NNCoreFuzzTest(const uint8_t* data, size_t size) { - if (!NNCoreDeviceFuzzTest()) { - return false; + bool ret = true; + NNCoreDeviceFuzzTest(data, size); + if (!NNCoreCompilationConstructTest(data, size)) { + ret = false; + } + if (!NNCoreTensorDescFuzzTest(data, size)) { + ret = false; } - return true; + return ret; } } // namespace NeuralNetworkRuntime } // namespace OHOS @@ -100,6 +296,5 @@ bool NNCoreFuzzTest(const uint8_t* data, size_t size) /* Fuzzer entry point */ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { - OHOS::NeuralNetworkRuntime::NNCoreFuzzTest(data, size); - return 0; + return OHOS::NeuralNetworkRuntime::NNCoreFuzzTest(data, size); } \ No newline at end of file -- Gitee From 0976e3f14e696f606e74c330d34ea3ad59bf756f Mon Sep 17 00:00:00 2001 From: w30052974 Date: Sat, 18 May 2024 16:21:49 +0800 Subject: [PATCH 03/10] fuzztest add Signed-off-by: w30052974 --- test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp b/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp index abd6368..68a6b15 100644 --- a/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp +++ b/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp @@ -287,6 +287,12 @@ bool NNCoreFuzzTest(const uint8_t* data, size_t size) if (!NNCoreTensorDescFuzzTest(data, size)) { ret = false; } + if (!NNCoreTensorFuzzTest(data, size)) { + ret = false; + } + if (!NNCoreExecutorFuzzTest(data, size)) { + ret = false; + } return ret; } -- Gitee From 5e8e4250590f2a8191ba27cb0d5f575494ec2edc Mon Sep 17 00:00:00 2001 From: w30052974 Date: Sat, 18 May 2024 16:33:11 +0800 Subject: [PATCH 04/10] fuzztest add Signed-off-by: w30052974 --- .../nncore_fuzzer.cpp | 89 +++++++++++++++++++ 1 file changed, 89 insertions(+) diff --git a/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp b/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp index 68a6b15..949c225 100644 --- a/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp +++ b/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp @@ -277,6 +277,95 @@ bool NNCoreTensorDescFuzzTest(const uint8_t* data, size_t size) return true; } +bool NNCoreTensorFuzzTest(const uint8_t* data, size_t size) +{ + Data dataFuzz(data, size); + size_t deviceId = dataFuzz.GetData(); + NN_TensorDesc* tensorDesc = OH_NNTensorDesc_Create(); + int32_t inputDims[4] = {1, 2, 2, 3}; + OH_NNTensorDesc_SetShape(tensorDesc, inputDims, 4); + OH_NNTensorDesc_SetDataType(tensorDesc, OH_NN_FLOAT32); + OH_NNTensorDesc_SetFormat(tensorDesc, OH_NN_FORMAT_NONE); + OH_NNModel_AddTensorToModel(model, tensorDesc); + rOH_NNModel_SetTensorType(model, 0, OH_NN_TENSOR); + NNTensor* nnTensor = 
OH_NNTensor_create(deviceId, tensorDesc); + + size_t tensorSize = dataFuzz.GetData(); + nnTensor = OH_NNTensor_create(deviceId, tensorDesc, tensorSize); + + int fd = dataFuzz.GetData(); + size_t offset = dataFuzz.GetData(); + nnTensor = OH_NNTensor_create(deviceId, tensorDesc, fd, tensorSize, offset); + + OH_NNTensor_GetTensorDesc(nnTensor); + + OH_NNTensor_GetDataBuffer(nnTensor); + + OH_NNTensor_Getfd(nnTensor, &fd); + + OH_NNTensor_GetSize(nnTensor, &tensorSize); + + OH_NNTensor_GetOffset(nnTensor, &offset); + + OH_NNTensor_Destroy(&nnTensor); + return true; +} + +bool NNCoreExecutorFuzzTest(const uint8_t* data, size_t size) +{ + Data dataFuzz(data, size); + Compilation compilation = dataFuzz.GetData(); + OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + OH_NNExecutor* nnExecutor = OH_NNExecutor_Construct(nnCompilation); + + uint32_t outputIndex = dataFuzz.GetData(); + int32_t *shape = nullptr; + uint32_t shapeLenth = 0; + OH_NNExecutor_GetOutputShape(nnExecutor, outputIndex, &shape, &shapeLenth); + + size_t inputCount = 0; + OH_NNExecutor_GetInputCount(nnExecutor, &inputCount); + + size_t outputCount = 0; + OH_NNExecutor_GetOutputCount(nnExecutor, &outputCount); + + size_t index = dataFuzz.GetData(); + NN_TensorDesc* inputTensorDesc = OH_NNExecutor_CreateInputTensorDesc(nnExecutor, index); + + NN_TensorDesc* outputTensorDesc = OH_NNExecutor_CreateOutputTensorDesc(nnExecutor, index); + + size_t *minInputDims = nullptr; + size_t *maxInputDIms = nullptr; + size_t shapeLength = 0; + OH_NNExecutor_GetInputDimRange(nnExecutor, index, &minInputDims, &maxInputDIms, &shapeLength); + + NN_OnRunDone onRunDone = dataFuzz.GetData(); + OH_NNExecutor_SetOnRunDone(nnExecutor, onRunDone); + + NN_OnServiceDied onServiceDied = dataFuzz.GetData(); + OH_NNExecutor_SetOnServiceDied(nnExecutor, onServiceDied); + + vector inputTensors, outputTensors; + inputCount = dataFuzz.GetData(); + outputCount = dataFuzz.GetData(); + for (size_t i = 0; i < inputCount; ++i) { + NN_Tensor* inputTensor = dataFuzz.GetData(); + inputTensors.emplace_back(inputTensor); + } + for (size_t i = 0; i < outputCount; ++i) { + NN_Tensor* outputTensor = dataFuzz.GetData(); + outputTensors.emplace_back(outputTensor); + } + OH_NNExecutor_RunSync(nnExecutor, inputTensors.data(), inputCount, outputTensors.data(), outputCount); + + int32_t timeout = dataFuzz.GetData(); + void* userData = dataFuzz.GetData(); + OH_NNExecutor_RunAsync(nnExecutor, inputTensors.data(), inputCount, outputTensors.data(), outputCount, timeout, userData); + + OH_NNExecutor_Destroy(&nnExecutor); + return true; +} + bool NNCoreFuzzTest(const uint8_t* data, size_t size) { bool ret = true; -- Gitee From 1aac0eb103fa3b52765538873bf6261b15535763 Mon Sep 17 00:00:00 2001 From: w30052974 Date: Mon, 20 May 2024 20:04:05 +0800 Subject: [PATCH 05/10] fuzztest add Signed-off-by: w30052974 --- .../nncore_fuzzer.cpp | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp b/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp index 949c225..654e8b9 100644 --- a/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp +++ b/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp @@ -283,25 +283,27 @@ bool NNCoreTensorFuzzTest(const uint8_t* data, size_t size) size_t deviceId = dataFuzz.GetData(); NN_TensorDesc* tensorDesc = OH_NNTensorDesc_Create(); int32_t inputDims[4] = {1, 2, 2, 3}; + OH_NNModel* model = nullptr; + BuildModel(&model); 
OH_NNTensorDesc_SetShape(tensorDesc, inputDims, 4); OH_NNTensorDesc_SetDataType(tensorDesc, OH_NN_FLOAT32); OH_NNTensorDesc_SetFormat(tensorDesc, OH_NN_FORMAT_NONE); OH_NNModel_AddTensorToModel(model, tensorDesc); - rOH_NNModel_SetTensorType(model, 0, OH_NN_TENSOR); - NNTensor* nnTensor = OH_NNTensor_create(deviceId, tensorDesc); + OH_NNModel_SetTensorType(model, 0, OH_NN_TENSOR); + NN_Tensor* nnTensor = OH_NNTensor_Create(deviceId, tensorDesc); size_t tensorSize = dataFuzz.GetData(); - nnTensor = OH_NNTensor_create(deviceId, tensorDesc, tensorSize); + nnTensor = OH_NNTensor_CreateWithSize(deviceId, tensorDesc, tensorSize); int fd = dataFuzz.GetData(); size_t offset = dataFuzz.GetData(); - nnTensor = OH_NNTensor_create(deviceId, tensorDesc, fd, tensorSize, offset); + nnTensor = OH_NNTensor_CreateWithFd(deviceId, tensorDesc, fd, tensorSize, offset); OH_NNTensor_GetTensorDesc(nnTensor); OH_NNTensor_GetDataBuffer(nnTensor); - OH_NNTensor_Getfd(nnTensor, &fd); + OH_NNTensor_GetFd(nnTensor, &fd); OH_NNTensor_GetSize(nnTensor, &tensorSize); @@ -330,9 +332,9 @@ bool NNCoreExecutorFuzzTest(const uint8_t* data, size_t size) OH_NNExecutor_GetOutputCount(nnExecutor, &outputCount); size_t index = dataFuzz.GetData(); - NN_TensorDesc* inputTensorDesc = OH_NNExecutor_CreateInputTensorDesc(nnExecutor, index); + OH_NNExecutor_CreateInputTensorDesc(nnExecutor, index); - NN_TensorDesc* outputTensorDesc = OH_NNExecutor_CreateOutputTensorDesc(nnExecutor, index); + OH_NNExecutor_CreateOutputTensorDesc(nnExecutor, index); size_t *minInputDims = nullptr; size_t *maxInputDIms = nullptr; @@ -345,7 +347,7 @@ bool NNCoreExecutorFuzzTest(const uint8_t* data, size_t size) NN_OnServiceDied onServiceDied = dataFuzz.GetData(); OH_NNExecutor_SetOnServiceDied(nnExecutor, onServiceDied); - vector inputTensors, outputTensors; + std::vector inputTensors, outputTensors; inputCount = dataFuzz.GetData(); outputCount = dataFuzz.GetData(); for (size_t i = 0; i < inputCount; ++i) { -- Gitee From 08a0ed10d8809076b1ce7c5bf02cf72c65b50e18 Mon Sep 17 00:00:00 2001 From: w30052974 Date: Fri, 24 May 2024 15:20:13 +0800 Subject: [PATCH 06/10] fuzztest add Signed-off-by: w30052974 --- test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp b/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp index 654e8b9..99c5d22 100644 --- a/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp +++ b/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp @@ -183,7 +183,7 @@ bool NNCoreCompilationConstructTest(const uint8_t* data, size_t size) { Data dataFuzz(data, size); InnerModel model = dataFuzz.GetData(); - OH_NNCompilation_Construct(reinterpret_cast(&model)); + OH_NNCompilation* compilationC = OH_NNCompilation_Construct(reinterpret_cast(&model)); size_t bufferSize = BUFFER_SIZE; auto bufferAddr = dataFuzz.GetSpecificData(0, bufferSize); @@ -202,7 +202,7 @@ bool NNCoreCompilationConstructTest(const uint8_t* data, size_t size) OH_NNCompilation_Build(nnCompilation); - OH_NNCompilation_Destroy(&nnCompilation); + OH_NNCompilation_Destroy(&compilationC); OH_NNModel* validModel; if (BuildModel(&validModel) != OH_NN_SUCCESS) { -- Gitee From 094c739084b2263350eb4db066fcfe9f431c5bfa Mon Sep 17 00:00:00 2001 From: w30052974 Date: Mon, 27 May 2024 14:54:44 +0800 Subject: [PATCH 07/10] fuzztest add Signed-off-by: w30052974 --- .../nncore_fuzzer.cpp | 76 +++++++------------ 1 file changed, 27 insertions(+), 49 
deletions(-) diff --git a/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp b/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp index 99c5d22..ab4e788 100644 --- a/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp +++ b/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp @@ -25,6 +25,9 @@ namespace NeuralNetworkRuntime { const size_t SIZE_ONE = 1; const size_t CACHE_VERSION = 1; const size_t BUFFER_SIZE = 32; +const size_t TENSOR_TWO = 2; +const size_t TENSOR_THREE = 3; +const size_t SHAPE_LENTH = 4; // 返回值检查宏 #define CHECKNEQ(realRet, expectRet, retValue, ...) \ @@ -43,18 +46,12 @@ const size_t BUFFER_SIZE = 32; } \ } while (0) -OH_NN_ReturnCode BuildModel(OH_NNModel** pmodel) +void AddTensorDescToModel(OH_NNModel* model, int32_t* inputDims, size_t shapeLength, size_t inputIndex) { - // 创建模型实例model,进行模型构造 - OH_NNModel* model = OH_NNModel_Construct(); - CHECKEQ(model, nullptr, OH_NN_NULL_PTR, "Create model failed."); - - // 添加Add算子的第一个输入张量,类型为float32,张量形状为[1, 2, 2, 3] NN_TensorDesc* tensorDesc = OH_NNTensorDesc_Create(); CHECKEQ(tensorDesc, nullptr, OH_NN_NULL_PTR, "Create TensorDesc failed."); - int32_t inputDims[4] = {1, 2, 2, 3}; - auto returnCode = OH_NNTensorDesc_SetShape(tensorDesc, inputDims, 4); + auto returnCode = OH_NNTensorDesc_SetShape(tensorDesc, inputDims, shapeLength); CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set TensorDesc shape failed."); returnCode = OH_NNTensorDesc_SetDataType(tensorDesc, OH_NN_FLOAT32); @@ -66,34 +63,29 @@ OH_NN_ReturnCode BuildModel(OH_NNModel** pmodel) returnCode = OH_NNModel_AddTensorToModel(model, tensorDesc); CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Add first TensorDesc to model failed."); - returnCode = OH_NNModel_SetTensorType(model, 0, OH_NN_TENSOR); + returnCode = OH_NNModel_SetTensorType(model, inputIndex, OH_NN_TENSOR); CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set model tensor type failed."); +} - // 添加Add算子的第二个输入张量,类型为float32,张量形状为[1, 2, 2, 3] - tensorDesc = OH_NNTensorDesc_Create(); - CHECKEQ(tensorDesc, nullptr, OH_NN_NULL_PTR, "Create TensorDesc failed."); - - returnCode = OH_NNTensorDesc_SetShape(tensorDesc, inputDims, 4); - CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set TensorDesc shape failed."); - - returnCode = OH_NNTensorDesc_SetDataType(tensorDesc, OH_NN_FLOAT32); - CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set TensorDesc data type failed."); - - returnCode = OH_NNTensorDesc_SetFormat(tensorDesc, OH_NN_FORMAT_NONE); - CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set TensorDesc format failed."); +OH_NN_ReturnCode BuildModel(OH_NNModel** pmodel) +{ + // 创建模型实例model,进行模型构造 + OH_NNModel* model = OH_NNModel_Construct(); + CHECKEQ(model, nullptr, OH_NN_NULL_PTR, "Create model failed."); - returnCode = OH_NNModel_AddTensorToModel(model, tensorDesc); - CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Add second TensorDesc to model failed."); + // 添加Add算子的第一个输入张量,类型为float32,张量形状为[1, 2, 2, 3] + int32_t inputDims[4] = {1, 2, 2, 3}; + AddTensorDescToModel(model, inputDims, SHAPE_LENTH, 0); - returnCode = OH_NNModel_SetTensorType(model, 1, OH_NN_TENSOR); - CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set model tensor type failed."); + // 添加Add算子的第二个输入张量,类型为float32,张量形状为[1, 2, 2, 3] + AddTensorDescToModel(model, inputDims, SHAPE_LENTH, 1); // 添加Add算子的参数张量,该参数张量用于指定激活函数的类型,张量的数据类型为int8。 - tensorDesc = OH_NNTensorDesc_Create(); + NN_TensorDesc*tensorDesc = OH_NNTensorDesc_Create(); CHECKEQ(tensorDesc, nullptr, OH_NN_NULL_PTR, "Create TensorDesc failed."); int32_t 
activationDims = 1; - returnCode = OH_NNTensorDesc_SetShape(tensorDesc, &activationDims, 1); + auto returnCode = OH_NNTensorDesc_SetShape(tensorDesc, &activationDims, 1); CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set TensorDesc shape failed."); returnCode = OH_NNTensorDesc_SetDataType(tensorDesc, OH_NN_INT8); @@ -105,32 +97,16 @@ OH_NN_ReturnCode BuildModel(OH_NNModel** pmodel) returnCode = OH_NNModel_AddTensorToModel(model, tensorDesc); CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Add second TensorDesc to model failed."); - returnCode = OH_NNModel_SetTensorType(model, 2, OH_NN_ADD_ACTIVATIONTYPE); + returnCode = OH_NNModel_SetTensorType(model, TENSOR_TWO, OH_NN_ADD_ACTIVATIONTYPE); CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set model tensor type failed."); // 将激活函数类型设置为OH_NNBACKEND_FUSED_NONE,表示该算子不添加激活函数。 int8_t activationValue = OH_NN_FUSED_NONE; - returnCode = OH_NNModel_SetTensorData(model, 2, &activationValue, sizeof(int8_t)); + returnCode = OH_NNModel_SetTensorData(model, TENSOR_TWO, &activationValue, sizeof(int8_t)); CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set model tensor data failed."); // 设置Add算子的输出张量,类型为float32,张量形状为[1, 2, 2, 3] - tensorDesc = OH_NNTensorDesc_Create(); - CHECKEQ(tensorDesc, nullptr, OH_NN_NULL_PTR, "Create TensorDesc failed."); - - returnCode = OH_NNTensorDesc_SetShape(tensorDesc, inputDims, 4); - CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set TensorDesc shape failed."); - - returnCode = OH_NNTensorDesc_SetDataType(tensorDesc, OH_NN_FLOAT32); - CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set TensorDesc data type failed."); - - returnCode = OH_NNTensorDesc_SetFormat(tensorDesc, OH_NN_FORMAT_NONE); - CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set TensorDesc format failed."); - - returnCode = OH_NNModel_AddTensorToModel(model, tensorDesc); - CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Add forth TensorDesc to model failed."); - - returnCode = OH_NNModel_SetTensorType(model, 3, OH_NN_TENSOR); - CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set model tensor type failed."); + AddTensorDescToModel(model, inputDims, SHAPE_LENTH, TENSOR_THREE); // 指定Add算子的输入张量、参数张量和输出张量的索引 uint32_t inputIndicesValues[2] = {0, 1}; @@ -285,7 +261,7 @@ bool NNCoreTensorFuzzTest(const uint8_t* data, size_t size) int32_t inputDims[4] = {1, 2, 2, 3}; OH_NNModel* model = nullptr; BuildModel(&model); - OH_NNTensorDesc_SetShape(tensorDesc, inputDims, 4); + OH_NNTensorDesc_SetShape(tensorDesc, inputDims, SHAPE_LENTH); OH_NNTensorDesc_SetDataType(tensorDesc, OH_NN_FLOAT32); OH_NNTensorDesc_SetFormat(tensorDesc, OH_NN_FORMAT_NONE); OH_NNModel_AddTensorToModel(model, tensorDesc); @@ -347,7 +323,8 @@ bool NNCoreExecutorFuzzTest(const uint8_t* data, size_t size) NN_OnServiceDied onServiceDied = dataFuzz.GetData(); OH_NNExecutor_SetOnServiceDied(nnExecutor, onServiceDied); - std::vector inputTensors, outputTensors; + std::vector inputTensors; + std::vector outputTensors; inputCount = dataFuzz.GetData(); outputCount = dataFuzz.GetData(); for (size_t i = 0; i < inputCount; ++i) { @@ -362,7 +339,8 @@ bool NNCoreExecutorFuzzTest(const uint8_t* data, size_t size) int32_t timeout = dataFuzz.GetData(); void* userData = dataFuzz.GetData(); - OH_NNExecutor_RunAsync(nnExecutor, inputTensors.data(), inputCount, outputTensors.data(), outputCount, timeout, userData); + OH_NNExecutor_RunAsync(nnExecutor, inputTensors.data(), inputCount, outputTensors.data(), + outputCount, timeout, userData); OH_NNExecutor_Destroy(&nnExecutor); return true; -- Gitee From 
e340ac95cde7d1085e3cd105cca9f9a294773ceb Mon Sep 17 00:00:00 2001 From: w30052974 Date: Mon, 27 May 2024 15:03:28 +0800 Subject: [PATCH 08/10] fuzztest add Signed-off-by: w30052974 --- test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp b/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp index ab4e788..35d2576 100644 --- a/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp +++ b/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp @@ -46,7 +46,7 @@ const size_t SHAPE_LENTH = 4; } \ } while (0) -void AddTensorDescToModel(OH_NNModel* model, int32_t* inputDims, size_t shapeLength, size_t inputIndex) +OH_NN_ReturnCode AddTensorDescToModel(OH_NNModel* model, int32_t* inputDims, size_t shapeLength, size_t inputIndex) { NN_TensorDesc* tensorDesc = OH_NNTensorDesc_Create(); CHECKEQ(tensorDesc, nullptr, OH_NN_NULL_PTR, "Create TensorDesc failed."); @@ -65,6 +65,8 @@ void AddTensorDescToModel(OH_NNModel* model, int32_t* inputDims, size_t shapeLen returnCode = OH_NNModel_SetTensorType(model, inputIndex, OH_NN_TENSOR); CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set model tensor type failed."); + + return OH_NN_SUCCESS; } OH_NN_ReturnCode BuildModel(OH_NNModel** pmodel) -- Gitee From a4b3415c62b360606af0c50d9f5257dcb297b043 Mon Sep 17 00:00:00 2001 From: w30052974 Date: Mon, 27 May 2024 15:06:22 +0800 Subject: [PATCH 09/10] fuzztest add Signed-off-by: w30052974 --- .../neural_network_core_fuzzer/nncore_fuzzer.cpp | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp b/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp index 35d2576..7c7502f 100644 --- a/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp +++ b/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp @@ -61,7 +61,7 @@ OH_NN_ReturnCode AddTensorDescToModel(OH_NNModel* model, int32_t* inputDims, siz CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set TensorDesc format failed."); returnCode = OH_NNModel_AddTensorToModel(model, tensorDesc); - CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Add first TensorDesc to model failed."); + CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Add TensorDesc to model failed."); returnCode = OH_NNModel_SetTensorType(model, inputIndex, OH_NN_TENSOR); CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set model tensor type failed."); @@ -77,17 +77,19 @@ OH_NN_ReturnCode BuildModel(OH_NNModel** pmodel) // 添加Add算子的第一个输入张量,类型为float32,张量形状为[1, 2, 2, 3] int32_t inputDims[4] = {1, 2, 2, 3}; - AddTensorDescToModel(model, inputDims, SHAPE_LENTH, 0); + auto returnCode = AddTensorDescToModel(model, inputDims, SHAPE_LENTH, 0); + CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Add first TensorDesc to model failed."); // 添加Add算子的第二个输入张量,类型为float32,张量形状为[1, 2, 2, 3] - AddTensorDescToModel(model, inputDims, SHAPE_LENTH, 1); + returnCode = AddTensorDescToModel(model, inputDims, SHAPE_LENTH, 1); + CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Add second TensorDesc to model failed."); // 添加Add算子的参数张量,该参数张量用于指定激活函数的类型,张量的数据类型为int8。 NN_TensorDesc*tensorDesc = OH_NNTensorDesc_Create(); CHECKEQ(tensorDesc, nullptr, OH_NN_NULL_PTR, "Create TensorDesc failed."); int32_t activationDims = 1; - auto returnCode = OH_NNTensorDesc_SetShape(tensorDesc, &activationDims, 1); + returnCode = OH_NNTensorDesc_SetShape(tensorDesc, &activationDims, 1); CHECKNEQ(returnCode, 
OH_NN_SUCCESS, returnCode, "Set TensorDesc shape failed."); returnCode = OH_NNTensorDesc_SetDataType(tensorDesc, OH_NN_INT8); @@ -108,7 +110,8 @@ OH_NN_ReturnCode BuildModel(OH_NNModel** pmodel) CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set model tensor data failed."); // 设置Add算子的输出张量,类型为float32,张量形状为[1, 2, 2, 3] - AddTensorDescToModel(model, inputDims, SHAPE_LENTH, TENSOR_THREE); + returnCode = AddTensorDescToModel(model, inputDims, SHAPE_LENTH, TENSOR_THREE); + CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Add third TensorDesc to model failed."); // 指定Add算子的输入张量、参数张量和输出张量的索引 uint32_t inputIndicesValues[2] = {0, 1}; -- Gitee From 1df6425d0f9bfa702226d3150ab81135abdf255f Mon Sep 17 00:00:00 2001 From: w30052974 Date: Mon, 27 May 2024 15:24:54 +0800 Subject: [PATCH 10/10] fuzztest add Signed-off-by: w30052974 --- test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp b/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp index 7c7502f..f01dba5 100644 --- a/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp +++ b/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp @@ -85,7 +85,7 @@ OH_NN_ReturnCode BuildModel(OH_NNModel** pmodel) CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Add second TensorDesc to model failed."); // 添加Add算子的参数张量,该参数张量用于指定激活函数的类型,张量的数据类型为int8。 - NN_TensorDesc*tensorDesc = OH_NNTensorDesc_Create(); + NN_TensorDesc* tensorDesc = OH_NNTensorDesc_Create(); CHECKEQ(tensorDesc, nullptr, OH_NN_NULL_PTR, "Create TensorDesc failed."); int32_t activationDims = 1; -- Gitee
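
Note on the Data helper used by the harness in this series: the patches above only show the GetSpecificData() addition to test/fuzztest/data.h, while nncore_fuzzer.cpp also relies on a templated GetData<T>() accessor (e.g. dataFuzz.GetData<size_t>(), dataFuzz.GetData<OH_NN_PerformanceMode>()) whose definition is not part of the diff. The snippet below is a minimal sketch, assuming the usual byte-copy implementation of such an accessor; it is illustrative only and not the actual contents of data.h.

    // Hypothetical sketch of the GetData<T>() accessor implied by the calls in
    // nncore_fuzzer.cpp; the real test/fuzztest/data.h may differ.
    #include <cstdint>
    #include <cstring>

    class Data {
    public:
        Data(const uint8_t* data, size_t size) : dataFuzz(data), dataSize(size) {}

        // Copy up to sizeof(T) raw bytes from the current read position into a
        // default-constructed T. If the remaining input is shorter than T, the
        // tail of the value stays zero-initialized and reading stops at the end.
        template<class T>
        T GetData()
        {
            T object{};
            size_t objectSize = sizeof(object);
            size_t remain = (dataSize > dataPos) ? (dataSize - dataPos) : 0;
            size_t copySize = (objectSize <= remain) ? objectSize : remain;
            if (dataFuzz != nullptr && copySize != 0) {
                std::memcpy(&object, dataFuzz + dataPos, copySize);
                dataPos += copySize;
            }
            return object;
        }

    private:
        const uint8_t* dataFuzz {nullptr};
        size_t dataSize {0};
        size_t dataPos {0};
    };

With a helper of this shape, `Data dataFuzz(data, size); size_t deviceid = dataFuzz.GetData<size_t>();` turns the fuzzer-provided buffer into deterministic typed values, which is what the device, compilation, tensor-desc, tensor and executor tests in NNCoreFuzzTest feed into the OH_NN* C APIs.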