From 9ba54fc53065df528514984fee640b5cca204271 Mon Sep 17 00:00:00 2001 From: w30052974 Date: Mon, 31 Mar 2025 15:48:28 +0800 Subject: [PATCH 1/5] =?UTF-8?q?nnrt=205.1=E6=9E=B6=E6=9E=84=E6=95=B4?= =?UTF-8?q?=E6=94=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: w30052974 --- bundle.json | 4 +- .../native/neural_network_core/cpp_type.h | 1 + .../neural_network_core.cpp | 23 +- .../neural_network_core/nnrt_client.cpp | 1 + .../native/neural_network_core/nnrt_client.h | 1 + .../neural_network_core/tensor_desc.cpp | 6 +- .../native/neural_network_runtime/BUILD.gn | 1 + .../native/neural_network_runtime/device.h | 1 + .../hdi_device_v1_0.cpp | 16 +- .../neural_network_runtime/hdi_device_v1_0.h | 1 + .../hdi_device_v2_0.cpp | 16 +- .../neural_network_runtime/hdi_device_v2_0.h | 1 + .../hdi_device_v2_1.cpp | 16 +- .../neural_network_runtime/hdi_device_v2_1.h | 1 + .../neural_network_runtime/inner_model.cpp | 33 ++- .../neural_network_runtime.cpp | 100 +++++++-- .../neural_network_runtime/nn_tensor.cpp | 5 + .../nncompiled_cache.cpp | 205 +++++++++++------- .../neural_network_runtime/nncompiled_cache.h | 16 +- .../neural_network_runtime/nncompiler.cpp | 75 +++---- .../neural_network_runtime/nnexecutor.cpp | 7 + test/unittest/components/BUILD.gn | 1 + .../components/nn_backend/nn_backend_test.cpp | 1 + .../nn_compiled_cache_test.cpp | 5 +- .../nn_compiler/nn_compiler_test.cpp | 1 + .../nn_executor/nn_executor_test.cpp | 1 + .../components/nn_tensor/nn_tensor_test.cpp | 1 + .../neural_network_core_test.cpp | 1 + .../neural_network_runtime_test.cpp | 1 + 29 files changed, 351 insertions(+), 191 deletions(-) diff --git a/bundle.json b/bundle.json index bb80c79..89327cd 100644 --- a/bundle.json +++ b/bundle.json @@ -30,7 +30,9 @@ "hitrace", "ipc", "mindspore", - "init" + "init", + "json", + "jsoncpp" ], "third_party": [] }, diff --git a/frameworks/native/neural_network_core/cpp_type.h b/frameworks/native/neural_network_core/cpp_type.h index 3122297..690f417 100644 --- a/frameworks/native/neural_network_core/cpp_type.h +++ b/frameworks/native/neural_network_core/cpp_type.h @@ -44,6 +44,7 @@ enum class TuningStrategy { struct Buffer { void* data = nullptr; size_t length = 0; + int fd = -1; }; struct ExtensionConfig { diff --git a/frameworks/native/neural_network_core/neural_network_core.cpp b/frameworks/native/neural_network_core/neural_network_core.cpp index ee8aa28..659d83d 100644 --- a/frameworks/native/neural_network_core/neural_network_core.cpp +++ b/frameworks/native/neural_network_core/neural_network_core.cpp @@ -31,6 +31,7 @@ using namespace OHOS::NeuralNetworkRuntime; #define NNRT_API __attribute__((visibility("default"))) +const size_t INPUT_OUTPUT_MAX_INDICES = 200; NNRT_API OH_NN_ReturnCode OH_NNDevice_GetAllDevicesID(const size_t **allDevicesID, uint32_t *deviceCount) { @@ -750,6 +751,12 @@ NNRT_API OH_NN_ReturnCode OH_NNCompilation_Build(OH_NNCompilation *compilation) configContents.push_back('0'); } + NNRtServiceApi& nnrtService = NNRtServiceApi::GetInstance(); + bool retCode = nnrtService.PullUpDlliteService(); + if (!retCode) { + LOGI("OH_NNCompilation_Build failed, PullUpDlliteService failed."); + } + configs["isExceedRamLimit"] = configContents; compilationImpl->compiler->SetExtensionConfig(configs); @@ -1699,16 +1706,16 @@ NNRT_API OH_NN_ReturnCode OH_NNExecutor_RunSync(OH_NNExecutor *executor, LOGE("OH_NNExecutor_RunSync failed, inputTensor is nullptr."); return OH_NN_INVALID_PARAMETER; } - if (inputCount == 0) { - 
LOGE("OH_NNExecutor_RunSync failed, inputCount is 0."); + if (inputCount == 0 || (inputCount > INPUT_OUTPUT_MAX_INDICES)) { + LOGE("OH_NNExecutor_RunSync failed, inputCount is 0 or more than 200."); return OH_NN_INVALID_PARAMETER; } if (outputTensor == nullptr) { LOGE("OH_NNExecutor_RunSync failed, outputTensor is nullptr."); return OH_NN_INVALID_PARAMETER; } - if (outputCount == 0) { - LOGE("OH_NNExecutor_RunSync failed, outputCount is 0."); + if (outputCount == 0 || (outputCount > INPUT_OUTPUT_MAX_INDICES)) { + LOGE("OH_NNExecutor_RunSync failed, outputCount is 0 or more than 200."); return OH_NN_INVALID_PARAMETER; } @@ -1732,16 +1739,16 @@ NNRT_API OH_NN_ReturnCode OH_NNExecutor_RunAsync(OH_NNExecutor *executor, LOGE("OH_NNExecutor_RunAsync failed, inputTensor is nullptr."); return OH_NN_INVALID_PARAMETER; } - if (inputCount == 0) { - LOGE("OH_NNExecutor_RunAsync failed, inputCount is 0."); + if (inputCount == 0 || (inputCount > INPUT_OUTPUT_MAX_INDICES)) { + LOGE("OH_NNExecutor_RunAsync failed, inputCount is 0 or more than 200."); return OH_NN_INVALID_PARAMETER; } if (outputTensor == nullptr) { LOGE("OH_NNExecutor_RunAsync failed, outputTensor is nullptr."); return OH_NN_INVALID_PARAMETER; } - if (outputCount == 0) { - LOGE("OH_NNExecutor_RunAsync failed, outputCount is 0."); + if (outputCount == 0 || (outputCount > INPUT_OUTPUT_MAX_INDICES)) { + LOGE("OH_NNExecutor_RunAsync failed, outputCount is 0 or more than 200."); return OH_NN_INVALID_PARAMETER; } if (userData == nullptr) { diff --git a/frameworks/native/neural_network_core/nnrt_client.cpp b/frameworks/native/neural_network_core/nnrt_client.cpp index 4921ac1..1dcf43f 100644 --- a/frameworks/native/neural_network_core/nnrt_client.cpp +++ b/frameworks/native/neural_network_core/nnrt_client.cpp @@ -72,6 +72,7 @@ NNRtServiceApi& NNRtServiceApi::GetInstance() LoadFunction(libNNRtService, "Scheduling", &nnrtService.Scheduling); LoadFunction(libNNRtService, "UpdateModelLatency", &nnrtService.UpdateModelLatency); LoadFunction(libNNRtService, "Unload", &nnrtService.Unload); + LoadFunction(libNNRtService, "PullUpDlliteService", &nnrtService.PullUpDlliteService); nnrtService.m_serviceAvailable = true; return nnrtService; diff --git a/frameworks/native/neural_network_core/nnrt_client.h b/frameworks/native/neural_network_core/nnrt_client.h index 8b87c90..03fb8b0 100644 --- a/frameworks/native/neural_network_core/nnrt_client.h +++ b/frameworks/native/neural_network_core/nnrt_client.h @@ -41,6 +41,7 @@ public: int (*Scheduling)(uint32_t hiaiModelId, bool* needModelLatency, const char* cachePath) = nullptr; int (*UpdateModelLatency)(uint32_t hiaiModelId, int modelLatency) = nullptr; int (*Unload)(uint32_t hiaiModelId) = nullptr; + bool (*PullUpDlliteService)() = nullptr; private: bool m_serviceAvailable = false; diff --git a/frameworks/native/neural_network_core/tensor_desc.cpp b/frameworks/native/neural_network_core/tensor_desc.cpp index 2f1a8bf..b7ecf74 100644 --- a/frameworks/native/neural_network_core/tensor_desc.cpp +++ b/frameworks/native/neural_network_core/tensor_desc.cpp @@ -23,6 +23,7 @@ const uint32_t BIT8_TO_BYTE = 1; const uint32_t BIT16_TO_BYTE = 2; const uint32_t BIT32_TO_BYTE = 4; const uint32_t BIT64_TO_BYTE = 8; +const size_t SHAPE_MAX_NUM = 10; uint32_t GetTypeSize(OH_NN_DataType type) { @@ -114,10 +115,11 @@ OH_NN_ReturnCode TensorDesc::SetShape(const int32_t* shape, size_t shapeNum) LOGE("SetShape failed, shape is nullptr."); return OH_NN_INVALID_PARAMETER; } - if (shapeNum == 0) { - LOGE("SetShape failed, shapeNum is 0."); 
+ if (shapeNum == 0 || shapeNum > SHAPE_MAX_NUM) { + LOGE("SetShape failed, shapeNum is 0 or greater than 10."); return OH_NN_INVALID_PARAMETER; } + m_shape.clear(); for (size_t i = 0; i < shapeNum; ++i) { m_shape.emplace_back(shape[i]); diff --git a/frameworks/native/neural_network_runtime/BUILD.gn b/frameworks/native/neural_network_runtime/BUILD.gn index 31959c7..33b4e59 100644 --- a/frameworks/native/neural_network_runtime/BUILD.gn +++ b/frameworks/native/neural_network_runtime/BUILD.gn @@ -196,6 +196,7 @@ ohos_shared_library("libneural_network_runtime") { "init:libbegetutil", "ipc:ipc_core", "mindspore:mindir_lib", + "json:nlohmann_json_static", ] deps = [ "../neural_network_core:libneural_network_core" ] diff --git a/frameworks/native/neural_network_runtime/device.h b/frameworks/native/neural_network_runtime/device.h index 17017ba..7896899 100644 --- a/frameworks/native/neural_network_runtime/device.h +++ b/frameworks/native/neural_network_runtime/device.h @@ -69,6 +69,7 @@ public: virtual OH_NN_ReturnCode AllocateBuffer(size_t length, int& fd) = 0; virtual OH_NN_ReturnCode ReleaseBuffer(int fd, size_t length) = 0; + virtual OH_NN_ReturnCode ReadOpVersion(int& currentOpVersion) = 0; }; } // namespace NeuralNetworkRuntime } // namespace OHOS diff --git a/frameworks/native/neural_network_runtime/hdi_device_v1_0.cpp b/frameworks/native/neural_network_runtime/hdi_device_v1_0.cpp index a8fac98..67a2a3b 100644 --- a/frameworks/native/neural_network_runtime/hdi_device_v1_0.cpp +++ b/frameworks/native/neural_network_runtime/hdi_device_v1_0.cpp @@ -302,17 +302,9 @@ OH_NN_ReturnCode HDIDeviceV1_0::PrepareModelFromModelCache(const std::vector& preparedModel, bool& isUpdatable) { std::vector iBuffers; - auto memManager = MemoryManager::GetInstance(); - Memory memory; - OH_NN_ReturnCode ret; size_t modelCacheSize = modelCache.size(); for (size_t i = 0; i < modelCacheSize; i++) { - ret = memManager->GetMemory(modelCache[i].data, memory); - if (ret != OH_NN_SUCCESS) { - LOGE("The %zuth model cache is invalid. Please put valid model cache.", i + 1); - return ret; - } - iBuffers.emplace_back(V1_0::SharedBuffer {memory.fd, memory.length, 0, memory.length}); + iBuffers.emplace_back(V1_0::SharedBuffer {modelCache[i].fd, modelCache[i].length, 0, modelCache[i].length}); } V1_0::ModelConfig iModelConfig; @@ -451,5 +443,11 @@ OH_NN_ReturnCode HDIDeviceV1_0::PrepareOfflineModel(std::shared_ptr& preparedModel, bool& isUpdatable) { std::vector iBuffers; - auto memManager = MemoryManager::GetInstance(); - Memory memory; - OH_NN_ReturnCode ret; size_t modelCacheSize = modelCache.size(); for (size_t i = 0; i < modelCacheSize; i++) { - ret = memManager->GetMemory(modelCache[i].data, memory); - if (ret != OH_NN_SUCCESS) { - LOGE("The %{public}zuth model cache is invalid. Please put valid model cache.", i + 1); - return ret; - } - iBuffers.emplace_back(V2_0::SharedBuffer {memory.fd, memory.length, 0, memory.length}); + iBuffers.emplace_back(V2_0::SharedBuffer {modelCache[i].fd, modelCache[i].length, 0, modelCache[i].length}); } V2_0::ModelConfig iModelConfig; @@ -684,5 +676,11 @@ OH_NN_ReturnCode HDIDeviceV2_0::PrepareOfflineModel(std::shared_ptr& preparedModel, bool& isUpdatable) { std::vector iBuffers; - auto memManager = MemoryManager::GetInstance(); - Memory memory; - OH_NN_ReturnCode ret; size_t modelCacheSize = modelCache.size(); for (size_t i = 0; i < modelCacheSize; i++) { - ret = memManager->GetMemory(modelCache[i].data, memory); - if (ret != OH_NN_SUCCESS) { - LOGE("The %{public}zuth model cache is invalid. 
Please put valid model cache.", i + 1); - return ret; - } - iBuffers.emplace_back(V2_1::SharedBuffer {memory.fd, memory.length, 0, memory.length}); + iBuffers.emplace_back(V2_1::SharedBuffer {modelCache[i].fd, modelCache[i].length, 0, modelCache[i].length}); } V2_1::ModelConfig iModelConfig; @@ -679,5 +671,11 @@ OH_NN_ReturnCode HDIDeviceV2_1::PrepareOfflineModel(std::shared_ptr& indices, std::vector>& nnTensors) { - if (indices.empty()) { + if ((indices.empty()) || (indices.size() > INPUT_OUTPUT_MAX_INDICES)) { LOGE("ConstructNNTensorsFromLiteGraph failed, passed empty indices list."); return OH_NN_INVALID_PARAMETER; } @@ -324,6 +325,16 @@ OH_NN_ReturnCode InnerModel::SetTensorValue(uint32_t index, const void* buffer, OH_NN_ReturnCode InnerModel::ValidateInputAndOutput( const OH_NN_UInt32Array& inputIndices, const OH_NN_UInt32Array& outputIndices) const { + if (inputIndices.size == 0 || inputIndices.size > INPUT_OUTPUT_MAX_INDICES) { + LOGE("ValidateInputAndOutput failed, passed empty input indices."); + return OH_NN_INVALID_PARAMETER; + } + + if (outputIndices.size == 0 || outputIndices.size > INPUT_OUTPUT_MAX_INDICES) { + LOGE("ValidateInputAndOutput failed, passed empty output indices."); + return OH_NN_INVALID_PARAMETER; + } + OH_NN_ReturnCode ret = ValidateTensorArray(inputIndices); if (ret != OH_NN_SUCCESS) { LOGE("ValidateInputAndOutput failed, please check input indices."); @@ -336,16 +347,6 @@ OH_NN_ReturnCode InnerModel::ValidateInputAndOutput( return ret; } - if (inputIndices.size == 0) { - LOGE("ValidateInputAndOutput failed, passed empty input indices."); - return OH_NN_INVALID_PARAMETER; - } - - if (outputIndices.size == 0) { - LOGE("ValidateInputAndOutput failed, passed empty output indices."); - return OH_NN_INVALID_PARAMETER; - } - std::shared_ptr tensor{nullptr}; for (uint32_t i = 0; i < inputIndices.size; i++) { tensor = m_allTensors[inputIndices.data[i]]; @@ -719,6 +720,11 @@ std::vector, OH_NN_TensorType>> InnerModel { std::vector, OH_NN_TensorType>> inputTensorDescs; std::pair, OH_NN_TensorType> tensorDescPair; + if (m_inputTensors.size() > INPUT_OUTPUT_MAX_INDICES) { + LOGE("Input tensor descs more than 200."); + return inputTensorDescs; + } + for (auto inputTensor : m_inputTensors) { tensorDescPair.first = OHOS::NeuralNetworkRuntime::CreateSharedPtr(); inputTensor->ConvertToTensorDesc(*(tensorDescPair.first.get())); @@ -733,6 +739,11 @@ std::vector, OH_NN_TensorType>> InnerModel { std::vector, OH_NN_TensorType>> outputTensorDescs; std::pair, OH_NN_TensorType> tensorDescPair; + if (m_outputTensors.size() > INPUT_OUTPUT_MAX_INDICES) { + LOGE("Output tensor descs more than 200."); + return outputTensorDescs; + } + for (auto outputTensor : m_outputTensors) { tensorDescPair.first = OHOS::NeuralNetworkRuntime::CreateSharedPtr(); outputTensor->ConvertToTensorDesc(*(tensorDescPair.first.get())); diff --git a/frameworks/native/neural_network_runtime/neural_network_runtime.cpp b/frameworks/native/neural_network_runtime/neural_network_runtime.cpp index 1228e74..31df313 100644 --- a/frameworks/native/neural_network_runtime/neural_network_runtime.cpp +++ b/frameworks/native/neural_network_runtime/neural_network_runtime.cpp @@ -30,6 +30,8 @@ #include #include #include +#include "nlohmann/json.hpp" +#include "securec.h" using namespace OHOS::NeuralNetworkRuntime; @@ -49,6 +51,29 @@ const std::string HARDWARE_NAME = "const.ai.nnrt_deivce"; const std::string HARDWARE_VERSION = "v5_0"; constexpr size_t HARDWARE_NAME_MAX_LENGTH = 128; constexpr size_t FILE_NUMBER_MAX = 100; // 
限制cache文件数量最大为100 +constexpr size_t EXTENSION_MAX_SIZE =200; // 限制MS传过来的参数最多为200 +constexpr size_t INPUT_MAX_COUNT = 200; // 限制模型最大输入个数为200 +constexpr int32_t HEX_UINT = 16; + +unsigned short CacheInfoGetCrc16(char* buffer, size_t length) +{ + unsigned int sum = 0; + while (length > 1) { + sum += *(reinterpret_cast(buffer)); + length -= sizeof(unsigned short); + buffer += sizeof(unsigned short); + } + + if (length > 0) { + sum += *(reinterpret_cast(buffer)); + } + + while (sum >> HEX_UNIT) { + sum = (sum >> HEX_UNIT) + (sum & 0xffff); + } + + return static_cast(~sum); +} NNRT_API NN_QuantParam *OH_NNQuantParam_Create() { @@ -326,6 +351,11 @@ OH_NN_ReturnCode ParseInputDimsFromExtensions(char* data, size_t dataSize, const size_t inputCount = liteGraph->input_indices_.size(); // LiteGraph输入个数 size_t allTensorSize = liteGraph->all_tensors_.size(); // LiteGraph所有tensor个数 + if (inputCount > INPUT_MAX_COUNT) { + LOGE("ParseInputDimsFromExtensions failed, inputCount more than 200."); + return OH_NN_INVALID_PARAMETER; + } + std::vector inputDim; size_t dataIndex = 0; for (size_t i = 0; i < inputCount; ++i) { @@ -505,6 +535,11 @@ NNRT_API OH_NN_ReturnCode OH_NNModel_BuildFromLiteGraph(OH_NNModel *model, const return OH_NN_INVALID_PARAMETER; } + if (extensionSize > EXTENSION_MAX_SIZE) { + LOGE("OH_NNModel_BuildFromLiteGraph failed, extension more than 200."); + return OH_NN_INVALID_PARAMETER; + } + auto *pLiteGraph = reinterpret_cast(liteGraph); ExtensionConfig extensionConfig; std::unordered_map>> extensionMap; @@ -531,39 +566,76 @@ NNRT_API OH_NN_ReturnCode OH_NNModel_BuildFromLiteGraph(OH_NNModel *model, const } namespace { +OH_NN_ReturnCode CheckCacheFileExtension(const std::string& content, int64_t& fileNumber, int64_t& cacheVersion); + OH_NN_ReturnCode CheckCacheFile(const std::string& cacheInfoPath, int64_t& fileNumber, int64_t& cacheVersion) { - // read number of cache models char path[PATH_MAX]; if (realpath(cacheInfoPath.c_str(), path) == nullptr) { LOGE("OH_NNModel_HasCache get real path of cache info failed."); - return OH_NN_INVALID_PARAMETER; + return OH_NN_INVALID_FILE; } if (access(path, F_OK) != 0) { LOGE("OH_NNModel_HasCache access cache info file failed."); - return OH_NN_INVALID_PARAMETER; + return OH_NN_INVALID_FILE; } std::ifstream ifs(path, std::ios::in | std::ios::binary); if (!ifs) { LOGE("OH_NNModel_HasCache open cache info file failed."); - return OH_NN_INVALID_PARAMETER; + return OH_NN_INVALID_FILE; } - if (!ifs.read(reinterpret_cast(&(fileNumber)), sizeof(fileNumber))) { - LOGI("OH_NNModel_HasCache read cache info file failed."); - ifs.close(); - return OH_NN_INVALID_PARAMETER; + // Read the entire file into a string + std::string content((std::istreambuf_iterator(ifs)), std::istreambuf_iterator()); + ifs.close(); + + return CheckCacheFileExtension(content, fileNumber, cacheVersion); +} + +OH_NN_ReturnCode CheckCacheFileExtension(const std::string& content, int64_t& fileNumber, int64_t& cacheVersion) +{ + if (!nlohmann::json::accept(content)) { + LOGE("OH_NNModel_HasCache CheckCacheFile JSON parse error"); + return OH_NN_INVALID_FILE; } - if (!ifs.read(reinterpret_cast(&(cacheVersion)), sizeof(cacheVersion))) { - LOGI("OH_NNModel_HasCache read cache info file failed."); - ifs.close(); - return OH_NN_INVALID_PARAMETER; + nlohmann::json j = nlohmann::json::parse(content); + if (j.find("data") == j.end()) { + LOGE("OH_NNModel_HasCache read data from cache info file failed."); + return OH_NN_INVALID_FILE; + } + + if(j["data"].find("fileNumber") == j["data"].end()) { + 
LOGE("OH_NNModel_HasCache read fileNumber from cache info file failed."); + return OH_NN_INVALID_FILE; + } + fileNumber = j["data"]["fileNumber"].get(); + + if(j["data"].find("version") == j["data"].end()) { + LOGE("OH_NNModel_HasCache read version from cache info file failed."); + return OH_NN_INVALID_FILE; + } + cacheVersion = j["data"]["version"].get(); + + if (j.find("CheckSum") == j.end()) { + LOGE("OH_NNModel_HasCache read CheckSum from cache info file failed."); + return OH_NN_INVALID_FILE; + } + const size_t dataLength = j["data"].dump().length(); + char jData[dataLength + 1]; + + if (strncpy(jData, dataLength+1, j["data"].dump().c_str(), dataLength != 0)) { + LOGE("OH_NNModel_HasCache ParseStr failed due to strncpy_s error."); + return OH_NN_INVALID_FILE; + } + + if (static_cast(CacheInfoGetCrc16(jData, dataLength)) != j["CheckSum"].get()) { + LOGE("OH_NNModel_HasCache cache_info CheckSum is not correct."); + return OH_NN_INVALID_FILE; } - ifs.close(); return OH_NN_SUCCESS; } } @@ -577,7 +649,6 @@ NNRT_API bool OH_NNModel_HasCache(const char *cacheDir, const char *modelName, u if (modelName == nullptr) { LOGI("OH_NNModel_HasCache get empty model name."); - return false; } std::string cacheInfoPath = std::string(cacheDir) + "/" + std::string(modelName) + "cache_info.nncache"; @@ -594,6 +665,7 @@ NNRT_API bool OH_NNModel_HasCache(const char *cacheDir, const char *modelName, u OH_NN_ReturnCode returnCode = CheckCacheFile(cacheInfoPath, fileNumber, cacheVersion); if (returnCode != OH_NN_SUCCESS) { LOGE("OH_NNModel_HasCache get fileNumber or cacheVersion fail."); + std::filesystem::remove_all(cacheInfoPath); return false; } diff --git a/frameworks/native/neural_network_runtime/nn_tensor.cpp b/frameworks/native/neural_network_runtime/nn_tensor.cpp index df42f2f..2e79981 100644 --- a/frameworks/native/neural_network_runtime/nn_tensor.cpp +++ b/frameworks/native/neural_network_runtime/nn_tensor.cpp @@ -28,6 +28,7 @@ namespace OHOS { namespace NeuralNetworkRuntime { const uint32_t SUPPORT_NUM_BIT = 8; // Currently support 8-bit quantization only +constexpr size_t DIM_MAX_NUM = 200; void DestroyLiteGraphTensor(void* tensor) { @@ -234,6 +235,10 @@ OH_NN_ReturnCode NNTensor::ValidateDimensions(const std::vector& dimens uint64_t elementCount {1}; uint64_t dataLength {static_cast(GetTypeSize(m_dataType))}; m_isDynamicShape = false; + if (dimensions.size() > DIM_MAX_NUM) { + LOGE("ParseDimension failed, dimensions more than 200."); + return OH_NN_INVALID_PARAMETER; + } for (int32_t dim : dimensions) { if (dim < -1 || dim == 0) { LOGE("ParseDimension failed, dimension of OH_NN_Tensor cannot be 0 or less than -1, receive %d.", dim); diff --git a/frameworks/native/neural_network_runtime/nncompiled_cache.cpp b/frameworks/native/neural_network_runtime/nncompiled_cache.cpp index 5f5ab3c..7d62a54 100644 --- a/frameworks/native/neural_network_runtime/nncompiled_cache.cpp +++ b/frameworks/native/neural_network_runtime/nncompiled_cache.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include "utils.h" #include "backend_manager.h" @@ -31,11 +32,9 @@ constexpr int32_t NULL_PTR_LENGTH = 0; constexpr int32_t NUMBER_CACHE_INFO_MEMBERS = 3; constexpr int32_t NUMBER_CACHE_INFO_EXTENSION_MEMBERS = 2; constexpr int32_t HEX_UNIT = 16; +constexpr size_t MAX_CACHE_SIZE = 2 * 1024 * 1024; // 限制最大校验内存为2MB constexpr char ROOT_DIR_STR = '/'; constexpr char DOUBLE_SLASH_STR[] = "//"; -constexpr int OPVERSION_SUBSTR_NUM = 2; -const std::string CURRENT_VERSION = "0x00000000"; -const std::string HIAI_VERSION_PATH 
= "/data/data/hiai/version"; OH_NN_ReturnCode NNCompiledCache::Save(const std::vector& caches, const std::string& cacheDir, @@ -168,11 +167,7 @@ OH_NN_ReturnCode NNCompiledCache::GenerateCacheFiles(const std::vector cacheInfo = CreateUniquePtr(cacheSize); - if (cacheInfo == nullptr) { - LOGE("[NNCompiledCache] GenerateCacheFiles failed, fail to create cacheInfo instance."); - return OH_NN_MEMORY_ERROR; - } + nlohmann::json cacheInfo; OH_NN_ReturnCode ret = GenerateCacheModel(caches, cacheInfo, cacheDir, version); if (ret != OH_NN_SUCCESS) { @@ -191,7 +186,7 @@ OH_NN_ReturnCode NNCompiledCache::GenerateCacheFiles(const std::vector& caches, - std::unique_ptr& cacheInfo, + nlohmann::json& cacheInfo, const std::string& cacheDir, uint32_t version) const { @@ -201,10 +196,9 @@ OH_NN_ReturnCode NNCompiledCache::GenerateCacheModel(const std::vector(cacheNumber); - *cacheInfoPtr++ = static_cast(version); - *cacheInfoPtr++ = static_cast(m_backendID); // Should call SetBackend first. + cacheInfo["data"]["fileNumber"] = static_cast(cacheNumber); + cacheInfo["data"]["version"] = static_cast(version); + cacheInfo["data"]["deviceId"] = static_cast(m_backendID); // Should call SetBackend first. // standardize the input dir OH_NN_ReturnCode ret = OH_NN_SUCCESS; @@ -232,7 +226,7 @@ OH_NN_ReturnCode NNCompiledCache::GenerateCacheModel(const std::vector(GetCrc16(static_cast(caches[i].data), caches[i].length)); - *cacheInfoPtr++ = checkSum; + cacheInfo["data"]["modelCheckSum"][i] = checkSum; if (!cacheModelStream.write(static_cast(caches[i].data), caches[i].length)) { LOGE("[NNCompiledCache] GenerateCacheModel failed, fail to write cache model."); cacheModelStream.close(); @@ -242,31 +236,31 @@ OH_NN_ReturnCode NNCompiledCache::GenerateCacheModel(const std::vectorReadOpVersion(currentOpVersion); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNCompiledCache] GenerateCacheModel failed, fail to read op version."); + return ret; } + cacheInfo["data"]["opVersion"] = currentOpVersion; - int currentOpVersion = std::stoi(currentVersion.substr(OPVERSION_SUBSTR_NUM)); - *cacheInfoPtr++ = currentOpVersion; + LOGI("[NNCompiledCache] GenerateCacheModel m_isExceedRamLimit: %{public}d", static_cast(m_isExceedRamLimit)); + cacheInfo["data"]["isExceedRamLimit"] = m_isExceedRamLimit ? 
1 : 0; - LOGI("[NNCompiledCache::GenerateCacheModel] m_isExceedRamLimit: %{public}d", static_cast(m_isExceedRamLimit)); - if (m_isExceedRamLimit) { - *cacheInfoPtr++ = 1; - } else { - *cacheInfoPtr++ = 0; + const size_t dataLength = cacheInfo["data"].dump().length(); + char cacheInfoData[dataLength + 1]; + if (strncpy_s(cacheInfoData, dataLength+1, cacheInfo["data"].dump().c_str(), dataLength) != 0) { + LOGE("ParseStr failed due to strncpy_s error"); + return OH_NN_INVALID_PARAMETER; } + + cacheInfo["CheckSum"] = static_cast(CacheInfoGetCrc16(cacheInfoData, dataLength)); return OH_NN_SUCCESS; } OH_NN_ReturnCode NNCompiledCache::WriteCacheInfo(uint32_t cacheSize, - std::unique_ptr& cacheInfo, + nlohmann::json& cacheInfo, const std::string& cacheDir) const { // standardize the input dir @@ -291,11 +285,7 @@ OH_NN_ReturnCode NNCompiledCache::WriteCacheInfo(uint32_t cacheSize, return OH_NN_INVALID_FILE; } - if (!cacheInfoStream.write(reinterpret_cast(cacheInfo.get()), cacheSize)) { - LOGE("[NNCompiledCache] WriteCacheInfo failed, fail to write cache info."); - cacheInfoStream.close(); - return OH_NN_SAVE_CACHE_EXCEPTION; - } + cacheInfoStream << cacheInfo << std::endl; cacheInfoStream.close(); return OH_NN_SUCCESS; @@ -311,52 +301,103 @@ OH_NN_ReturnCode NNCompiledCache::CheckCacheInfo(NNCompiledCacheInfo& modelCache return OH_NN_INVALID_FILE; } - int charNumber = NUMBER_CACHE_INFO_MEMBERS * sizeof(uint64_t); - if (!infoCacheFile.read(reinterpret_cast(&(modelCacheInfo)), charNumber)) { - LOGE("[NNCompiledCache] CheckCacheInfo failed, error happened when reading cache info file."); - infoCacheFile.close(); + std::string content((std::istreambuf_iterator(infoCacheFile)), std::istreambuf_iterator()); + infoCacheFile.close(); + + if (!nlohmann::json::accept(content)) { + LOGE("[NNCompiledCache] CheckCacheInfo JSON parse error"); return OH_NN_INVALID_FILE; } + // Parse the json string + nlohmann::json j = nlomann::json::parse(content); // modelCacheInfo.deviceId type is int64_t, // it is transformed from size_t value, so the transform here will not truncate value. + if (j.find("data") == j.end()) { + LOGE("[NNCompiledCache] CheckCacheInfo read cache info file failed."); + return OH_NN_INVALID_FILE; + } + + if(j["data"].find("deviceId") == j["data"].end()) { + LOGE("[NNCompiledCache] CheckCacheInfo read deviceId from cache info file failed."); + return OH_NN_INVALID_FILE; + } + modelCacheInfo.deviceId = j["data"]["deviceId"].get(); + + if(j["data"].find("version") == j["data"].end()) { + LOGE("[NNCompiledCache] CheckCacheInfo read version from cache info file failed."); + return OH_NN_INVALID_FILE; + } + modelCacheInfo.version = j["data"]["version"].get(); + size_t deviceId = static_cast(modelCacheInfo.deviceId); if (deviceId != m_backendID) { LOGE("[NNCompiledCache] CheckCacheInfo failed. 
The deviceId in the cache files " "is different from current deviceId," "please change the cache directory or current deviceId."); - infoCacheFile.close(); return OH_NN_INVALID_PARAMETER; } + if(j["data"].find("fileNumber") == j["data"].end()) { + LOGE("[NNCompiledCache] CheckCacheInfo read fileNumber from cache info file failed."); + return OH_NN_INVALID_FILE; + } + modelCacheInfo.fileNumber = j["data"]["fileNumber"].get(); + + return CheckCacheInfoExtension(modelCacheInfo, j); +} + +OH_NN_ReturnCode NNCompiledCache::CheckCacheInfoExtension(NNCompiledCacheInfo& modelCacheInfo, nlohmann::json& j) const +{ + const size_t dataLength = j["data"].dump().length(); + char jData[dataLength + 1]; + if (strncpy(jData, dataLength+1, j["data"].dump().c_str(), dataLength != 0)) { + LOGE("[NNCompiledCache] ParseStr failed due to strncpy_s error."); + return OH_NN_INVALID_FILE; + } + + if (j.find("CheckSum") == j.end()) { + LOGE("[NNCompiledCache] read CheckSum from cache info file failed."); + return OH_NN_INVALID_FILE; + } + + if (static_cast(CacheInfoGetCrc16(jData, dataLength)) != j["CheckSum"].get()) { + LOGE("[NNCompiledCache] cache_info CheckSum is not correct."); + return OH_NN_INVALID_FILE; + } std::vector modelCheckSum; modelCheckSum.resize(modelCacheInfo.fileNumber); modelCacheInfo.modelCheckSum.resize(modelCacheInfo.fileNumber); - if (!infoCacheFile.read(reinterpret_cast(&modelCheckSum[0]), - modelCacheInfo.fileNumber * sizeof(uint64_t))) { - LOGE("[NNCompiledCache] CheckCacheInfo failed. The info cache file has been changed."); - infoCacheFile.close(); + if(j["data"].find("modelCheckSum") == j["data"].end()) { + LOGE("[NNCompiledCache] CheckCacheInfo read cache file failed."); return OH_NN_INVALID_FILE; } + for (uint32_t i = 0; i < modelCacheInfo.fileNumber; ++i) { + modelCheckSum[i] = static_cast(j["data"]["modelCheckSum"][i]); + } for (uint32_t i = 0; i < modelCacheInfo.fileNumber; ++i) { modelCacheInfo.modelCheckSum[i] = static_cast(modelCheckSum[i]); } - if (!infoCacheFile.read(reinterpret_cast(&(modelCacheInfo.opVersion)), sizeof(uint64_t))) { - LOGW("[NNCompiledCache] opVersion failed."); + if (j["data"].find("opVersion") == j["data"].end()) { + LOGW("[NNCompiledCache] CheckCacheInfo read opVersion from cache info file failed."); + } else { + modelCacheInfo.opVersion = j["data"]["opVersion"].get(); } - if (!infoCacheFile.read(reinterpret_cast(&(modelCacheInfo.isExceedRamLimit)), sizeof(uint64_t))) { - LOGW("[NNCompiledCache] isExceedRamLimit failed."); + if (j["data"].find("isExceedRamLimit") == j["data"].end()) { + LOGE("[NNCompiledCache] CheckCacheInfo read isExceedRamLimit from cache info file failed."); + return OH_NN_INVALID_FILE; + } else { + modelCacheInfo.isExceedRamLimit = j["data"]["isExceedRamLimit"].get(); } - infoCacheFile.close(); return OH_NN_SUCCESS; } OH_NN_ReturnCode NNCompiledCache::ReadCacheModelFile(const std::string& filePath, - OHOS::NeuralNetworkRuntime::Buffer& cache) const + OHOS::NeuralNetworkRuntime::Buffer& cache) { char path[PATH_MAX]; if (realpath(filePath.c_str(), path) == nullptr) { @@ -368,56 +409,53 @@ OH_NN_ReturnCode NNCompiledCache::ReadCacheModelFile(const std::string& filePath return OH_NN_INVALID_PARAMETER; } - FILE* pFile = fopen(path, "rb"); - if (pFile == NULL) { + int fd = open(path, O_RDONLY); + if (fd == -1) { LOGE("[NNCompiledCache] ReadCacheModelFile failed, file fopen failed."); return OH_NN_INVALID_FILE; } - long fsize{-1}; - OH_NN_ReturnCode ret = GetCacheFileLength(pFile, fsize); - if (ret != OH_NN_SUCCESS) { - fclose(pFile); - 
LOGE("[NNCompiledCache] ReadCacheModelFile failed, get file %{public}s length fialed.", filePath.c_str()); - return ret; - } - - rewind(pFile); - - char* ptr = static_cast(m_device->AllocateBuffer(fsize)); - if (ptr == nullptr) { - LOGE("[NNCompiledCache] ReadCacheModelFile failed, failed to allocate memory."); - fclose(pFile); + struct stat sb; + if (fstat(fd, &sb) == -1) { + close(fd); + LOGE("[NNCompiledCache] ReadCacheModelFile failed, get file %{public}s state failed.", filePath,c_str()); return OH_NN_MEMORY_ERROR; } - LOGI("ReadCacheModelFile read start."); - size_t result = fread(ptr, 1, fsize, pFile); // size of each object in bytes is 1 - LOGI("ReadCacheModelFile read end."); - if (result != static_cast(fsize)) { - LOGE("[NNCompiledCache] ReadCacheModelFile failed, failed to read file."); - fclose(pFile); - m_device->ReleaseBuffer(ptr); - ptr = nullptr; + void *ptr = mmap(NULL, fsize, PROT_READ, MAP_SHARED, fd, 0); + if (ptr == MAP_FAILED) { + LOGE("[NNCompiledCache] ReadCacheModelFile failed, failed to mmap file."); + close(fd); return OH_NN_INVALID_FILE; } - fclose(pFile); cache.data = ptr; cache.length = static_cast(fsize); // fsize should be non-negative, safe to cast. + cache.fd = fd; return OH_NN_SUCCESS; } unsigned short NNCompiledCache::GetCrc16(char* buffer, size_t length) const { unsigned int sum = 0; - while (length > 1) { - sum += *(reinterpret_cast(buffer)); - length -= sizeof(unsigned short); - buffer += sizeof(unsigned short); + + if (length < MAX_CACHE_SIZE) { + while (length > 1) { + sum += *(reinterpret_cast(buffer)); + length -= sizeof(unsigned short); + buffer += sizeof(unsigned short); + } + } else { + size_t step = length / MAX_CACHE_SIZE; + while (length > sizeof(unsigned short) * step + 1) { + sum += *(reinterpret_cast(buffer)); + length -= step * sizeof(unsigned short); + buffer += step * sizeof(unsigned short); + } } if (length > 0) { + buffer += length -1; sum += *(reinterpret_cast(buffer)); } @@ -474,5 +512,14 @@ OH_NN_ReturnCode NNCompiledCache::VerifyCachePath(const std::string& cachePath) return OH_NN_SUCCESS; } + +void NNCompiledCache::ReleaseCacheBuffer(std::vector& buffers) +{ + for (auto buffer : buffers) { + munmap(buffer.data, buffer.length); + close(buffer.fd); + } + buffers.clear(); +} } // namespace NeuralNetworkRuntime } // namespace OHOS diff --git a/frameworks/native/neural_network_runtime/nncompiled_cache.h b/frameworks/native/neural_network_runtime/nncompiled_cache.h index f182ac4..756b784 100644 --- a/frameworks/native/neural_network_runtime/nncompiled_cache.h +++ b/frameworks/native/neural_network_runtime/nncompiled_cache.h @@ -19,6 +19,12 @@ #include #include #include +#include +#include +#include +#include + +#include "nlohmann/json.cpp" #include "device.h" #include "neural_network_runtime/neural_network_runtime.h" @@ -54,20 +60,22 @@ public: void SetModelName(const std::string& modelName); void SetIsExceedRamLimit(const bool isExceedRamLimit); OH_NN_ReturnCode WriteCacheInfo(uint32_t cacheSize, - std::unique_ptr& cacheInfo, + nlohmann::json& cacheInfo, const std::string& cacheDir) const; OH_NN_ReturnCode CheckCacheInfo(NNCompiledCacheInfo& modelCacheInfo, const std::string& cacheInfoPath) const; + OH_NN_ReturnCode CheckCacheInfoExtension(NNCompiledCacheInfo& modelCacheInfo, nlohmann::json& j) const; + void ReleaseCacheBuffer(std::vector& buffers); + unsigned short GetCrc16(char* buffer, size_t length) const; private: OH_NN_ReturnCode GenerateCacheFiles(const std::vector& caches, const std::string& cacheDir, uint32_t version) const; 
OH_NN_ReturnCode GenerateCacheModel(const std::vector<Buffer>& caches,
-                                        std::unique_ptr<int64_t[]>& cacheInfo,
+                                        nlohmann::json& cacheInfo,
                                         const std::string& cacheDir,
                                         uint32_t version) const;
-    OH_NN_ReturnCode ReadCacheModelFile(const std::string& file, Buffer& cache) const;
-    unsigned short GetCrc16(char* buffer, size_t length) const;
+    OH_NN_ReturnCode ReadCacheModelFile(const std::string& file, Buffer& cache);
     OH_NN_ReturnCode GetCacheFileLength(FILE* pFile, long& fileSize) const;
     OH_NN_ReturnCode VerifyCachePath(const std::string& cachePath) const;
 
diff --git a/frameworks/native/neural_network_runtime/nncompiler.cpp b/frameworks/native/neural_network_runtime/nncompiler.cpp
index a55825e..257d1e6 100644
--- a/frameworks/native/neural_network_runtime/nncompiler.cpp
+++ b/frameworks/native/neural_network_runtime/nncompiler.cpp
@@ -14,6 +14,7 @@
  */
 
 #include "nncompiler.h"
+#include "neural_network_runtime/neural_network_core.h"
 
 #include
 #include
@@ -23,6 +24,7 @@
 #include "validation.h"
 #include "nncompiled_cache.h"
 #include "utils.h"
+#include "nlohmann/json.hpp"
 
 namespace OHOS {
 namespace NeuralNetworkRuntime {
@@ -34,9 +36,7 @@ constexpr int32_t NUMBER_CACHE_INFO_EXTENSION_MEMBERS = 2;
 const std::string EXTENSION_KEY_MODEL_NAME = "ModelName";
 const std::string EXTENSION_KEY_FM_SHARED = "NPU_FM_SHARED";
 const std::string EXTENSION_KEY_IS_EXCEED_RAMLIMIT = "isExceedRamLimit";
-const int OPVERSION_SUBSTR_NUM = 2;
-const std::string CURRENT_VERSION = "0x00000000";
-const std::string HIAI_VERSION_PATH = "/data/data/hiai/version";
+constexpr size_t INPUT_OUTPUT_MAX_NUM = 200;
 
 struct SerializedTensorDesc {
 public:
@@ -499,15 +499,6 @@ void NNCompiler::ReleaseBuffer(std::vector<Buffer>& buffers) const
     buffers.clear();
 }
 
-void NNCompiler::ReleaseBufferByDevice(std::vector<Buffer>& buffers) const
-{
-    for (size_t i = 0; i < buffers.size(); ++i) {
-        // release cache buffer which is allocated by idevice.
- m_device->ReleaseBuffer(buffers[i].data); - } - buffers.clear(); -} - OH_NN_ReturnCode NNCompiler::SaveToCacheFile() const { if (m_cachePath.empty()) { @@ -546,6 +537,11 @@ OH_NN_ReturnCode NNCompiler::SaveToCacheFile() const return ret; } + if ((m_inputTensorDescs.size() > INPUT_OUTPUT_MAX_NUM) || (m_outputTensorDescs.size() > INPUT_OUTPUT_MAX_NUM)) { + LOGE("[NNCompiler] SaveToCacheFile failed, m_inputTensorDescs or m_outputTensorDescs is more than 200."); + return OH_NN_INVALID_PARAMETER; + } + Buffer inputTensorDescBuffer; ret = SerializeTensorsToBuffer(m_inputTensorDescs, inputTensorDescBuffer); if (ret != OH_NN_SUCCESS) { @@ -614,7 +610,7 @@ OH_NN_ReturnCode NNCompiler::RestoreFromCacheFile() ret = compiledCache.Restore(m_cachePath, m_cacheVersion, caches); if (ret != OH_NN_SUCCESS) { LOGE("[NNCompiler] RestoreFromCacheFile failed, error happened when restoring model cache."); - ReleaseBufferByDevice(caches); + compiledCache.ReleaseCacheBuffer(caches); return ret; } @@ -623,7 +619,7 @@ OH_NN_ReturnCode NNCompiler::RestoreFromCacheFile() ret = DeserializedTensorsFromBuffer(caches[cacheNum - CACHE_INPUT_TENSORDESC_OFFSET], inputTensorDescs); if (ret != OH_NN_SUCCESS) { LOGE("[NNCompiler] RestoreFromCacheFile failed, error happened when deserializing input tensor desc."); - ReleaseBufferByDevice(caches); + compiledCache.ReleaseCacheBuffer(caches); return ret; } @@ -631,7 +627,7 @@ OH_NN_ReturnCode NNCompiler::RestoreFromCacheFile() ret = DeserializedTensorsFromBuffer(caches[cacheNum - CACHE_OUTPUT_TENSORDESC_OFFSET], outputTensorDescs); if (ret != OH_NN_SUCCESS) { LOGE("[NNCompiler] RestoreFromCacheFile failed, error happened when deserializing output tensor desc."); - ReleaseBufferByDevice(caches); + compiledCache.ReleaseCacheBuffer(caches); return ret; } @@ -645,25 +641,20 @@ OH_NN_ReturnCode NNCompiler::RestoreFromCacheFile() ret = m_device->PrepareModelFromModelCache(modelOnlyCaches, config, m_preparedModel, isUpdatable); if (ret != OH_NN_SUCCESS) { LOGE("[NNCompiler] RestoreFromCacheFile failed, error happened when preparing model from cache."); - ReleaseBufferByDevice(caches); + compiledCache.ReleaseCacheBuffer(caches); return ret; } if (isUpdatable) { LOGI("isUpdatable is true"); - std::string currentVersion = CURRENT_VERSION; - char versionPath[PATH_MAX]; - if (realpath(HIAI_VERSION_PATH.c_str(), versionPath) != nullptr) { - std::ifstream inf(versionPath); - if (inf.is_open()) { - getline(inf, currentVersion); - } - inf.close(); + int currentOpVersion = 0; + ret = m_device->ReadOpVersion(currentOpVersion); + if (ret != OH_NN_SUCCESS) { + LOGE("[NNCompiledCache] GenerateCacheModel failed, fail to read op version."); + return ret; } - int currentOpVersion = std::stoi(currentVersion.substr(OPVERSION_SUBSTR_NUM)); - NNCompiledCacheInfo modelCacheInfo; std::string cacheInfoPath = m_cachePath + "/" + m_extensionConfig.modelName + "cache_info.nncache"; ret = compiledCache.CheckCacheInfo(modelCacheInfo, cacheInfoPath); @@ -680,29 +671,28 @@ OH_NN_ReturnCode NNCompiler::RestoreFromCacheFile() uint32_t cacheSize = NUMBER_CACHE_INFO_MEMBERS + cacheNumber + NUMBER_CACHE_INFO_EXTENSION_MEMBERS; uint32_t infoCharNumber = cacheSize * sizeof(int64_t); - std::unique_ptr cacheInfo = CreateUniquePtr(cacheSize); - if (cacheInfo == nullptr) { - LOGE("[NNCompiledCache] isUpdatable is true to create unique failed."); - return OH_NN_MEMORY_ERROR; - } + nlohmann::json cacheInfo; - auto cacheInfoPtr = cacheInfo.get(); - *cacheInfoPtr++ = modelCacheInfo.fileNumber; - *cacheInfoPtr++ = 
modelCacheInfo.version - 1;
-    *cacheInfoPtr++ = modelCacheInfo.deviceId;
+    cacheInfo["data"]["fileNumber"] = modelCacheInfo.fileNumber;
+    cacheInfo["data"]["version"] = modelCacheInfo.version - 1;
+    cacheInfo["data"]["deviceId"] = modelCacheInfo.deviceId;
 
     for (size_t i = 0; i < modelCacheInfo.modelCheckSum.size(); ++i) {
-        *cacheInfoPtr++ = static_cast<int64_t>(modelCacheInfo.modelCheckSum[i]);
+        cacheInfo["data"]["modelCheckSum"][i] = modelCacheInfo.modelCheckSum[i];
     }
 
-    *cacheInfoPtr++ = currentOpVersion;
+    cacheInfo["data"]["opVersion"] = currentOpVersion;
+    cacheInfo["data"]["isExceedRamLimit"] = modelCacheInfo.isExceedRamLimit ? 1 : 0;
 
-    if (modelCacheInfo.isExceedRamLimit) {
-        *cacheInfoPtr++ = 1;
-    } else {
-        *cacheInfoPtr++ = 0;
+    const size_t dataLength cacheInfo["data"].dump().length();
+    char cacheInfoData[dataLength + 1];
+    if (strncpy_s(cacheInfoData, dataLength+1, cacheInfo["data"].dump().c_str(), dataLength != 0)) {
+        LOGE("ParseStr failed due to strncpy_s error");
+        return OH_NN_INVALID_PARAMETER;
     }
 
+    cacheInfo["CheckSum"] = static_cast<int64_t>(CacheInfoGetCrc16(cacheInfoData, dataLength));
+
     ret = compiledCache.WriteCacheInfo(infoCharNumber, cacheInfo, m_cachePath);
     if (ret != OH_NN_SUCCESS) {
         LOGE("[NNCompiledCache] isUpdatable is true to write cache info failed.");
@@ -711,7 +701,7 @@ OH_NN_ReturnCode NNCompiler::RestoreFromCacheFile()
         }
     }
 
-    ReleaseBufferByDevice(caches);
+    compiledCache.ReleaseCacheBuffer(caches);
 
     m_inputTensorDescs = inputTensorDescs;
     m_outputTensorDescs = outputTensorDescs;
@@ -996,6 +986,5 @@ OH_NN_ReturnCode NNCompiler::DeserializedTensorsFromBuffer(
     ReleaseDescShape(immediateTensorDescs);
     return ret;
 }
-
 } // NeuralNetworkRuntime
 } // OHOS
diff --git a/frameworks/native/neural_network_runtime/nnexecutor.cpp b/frameworks/native/neural_network_runtime/nnexecutor.cpp
index dcf4e9c..d8f1e6d 100644
--- a/frameworks/native/neural_network_runtime/nnexecutor.cpp
+++ b/frameworks/native/neural_network_runtime/nnexecutor.cpp
@@ -25,6 +25,8 @@
 #include "transform.h"
 
 namespace OHOS {
+constexpr size_t EXTENSION_MAX_SIZE = 200;
+
 namespace NeuralNetworkRuntime {
 NNExecutor::NNExecutor(size_t backendID, std::shared_ptr<Device> device, std::shared_ptr<PreparedModel> preparedModel,
     const std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>>& inputTensorDescs,
@@ -197,6 +199,11 @@ OH_NN_ReturnCode NNExecutor::SetExtensionConfig(const std::unordered_map<std::string, std::vector<char>>& configs)
 {
+    if (configs.size() > EXTENSION_MAX_SIZE) {
+        LOGE("[NNExecutor] SetExtensionConfig, configs size more than 200.");
+        return OH_NN_FAILED;
+    }
+
     for (auto config : configs) {
         char* configData = reinterpret_cast<char*>(config.second.data());
         if (configData == nullptr) {
diff --git a/test/unittest/components/BUILD.gn b/test/unittest/components/BUILD.gn
index 601e0bf..f9eb95f 100644
--- a/test/unittest/components/BUILD.gn
+++ b/test/unittest/components/BUILD.gn
@@ -230,6 +230,7 @@ ohos_unittest("NNCompiledCacheTest") {
     "googletest:gtest_main",
     "hilog:libhilog",
     "hitrace:libhitracechain",
+    "json:nlohmann_json_static",
     "mindspore:mindir_lib",
     "neural_network_runtime:libneural_network_core",
     "neural_network_runtime:libneural_network_runtime",
diff --git a/test/unittest/components/nn_backend/nn_backend_test.cpp b/test/unittest/components/nn_backend/nn_backend_test.cpp
index 71822d9..175afc6 100644
--- a/test/unittest/components/nn_backend/nn_backend_test.cpp
+++ b/test/unittest/components/nn_backend/nn_backend_test.cpp
@@ -67,6 +67,7 @@ public:
    MOCK_METHOD1(ReleaseBuffer, OH_NN_ReturnCode(const void*));
    MOCK_METHOD2(AllocateBuffer, OH_NN_ReturnCode(size_t, int&));
    MOCK_METHOD2(ReleaseBuffer, 
OH_NN_ReturnCode(int, size_t)); + MOCK_METHOD1(ReadOpVersion, OH_NN_ReturnCode(int&)); }; /** diff --git a/test/unittest/components/nn_compiled_cache/nn_compiled_cache_test.cpp b/test/unittest/components/nn_compiled_cache/nn_compiled_cache_test.cpp index e43bca5..9321af5 100644 --- a/test/unittest/components/nn_compiled_cache/nn_compiled_cache_test.cpp +++ b/test/unittest/components/nn_compiled_cache/nn_compiled_cache_test.cpp @@ -69,6 +69,7 @@ public: MOCK_METHOD1(ReleaseBuffer, OH_NN_ReturnCode(const void*)); MOCK_METHOD2(AllocateBuffer, OH_NN_ReturnCode(size_t, int&)); MOCK_METHOD2(ReleaseBuffer, OH_NN_ReturnCode(int, size_t)); + MOCK_METHOD1(ReadOpVersion, OH_NN_ReturnCode(int&)); }; /** @@ -477,7 +478,7 @@ HWTEST_F(NNCompiledCacheTest, nncompiledcachetest_writecacheinfo_001, TestSize.L NNCompiledCache nncompiledCache; uint32_t cacheSize = 1; - std::unique_ptr cacheInfo = std::make_unique(cacheSize); + nlohmann::json cacheInfo; std::string cacheDir = "mock"; OH_NN_ReturnCode ret = nncompiledCache.WriteCacheInfo(cacheSize, cacheInfo, cacheDir); @@ -495,7 +496,7 @@ HWTEST_F(NNCompiledCacheTest, nncompiledcachetest_writecacheinfo_002, TestSize.L NNCompiledCache nncompiledCache; uint32_t cacheSize = 1; - std::unique_ptr cacheInfo = std::make_unique(cacheSize); + nlohmann::json cacheInfo; std::string cacheDir = "/data/data"; OH_NN_ReturnCode ret = nncompiledCache.WriteCacheInfo(cacheSize, cacheInfo, cacheDir); diff --git a/test/unittest/components/nn_compiler/nn_compiler_test.cpp b/test/unittest/components/nn_compiler/nn_compiler_test.cpp index 37d938f..ee0ba2b 100644 --- a/test/unittest/components/nn_compiler/nn_compiler_test.cpp +++ b/test/unittest/components/nn_compiler/nn_compiler_test.cpp @@ -70,6 +70,7 @@ public: MOCK_METHOD1(ReleaseBuffer, OH_NN_ReturnCode(const void*)); MOCK_METHOD2(AllocateBuffer, OH_NN_ReturnCode(size_t, int&)); MOCK_METHOD2(ReleaseBuffer, OH_NN_ReturnCode(int, size_t)); + MOCK_METHOD1(ReadOpVersion, OH_NN_ReturnCode(int&)); }; class MockIPreparedModel : public PreparedModel { diff --git a/test/unittest/components/nn_executor/nn_executor_test.cpp b/test/unittest/components/nn_executor/nn_executor_test.cpp index c57486d..f5380a6 100644 --- a/test/unittest/components/nn_executor/nn_executor_test.cpp +++ b/test/unittest/components/nn_executor/nn_executor_test.cpp @@ -79,6 +79,7 @@ public: MOCK_METHOD1(ReleaseBuffer, OH_NN_ReturnCode(const void*)); MOCK_METHOD2(AllocateBuffer, OH_NN_ReturnCode(size_t, int&)); MOCK_METHOD2(ReleaseBuffer, OH_NN_ReturnCode(int, size_t)); + MOCK_METHOD1(ReadOpVersion, OH_NN_ReturnCode(int&)); }; class MockIPreparedModel : public PreparedModel { diff --git a/test/unittest/components/nn_tensor/nn_tensor_test.cpp b/test/unittest/components/nn_tensor/nn_tensor_test.cpp index 9a5a9d9..c89fd8c 100644 --- a/test/unittest/components/nn_tensor/nn_tensor_test.cpp +++ b/test/unittest/components/nn_tensor/nn_tensor_test.cpp @@ -75,6 +75,7 @@ public: MOCK_METHOD1(ReleaseBuffer, OH_NN_ReturnCode(const void*)); MOCK_METHOD2(AllocateBuffer, OH_NN_ReturnCode(size_t, int&)); MOCK_METHOD2(ReleaseBuffer, OH_NN_ReturnCode(int, size_t)); + MOCK_METHOD1(ReadOpVersion, OH_NN_ReturnCode(int&)); }; class MockTensorDesc : public TensorDesc { diff --git a/test/unittest/components/v1_0/neural_network_core_test/neural_network_core_test.cpp b/test/unittest/components/v1_0/neural_network_core_test/neural_network_core_test.cpp index aa4b6c3..ee68a83 100644 --- a/test/unittest/components/v1_0/neural_network_core_test/neural_network_core_test.cpp +++ 
b/test/unittest/components/v1_0/neural_network_core_test/neural_network_core_test.cpp @@ -133,6 +133,7 @@ public: MOCK_METHOD1(ReleaseBuffer, OH_NN_ReturnCode(const void*)); MOCK_METHOD2(AllocateBuffer, OH_NN_ReturnCode(size_t, int&)); MOCK_METHOD2(ReleaseBuffer, OH_NN_ReturnCode(int, size_t)); + MOCK_METHOD1(ReadOpVersion, OH_NN_ReturnCode(int&)); }; class MockBackend : public Backend { diff --git a/test/unittest/components/v1_0/neural_network_runtime_test/neural_network_runtime_test.cpp b/test/unittest/components/v1_0/neural_network_runtime_test/neural_network_runtime_test.cpp index 457908d..a58ace0 100644 --- a/test/unittest/components/v1_0/neural_network_runtime_test/neural_network_runtime_test.cpp +++ b/test/unittest/components/v1_0/neural_network_runtime_test/neural_network_runtime_test.cpp @@ -296,6 +296,7 @@ public: MOCK_METHOD1(ReleaseBuffer, OH_NN_ReturnCode(const void*)); MOCK_METHOD2(AllocateBuffer, OH_NN_ReturnCode(size_t, int&)); MOCK_METHOD2(ReleaseBuffer, OH_NN_ReturnCode(int, size_t)); + MOCK_METHOD1(ReadOpVersion, OH_NN_ReturnCode(int&)); }; /* -- Gitee From 2bd8544286ae23e58bcf0c83b3329ceed7259212 Mon Sep 17 00:00:00 2001 From: w30052974 Date: Mon, 31 Mar 2025 16:13:40 +0800 Subject: [PATCH 2/5] =?UTF-8?q?nnrt=205.1=E6=9E=B6=E6=9E=84=E6=95=B4?= =?UTF-8?q?=E6=94=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: w30052974 --- .../neural_network_core.cpp | 9 +++--- .../neural_network_runtime.cpp | 12 ++++---- .../neural_network_runtime/nn_tensor.cpp | 1 + .../nncompiled_cache.cpp | 28 ++++++++++--------- .../neural_network_runtime/nncompiled_cache.h | 2 +- .../neural_network_runtime/nncompiler.cpp | 8 +++--- 6 files changed, 32 insertions(+), 28 deletions(-) diff --git a/frameworks/native/neural_network_core/neural_network_core.cpp b/frameworks/native/neural_network_core/neural_network_core.cpp index 659d83d..fb1c313 100644 --- a/frameworks/native/neural_network_core/neural_network_core.cpp +++ b/frameworks/native/neural_network_core/neural_network_core.cpp @@ -1706,7 +1706,8 @@ NNRT_API OH_NN_ReturnCode OH_NNExecutor_RunSync(OH_NNExecutor *executor, LOGE("OH_NNExecutor_RunSync failed, inputTensor is nullptr."); return OH_NN_INVALID_PARAMETER; } - if (inputCount == 0 || (inputCount > INPUT_OUTPUT_MAX_INDICES)) { + + if ((inputCount == 0) || (inputCount > INPUT_OUTPUT_MAX_INDICES)) { LOGE("OH_NNExecutor_RunSync failed, inputCount is 0 or more than 200."); return OH_NN_INVALID_PARAMETER; } @@ -1714,7 +1715,7 @@ NNRT_API OH_NN_ReturnCode OH_NNExecutor_RunSync(OH_NNExecutor *executor, LOGE("OH_NNExecutor_RunSync failed, outputTensor is nullptr."); return OH_NN_INVALID_PARAMETER; } - if (outputCount == 0 || (outputCount > INPUT_OUTPUT_MAX_INDICES)) { + if ((outputCount == 0) || (outputCount > INPUT_OUTPUT_MAX_INDICES)) { LOGE("OH_NNExecutor_RunSync failed, outputCount is 0 or more than 200."); return OH_NN_INVALID_PARAMETER; } @@ -1739,7 +1740,7 @@ NNRT_API OH_NN_ReturnCode OH_NNExecutor_RunAsync(OH_NNExecutor *executor, LOGE("OH_NNExecutor_RunAsync failed, inputTensor is nullptr."); return OH_NN_INVALID_PARAMETER; } - if (inputCount == 0 || (inputCount > INPUT_OUTPUT_MAX_INDICES)) { + if ((inputCount == 0) || (inputCount > INPUT_OUTPUT_MAX_INDICES)) { LOGE("OH_NNExecutor_RunAsync failed, inputCount is 0 or more than 200."); return OH_NN_INVALID_PARAMETER; } @@ -1747,7 +1748,7 @@ NNRT_API OH_NN_ReturnCode OH_NNExecutor_RunAsync(OH_NNExecutor *executor, LOGE("OH_NNExecutor_RunAsync failed, outputTensor is nullptr."); 
return OH_NN_INVALID_PARAMETER; } - if (outputCount == 0 || (outputCount > INPUT_OUTPUT_MAX_INDICES)) { + if ((outputCount == 0) || (outputCount > INPUT_OUTPUT_MAX_INDICES)) { LOGE("OH_NNExecutor_RunAsync failed, outputCount is 0 or more than 200."); return OH_NN_INVALID_PARAMETER; } diff --git a/frameworks/native/neural_network_runtime/neural_network_runtime.cpp b/frameworks/native/neural_network_runtime/neural_network_runtime.cpp index 31df313..b2419a3 100644 --- a/frameworks/native/neural_network_runtime/neural_network_runtime.cpp +++ b/frameworks/native/neural_network_runtime/neural_network_runtime.cpp @@ -51,9 +51,9 @@ const std::string HARDWARE_NAME = "const.ai.nnrt_deivce"; const std::string HARDWARE_VERSION = "v5_0"; constexpr size_t HARDWARE_NAME_MAX_LENGTH = 128; constexpr size_t FILE_NUMBER_MAX = 100; // 限制cache文件数量最大为100 -constexpr size_t EXTENSION_MAX_SIZE =200; // 限制MS传过来的参数最多为200 +constexpr size_t EXTENSION_MAX_SIZE = 200; // 限制MS传过来的参数最多为200 constexpr size_t INPUT_MAX_COUNT = 200; // 限制模型最大输入个数为200 -constexpr int32_t HEX_UINT = 16; +constexpr int32_t HEX_UNIT = 16; unsigned short CacheInfoGetCrc16(char* buffer, size_t length) { @@ -536,7 +536,7 @@ NNRT_API OH_NN_ReturnCode OH_NNModel_BuildFromLiteGraph(OH_NNModel *model, const } if (extensionSize > EXTENSION_MAX_SIZE) { - LOGE("OH_NNModel_BuildFromLiteGraph failed, extension more than 200."); + LOGE("OH_NNModel_BuildFromLiteGraph failed, extensionSize more than 200."); return OH_NN_INVALID_PARAMETER; } @@ -607,13 +607,13 @@ OH_NN_ReturnCode CheckCacheFileExtension(const std::string& content, int64_t& fi return OH_NN_INVALID_FILE; } - if(j["data"].find("fileNumber") == j["data"].end()) { + if (j["data"].find("fileNumber") == j["data"].end()) { LOGE("OH_NNModel_HasCache read fileNumber from cache info file failed."); return OH_NN_INVALID_FILE; } fileNumber = j["data"]["fileNumber"].get(); - if(j["data"].find("version") == j["data"].end()) { + if (j["data"].find("version") == j["data"].end()) { LOGE("OH_NNModel_HasCache read version from cache info file failed."); return OH_NN_INVALID_FILE; } @@ -626,7 +626,7 @@ OH_NN_ReturnCode CheckCacheFileExtension(const std::string& content, int64_t& fi const size_t dataLength = j["data"].dump().length(); char jData[dataLength + 1]; - if (strncpy(jData, dataLength+1, j["data"].dump().c_str(), dataLength != 0)) { + if (strncpy_s(jData, dataLength+1, j["data"].dump().c_str(), dataLength) != 0) { LOGE("OH_NNModel_HasCache ParseStr failed due to strncpy_s error."); return OH_NN_INVALID_FILE; } diff --git a/frameworks/native/neural_network_runtime/nn_tensor.cpp b/frameworks/native/neural_network_runtime/nn_tensor.cpp index 2e79981..e6d002f 100644 --- a/frameworks/native/neural_network_runtime/nn_tensor.cpp +++ b/frameworks/native/neural_network_runtime/nn_tensor.cpp @@ -239,6 +239,7 @@ OH_NN_ReturnCode NNTensor::ValidateDimensions(const std::vector& dimens LOGE("ParseDimension failed, dimensions more than 200."); return OH_NN_INVALID_PARAMETER; } + for (int32_t dim : dimensions) { if (dim < -1 || dim == 0) { LOGE("ParseDimension failed, dimension of OH_NN_Tensor cannot be 0 or less than -1, receive %d.", dim); diff --git a/frameworks/native/neural_network_runtime/nncompiled_cache.cpp b/frameworks/native/neural_network_runtime/nncompiled_cache.cpp index 7d62a54..408d4ea 100644 --- a/frameworks/native/neural_network_runtime/nncompiled_cache.cpp +++ b/frameworks/native/neural_network_runtime/nncompiled_cache.cpp @@ -253,7 +253,7 @@ OH_NN_ReturnCode NNCompiledCache::GenerateCacheModel(const 
std::vector(CacheInfoGetCrc16(cacheInfoData, dataLength)); return OH_NN_SUCCESS; @@ -303,14 +303,13 @@ OH_NN_ReturnCode NNCompiledCache::CheckCacheInfo(NNCompiledCacheInfo& modelCache std::string content((std::istreambuf_iterator(infoCacheFile)), std::istreambuf_iterator()); infoCacheFile.close(); - if (!nlohmann::json::accept(content)) { LOGE("[NNCompiledCache] CheckCacheInfo JSON parse error"); return OH_NN_INVALID_FILE; } - // Parse the json string - nlohmann::json j = nlomann::json::parse(content); + // Parse the JSON string + nlohmann::json j = nlohmann::json::parse(content); // modelCacheInfo.deviceId type is int64_t, // it is transformed from size_t value, so the transform here will not truncate value. if (j.find("data") == j.end()) { @@ -318,13 +317,13 @@ OH_NN_ReturnCode NNCompiledCache::CheckCacheInfo(NNCompiledCacheInfo& modelCache return OH_NN_INVALID_FILE; } - if(j["data"].find("deviceId") == j["data"].end()) { + if (j["data"].find("deviceId") == j["data"].end()) { LOGE("[NNCompiledCache] CheckCacheInfo read deviceId from cache info file failed."); return OH_NN_INVALID_FILE; } modelCacheInfo.deviceId = j["data"]["deviceId"].get(); - if(j["data"].find("version") == j["data"].end()) { + if (j["data"].find("version") == j["data"].end()) { LOGE("[NNCompiledCache] CheckCacheInfo read version from cache info file failed."); return OH_NN_INVALID_FILE; } @@ -338,7 +337,7 @@ OH_NN_ReturnCode NNCompiledCache::CheckCacheInfo(NNCompiledCacheInfo& modelCache return OH_NN_INVALID_PARAMETER; } - if(j["data"].find("fileNumber") == j["data"].end()) { + if (j["data"].find("fileNumber") == j["data"].end()) { LOGE("[NNCompiledCache] CheckCacheInfo read fileNumber from cache info file failed."); return OH_NN_INVALID_FILE; } @@ -347,11 +346,12 @@ OH_NN_ReturnCode NNCompiledCache::CheckCacheInfo(NNCompiledCacheInfo& modelCache return CheckCacheInfoExtension(modelCacheInfo, j); } -OH_NN_ReturnCode NNCompiledCache::CheckCacheInfoExtension(NNCompiledCacheInfo& modelCacheInfo, nlohmann::json& j) const +OH_NN_ReturnCode NNCompiledCache::CheckCacheInfoExtension(NNCompiledCacheInfo& modelCacheInfo, + nlohmann::json& j) const { const size_t dataLength = j["data"].dump().length(); char jData[dataLength + 1]; - if (strncpy(jData, dataLength+1, j["data"].dump().c_str(), dataLength != 0)) { + if (strncpy_s(jData, dataLength+1, j["data"].dump().c_str(), dataLength) != 0) { LOGE("[NNCompiledCache] ParseStr failed due to strncpy_s error."); return OH_NN_INVALID_FILE; } @@ -368,8 +368,8 @@ OH_NN_ReturnCode NNCompiledCache::CheckCacheInfoExtension(NNCompiledCacheInfo& m std::vector modelCheckSum; modelCheckSum.resize(modelCacheInfo.fileNumber); modelCacheInfo.modelCheckSum.resize(modelCacheInfo.fileNumber); - if(j["data"].find("modelCheckSum") == j["data"].end()) { - LOGE("[NNCompiledCache] CheckCacheInfo read cache file failed."); + if (j["data"].find("modelCheckSum") == j["data"].end()) { + LOGE("[NNCompiledCache] CheckCacheInfo read modelCheckSum from cache file failed."); return OH_NN_INVALID_FILE; } for (uint32_t i = 0; i < modelCacheInfo.fileNumber; ++i) { @@ -422,6 +422,8 @@ OH_NN_ReturnCode NNCompiledCache::ReadCacheModelFile(const std::string& filePath return OH_NN_MEMORY_ERROR; } + off_t fsize = sb.st_size; + void *ptr = mmap(NULL, fsize, PROT_READ, MAP_SHARED, fd, 0); if (ptr == MAP_FAILED) { LOGE("[NNCompiledCache] ReadCacheModelFile failed, failed to mmap file."); @@ -448,14 +450,14 @@ unsigned short NNCompiledCache::GetCrc16(char* buffer, size_t length) const } else { size_t step = length / 
MAX_CACHE_SIZE; while (length > sizeof(unsigned short) * step + 1) { - sum += *(reinterpret_cast(buffer)); + sum += *(reinterpret_cast(buffer)); length -= step * sizeof(unsigned short); buffer += step * sizeof(unsigned short); } } if (length > 0) { - buffer += length -1; + buffer += length - 1; sum += *(reinterpret_cast(buffer)); } diff --git a/frameworks/native/neural_network_runtime/nncompiled_cache.h b/frameworks/native/neural_network_runtime/nncompiled_cache.h index 756b784..7ee8ce2 100644 --- a/frameworks/native/neural_network_runtime/nncompiled_cache.h +++ b/frameworks/native/neural_network_runtime/nncompiled_cache.h @@ -24,7 +24,7 @@ #include #include -#include "nlohmann/json.cpp" +#include "nlohmann/json.hpp" #include "device.h" #include "neural_network_runtime/neural_network_runtime.h" diff --git a/frameworks/native/neural_network_runtime/nncompiler.cpp b/frameworks/native/neural_network_runtime/nncompiler.cpp index 257d1e6..ec40f00 100644 --- a/frameworks/native/neural_network_runtime/nncompiler.cpp +++ b/frameworks/native/neural_network_runtime/nncompiler.cpp @@ -14,7 +14,7 @@ */ #include "nncompiler.h" -#include "neural_network_runtime/neural_network_core.h" +#include "neural_network_runtime/neural_network_runtime.h" #include #include @@ -562,8 +562,8 @@ OH_NN_ReturnCode NNCompiler::SaveToCacheFile() const tensorBuffers.emplace_back(outputTensorDescBuffer); compiledCache.SetModelName(m_extensionConfig.modelName); - ret = compiledCache.Save(caches, m_cachePath, m_cacheVersion); compiledCache.SetIsExceedRamLimit(m_extensionConfig.isExceedRamLimit); + ret = compiledCache.Save(caches, m_cachePath, m_cacheVersion); if (ret != OH_NN_SUCCESS) { LOGE("[NNCompiler] SaveToCacheFile failed, error happened when saving model cache."); ReleaseBuffer(tensorBuffers); @@ -684,9 +684,9 @@ OH_NN_ReturnCode NNCompiler::RestoreFromCacheFile() cacheInfo["data"]["opVersion"] = currentOpVersion; cacheInfo["data"]["isExceedRamLimit"] = modelCacheInfo.isExceedRamLimit ? 
From a36e1c7bfdc0f05baee58032a2850ac5ce469c91 Mon Sep 17 00:00:00 2001
From: w30052974
Date: Mon, 31 Mar 2025 16:50:02 +0800
Subject: [PATCH 4/5] =?UTF-8?q?nnrt=205.1=E6=9E=B6=E6=9E=84=E6=95=B4?=
 =?UTF-8?q?=E6=94=B9?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: w30052974
---
 common/utils.h                                | 19 ++++++++++++++++
 .../neural_network_runtime.cpp                | 22 +------------------
 .../neural_network_runtime/nncompiler.cpp     |  1 -
 3 files changed, 20 insertions(+), 22 deletions(-)

diff --git a/common/utils.h b/common/utils.h
index 5dce79c..4689ae5 100644
--- a/common/utils.h
+++ b/common/utils.h
@@ -51,6 +51,25 @@ std::unique_ptr<T> CreateUniquePtr(Args&&... args)
 
 std::string GenUniqueName(const std::string&, const std::string&, const std::string&);
 
+unsigned short CacheInfoGetCrc16(char* buffer, size_t length)
+{
+    unsigned int sum = 0;
+    while (length > 1) {
+        sum += *(reinterpret_cast<unsigned short*>(buffer));
+        length -= sizeof(unsigned short);
+        buffer += sizeof(unsigned short);
+    }
+
+    if (length > 0) {
+        sum += *(reinterpret_cast<unsigned char*>(buffer));
+    }
+
+    while (sum >> HEX_UNIT) {
+        sum = (sum >> HEX_UNIT) + (sum & 0xffff);
+    }
+
+    return static_cast<unsigned short>(~sum);
+}
 } // namespace NeuralNetworkRuntime
 } // namespace OHOS
 #endif // NEURAL_NETWORK_RUNTIME_UTILS_H
diff --git a/frameworks/native/neural_network_runtime/neural_network_runtime.cpp b/frameworks/native/neural_network_runtime/neural_network_runtime.cpp
index b2419a3..c682431 100644
--- a/frameworks/native/neural_network_runtime/neural_network_runtime.cpp
+++ b/frameworks/native/neural_network_runtime/neural_network_runtime.cpp
@@ -20,10 +20,10 @@
 #include "executor.h"
 #include "inner_model.h"
 #include "log.h"
+#include "utils.h"
 #include "quant_param.h"
 #include "validation.h"
 #include "syspara/parameter.h"
-#include "securec.h"
 
 #include 
 #include 
@@ -55,26 +55,6 @@ constexpr size_t EXTENSION_MAX_SIZE = 200; // limit parameters passed from MS to at most 200
 constexpr size_t INPUT_MAX_COUNT = 200; // limit the maximum number of model inputs to 200
 constexpr int32_t HEX_UNIT = 16;
 
-unsigned short CacheInfoGetCrc16(char* buffer, size_t length)
-{
-    unsigned int sum = 0;
-    while (length > 1) {
-        sum += *(reinterpret_cast<unsigned short*>(buffer));
-        length -= sizeof(unsigned short);
-        buffer += sizeof(unsigned short);
-    }
-
-    if (length > 0) {
-        sum += *(reinterpret_cast<unsigned char*>(buffer));
-    }
-
-    while (sum >> HEX_UNIT) {
-        sum = (sum >> HEX_UNIT) + (sum & 0xffff);
-    }
-
-    return static_cast<unsigned short>(~sum);
-}
-
 NNRT_API NN_QuantParam *OH_NNQuantParam_Create()
 {
     auto* quantParamImpl = new (std::nothrow) QuantParams();
diff --git a/frameworks/native/neural_network_runtime/nncompiler.cpp b/frameworks/native/neural_network_runtime/nncompiler.cpp
index ec40f00..ad04e27 100644
--- a/frameworks/native/neural_network_runtime/nncompiler.cpp
+++ b/frameworks/native/neural_network_runtime/nncompiler.cpp
@@ -14,7 +14,6 @@
  */
 
 #include "nncompiler.h"
-#include "neural_network_runtime/neural_network_runtime.h"
 
 #include 
 #include 
-- 
Gitee
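A structural observation on this patch: it gives CacheInfoGetCrc16 an out-of-line definition in common/utils.h, so every translation unit including the header emits its own copy of the symbol, which violates the one-definition rule and fails at link time with duplicate definitions; the body also references HEX_UNIT, which is defined in neural_network_runtime.cpp rather than in the header. The follow-up patch below moves the function back. Had the helper stayed in the header, a sketch of the conventional fix would be inline linkage with a local fold constant (kHexUnit is an assumed stand-in for HEX_UNIT, and the Header suffix only avoids clashing with the real symbol):

    #include <cstddef>
    #include <cstdint>

    constexpr int32_t kHexUnit = 16;  // assumed stand-in for HEX_UNIT

    // Hypothetical header-safe variant, not what the series ships: inline
    // linkage lets every includer share one definition.
    inline unsigned short CacheInfoGetCrc16Header(char* buffer, size_t length)
    {
        unsigned int sum = 0;
        while (length > 1) {
            sum += *(reinterpret_cast<unsigned short*>(buffer));
            length -= sizeof(unsigned short);
            buffer += sizeof(unsigned short);
        }
        if (length > 0) {  // odd trailing byte
            sum += *(reinterpret_cast<unsigned char*>(buffer));
        }
        while (sum >> kHexUnit) {  // fold carries into the low 16 bits
            sum = (sum >> kHexUnit) + (sum & 0xffff);
        }
        return static_cast<unsigned short>(~sum);
    }

static linkage would also link, at the price of one private copy of the function per translation unit.
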
From 506f47abfe0779f04c881f74cdf13c6ced9b2a55 Mon Sep 17 00:00:00 2001
From: w30052974
Date: Mon, 31 Mar 2025 17:44:03 +0800
Subject: [PATCH 5/5] =?UTF-8?q?nnrt=205.1=E6=9E=B6=E6=9E=84=E6=95=B4?=
 =?UTF-8?q?=E6=94=B9?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: w30052974
---
 common/utils.h                                | 19 -----------------
 .../neural_network_runtime.cpp                | 21 ++++++++++++++++++-
 .../neural_network_runtime/nncompiler.cpp     |  1 +
 .../neural_network_runtime.h                  |  1 +
 4 files changed, 22 insertions(+), 20 deletions(-)

diff --git a/common/utils.h b/common/utils.h
index 4689ae5..5dce79c 100644
--- a/common/utils.h
+++ b/common/utils.h
@@ -51,25 +51,6 @@ std::unique_ptr<T> CreateUniquePtr(Args&&... args)
 
 std::string GenUniqueName(const std::string&, const std::string&, const std::string&);
 
-unsigned short CacheInfoGetCrc16(char* buffer, size_t length)
-{
-    unsigned int sum = 0;
-    while (length > 1) {
-        sum += *(reinterpret_cast<unsigned short*>(buffer));
-        length -= sizeof(unsigned short);
-        buffer += sizeof(unsigned short);
-    }
-
-    if (length > 0) {
-        sum += *(reinterpret_cast<unsigned char*>(buffer));
-    }
-
-    while (sum >> HEX_UNIT) {
-        sum = (sum >> HEX_UNIT) + (sum & 0xffff);
-    }
-
-    return static_cast<unsigned short>(~sum);
-}
 } // namespace NeuralNetworkRuntime
 } // namespace OHOS
 #endif // NEURAL_NETWORK_RUNTIME_UTILS_H
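With the helper deleted from utils.h, the next hunk restores it verbatim in neural_network_runtime.cpp. The algorithm is an Internet-checksum-style ones'-complement sum: 16-bit words are accumulated into a 32-bit total, carries are folded back into the low half with sum = (sum >> 16) + (sum & 0xffff), and the final value is complemented. A worked usage sketch; the input bytes are made up and the expected value assumes a little-endian target:

    #include <cstddef>
    #include <cstdio>

    unsigned short CacheInfoGetCrc16(char* buffer, size_t length);  // from the hunk below

    int main()
    {
        char data[] = {0x12, 0x34, 0x56, 0x78};  // little-endian words 0x3412, 0x7856
        // 0x3412 + 0x7856 = 0xAC68; no carry to fold; ~0xAC68 -> 0x5397
        unsigned short crc = CacheInfoGetCrc16(data, sizeof(data));
        std::printf("checksum: 0x%04X\n", crc);  // expected: 0x5397
        return 0;
    }

The fold loop always terminates: while any bits remain above the low 16, each pass strictly shrinks the sum.
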
diff --git a/frameworks/native/neural_network_runtime/neural_network_runtime.cpp b/frameworks/native/neural_network_runtime/neural_network_runtime.cpp
index c682431..905582d 100644
--- a/frameworks/native/neural_network_runtime/neural_network_runtime.cpp
+++ b/frameworks/native/neural_network_runtime/neural_network_runtime.cpp
@@ -20,7 +20,6 @@
 #include "executor.h"
 #include "inner_model.h"
 #include "log.h"
-#include "utils.h"
 #include "quant_param.h"
 #include "validation.h"
 #include "syspara/parameter.h"
@@ -55,6 +54,26 @@ constexpr size_t EXTENSION_MAX_SIZE = 200; // limit parameters passed from MS to at most 200
 constexpr size_t INPUT_MAX_COUNT = 200; // limit the maximum number of model inputs to 200
 constexpr int32_t HEX_UNIT = 16;
 
+unsigned short CacheInfoGetCrc16(char* buffer, size_t length)
+{
+    unsigned int sum = 0;
+    while (length > 1) {
+        sum += *(reinterpret_cast<unsigned short*>(buffer));
+        length -= sizeof(unsigned short);
+        buffer += sizeof(unsigned short);
+    }
+
+    if (length > 0) {
+        sum += *(reinterpret_cast<unsigned char*>(buffer));
+    }
+
+    while (sum >> HEX_UNIT) {
+        sum = (sum >> HEX_UNIT) + (sum & 0xffff);
+    }
+
+    return static_cast<unsigned short>(~sum);
+}
+
 NNRT_API NN_QuantParam *OH_NNQuantParam_Create()
 {
     auto* quantParamImpl = new (std::nothrow) QuantParams();
diff --git a/frameworks/native/neural_network_runtime/nncompiler.cpp b/frameworks/native/neural_network_runtime/nncompiler.cpp
index ad04e27..ec40f00 100644
--- a/frameworks/native/neural_network_runtime/nncompiler.cpp
+++ b/frameworks/native/neural_network_runtime/nncompiler.cpp
@@ -14,6 +14,7 @@
  */
 
 #include "nncompiler.h"
+#include "neural_network_runtime/neural_network_runtime.h"
 
 #include 
 #include 
diff --git a/interfaces/kits/c/neural_network_runtime/neural_network_runtime.h b/interfaces/kits/c/neural_network_runtime/neural_network_runtime.h
index f1c9040..fdffb3f 100644
--- a/interfaces/kits/c/neural_network_runtime/neural_network_runtime.h
+++ b/interfaces/kits/c/neural_network_runtime/neural_network_runtime.h
@@ -635,6 +635,7 @@ OH_NN_ReturnCode OH_NNExecutor_SetOutputWithMemory(OH_NNExecutor *executor,
                                                    uint32_t outputIndex,
                                                    const OH_NN_Memory *memory);
 
+unsigned short CacheInfoGetCrc16(char* buffer, size_t length);
 #ifdef __cplusplus
 }
 #endif // __cplusplus
-- 
Gitee
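The final hunk publishes CacheInfoGetCrc16 in the public C header, inside the extern "C" region; the signature uses only built-in types, so it is C-callable as declared. A plausible consumer is integrity checking when a cache is reloaded; the sketch below is illustrative only — VerifyModelCheckSum and the buffer/size pair layout are assumptions, not part of this series:

    #include <cstddef>
    #include <utility>
    #include <vector>

    unsigned short CacheInfoGetCrc16(char* buffer, size_t length);  // public declaration above

    bool VerifyModelCheckSum(const std::vector<std::pair<char*, size_t>>& caches,
                             const std::vector<unsigned short>& recorded)
    {
        if (caches.size() != recorded.size()) {
            return false;
        }
        for (size_t i = 0; i < caches.size(); ++i) {
            // recompute each buffer's checksum and compare with the recorded value
            if (CacheInfoGetCrc16(caches[i].first, caches[i].second) != recorded[i]) {
                return false;  // stale or corrupted cache entry
            }
        }
        return true;
    }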