diff --git a/frameworks/native/neural_network_core/nnrt_client.h b/frameworks/native/neural_network_core/nnrt_client.h
index 1622aa364d83d8804180cd229e07c4fb15ec5ecf..ff43c0cf91b68b41d0e647281f4807324bf8ea0b 100644
--- a/frameworks/native/neural_network_core/nnrt_client.h
+++ b/frameworks/native/neural_network_core/nnrt_client.h
@@ -42,7 +42,7 @@ public:
     int (*UpdateModelLatency)(uint32_t hiaiModelId, int modelLatency) = nullptr;
     int (*Unload)(uint32_t hiaiModelId) = nullptr;
     bool (*PullUpDlliteService)() = nullptr;
-    int (*AutoReinitSetModelID)(uint32_t m_originHiaiModelId, uint32_t hiaimodelID, size_t nnrtmodelID) = nullptr;
+    int (*AutoReinitSetModelID)(uint32_t originHiaiModelId, uint32_t hiaimodelID, size_t nnrtmodelID) = nullptr;
     int (*AutoReinitScheduling)(uint32_t originHiaimodelID, uint32_t hiaiModelId, bool* needModelLatency, const char* cachePath) = nullptr;
     int (*AutoUnload)(uint32_t originHiaimodelID, uint32_t hiaiModelId) = nullptr;
diff --git a/frameworks/native/neural_network_runtime/nnexecutor.cpp b/frameworks/native/neural_network_runtime/nnexecutor.cpp
index 64dd6dc88d92dd14f1bf0bfa85dbda4782d99a58..4adb72875739827a8e8bbed8890ddf6edf32fca9 100644
--- a/frameworks/native/neural_network_runtime/nnexecutor.cpp
+++ b/frameworks/native/neural_network_runtime/nnexecutor.cpp
@@ -152,7 +152,7 @@ NNExecutor::NNExecutor(size_t backendID, std::shared_ptr<Device> device, std::sh
     };
     m_autoUnloadHandler->PostTask(AutoUnloadTask,
         "nnexecutor_autounload" + std::to_string(m_executorid), AUTOUNLOAD_TIME);
-    
+
     GetModelID(m_originHiaiModelId);
     LOGI("manualload pid=%{public}d originHiaiModelId=%{public}d", getpid(), m_originHiaiModelId);
@@ -388,7 +388,7 @@ OH_NN_ReturnCode NNExecutor::DeserializedTensorsFromBuffer(
     auto memRet = memcpy_s(&desc.m_dataType, SIZE_OF_DATATYPE, ptr, sizeof(desc.m_dataType));
     if (memRet != EOK) {
-        LOGE("[NNCompiler] DeserializedTensorsFromBuffer failed, failed to memcpy_s data type.");
+        LOGE("[NNExecutor] DeserializedTensorsFromBuffer failed, failed to memcpy_s data type.");
         ReleaseDescShape(immediateTensorDescs);
         return OH_NN_MEMORY_ERROR;
     }
@@ -396,7 +396,7 @@
     memRet = memcpy_s(&desc.m_format, SIZE_OF_FORMAT, ptr, sizeof(desc.m_format));
     if (memRet != EOK) {
-        LOGE("[NNCompiler] DeserializedTensorsFromBuffer failed, failed to memcpy_s format.");
+        LOGE("[NNExecutor] DeserializedTensorsFromBuffer failed, failed to memcpy_s format.");
         ReleaseDescShape(immediateTensorDescs);
         return OH_NN_MEMORY_ERROR;
     }
@@ -562,7 +562,6 @@ OH_NN_ReturnCode NNExecutor::RunSync(NN_Tensor* inputTensors[], size_t inputSize
     if (Reload() != OH_NN_SUCCESS) {
         return OH_NN_INVALID_PARAMETER;
     }
-
     auto _ret = GetModelID(modelId);
     LOGI("AutoReload pid=%{public}d originHiaiModelId=%{public}d hiaiModelId=%{public}d",
         getpid(), m_originHiaiModelId, modelId);
@@ -1518,7 +1517,7 @@ OH_NN_ReturnCode NNExecutor::ReinitScheduling(uint32_t hiaimodelID, bool* needMo
     }
     if (nnrtService.AutoReinitSetModelID == nullptr) {
-        LOGE("HiaiExecutorImpl] ReinitScheduling failed, nnrtService AutoReinitSetModelId func is nullptr.");
+        LOGE("[HiaiExecutorImpl] ReinitScheduling failed, nnrtService AutoReinitSetModelId func is nullptr.");
         return OH_NN_INVALID_PARAMETER;
     }
@@ -1554,7 +1553,7 @@ OH_NN_ReturnCode NNExecutor::ReinitScheduling(uint32_t hiaimodelID, bool* needMo
         LOGE("ReinitScheduling failed, nnrtService IsSupportScheduling func is nullptr.");
         return OH_NN_INVALID_PARAMETER;
     }
-    
+
     ret = nnrtService.AutoReinitScheduling(m_originHiaiModelId, hiaimodelID,
         needModelLatency, cachePath);
     if (ret != static_cast<int>(OH_NN_SUCCESS)) {
         LOGE("ReinitScheduling failed, some error happened when scheduling.");
@@ -1571,7 +1570,6 @@ OH_NN_ReturnCode NNExecutor::DeinitScheduling(uint32_t hiaimodelID)
         LOGE("[HiaiExecutorImpl] AutoUnload failed, nnrtService AutoUnload func is nullptr.");
         return OH_NN_INVALID_PARAMETER;
     }
-
    int ret = nnrtService.AutoUnload(m_originHiaiModelId, hiaimodelID);
     if (ret != static_cast<int>(OH_NN_SUCCESS)) {
         LOGE("[HiaiExecutorImpl] AutoUnload failed, some error happen when AutoUnload hiaiModelId.");
@@ -1618,4 +1616,4 @@ bool NNExecutor::DeinitModel(std::string mode)
     return true;
 }
 } // namespace NeuralNetworkRuntime
-} // namespace OHOS
+} // namespace OHOS
\ No newline at end of file
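
A minimal, hedged sketch of the pattern these hunks exercise: the function pointers declared in nnrt_client.h stay nullptr until the NNRt service library is bound, so each call site guards the pointer, invokes it, and compares the int result against static_cast<int>(OH_NN_SUCCESS), as NNExecutor::DeinitScheduling does above. Everything below other than the AutoUnload signature and that guard-then-call shape is a hypothetical stand-in (UnloadModel, FakeAutoUnload, the enum values, printf in place of LOGE), not code from this patch.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Illustrative return codes; the values are stand-ins, not the real OH_NN ABI.
enum OH_NN_ReturnCode { OH_NN_SUCCESS = 0, OH_NN_INVALID_PARAMETER = 2 };

// Mirrors the function-pointer table style of nnrt_client.h: members default
// to nullptr and use plain parameter names (no m_ prefix, per this patch).
struct NNRtServiceApi {
    int (*AutoUnload)(uint32_t originHiaimodelID, uint32_t hiaiModelId) = nullptr;
};

// Hypothetical caller following the same guard-then-call-then-cast pattern
// as NNExecutor::DeinitScheduling in the hunk at -1571.
OH_NN_ReturnCode UnloadModel(const NNRtServiceApi& nnrtService, uint32_t originId, uint32_t hiaiModelId)
{
    if (nnrtService.AutoUnload == nullptr) {
        std::printf("AutoUnload func is nullptr.\n");  // stands in for LOGE
        return OH_NN_INVALID_PARAMETER;
    }
    int ret = nnrtService.AutoUnload(originId, hiaiModelId);
    if (ret != static_cast<int>(OH_NN_SUCCESS)) {
        std::printf("AutoUnload failed.\n");
        return OH_NN_INVALID_PARAMETER;
    }
    return OH_NN_SUCCESS;
}

// Hypothetical service-side implementation bound at runtime for the demo.
static int FakeAutoUnload(uint32_t, uint32_t) { return 0; }

int main()
{
    NNRtServiceApi api;               // unbound: pointer is still nullptr
    UnloadModel(api, 1u, 2u);         // guard rejects the call instead of crashing
    api.AutoUnload = FakeAutoUnload;  // bound: the call goes through
    return UnloadModel(api, 1u, 2u) == OH_NN_SUCCESS ? 0 : 1;
}

The point of the guard is visible in both ReinitScheduling hunks: the service library is optional at runtime, so a missing symbol degrades to OH_NN_INVALID_PARAMETER plus a log line rather than a crash through a null function pointer.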