diff --git a/frameworks/native/neural_network_core/neural_network_core.cpp b/frameworks/native/neural_network_core/neural_network_core.cpp
index 6646c3de7f249754a2ff527bb2777a4579028813..5a5efc3abda23855a3ca5bf771f8d7c118db7487 100644
--- a/frameworks/native/neural_network_core/neural_network_core.cpp
+++ b/frameworks/native/neural_network_core/neural_network_core.cpp
@@ -1244,6 +1244,11 @@ OH_NN_ReturnCode Scheduling(Compilation** compilation)
         return OH_NN_INVALID_PARAMETER;
     }
 
+    std::string cachePath = "";
+    if (compilationImpl->cachePath != nullptr) {
+        cachePath = compilationImpl->cachePath;
+    }
+
     bool supportStat = false;
     int ret = nnrtService.IsSupportScheduling(&supportStat);
     if (ret != static_cast<int>(OH_NN_SUCCESS)) {
@@ -1261,7 +1266,7 @@ OH_NN_ReturnCode Scheduling(Compilation** compilation)
     }
 
     bool needModelLatency = false;
-    ret = nnrtService.Scheduling(compilationImpl->hiaiModelId, &needModelLatency);
+    ret = nnrtService.Scheduling(compilationImpl->hiaiModelId, &needModelLatency, cachePath.c_str());
     if (ret != static_cast<int>(OH_NN_SUCCESS)) {
         LOGE("Scheduling failed, some error happened when scheduling.");
         return static_cast<OH_NN_ReturnCode>(ret);
diff --git a/frameworks/native/neural_network_core/nnrt_client.h b/frameworks/native/neural_network_core/nnrt_client.h
index d40736660be8dba4fc9a94cfd26c66514307400d..dd190845e7ec00320da7d5a602bde7c344e9dc1e 100644
--- a/frameworks/native/neural_network_core/nnrt_client.h
+++ b/frameworks/native/neural_network_core/nnrt_client.h
@@ -37,7 +37,7 @@ public:
     int (*IsSupportAuthentication)(bool* supportStat) = nullptr;
     int (*IsSupportScheduling)(bool* supportStat) = nullptr;
     int (*Authentication)(int callingPid) = nullptr;
-    int (*Scheduling)(uint32_t hiaiModelId, bool* needModelLatency) = nullptr;
+    int (*Scheduling)(uint32_t hiaiModelId, bool* needModelLatency, const char* cachePath) = nullptr;
     int (*UpdateModelLatency)(uint32_t hiaiModelId, int modelLatency) = nullptr;
     int (*Unload)(uint32_t hiaiModelId) = nullptr;