diff --git a/frameworks/native/neural_network_core/neural_network_core.cpp b/frameworks/native/neural_network_core/neural_network_core.cpp index 63fed5c04f46b6bf0223343de85c4db668870b24..c4249b75a61180c606b51ee41cf4563600f34f6a 100644 --- a/frameworks/native/neural_network_core/neural_network_core.cpp +++ b/frameworks/native/neural_network_core/neural_network_core.cpp @@ -648,6 +648,33 @@ OH_NN_ReturnCode GetNnrtModelId(Compilation* compilationImpl, NNRtServiceApi& nn return OH_NN_SUCCESS; } + +OH_NN_ReturnCode IsCompilationAvaliable(Compilation* compilationImpl) +{ + if (compilationImpl == nullptr) { + LOGE("IsCompilationAvaliable failed, compilation implementation is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + + if (((compilationImpl->nnModel != nullptr) && (compilationImpl->offlineModelPath != nullptr)) || + ((compilationImpl->nnModel != nullptr) && + ((compilationImpl->offlineModelBuffer.first != nullptr) || + (compilationImpl->offlineModelBuffer.second != static_cast<size_t>(0)))) || + ((compilationImpl->offlineModelPath != nullptr) && + ((compilationImpl->offlineModelBuffer.first != nullptr) || + (compilationImpl->offlineModelBuffer.second != static_cast<size_t>(0))))) { + LOGE("IsCompilationAvaliable failed, find multi model to build compilation."); + return OH_NN_INVALID_PARAMETER; + } + + if (compilationImpl->compiler != nullptr) { + LOGE("IsCompilationAvaliable failed, the compiler in compilation is not nullptr, " + "please input a new compilation."); + return OH_NN_INVALID_PARAMETER; + } + + return OH_NN_SUCCESS; +} } OH_NN_ReturnCode GetModelId(Compilation** compilation) @@ -700,24 +727,12 @@ NNRT_API OH_NN_ReturnCode OH_NNCompilation_Build(OH_NNCompilation *compilation) return OH_NN_INVALID_PARAMETER; } - Compilation* compilationImpl = reinterpret_cast<Compilation*>(compilation); - - if (((compilationImpl->nnModel != nullptr) && (compilationImpl->offlineModelPath != nullptr)) || - ((compilationImpl->nnModel != nullptr) && - ((compilationImpl->offlineModelBuffer.first != 
nullptr) || - (compilationImpl->offlineModelBuffer.second != static_cast<size_t>(0)))) || - ((compilationImpl->offlineModelPath != nullptr) && - ((compilationImpl->offlineModelBuffer.first != nullptr) || - (compilationImpl->offlineModelBuffer.second != static_cast<size_t>(0))))) { - LOGE("OH_NNCompilation_Build failed, find multi model to build compilation."); - return OH_NN_INVALID_PARAMETER; - } - OH_NN_ReturnCode ret = OH_NN_SUCCESS; - if (compilationImpl->compiler != nullptr) { - LOGE("OH_NNCompilation_Build failed, the compiler in compilation is not nullptr, " - "please input a new compilation."); - return OH_NN_INVALID_PARAMETER; + Compilation* compilationImpl = reinterpret_cast<Compilation*>(compilation); + ret = IsCompilationAvaliable(compilationImpl); + if (ret != OH_NN_SUCCESS) { + LOGE("OH_NNCompilation_Build failed, fail to compiler parameter."); + return ret; } Compiler* compiler = nullptr; @@ -755,7 +770,7 @@ NNRT_API OH_NN_ReturnCode OH_NNCompilation_Build(OH_NNCompilation *compilation) if (nnrtService.IsServiceAvaliable()) { bool retCode = nnrtService.PullUpDlliteService(); if (!retCode) { - LOGI("OH_NNCompilation_Build failed, PullUpDlliteService failed."); + LOGW("OH_NNCompilation_Build failed, PullUpDlliteService failed."); } } @@ -1126,7 +1141,6 @@ NNRT_API NN_Tensor* OH_NNTensor_CreateWithFd(size_t deviceID, NNRT_API OH_NN_ReturnCode OH_NNTensor_Destroy(NN_Tensor **tensor) { - LOGI("start OH_NNTensor_Destroy"); if (tensor == nullptr) { LOGE("OH_NNTensor_Destroy failed, tensor is nullptr."); return OH_NN_INVALID_PARAMETER; diff --git a/frameworks/native/neural_network_runtime/nncompiled_cache.cpp b/frameworks/native/neural_network_runtime/nncompiled_cache.cpp index 0fcf1144a34979fb9747888d5581b7b2696c685e..5053e52fcf2474d0ee8b34b1c332a9043979ba24 100644 --- a/frameworks/native/neural_network_runtime/nncompiled_cache.cpp +++ b/frameworks/native/neural_network_runtime/nncompiled_cache.cpp @@ -61,9 +61,9 @@ OH_NN_ReturnCode NNCompiledCache::Save(const std::vector<Buffer>& caches) 
+OH_NN_ReturnCode NNCompiledCache::CheckCache(const std::string& cacheDir, + uint32_t version, + std::vector<Buffer>& caches) { if (cacheDir.empty()) { LOGE("[NNCompiledCache] Restore failed, cacheDir is empty."); @@ -79,6 +79,17 @@ OH_NN_ReturnCode NNCompiledCache::Restore(const std::string& cacheDir, LOGE("[NNCompiledCache] Restore failed, m_device is empty."); return OH_NN_INVALID_PARAMETER; } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode NNCompiledCache::Restore(const std::string& cacheDir, + uint32_t version, + std::vector<Buffer>& caches) +{ + OH_NN_ReturnCode ret = CheckCache(cacheDir, version, caches); + if (ret != OH_NN_SUCCESS) { + return ret; + } std::string cacheInfoPath = cacheDir + "/" + m_modelName + "cache_info.nncache"; char path[PATH_MAX]; @@ -92,7 +103,7 @@ OH_NN_ReturnCode NNCompiledCache::Restore(const std::string& cacheDir, } NNCompiledCacheInfo cacheInfo; - OH_NN_ReturnCode ret = CheckCacheInfo(cacheInfo, path); + ret = CheckCacheInfo(cacheInfo, path); if (ret != OH_NN_SUCCESS) { LOGE("[NNCompiledCache] Restore failed, error happened when calling CheckCacheInfo."); return ret; @@ -122,6 +133,7 @@ OH_NN_ReturnCode NNCompiledCache::Restore(const std::string& cacheDir, cacheInfo.modelCheckSum[i]) { LOGE("[NNCompiledCache] Restore failed, the cache model file %{public}s has been changed.", cacheModelPath.c_str()); + close(modelBuffer.fd); return OH_NN_INVALID_FILE; } @@ -424,7 +436,7 @@ OH_NN_ReturnCode NNCompiledCache::ReadCacheModelFile(const std::string& filePath off_t fsize = sb.st_size; - void *ptr = mmap(NULL, fsize, PROT_READ, MAP_SHARED, fd, 0); + void *ptr = mmap(nullptr, fsize, PROT_READ, MAP_SHARED, fd, 0); if (ptr == MAP_FAILED) { LOGE("[NNCompiledCache] ReadCacheModelFile failed, failed to mmap file."); close(fd); @@ -441,6 +453,10 @@ unsigned short NNCompiledCache::GetCrc16(char* buffer, size_t length) const { unsigned int sum = 0; + if (buffer == nullptr) { + return static_cast<unsigned short>(~sum); + } + if (length < MAX_CACHE_SIZE) { while (length > 1) { sum 
+= *(reinterpret_cast<unsigned short*>(buffer)); diff --git a/frameworks/native/neural_network_runtime/nncompiled_cache.h b/frameworks/native/neural_network_runtime/nncompiled_cache.h index 7ee8ce21ee6d1bcfbe7d85b7f7020c351a5cfc87..c68e6054ac74663822dd8ca87e9d5102921475d8 100644 --- a/frameworks/native/neural_network_runtime/nncompiled_cache.h +++ b/frameworks/native/neural_network_runtime/nncompiled_cache.h @@ -68,6 +68,9 @@ public: unsigned short GetCrc16(char* buffer, size_t length) const; private: + OH_NN_ReturnCode CheckCache(const std::string& cacheDir, + uint32_t version, + std::vector<Buffer>& caches); OH_NN_ReturnCode GenerateCacheFiles(const std::vector<Buffer>& caches, const std::string& cacheDir, uint32_t version) const;