diff --git a/frameworks/native/neural_network_core/neural_network_core.cpp b/frameworks/native/neural_network_core/neural_network_core.cpp
index ec2753ebd81e1c755c22d92ecc84e61427c5d2f9..1b6483dda21e66ee1d98f3d9bf9ce441c5f470b8 100644
--- a/frameworks/native/neural_network_core/neural_network_core.cpp
+++ b/frameworks/native/neural_network_core/neural_network_core.cpp
@@ -601,7 +601,7 @@ OH_NN_ReturnCode Authentication(Compilation** compilation)
         compilationImpl->callingPid = std::atoi((iter->second).data());
     }
 
-    NNRtServiceApi& nnrtService = NNRtServiceApi::GetInstance();
+    const NNRtServiceApi& nnrtService = NNRtServiceApi::GetInstance();
     if (!nnrtService.IsServiceAvaliable()) {
         LOGW("Authentication failed, fail to get nnrt service, skip Authentication.");
         return OH_NN_SUCCESS;
@@ -1624,8 +1624,6 @@ OH_NN_ReturnCode RunSync(Executor *executor,
     }
 
     long timeStart = 0;
-    long timeEnd = 0;
-    int32_t modelLatency = 0;
     if (configPtr->isNeedModelLatency) {
         timeStart = std::chrono::duration_cast<std::chrono::milliseconds>(
             std::chrono::system_clock::now().time_since_epoch()).count();
@@ -1638,9 +1636,9 @@ OH_NN_ReturnCode RunSync(Executor *executor,
     }
 
     if (configPtr->isNeedModelLatency) {
-        timeEnd = std::chrono::duration_cast<std::chrono::milliseconds>(
+        long timeEnd = std::chrono::duration_cast<std::chrono::milliseconds>(
             std::chrono::system_clock::now().time_since_epoch()).count();
-        modelLatency = static_cast<int32_t>((timeEnd - timeStart));
+        int32_t modelLatency = static_cast<int32_t>((timeEnd - timeStart));
         std::thread t(UpdateModelLatency, configPtr, modelLatency);
         t.detach();
         LOGE("update async start.");
diff --git a/frameworks/native/neural_network_runtime/neural_network_runtime.cpp b/frameworks/native/neural_network_runtime/neural_network_runtime.cpp
index c56ffdef50644e75e4bf2debf6b0ad3e82e4d77a..25d71c3cd060130c5bf9f91f339d2654d86e56c2 100644
--- a/frameworks/native/neural_network_runtime/neural_network_runtime.cpp
+++ b/frameworks/native/neural_network_runtime/neural_network_runtime.cpp
@@ -314,7 +314,6 @@ OH_NN_ReturnCode ParseInputDimsFromExtensions(char* data, size_t dataSize, const
     size_t inputCount = liteGraph->input_indices_.size(); // number of LiteGraph inputs
     size_t allTensorSize = liteGraph->all_tensors_.size(); // number of all tensors in the LiteGraph
-    size_t inputDimSize = 0; // stores the dimension count of each input
     std::vector<int32_t> inputDim;
     size_t dataIndex = 0;
     for (size_t i = 0; i < inputCount; ++i) {
@@ -328,7 +327,7 @@ OH_NN_ReturnCode ParseInputDimsFromExtensions(char* data, size_t dataSize, const
         // get the dimensions of the current input
         mindspore::lite::TensorPtr tensor = liteGraph->all_tensors_[liteGraph->input_indices_[i]];
         auto tensorDims = mindspore::lite::MindIR_Tensor_GetDims(tensor);
-        inputDimSize = tensorDims.size();
+        size_t inputDimSize = tensorDims.size();
         if (allDimsSize < inputDimSize) {
             LOGE("ParseInputDimsFromExtensions failed, dataSize is invalid.");
             extensionConfig.inputDims.clear();
@@ -552,9 +551,9 @@ NNRT_API bool OH_NNModel_HasCache(const char *cacheDir, const char *modelName)
     ifs.close();
 
     // determine whether cache model files exist
-    std::string cacheModelPath;
     for (int64_t i = 0; i < fileNumber; ++i) {
-        cacheModelPath = std::string(cacheDir) + "/" + std::string(modelName) + std::to_string(i) + ".nncache";
+        std::string cacheModelPath =
+            std::string(cacheDir) + "/" + std::string(modelName) + std::to_string(i) + ".nncache";
         exist = (exist && (stat(cacheModelPath.c_str(), &buffer) == 0));
     }