From 192303cec6ce08fd97da873236457d7df4f697fc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E7=86=8A=E6=94=80?=
Date: Fri, 31 Oct 2025 10:29:56 +0800
Subject: [PATCH] clean code

---
 mindspore-lite/python/api/model.py                        | 2 --
 .../src/extendrt/delegate/ascend_acl/model_process.cc     | 5 +----
 .../kernel/cpu/fp16_grad/arithmetic_fp16_self_grad.cc     | 4 ++++
 mindspore-lite/src/litert/kernel/cpu/fp32_grad/dropout.h  | 6 +++++-
 .../kernel/cpu/nnacl_c/fp32/non_max_suppression_fp32.c    | 2 +-
 .../src/litert/kernel/cpu/nnacl_c/int8/quantize.c         | 4 ++++
 mindspore-lite/tools/common/node_util.h                   | 9 +++++++--
 .../tools/optimizer/graph/redundant_op_remove_pass.cc     | 4 ++--
 8 files changed, 24 insertions(+), 12 deletions(-)

diff --git a/mindspore-lite/python/api/model.py b/mindspore-lite/python/api/model.py
index 9bf6eff4..4bb2f292 100644
--- a/mindspore-lite/python/api/model.py
+++ b/mindspore-lite/python/api/model.py
@@ -936,11 +936,9 @@ class MultiModelRunner:
         context = Context()
         check_isinstance("context", context, Context)
         check_isinstance("config_path", config_path, str)
-        self.provider = context.ascend.provider
         if not os.path.exists(model_path):
             raise RuntimeError(
                 "build_from_file failed, model_path does not exist!")
-        self.model_path_ = model_path
         model_type_ = _c_lite_wrapper.ModelType.kMindIR
         if model_type is not ModelType.MINDIR:
             raise RuntimeError(
diff --git a/mindspore-lite/src/extendrt/delegate/ascend_acl/model_process.cc b/mindspore-lite/src/extendrt/delegate/ascend_acl/model_process.cc
index 8f7ef761..644cda8d 100644
--- a/mindspore-lite/src/extendrt/delegate/ascend_acl/model_process.cc
+++ b/mindspore-lite/src/extendrt/delegate/ascend_acl/model_process.cc
@@ -1759,10 +1759,7 @@ MSTensor ModelProcess::GetOutputWithZeroCopy(const std::vector<MSTensor> *output
       auto ret = allocator_->CopyDeviceDataToDevice(output_info.cur_device_data, user_output.GetDeviceData(),
                                                     user_output.DataSize(), output_info.buffer_size, device_id_,
                                                     user_output.GetDeviceId());
-      if (ret != kSuccess) {
-        MS_LOG(ERROR) << "Copy output data from device to current device failed.";
-        return MSTensor(nullptr);
-      }
+      MS_CHECK_TRUE_MSG(ret == kSuccess, MSTensor(nullptr), "Copy output data from device to current device failed!");
     }
   } else if (user_output.Data() != nullptr) {
     aclrtMemcpyKind kind = ACL_MEMCPY_DEVICE_TO_HOST;
diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/arithmetic_fp16_self_grad.cc b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/arithmetic_fp16_self_grad.cc
index 382ca27c..ce0863ee 100644
--- a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/arithmetic_fp16_self_grad.cc
+++ b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/arithmetic_fp16_self_grad.cc
@@ -25,6 +25,9 @@
 using mindspore::lite::RET_ERROR;
 using mindspore::lite::RET_OK;
 using mindspore::schema::PrimitiveType_LogGrad;
+namespace {
+constexpr int kCountMinVal = 4;
+}  // namespace
 namespace mindspore::kernel {
 int ArithmeticSelfGradFp16CPUKernel::Prepare() {
   if (in_tensors_.size() != C2NUM) {
@@ -56,6 +59,7 @@ int ArithmeticSelfGradFp16CPUKernel::DoActivation(int task_id) {
   auto error_code = RET_OK;
   CHECK_NULL_RETURN(param_act_grad_);
   if (param_act_grad_->type_ == schema::PrimitiveType_LogGrad) {
+    MS_CHECK_TRUE_RET(count >= kCountMinVal, RET_ERROR);
     error_code = Fp16LogGrad(yt_addr + start, input_addr + start, count, output_addr + start);
   } else {
     MS_LOG(ERROR) << "Activation type error";
diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/dropout.h b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/dropout.h
index 9fc3d63d..99935b48 100644
--- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/dropout.h
+++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/dropout.h
@@ -25,7 +25,11 @@ class DropoutCPUKernel : public LiteKernel {
  public:
   DropoutCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
                    const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
-      : LiteKernel(parameter, inputs, outputs, ctx), thread_count_(ctx->thread_num_) {}
+      : LiteKernel(parameter, inputs, outputs, ctx), thread_count_(ctx->thread_num_) {
+    std::random_device rd;
+    unsigned int random_value = rd();
+    generator_.seed(random_value);
+  }

   ~DropoutCPUKernel() override = default;

diff --git a/mindspore-lite/src/litert/kernel/cpu/nnacl_c/fp32/non_max_suppression_fp32.c b/mindspore-lite/src/litert/kernel/cpu/nnacl_c/fp32/non_max_suppression_fp32.c
index 0d854cd9..a58165fd 100644
--- a/mindspore-lite/src/litert/kernel/cpu/nnacl_c/fp32/non_max_suppression_fp32.c
+++ b/mindspore-lite/src/litert/kernel/cpu/nnacl_c/fp32/non_max_suppression_fp32.c
@@ -148,7 +148,7 @@ int NonMaxSuppressionSelecte(NonMaxSuppressionStruct *nm_suppression, bool simpl
     SortCandidates(env, sorted_candidates, above_score_candidates, above_score_candidates_size);

     int selected_box_per_class_size = 0;
-    while (sorted_candidates_size >= 0 && selected_index_size < nm_suppression->max_output_per_class_) {
+    while (sorted_candidates_size > 0 && selected_index_size < nm_suppression->max_output_per_class_) {
       NMSBox *cand = sorted_candidates[sorted_candidates_size - 1];
       bool selected = true;
       for (int k = 0; k < selected_box_per_class_size; k++) {
diff --git a/mindspore-lite/src/litert/kernel/cpu/nnacl_c/int8/quantize.c b/mindspore-lite/src/litert/kernel/cpu/nnacl_c/int8/quantize.c
index e9eadb23..82da3ffe 100644
--- a/mindspore-lite/src/litert/kernel/cpu/nnacl_c/int8/quantize.c
+++ b/mindspore-lite/src/litert/kernel/cpu/nnacl_c/int8/quantize.c
@@ -157,5 +157,9 @@ void QuantizeMultiplier(double double_multiplier, int32_t *quantized_multiplier,
     rounded >>= 1;
     ++*shift;
   }
+  if (rounded >= INT_MAX) {
+    *quantized_multiplier = 0;
+    return;
+  }
   *quantized_multiplier = (dul.ul & dSignMask) ? (-(int32_t)(rounded)) : (int32_t)(rounded);
 }
diff --git a/mindspore-lite/tools/common/node_util.h b/mindspore-lite/tools/common/node_util.h
index f5dd0dcb..4bdd5f5d 100644
--- a/mindspore-lite/tools/common/node_util.h
+++ b/mindspore-lite/tools/common/node_util.h
@@ -340,8 +340,13 @@ static STATUS TransFilterData(kTransFilterType type, int32_t filterK, int32_t fi
 template <typename T>
 static STATUS TransFilterData(schema::TensorT *tensor, kTransFilterType type, int32_t filterK, int32_t filterC,
                               int32_t filterH, int32_t filterW) {
-  MS_ASSERT(tensor != nullptr);
-  int count = filterH * filterW * filterC * filterK;
+  MS_CHECK_TRUE_MSG(tensor != nullptr, RET_NULL_PTR, "tensor is nullptr!");
+  MS_CHECK_INT_MUL_NOT_OVERFLOW(filterH, filterW, RET_ERROR);
+  int hwVal = filterH * filterW;
+  MS_CHECK_INT_MUL_NOT_OVERFLOW(filterC, filterK, RET_ERROR);
+  int ckVal = filterC * filterK;
+  MS_CHECK_INT_MUL_NOT_OVERFLOW(hwVal, ckVal, RET_ERROR);
+  int count = hwVal * ckVal;
   if (count <= 0) {
     MS_LOG(ERROR) << "Dim size invalid";
     return RET_ERROR;
diff --git a/mindspore-lite/tools/optimizer/graph/redundant_op_remove_pass.cc b/mindspore-lite/tools/optimizer/graph/redundant_op_remove_pass.cc
index a753b60c..ad8270f8 100644
--- a/mindspore-lite/tools/optimizer/graph/redundant_op_remove_pass.cc
+++ b/mindspore-lite/tools/optimizer/graph/redundant_op_remove_pass.cc
@@ -345,8 +345,8 @@ int RemoveRedundantOpPass::RemoveDropoutOp(const AnfNodePtr &anf_node, const Fun
     MS_LOG(ERROR) << "dropout out node is invalid.";
     return lite::RET_ERROR;
   }
-  MS_CHECK_TRUE_RET(node->cast<CNodePtr>() != nullptr, RET_ERROR);
-  MS_CHECK_TRUE_RET(node->cast<CNodePtr>()->size() > kInputSizeThree, RET_ERROR);
+  MS_CHECK_TRUE_MSG(node->cast<CNodePtr>() != nullptr, RET_ERROR, "node is not cnode!");
+  MS_CHECK_TRUE_MSG(node->cast<CNodePtr>()->size() >= kInputSizeThree, RET_ERROR, "size of cnode should >= 3!");
   auto get_index_node = node->cast<CNodePtr>()->input(kInputIndexTwo)->cast<CNodePtr>();
   if (get_index_node == nullptr) {
     MS_LOG(ERROR) << "tuple get item node is invalid.";
--
Gitee