diff --git a/mindspore-lite/src/litert/kernel/cpu/nnacl_c/infer/prior_box_infer.c b/mindspore-lite/src/litert/kernel/cpu/nnacl_c/infer/prior_box_infer.c
index a49b2c38e443a1c507892920dc215a742af69cb1..1676c57af028fffff691255c8486f0f442447642 100644
--- a/mindspore-lite/src/litert/kernel/cpu/nnacl_c/infer/prior_box_infer.c
+++ b/mindspore-lite/src/litert/kernel/cpu/nnacl_c/infer/prior_box_infer.c
@@ -69,7 +69,10 @@ int PriorBoxInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC
   int32_t min_sizes_size = param->min_sizes_size;
   int32_t max_sizes_size = param->max_sizes_size;
-  int32_t num_priors_box = min_sizes_size * different_aspect_ratios_size + max_sizes_size;
+  NNACL_CHECK_INT_MUL_NOT_OVERFLOW(min_sizes_size, different_aspect_ratios_size, NNACL_ERRCODE_MUL_OVERFLOW);
+  int32_t num_priors_box = min_sizes_size * different_aspect_ratios_size;
+  NNACL_CHECK_INT_ADD_NOT_OVERFLOW(num_priors_box, max_sizes_size, NNACL_ERRCODE_ADD_OVERFLOW);
+  num_priors_box += max_sizes_size;
   const int kPriorBoxPoints = 4;
   const int kPriorBoxN = 1;
   const int kPriorBoxW = 1;
diff --git a/mindspore-lite/src/litert/kernel/cpu/nnacl_c/kernel/group_convolution.c b/mindspore-lite/src/litert/kernel/cpu/nnacl_c/kernel/group_convolution.c
index 9a68f98cda5e6c230c92d6b6748c02a05fe85462..bcf5662bda2c211d18736a7712c4cc20ddacfaa3 100644
--- a/mindspore-lite/src/litert/kernel/cpu/nnacl_c/kernel/group_convolution.c
+++ b/mindspore-lite/src/litert/kernel/cpu/nnacl_c/kernel/group_convolution.c
@@ -304,18 +304,30 @@ int GroupConvolutionCompute(KernelBase *self) {
     // second, separate group conv input into several parts. This step must be in runtime stage.
     int ret = GroupConvSeparateInput(group_conv, i);
     if (ret != NNACL_OK) {
+      self->env_->Free(self->env_->allocator_, sub_kernel_in_tensor->data_);
+      sub_kernel_in_tensor->data_ = NULL;
+      self->env_->Free(self->env_->allocator_, sub_kernel_out_tensor->data_);
+      sub_kernel_out_tensor->data_ = NULL;
       return ret;
     }

     // sun kernels run
     ret = group_conv->group_convs_[i]->Compute(group_conv->group_convs_[i]);
     if (ret != NNACL_OK) {
+      self->env_->Free(self->env_->allocator_, sub_kernel_in_tensor->data_);
+      sub_kernel_in_tensor->data_ = NULL;
+      self->env_->Free(self->env_->allocator_, sub_kernel_out_tensor->data_);
+      sub_kernel_out_tensor->data_ = NULL;
       return ret;
     }

     // post process, concat all outputs of sub-kernels into one output
     ret = GroupConvPostConcat(group_conv, i);
     if (ret != NNACL_OK) {
+      self->env_->Free(self->env_->allocator_, sub_kernel_in_tensor->data_);
+      sub_kernel_in_tensor->data_ = NULL;
+      self->env_->Free(self->env_->allocator_, sub_kernel_out_tensor->data_);
+      sub_kernel_out_tensor->data_ = NULL;
       return ret;
     }
diff --git a/mindspore-lite/src/litert/kernel/cpu/nnacl_c/kernel/scale.c b/mindspore-lite/src/litert/kernel/cpu/nnacl_c/kernel/scale.c
index d87f6dd9e18d117c3b6d6c2ff03a2e94e6b64fd0..cffbb4f858e466bbcb5a69f42e650fc3fcfc34f2 100644
--- a/mindspore-lite/src/litert/kernel/cpu/nnacl_c/kernel/scale.c
+++ b/mindspore-lite/src/litert/kernel/cpu/nnacl_c/kernel/scale.c
@@ -184,7 +184,7 @@ int ScaleInitScaleOffset(ScaleStruct *scale) {
     int malloc_size = NNACLGetElementNum(offset_tensor) * data_type_size;
     NNACL_CHECK_MALLOC_SIZE(malloc_size);
     scale->offset_ = scale->base_.env_->Alloc(scale->base_.env_->allocator_, malloc_size);
-    NNACL_MALLOC_CHECK_NULL_RETURN_ERR(scale->scale_);
+    NNACL_MALLOC_CHECK_NULL_RETURN_ERR(scale->offset_);
     (void)memcpy(scale->offset_, offset_tensor->data_, malloc_size);
   } else {
     scale->malloc_offset_ = false;
diff --git a/mindspore-lite/src/litert/kernel/cpu/nnacl_c/kernel/where.c b/mindspore-lite/src/litert/kernel/cpu/nnacl_c/kernel/where.c
index 15229018f0d7d064b11b4f115c482abbfd58d13b..94a9c3c8075501c78bd9a0149af5544558a9fa6d 100644
--- a/mindspore-lite/src/litert/kernel/cpu/nnacl_c/kernel/where.c
+++ b/mindspore-lite/src/litert/kernel/cpu/nnacl_c/kernel/where.c
@@ -126,6 +126,8 @@ int WhereRunWithSingleInput(WhereStruct *where) {

   if (true_num > 0) {
     output->data_ = result;
+  } else {
+    where->base_.env_->Free(where->base_.env_->allocator_, result);
   }
   return NNACL_OK;
 }
diff --git a/mindspore-lite/tools/converter/micro/coder/wrapper/int8/conv1x1_init_int8_wrapper.c b/mindspore-lite/tools/converter/micro/coder/wrapper/int8/conv1x1_init_int8_wrapper.c
index f9ffb0a79d1e7ac264e7542b9d3df2139e170d0e..bac7a523ee7977f466c425668bd1f484ad7c6a27 100644
--- a/mindspore-lite/tools/converter/micro/coder/wrapper/int8/conv1x1_init_int8_wrapper.c
+++ b/mindspore-lite/tools/converter/micro/coder/wrapper/int8/conv1x1_init_int8_wrapper.c
@@ -58,7 +58,6 @@ int Conv1x1Init(int8_t *src_weight, int32_t *src_bias, int32_t *filter_zps, int3
     int32_t *bias_data_ = (int32_t *)(buf + *offset);
     *offset += size * sizeof(int32_t);
     if (bias_data_ == NULL) {
-      free(packed_weight_);
       return NNACL_ERR;
     }
     memset(bias_data_, 0, size * sizeof(int32_t));
@@ -86,7 +85,6 @@ int Conv1x1Init(int8_t *src_weight, int32_t *src_bias, int32_t *filter_zps, int3
     int32_t *bias_data_ = ((*offset + size * sizeof(int32_t)) <= buf_size) ? (int32_t *)(buf + *offset) : NULL;
     *offset += size * sizeof(int32_t);
     if (bias_data_ == NULL) {
-      free(packed_weight_);
       packed_weight_ = NULL;
       return NNACL_ERR;
     }
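For reference, the prior_box_infer.c hunk above follows a check-before-compute pattern: verify that the multiplication cannot overflow, perform it, then verify the addition before performing it. The sketch below illustrates the same idea in plain standalone C; SafeMulAddInt32 is a hypothetical helper for illustration only, it does not reproduce the actual NNACL_CHECK_INT_MUL_NOT_OVERFLOW / NNACL_CHECK_INT_ADD_NOT_OVERFLOW macros, and it assumes the operands are non-negative size counts.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical helper (illustration only, not the NNACL macros): computes a * b + c
 * into *out, returning -1 if either step would overflow int32_t. Assumes a, b, c >= 0,
 * which holds for the size counts used in PriorBoxInferShape. */
static int SafeMulAddInt32(int32_t a, int32_t b, int32_t c, int32_t *out) {
  if (out == NULL || a < 0 || b < 0 || c < 0) {
    return -1;
  }
  if (b != 0 && a > INT32_MAX / b) {
    return -1; /* a * b would overflow int32_t */
  }
  int32_t prod = a * b;
  if (prod > INT32_MAX - c) {
    return -1; /* prod + c would overflow int32_t */
  }
  *out = prod + c;
  return 0;
}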