diff --git a/build_helper.py b/build_helper.py
index b8435d8470cd0b31d9f8e06dae68fd0b83fe197c..41017a134b19ff522d4f49d99623b257c884fc86 100755
--- a/build_helper.py
+++ b/build_helper.py
@@ -78,6 +78,7 @@ def do_patch(patch_dir, target_dir):
         '0042-scatterND-indices-illegal.patch',
         '0043-fix-too-many-hi-app-event-reports.patch',
         '0044-fix-multi-thread-nnrt-infer-failed.patch',
+        '0045-rename-func.patch',
     ]
 
     cwd = os.getcwd()
diff --git a/patches/0045-rename-func.patch b/patches/0045-rename-func.patch
new file mode 100644
index 0000000000000000000000000000000000000000..3ce124124ef7506e171f26dc3dcaab02e712e1be
--- /dev/null
+++ b/patches/0045-rename-func.patch
@@ -0,0 +1,2565 @@
+From 1bda42613ff44b4016878140b7a9cc3ad40e4a4e Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?=E7=8E=8B=E5=BC=BA=E5=88=9A?= <2593994958@qq.com>
+Date: Fri, 15 Nov 2024 17:27:22 +0800
+Subject: [PATCH] rename func in tensor_c_utils
+
+---
+ .../device/cpu/kernel/nnacl/fp16/utils_fp16.c | 2 +-
+ .../cpu/kernel/nnacl/infer/adam_infer.c | 9 +++--
+ .../nnacl/infer/adam_weight_decay_infer.c | 10 ++---
+ .../cpu/kernel/nnacl/infer/addn_infer.c | 2 +-
+ .../kernel/nnacl/infer/apply_momentum_infer.c | 5 ++-
+ .../nnacl/infer/arithmetic_grad_infer.c | 4 +-
+ .../cpu/kernel/nnacl/infer/assign_infer.c | 2 +-
+ .../kernel/nnacl/infer/batch_to_space_infer.c | 4 +-
+ .../kernel/nnacl/infer/broadcast_to_infer.c | 2 +-
+ .../nnacl/infer/constant_of_shape_infer.c | 2 +-
+ .../control/tensorlist_fromtensor_infer.c | 2 +-
+ .../infer/control/tensorlist_getitem_infer.c | 4 +-
+ .../infer/control/tensorlist_reserve_infer.c | 4 +-
+ .../infer/control/tensorlist_setitem_infer.c | 2 +-
+ .../infer/control/tensorlist_stack_infer.c | 2 +-
+ .../cpu/kernel/nnacl/infer/conv2d_infer.c | 30 +++++++--------
+ .../nnacl/infer/crop_and_resize_infer.c | 6 +--
+ .../cpu/kernel/nnacl/infer/deconv2d_infer.c | 18 ++++-----
+ .../nnacl/infer/depthwise_conv2d_infer.c | 4 +-
+ .../kernel/nnacl/infer/expand_dims_infer.c | 2 +-
+ .../nnacl/infer/format_transpose_infer.c | 8 ++--
+ .../cpu/kernel/nnacl/infer/gather_infer.c | 2 +-
+ .../cpu/kernel/nnacl/infer/lin_space_infer.c | 2 +-
+ .../cpu/kernel/nnacl/infer/mfcc_infer.c | 2 +-
+ .../device/cpu/kernel/nnacl/infer/pad_infer.c | 2 +-
+ .../cpu/kernel/nnacl/infer/power_infer.c | 2 +-
+ .../cpu/kernel/nnacl/infer/prior_box_infer.c | 2 +-
+ .../infer/random_standard_normal_infer.c | 2 +-
+ .../cpu/kernel/nnacl/infer/range_infer.c | 4 +-
+ .../cpu/kernel/nnacl/infer/reshape_infer.c | 8 ++--
+ .../kernel/nnacl/infer/resize_grad_infer.c | 4 +-
+ .../cpu/kernel/nnacl/infer/resize_infer.c | 26 ++++++------
+ .../kernel/nnacl/infer/roi_pooling_infer.c | 2 +-
+ .../cpu/kernel/nnacl/infer/scatter_nd_infer.c | 4 +-
+ .../device/cpu/kernel/nnacl/infer/sgd_infer.c | 5 ++-
+ .../cpu/kernel/nnacl/infer/slice_infer.c | 4 +-
+ .../nnacl/infer/space_to_batch_nd_infer.c | 4 +-
+ .../nnacl/infer/sparse_to_dense_infer.c | 2 +-
+ .../cpu/kernel/nnacl/infer/squeeze_infer.c | 2 +-
+ .../nnacl/infer/strided_slice_grad_infer.c | 7 ++--
+ .../kernel/nnacl/infer/strided_slice_infer.c | 10 ++---
+ .../infer/string/custom_normalize_infer.c | 2 +-
+ .../infer/string/hashtable_lookup_infer.c | 2 +-
+ .../nnacl/infer/string/lsh_projection_infer.c | 6 +--
+ .../cpu/kernel/nnacl/infer/tile_infer.c | 4 +-
+ .../kernel/nnacl/infer/uniform_real_infer.c | 2 +-
+ .../cpu/kernel/nnacl/kernel/activation.c | 4 +-
+ .../device/cpu/kernel/nnacl/kernel/addn.c | 8 ++--
+ .../cpu/kernel/nnacl/kernel/arithmetic.c | 14 +++----
+ .../cpu/kernel/nnacl/kernel/arithmetic_self.c | 4 +-
+ .../device/cpu/kernel/nnacl/kernel/cast.c | 4 +-
+ .../device/cpu/kernel/nnacl/kernel/clip.c | 4 +-
+ .../device/cpu/kernel/nnacl/kernel/concat.c | 4 +-
+ .../kernel/nnacl/kernel/convolution_base.c | 38 +++++++++----------
+ .../nnacl/kernel/convolution_delegate.c | 4 +-
+ .../nnacl/kernel/convolution_depthwise.c | 6 +--
+ .../nnacl/kernel/convolution_slidewindow.c | 24 ++++++------
+ .../cpu/kernel/nnacl/kernel/deconvolution.c | 4 +-
+ .../nnacl/kernel/deconvolution_depthwise.c | 34 ++++++++---------
+ .../nnacl/kernel/deconvolution_winograd.c | 8 ++--
+ .../device/cpu/kernel/nnacl/kernel/exp.c | 2 +-
+ .../kernel/nnacl/kernel/f16/arithmetic_f16.c | 10 ++---
+ .../cpu/kernel/nnacl/kernel/f16/concat_f16.c | 2 +-
+ .../cpu/kernel/nnacl/kernel/f16/reduce_f16.c | 4 +-
+ .../cpu/kernel/nnacl/kernel/f16/stack_f16.c | 4 +-
+ .../device/cpu/kernel/nnacl/kernel/fill.c | 4 +-
+ .../device/cpu/kernel/nnacl/kernel/gather.c | 6 +--
+ .../kernel/nnacl/kernel/group_convolution.c | 18 ++++-----
+ .../cpu/kernel/nnacl/kernel/group_norm.c | 6 +--
+ .../cpu/kernel/nnacl/kernel/layer_norm.c | 2 +-
+ .../kernel/nnacl/kernel/local_response_norm.c | 8 ++--
+ .../cpu/kernel/nnacl/kernel/matmul_base.c | 4 +-
+ .../device/cpu/kernel/nnacl/kernel/non_zero.c | 2 +-
+ .../device/cpu/kernel/nnacl/kernel/one_hot.c | 4 +-
+ .../cpu/kernel/nnacl/kernel/ones_like.c | 2 +-
+ .../device/cpu/kernel/nnacl/kernel/pad.c | 12 +++---
+ .../device/cpu/kernel/nnacl/kernel/pooling.c | 16 ++++----
+ .../cpu/kernel/nnacl/kernel/prior_box.c | 14 +++----
+ .../device/cpu/kernel/nnacl/kernel/range.c | 2 +-
+ .../device/cpu/kernel/nnacl/kernel/reduce.c | 14 +++----
+ .../device/cpu/kernel/nnacl/kernel/reshape.c | 2 +-
+ .../device/cpu/kernel/nnacl/kernel/scale.c | 6 +--
+ .../device/cpu/kernel/nnacl/kernel/size.c | 2 +-
+ .../device/cpu/kernel/nnacl/kernel/softmax.c | 2 +-
+ .../device/cpu/kernel/nnacl/kernel/stack.c | 6 +--
+ .../cpu/kernel/nnacl/kernel/strided_slice.c | 6 +--
+ .../device/cpu/kernel/nnacl/kernel/tile.c | 6 +--
+ .../cpu/kernel/nnacl/kernel/transpose.c | 10 ++---
+ .../device/cpu/kernel/nnacl/kernel/unique.c | 2 +-
+ .../cpu/kernel/nnacl/kernel/zeros_like.c | 2 +-
+ .../device/cpu/kernel/nnacl/tensor_c_utils.c | 30 +++++++-------
+ .../device/cpu/kernel/nnacl/tensor_c_utils.h | 26 ++++++------
+ .../src/litert/kernel/cpu/fp32/where_fp32.cc | 14 +++----
+ .../pass/format_pass/insert_transpose.cc | 2 +-
+ .../src/litert/runtime_shape_fusion_pass.cc | 4 +-
+ mindspore/lite/src/tensor.h | 2 +-
+ .../base/reshape_dynamic_base_coder.cc | 2 +-
+ .../fp16/lstm_mindir_dynamic_fp16_coder.cc | 2 +-
+ .../nnacl/fp16/matmul_dynamic_fp16_coder.cc | 4 +-
+ .../nnacl/fp16/slice_dynamic_fp16_coder.cc | 2 +-
+ .../fp16/transpose_dynamic_fp16_coder.cc | 2 +-
+ .../nnacl/fp32/gather_dynamic_fp32_coder.cc | 2 +-
+ .../fp32/transpose_dynamic_fp32_coder.cc | 2 +-
+ 103 files changed, 337 insertions(+), 333 deletions(-)
+
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp16/utils_fp16.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp16/utils_fp16.c
+index aaf4b327..5e6db879 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp16/utils_fp16.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp16/utils_fp16.c
+@@ -24,7 +24,7 @@ void *GetOrAllocFp16Data(TensorC *t, ExecEnv *env, bool cast) {
+     return t->data_;
+   }
+   if (t->data_type_ == kNumberTypeFloat32) {
+-    int ele_num = GetElementNum(t);
++    int ele_num = TensorCGetElementNum(t);
+     void *fp16_data = env->Alloc(env->allocator_, ele_num * sizeof(float16_t));
+     NNACL_MALLOC_CHECK_NULL_RETURN_NULL(fp16_data);
+     if (cast) {
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/adam_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/adam_infer.c
+index 181bd8a4..b9be136e 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/adam_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/adam_infer.c
+@@ -24,10 +24,11 @@ int AdamInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **o
+     return check_ret;
+   }
+ 
+-  if (GetElementNum(inputs[0]) != GetElementNum(inputs[1]) || GetElementNum(inputs[0]) != GetElementNum(inputs[2]) ||
+-      GetElementNum(inputs[0]) != GetElementNum(inputs[9]) || GetElementNum(inputs[3]) != 1 ||
+-      GetElementNum(inputs[4]) != 1 || GetElementNum(inputs[5]) != 1 || GetElementNum(inputs[6]) != 1 ||
+-      GetElementNum(inputs[7]) != 1 || GetElementNum(inputs[8]) != 1) {
++  if (TensorCGetElementNum(inputs[0]) != TensorCGetElementNum(inputs[1]) || TensorCGetElementNum(inputs[0]) !=
++        TensorCGetElementNum(inputs[2]) ||
++      TensorCGetElementNum(inputs[0]) != TensorCGetElementNum(inputs[9]) || TensorCGetElementNum(inputs[3]) != 1 ||
++      TensorCGetElementNum(inputs[4]) != 1 || TensorCGetElementNum(inputs[5]) != 1 || TensorCGetElementNum(inputs[6]) != 1 ||
++      TensorCGetElementNum(inputs[7]) != 1 || TensorCGetElementNum(inputs[8]) != 1) {
+     return NNACL_ERR;
+   }
+   if (outputs_size != 0) {
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/adam_weight_decay_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/adam_weight_decay_infer.c
+index 6202c225..cf09f057 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/adam_weight_decay_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/adam_weight_decay_infer.c
+@@ -36,11 +36,11 @@ int AdamWeightDecayInferShape(const TensorC *const *inputs, size_t inputs_size,
+     return check_ret;
+   }
+ 
+-  if (GetElementNum(inputs[var_idx]) != GetElementNum(inputs[m_idx]) ||
+-      GetElementNum(inputs[var_idx]) != GetElementNum(inputs[v_idx]) ||
+-      GetElementNum(inputs[var_idx]) != GetElementNum(inputs[grad_idx]) || GetElementNum(inputs[lr_idx]) != 1 ||
+-      GetElementNum(inputs[beta1_idx]) != 1 || GetElementNum(inputs[beta2_idx]) != 1 ||
+-      GetElementNum(inputs[epsilon]) != 1 || GetElementNum(inputs[decay_idx]) != 1) {
++  if (TensorCGetElementNum(inputs[var_idx]) != TensorCGetElementNum(inputs[m_idx]) ||
++      TensorCGetElementNum(inputs[var_idx]) != TensorCGetElementNum(inputs[v_idx]) ||
++      TensorCGetElementNum(inputs[var_idx]) != TensorCGetElementNum(inputs[grad_idx]) || TensorCGetElementNum(inputs[lr_idx]) != 1 ||
++      TensorCGetElementNum(inputs[beta1_idx]) != 1 || TensorCGetElementNum(inputs[beta2_idx]) != 1 ||
++      TensorCGetElementNum(inputs[epsilon]) != 1 || TensorCGetElementNum(inputs[decay_idx]) != 1) {
+     return NNACL_ERR;
+   }
+   if (outputs_size != 0) {
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/addn_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/addn_infer.c
+index a409a3d3..c05a9de1 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/addn_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/addn_infer.c
+@@ -54,7 +54,7 @@ int AddnInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **o
+ 
+   // make sure all elements have the same size or 1 (broadcasting) in all dimensions
+   for (size_t i = 1; i < inputs_size; ++i) {
+-    if ((inputs[i]->shape_size_ != max_dims) && (GetElementNum(inputs[i]) != GetElementNum(inputs[max_dims_idx]))) {
++    if ((inputs[i]->shape_size_ != max_dims) && (TensorCGetElementNum(inputs[i]) != TensorCGetElementNum(inputs[max_dims_idx]))) {
+       return NNACL_ERR;
+     }
+     if (inputs[i]->shape_size_ == max_dims) {
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/apply_momentum_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/apply_momentum_infer.c
+index e707e273..a3154190 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/apply_momentum_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/apply_momentum_infer.c
+@@ -25,8 +25,9 @@ int ApplyMomentumInferShape(const TensorC *const *inputs, size_t inputs_size, Te
+     return check_ret;
+   }
+ 
+-  if (GetElementNum(inputs[0]) != GetElementNum(inputs[1]) || GetElementNum(inputs[0]) != GetElementNum(inputs[3]) ||
+-      GetElementNum(inputs[2]) != 1 || GetElementNum(inputs[4]) != 1) {
++  if (TensorCGetElementNum(inputs[0]) != TensorCGetElementNum(inputs[1]) || TensorCGetElementNum(inputs[0]) !=
++        TensorCGetElementNum(inputs[3]) ||
++      TensorCGetElementNum(inputs[2]) != 1 || TensorCGetElementNum(inputs[4]) != 1) {
+     return NNACL_INPUT_TENSOR_ERROR;
+   }
+   if (outputs_size != 0) {
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/arithmetic_grad_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/arithmetic_grad_infer.c
+index d4cef5d1..a5e31ecf 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/arithmetic_grad_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/arithmetic_grad_infer.c
+@@ -54,7 +54,7 @@ int ArithmeticGradInferShape(const TensorC *const *inputs, size_t inputs_size, T
+ 
+   ArithmeticParameter *param = (ArithmeticParameter *)parameter;
+ 
+-  if (GetElementNum(dx1) < GetElementNum(dx2)) {
++  if (TensorCGetElementNum(dx1) < TensorCGetElementNum(dx2)) {
+     param->ndim_ = in_shape1_size;
+     param->in_elements_num0_ = (int)param->ndim_;
+     param->in_elements_num1_ = (int)param->ndim_;
+@@ -70,7 +70,7 @@ int ArithmeticGradInferShape(const TensorC *const *inputs, size_t inputs_size, T
+       param->in_shape0_[i] = in_shape1[i];
+       param->out_shape_[i] = out_shape[i];
+     }
+-  } else if (GetElementNum(dx2) < GetElementNum(dx1)) {
++  } else if (TensorCGetElementNum(dx2) < TensorCGetElementNum(dx1)) {
+     param->ndim_ = in_shape0_size;
+     param->in_elements_num0_ = (int)param->ndim_;
+     param->in_elements_num1_ = (int)param->ndim_;
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/assign_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/assign_infer.c
+index 24ed4334..ac4f5ad5 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/assign_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/assign_infer.c
+@@ -25,7 +25,7 @@ int AssignInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC *
+     return check_ret;
+   }
+ 
+-  if (GetElementNum(inputs[0]) != GetElementNum(inputs[1])) {
++  if (TensorCGetElementNum(inputs[0]) != TensorCGetElementNum(inputs[1])) {
+     return NNACL_ERR;
+   }
+ 
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/batch_to_space_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/batch_to_space_infer.c
+index 57079df2..ea782a94 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/batch_to_space_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/batch_to_space_infer.c
+@@ -72,10 +72,10 @@ int SetOutputShapeFromInput(const TensorC *const *inputs, TensorC **outputs) {
+   }
+   int *block_shape = (int *)(inputs[1]->data_);
+   int *crops = (int *)(inputs[2]->data_);
+-  if (GetElementNum(inputs[1]) != 2) {
++  if (TensorCGetElementNum(inputs[1]) != 2) {
+     return NNACL_PARAM_INVALID;
+   }
+-  if (GetElementNum(inputs[2]) != 4) {
++  if (TensorCGetElementNum(inputs[2]) != 4) {
+     return NNACL_PARAM_INVALID;
+   }
+   int mul_block_shape_ = 1;
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/broadcast_to_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/broadcast_to_infer.c
+index cb9d7ea4..ec588842 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/broadcast_to_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/broadcast_to_infer.c
+@@ -173,7 +173,7 @@ int BroadcastToInferShape(const TensorC *const *inputs, size_t inputs_size, Tens
+     if (shape_tensor->data_ == NULL) {
+       return NNACL_INFER_INVALID;
+     }
+-    dst_shape_size = GetElementNum(shape_tensor);
++    dst_shape_size = TensorCGetElementNum(shape_tensor);
+     if (dst_shape_size > MAX_SHAPE_SIZE) {
+       return NNACL_INPUT_TENSOR_ERROR;
+     }
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/constant_of_shape_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/constant_of_shape_infer.c
+index da67c067..09275ae6 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/constant_of_shape_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/constant_of_shape_infer.c
+@@ -33,7 +33,7 @@ int ConstantOfShapeInferShape(const TensorC *const *inputs, size_t inputs_size,
+   if (!InferFlag(inputs, inputs_size) || in_tensor->data_ == NULL) {
+     return NNACL_INFER_INVALID;
+   }
+-  int size = GetElementNum(in_tensor);
++  int size = TensorCGetElementNum(in_tensor);
+   if (size < 0 || size > MAX_SHAPE_SIZE) {
+     return NNACL_ERR;
+   }
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/control/tensorlist_fromtensor_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/control/tensorlist_fromtensor_infer.c
+index daac1b43..0caf245e 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/control/tensorlist_fromtensor_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/control/tensorlist_fromtensor_infer.c
+@@ -65,7 +65,7 @@ int TensorListFromTensorInferShape(const TensorC *const *inputs, size_t inputs_s
+     tensor_shape.shape_size_[i] = (int)(input0->shape_size_) - 1;
+   }
+ 
+-  ShapeSet(output->element_shape_, &(output->element_shape_size_), ele_shape_ptr, (size_t)GetElementNum(input1));
++  ShapeSet(output->element_shape_, &(output->element_shape_size_), ele_shape_ptr, (size_t)TensorCGetElementNum(input1));
+   output->element_num_ = (size_t)(dim0);
+   int ret = MallocTensorListData(output, input0->data_type_, &tensor_shape);
+   if (ret != NNACL_OK) {
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/control/tensorlist_getitem_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/control/tensorlist_getitem_infer.c
+index 3ef697d1..b5986ad1 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/control/tensorlist_getitem_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/control/tensorlist_getitem_infer.c
+@@ -34,7 +34,7 @@ int TensorListGetItemInferShape(const TensorC *const *inputs, size_t inputs_size
+   if (get_index->data_ == NULL) {
+     return NNACL_INFER_INVALID;
+   }
+-  if (GetElementNum(get_index) != 1) {
++  if (TensorCGetElementNum(get_index) != 1) {
+     return NNACL_ERR;
+   }
+   TensorC *output = outputs[0];
+@@ -69,7 +69,7 @@ int TensorListGetItemInferShape(const TensorC *const *inputs, size_t inputs_size
+     NNACL_CHECK_NULL_RETURN_ERR(ele_shape_data);
+     int element_shape[MAX_SHAPE_SIZE] = {0};
+     size_t element_shape_size = 0;
+-    for (int i = 0; i < GetElementNum(input2); ++i) {
++    for (int i = 0; i < TensorCGetElementNum(input2); ++i) {
+       ShapePush(element_shape, &element_shape_size, ele_shape_data[i]);
+     }
+     int status =
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/control/tensorlist_reserve_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/control/tensorlist_reserve_infer.c
+index f4582291..14ed00b6 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/control/tensorlist_reserve_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/control/tensorlist_reserve_infer.c
+@@ -52,11 +52,11 @@ int TensorListReserveInferShape(const TensorC *const *inputs, size_t inputs_size
+   if (input1->data_ == NULL) {
+     return NNACL_INFER_INVALID;
+   }
+-  if (GetElementNum(input1) != 1) {
++  if (TensorCGetElementNum(input1) != 1) {
+     return NNACL_ERR;
+   }
+   int num_elements = ((int *)(input1->data_))[0];
+-  ShapeSet(output->element_shape_, &(output->element_shape_size_), ele_shape_ptr, (size_t)GetElementNum(input0));
++  ShapeSet(output->element_shape_, &(output->element_shape_size_), ele_shape_ptr, (size_t)TensorCGetElementNum(input0));
+   output->element_num_ = (size_t)(num_elements);
+ 
+   vvector tmp_shape;
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/control/tensorlist_setitem_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/control/tensorlist_setitem_infer.c
+index e7ed4532..d71b2319 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/control/tensorlist_setitem_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/control/tensorlist_setitem_infer.c
+@@ -27,7 +27,7 @@ int PreJudge(const TensorC *get_index, TensorListC *input0, const TensorC *value
+   if (get_index->data_type_ != kNumberTypeInt && get_index->data_type_ != kNumberTypeInt32) {
+     return NNACL_ERR;
+   }
+-  if (GetElementNum(get_index) != 1) {
++  if (TensorCGetElementNum(get_index) != 1) {
+     return NNACL_ERR;
+   }
+   if (get_index->data_ == NULL) {
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/control/tensorlist_stack_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/control/tensorlist_stack_infer.c
+index ac89dada..f478166c 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/control/tensorlist_stack_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/control/tensorlist_stack_infer.c
+@@ -54,7 +54,7 @@ int TensorListStackInferShape(const TensorC *const *inputs, size_t inputs_size,
+       ShapePush(output_shape, &output_shape_size, input0->element_shape_[i]);
+     }
+   } else {
+-    int ele_shape_num = GetElementNum(ele_shape);
++    int ele_shape_num = TensorCGetElementNum(ele_shape);
+     if (ele_shape_num > MAX_SHAPE_SIZE) {
+       return NNACL_ERR;
+     }
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/conv2d_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/conv2d_infer.c
+index 5ffe52f2..c05beee3 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/conv2d_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/conv2d_infer.c
+@@ -134,33 +134,33 @@ int Conv2dInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC *
+     return NNACL_INFER_INVALID;
+   }
+ 
+-  int ret = CheckConvAttr(GetChannel(input_tensor), weight_tensor, param);
++  int ret = CheckConvAttr(TensorCGetChannel(input_tensor), weight_tensor, param);
+   if (ret != NNACL_OK) {
+     return ret;
+   }
+ 
+   int output_w = 0, output_h = 0;
+-  ret = ConvInferShape(GetHeight(input_tensor), GetWidth(input_tensor), &output_h, &output_w, param);
++  ret = ConvInferShape(TensorCGetHeight(input_tensor), TensorCGetWidth(input_tensor), &output_h, &output_w, param);
+   if (ret != NNACL_OK) {
+     return ret;
+   }
+ 
+   out_tensor->shape_size_ = input_tensor->shape_size_;
+-  SetBatch(out_tensor, GetBatch(input_tensor));
+-  SetChannel(out_tensor, GetBatch(weight_tensor));
++  TensorCSetBatch(out_tensor, TensorCGetBatch(input_tensor));
++  TensorCSetChannel(out_tensor, TensorCGetBatch(weight_tensor));
+   output_h = output_h >= 0 ? output_h : 1;
+-  SetHeight(out_tensor, output_h);
++  TensorCSetHeight(out_tensor, output_h);
+   output_w = output_w >= 0 ? output_w : 1;
+-  SetWidth(out_tensor, output_w);
+-
+-  param->input_batch_ = GetBatch(input_tensor);
+-  param->input_h_ = GetHeight(input_tensor);
+-  param->input_w_ = GetWidth(input_tensor);
+-  param->input_channel_ = GetChannel(input_tensor);
+-  param->output_batch_ = GetBatch(out_tensor);
+-  param->output_h_ = GetHeight(out_tensor);
+-  param->output_w_ = GetWidth(out_tensor);
+-  param->output_channel_ = GetChannel(out_tensor);
++  TensorCSetWidth(out_tensor, output_w);
++
++  param->input_batch_ = TensorCGetBatch(input_tensor);
++  param->input_h_ = TensorCGetHeight(input_tensor);
++  param->input_w_ = TensorCGetWidth(input_tensor);
++  param->input_channel_ = TensorCGetChannel(input_tensor);
++  param->output_batch_ = TensorCGetBatch(out_tensor);
++  param->output_h_ = TensorCGetHeight(out_tensor);
++  param->output_w_ = TensorCGetWidth(out_tensor);
++  param->output_channel_ = TensorCGetChannel(out_tensor);
+   param->out_format_ = out_tensor->format_;
+   return NNACL_OK;
+ }
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/crop_and_resize_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/crop_and_resize_infer.c
+index e2df6408..c50b71f7 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/crop_and_resize_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/crop_and_resize_infer.c
+@@ -39,7 +39,7 @@ int CropAndResizeInferShape(const TensorC *const *inputs, size_t inputs_size, Te
+   }
+   int output_shape[MAX_SHAPE_SIZE] = {0};
+   size_t output_shape_size = 0;
+-  if (GetBatch(input) == 0) {
++  if (TensorCGetBatch(input) == 0) {
+     ShapePush(output_shape, &output_shape_size, 0);
+   } else if (inputs[1]->data_ != NULL) {
+     const TensorC *boxes_tensor = inputs[1];
+@@ -56,12 +56,12 @@ int CropAndResizeInferShape(const TensorC *const *inputs, size_t inputs_size, Te
+   if (data == NULL) {
+     return NNACL_INFER_INVALID;
+   }
+-  if (GetElementNum(shape_tensor) < 2) {
++  if (TensorCGetElementNum(shape_tensor) < 2) {
+     return NNACL_INPUT_TENSOR_ERROR;
+   }
+   ShapePush(output_shape, &output_shape_size, data[0]);
+   ShapePush(output_shape, &output_shape_size, data[1]);
+-  ShapePush(output_shape, &output_shape_size, GetChannel(input));
++  ShapePush(output_shape, &output_shape_size, TensorCGetChannel(input));
+   SetShapeArray(output, output_shape, output_shape_size);
+   return NNACL_OK;
+ }
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/deconv2d_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/deconv2d_infer.c
+index 8c666f60..0858c6c7 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/deconv2d_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/deconv2d_infer.c
+@@ -41,20 +41,20 @@ int Deconv2dInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC
+   if (!InferFlag(inputs, inputs_size)) {
+     return NNACL_INFER_INVALID;
+   }
+-  int32_t input_h = GetHeight(input);
+-  int32_t input_w = GetWidth(input);
++  int32_t input_h = TensorCGetHeight(input);
++  int32_t input_w = TensorCGetWidth(input);
+ 
+-  int32_t output_n = GetBatch(input);
++  int32_t output_n = TensorCGetBatch(input);
+   int32_t output_h = 0;
+   int32_t output_w = 0;
+-  int32_t output_c = GetChannel(weight);
+-  NNACL_CHECK_TRUE_RET(GetChannel(input) == GetBatch(weight), NNACL_ERR);
+-  if (param->group_ == GetChannel(input) && 1 == GetChannel(weight)) {
+-    output_c = GetBatch(weight); /* depthwise */
++  int32_t output_c = TensorCGetChannel(weight);
++  NNACL_CHECK_TRUE_RET(TensorCGetChannel(input) == TensorCGetBatch(weight), NNACL_ERR);
++  if (param->group_ == TensorCGetChannel(input) && 1 == TensorCGetChannel(weight)) {
++    output_c = TensorCGetBatch(weight); /* depthwise */
+   }
+ 
+-  int kernel_w = param->kernel_w_ != -1 ? param->kernel_w_ : GetWidth(weight);
+-  int kernel_h = param->kernel_h_ != -1 ? param->kernel_h_ : GetHeight(weight);
++  int kernel_w = param->kernel_w_ != -1 ? param->kernel_w_ : TensorCGetWidth(weight);
++  int kernel_h = param->kernel_h_ != -1 ? param->kernel_h_ : TensorCGetHeight(weight);
+   NNACL_CHECK_FALSE(kernel_w <= 0, NNACL_ERR);
+   NNACL_CHECK_FALSE(kernel_h <= 0, NNACL_ERR);
+   NNACL_CHECK_FALSE(INT_MUL_OVERFLOW(kernel_h, kernel_w), NNACL_ERR);
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/depthwise_conv2d_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/depthwise_conv2d_infer.c
+index eb77d4b4..09e50fea 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/depthwise_conv2d_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/depthwise_conv2d_infer.c
+@@ -44,8 +44,8 @@ int DepthwiseConv2dInferShape(const TensorC *const *inputs, size_t inputs_size,
+   if (param->stride_h_ == 0 || param->stride_w_ == 0) {
+     return NNACL_PARAM_INVALID;
+   }
+-  param->kernel_h_ = param->kernel_h_ != -1 ? param->kernel_h_ : GetHeight(inputs[kWeightIndex]);
+-  param->kernel_w_ = param->kernel_w_ != -1 ? param->kernel_w_ : GetWidth(inputs[kWeightIndex]);
++  param->kernel_h_ = param->kernel_h_ != -1 ? param->kernel_h_ : TensorCGetHeight(inputs[kWeightIndex]);
++  param->kernel_w_ = param->kernel_w_ != -1 ? param->kernel_w_ : TensorCGetWidth(inputs[kWeightIndex]);
+   if (param->pad_mode_ == Pad_same) {
+     output_h = ceil((float)(input_h) / (float)(param->stride_h_));
+     output_w = ceil((float)(input_w) / (float)(param->stride_w_));
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/expand_dims_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/expand_dims_infer.c
+index 7970673c..75cea32f 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/expand_dims_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/expand_dims_infer.c
+@@ -42,7 +42,7 @@ int ExpandDimsInferShape(const TensorC *const *inputs, size_t inputs_size, Tenso
+   if (inputs[1]->data_ == NULL) {
+     return NNACL_INPUT_TENSOR_ERROR;
+   }
+-  if (GetElementNum(inputs[1]) < 1) {
++  if (TensorCGetElementNum(inputs[1]) < 1) {
+     return NNACL_ERR;
+   }
+   int dim = ((int32_t *)(inputs[1]->data_))[0];
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/format_transpose_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/format_transpose_infer.c
+index dcea5350..3fa31b40 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/format_transpose_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/format_transpose_infer.c
+@@ -39,10 +39,10 @@ int FormatTransposeInferShape(const TensorC *const *inputs, size_t inputs_size,
+     return NNACL_INFER_INVALID;
+   }
+ 
+-  int input_b = GetBatch(input);
+-  int input_h = GetHeight(input);
+-  int input_w = GetWidth(input);
+-  int input_c = GetChannel(input);
++  int input_b = TensorCGetBatch(input);
++  int input_h = TensorCGetHeight(input);
++  int input_w = TensorCGetWidth(input);
++  int input_c = TensorCGetChannel(input);
+ 
+   // set output shape
+   int out_shape[MAX_SHAPE_SIZE] = {0};
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/gather_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/gather_infer.c
+index 0554acae..41e3d7a3 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/gather_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/gather_infer.c
+@@ -46,7 +46,7 @@ int GatherInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC *
+   if (inputs[2]->data_ == NULL) {
+     return NNACL_NULL_PTR;
+   }
+-  if (GetElementNum(inputs[2]) < 1) {
++  if (TensorCGetElementNum(inputs[2]) < 1) {
+     return NNACL_ERR;
+   }
+   int axis = *((int *)inputs[2]->data_);
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/lin_space_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/lin_space_infer.c
+index a4f4978c..3bf5f896 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/lin_space_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/lin_space_infer.c
+@@ -33,7 +33,7 @@ int LinSpaceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC
+   if (!InferFlag(inputs, inputs_size)) {
+     return NNACL_INFER_INVALID;
+   }
+-  if (GetElementNum(inputs[2]) < 1) {
++  if (TensorCGetElementNum(inputs[2]) < 1) {
+     return NNACL_ERR;
+   }
+   int *num = (int *)(inputs[2]->data_);
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/mfcc_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/mfcc_infer.c
+index d2fd690c..6832c402 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/mfcc_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/mfcc_infer.c
+@@ -34,7 +34,7 @@ int MfccInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **o
+   if (input->shape_size_ != 3) {
+     return NNACL_ERR;
+   }
+-  if (GetElementNum(inputs[1]) != 1) {
++  if (TensorCGetElementNum(inputs[1]) != 1) {
+     return NNACL_ERR;
+   }
+   output->shape_size_ = 3;
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/pad_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/pad_infer.c
+index 41e87b79..9d862c33 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/pad_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/pad_infer.c
+@@ -37,7 +37,7 @@ int PadInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **ou
+     return NNACL_INPUT_TENSOR_ERROR;
+   }
+   const TensorC *paddings = inputs[1];
+-  int size = GetElementNum(paddings);
++  int size = TensorCGetElementNum(paddings);
+   if (size > MAX_PAD_SIZE) {
+     return NNACL_PARAM_INVALID;
+   }
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/power_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/power_infer.c
+index e90e18cb..7ab199b4 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/power_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/power_infer.c
+@@ -44,7 +44,7 @@ int PowerInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **
+   }
+   if (exp_tensor != NULL) {
+     bool exp_x_equal = ShapeEqual(exp_tensor->shape_, exp_tensor->shape_size_, x_tensor->shape_, x_tensor->shape_size_);
+-    if (!exp_x_equal && GetElementNum(exp_tensor) != 1) {
++    if (!exp_x_equal && TensorCGetElementNum(exp_tensor) != 1) {
+       return NNACL_INPUT_TENSOR_ERROR;
+     }
+   }
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/prior_box_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/prior_box_infer.c
+index 0abd0a8a..b9161645 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/prior_box_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/prior_box_infer.c
+@@ -75,7 +75,7 @@ int PriorBoxInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC
+   const int kPriorBoxW = 1;
+   const int kPriorBoxC = 2;
+ 
+-  int32_t h = GetHeight(input) * GetWidth(input) * num_priors_box * kPriorBoxPoints;
++  int32_t h = TensorCGetHeight(input) * TensorCGetWidth(input) * num_priors_box * kPriorBoxPoints;
+   output->shape_size_ = 4;
+   output->shape_[0] = kPriorBoxN;
+   output->shape_[1] = h;
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/random_standard_normal_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/random_standard_normal_infer.c
+index 5eee3480..bb977319 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/random_standard_normal_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/random_standard_normal_infer.c
+@@ -35,7 +35,7 @@ int RandomStandardNormalInferShape(const TensorC *const *inputs, size_t inputs_s
+   if (input_data == NULL) {
+     return NNACL_INFER_INVALID;
+   }
+-  int input_num = GetElementNum(inputs[0]);
++  int input_num = TensorCGetElementNum(inputs[0]);
+   if (input_num > MAX_SHAPE_SIZE) {
+     return NNACL_INPUT_TENSOR_ERROR;
+   }
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/range_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/range_infer.c
+index b10523a4..48f604d1 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/range_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/range_infer.c
+@@ -34,7 +34,7 @@ int RangeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **
+   if (!InferFlag(inputs, inputs_size)) {
+     return NNACL_INFER_INVALID;
+   }
+-  if (GetElementNum(inputs[FIRST_INPUT]) < 1) {
++  if (TensorCGetElementNum(inputs[FIRST_INPUT]) < 1) {
+     return NNACL_ERR;
+   }
+   int shape_size = 0;
+@@ -46,7 +46,7 @@ int RangeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **
+       (inputs[FIRST_INPUT]->data_type_ != inputs[THIRD_INPUT]->data_type_)) {
+     return NNACL_INFER_INVALID;
+   }
+-  if (GetElementNum(inputs[SECOND_INPUT]) < 1 || GetElementNum(inputs[THIRD_INPUT]) < 1) {
++  if (TensorCGetElementNum(inputs[SECOND_INPUT]) < 1 || TensorCGetElementNum(inputs[THIRD_INPUT]) < 1) {
+     return NNACL_ERR;
+   }
+   switch (inputs[0]->data_type_) {
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/reshape_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/reshape_infer.c
+index 37aaa410..b86547da 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/reshape_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/reshape_infer.c
+@@ -20,7 +20,7 @@
+ #include "nnacl/tensor_c_utils.h"
+ 
+ int CalShape(const int *data, const TensorC *const *inputs, int *out_shape, size_t *out_shape_size, int shape_size) {
+-  int input_count = GetElementNum(inputs[0]);
++  int input_count = TensorCGetElementNum(inputs[0]);
+   int index = 0;
+   int size = 1;
+   for (int i = 0; i < shape_size; i++) {
+@@ -62,7 +62,7 @@ int CalNewShape(const TensorC *in_tensor, int *out_shape, size_t out_shape_size)
+     } else if (out_shape[i] < 0) {
+       return NNACL_ERR;
+     } else if (out_shape[i] == 0) {
+-      if (GetElementNum(in_tensor) != 0) {
++      if (TensorCGetElementNum(in_tensor) != 0) {
+         out_shape[i] = in_tensor->shape_[i];
+         out_shape_size_new *= out_shape[i];
+       } else {
+@@ -182,7 +182,7 @@ int ReshapeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC
+   size_t out_shape_size = 0;
+   if (inputs_size == 2) {
+     const TensorC *shape_tensor = inputs[1];
+-    if (GetElementNum(input) == 1) {
++    if (TensorCGetElementNum(input) == 1) {
+       if (shape_tensor->data_ == NULL || (shape_tensor->shape_size_ == 1 && shape_tensor->shape_[0] == 0)) {
+         SetShapeArray(output, out_shape, out_shape_size);
+         return NNACL_OK;
+@@ -192,7 +192,7 @@ int ReshapeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC
+     if (shape_tensor->data_ == NULL) {
+       return NNACL_INFER_INVALID;
+     }
+-    int shape_size = GetElementNum(shape_tensor);
++    int shape_size = TensorCGetElementNum(shape_tensor);
+     if (shape_size > MAX_SHAPE_SIZE) {
+       return NNACL_ERR;
+     }
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/resize_grad_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/resize_grad_infer.c
+index 3d358743..96d8183e 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/resize_grad_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/resize_grad_infer.c
+@@ -45,10 +45,10 @@ int ResizeGradInferShape(const TensorC *const *inputs, size_t inputs_size, Tenso
+     size_t output_shape_size = 0;
+     int32_t *data = (int32_t *)(input_1->data_);
+ 
+-    ShapePush(output_shape, &output_shape_size, GetBatch(input));
++    ShapePush(output_shape, &output_shape_size, TensorCGetBatch(input));
+     ShapePush(output_shape, &output_shape_size, data[0]);
+     ShapePush(output_shape, &output_shape_size, data[1]);
+-    ShapePush(output_shape, &output_shape_size, GetChannel(input));
++    ShapePush(output_shape, &output_shape_size, TensorCGetChannel(input));
+     SetShapeArray(output, output_shape, output_shape_size);
+   } else {
+     return NNACL_ERR;
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/resize_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/resize_infer.c
+index e282e4b6..061ebd12 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/resize_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/resize_infer.c
+@@ -27,7 +27,7 @@ int HandleTwoInputs(const TensorC *const *inputs, ResizeParameter *param) {
+   if (shape_tensor->data_ == NULL) {
+     return NNACL_INFER_INVALID;
+   }
+-  int shape_size = GetElementNum(shape_tensor);
++  int shape_size = TensorCGetElementNum(shape_tensor);
+   void *origin_data = shape_tensor->data_;
+   if (origin_data == NULL) {
+     return NNACL_INFER_INVALID;
+@@ -47,16 +47,16 @@ int HandleTwoInputs(const TensorC *const *inputs, ResizeParameter *param) {
+         param->new_width_ = data[width_index];
+       } else if (shape_tensor->data_type_ == kNumberTypeFloat32) {
+         float *data = (float *)(origin_data);
+-        NNACL_CHECK_INT_MUL_NOT_OVERFLOW((int)(data[height_index]), GetHeight(input), NNACL_ERRCODE_MUL_OVERFLOW);
+-        NNACL_CHECK_INT_MUL_NOT_OVERFLOW((int)(data[width_index]), GetWidth(input), NNACL_ERRCODE_MUL_OVERFLOW);
+-        param->new_height_ = round(data[height_index] * GetHeight(input));
+-        param->new_width_ = round(data[width_index] * GetWidth(input));
++        NNACL_CHECK_INT_MUL_NOT_OVERFLOW((int)(data[height_index]), TensorCGetHeight(input), NNACL_ERRCODE_MUL_OVERFLOW);
++        NNACL_CHECK_INT_MUL_NOT_OVERFLOW((int)(data[width_index]), TensorCGetWidth(input), NNACL_ERRCODE_MUL_OVERFLOW);
++        param->new_height_ = round(data[height_index] * TensorCGetHeight(input));
++        param->new_width_ = round(data[width_index] * TensorCGetWidth(input));
+       } else if (shape_tensor->data_type_ == kNumberTypeFloat16) {
+         uint16_t *data = (uint16_t *)(shape_tensor->data_);
+         float scale_height = ShortToFloat32(data[height_index]);
+         float scale_width = ShortToFloat32(data[width_index]);
+-        param->new_height_ = round(scale_height * GetHeight(input));
+-        param->new_width_ = round(scale_width * GetWidth(input));
++        param->new_height_ = round(scale_height * TensorCGetHeight(input));
++        param->new_width_ = round(scale_width * TensorCGetWidth(input));
+       }
+       break;
+     }
+@@ -69,10 +69,10 @@ int HandleTwoInputs(const TensorC *const *inputs, ResizeParameter *param) {
+       } else {
+         return NNACL_ERR;
+       }
+-      NNACL_CHECK_INT_MUL_NOT_OVERFLOW(GetHeight(input) - 1, scale - 1, NNACL_ERRCODE_MUL_OVERFLOW);
+-      NNACL_CHECK_INT_MUL_NOT_OVERFLOW(GetWidth(input) - 1, scale - 1, NNACL_ERRCODE_MUL_OVERFLOW);
+-      param->new_height_ = GetHeight(input) + (GetHeight(input) - 1) * (scale - 1);
+-      param->new_width_ = GetWidth(input) + (GetWidth(input) - 1) * (scale - 1);
++      NNACL_CHECK_INT_MUL_NOT_OVERFLOW(TensorCGetHeight(input) - 1, scale - 1, NNACL_ERRCODE_MUL_OVERFLOW);
++      NNACL_CHECK_INT_MUL_NOT_OVERFLOW(TensorCGetWidth(input) - 1, scale - 1, NNACL_ERRCODE_MUL_OVERFLOW);
++      param->new_height_ = TensorCGetHeight(input) + (TensorCGetHeight(input) - 1) * (scale - 1);
++      param->new_width_ = TensorCGetWidth(input) + (TensorCGetWidth(input) - 1) * (scale - 1);
+       break;
+     }
+     default: {
+@@ -115,12 +115,12 @@ int ResizeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC *
+   NNACL_CHECK_NULL_RETURN_ERR(param);
+   int output_shape[MAX_SHAPE_SIZE] = {0};
+   size_t output_shape_size = 0;
+-  ShapePush(output_shape, &output_shape_size, GetBatch(input));
++  ShapePush(output_shape, &output_shape_size, TensorCGetBatch(input));
+   int ret = CalculateNewHeightAndWidth(inputs, inputs_size, param);
+   if (ret == NNACL_OK) {
+     ShapePush(output_shape, &output_shape_size, param->new_height_);
+     ShapePush(output_shape, &output_shape_size, param->new_width_);
+-    ShapePush(output_shape, &output_shape_size, GetChannel(input));
++    ShapePush(output_shape, &output_shape_size, TensorCGetChannel(input));
+     SetShapeArray(output, output_shape, output_shape_size);
+   }
+   return ret;
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/roi_pooling_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/roi_pooling_infer.c
+index 8075222d..df04759e 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/roi_pooling_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/roi_pooling_infer.c
+@@ -44,7 +44,7 @@ int ROIPoolingInferShape(const TensorC *const *inputs, size_t inputs_size, Tenso
+   output->shape_[0] = roi->shape_[0];
+   output->shape_[1] = param->pooledH_;
+   output->shape_[2] = param->pooledW_;
+-  output->shape_[3] = GetChannel(input);
++  output->shape_[3] = TensorCGetChannel(input);
+   return NNACL_OK;
+ }
+ 
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/scatter_nd_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/scatter_nd_infer.c
+index 4ede0bb4..1c9ef19d 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/scatter_nd_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/scatter_nd_infer.c
+@@ -37,8 +37,8 @@ int ScatterNdInferShape(const TensorC *const *inputs, size_t inputs_size, Tensor
+     return NNACL_INFER_INVALID;
+   }
+   int *shape_data = (int *)(shape->data_);
+-  NNACL_CHECK_TRUE_RET(GetElementNum(shape) <= MAX_SHAPE_SIZE, NNACL_ERR);
+-  SetShapeArray(output, shape_data, (size_t)GetElementNum(shape));
++  NNACL_CHECK_TRUE_RET(TensorCGetElementNum(shape) <= MAX_SHAPE_SIZE, NNACL_ERR);
++  SetShapeArray(output, shape_data, (size_t)TensorCGetElementNum(shape));
+   return NNACL_OK;
+ }
+ 
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/sgd_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/sgd_infer.c
+index 9ec96f79..a711e3c8 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/sgd_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/sgd_infer.c
+@@ -25,8 +25,9 @@ int SgdInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **ou
+     return check_ret;
+   }
+ 
+-  if (GetElementNum(inputs[0]) != GetElementNum(inputs[1]) || GetElementNum(inputs[0]) != GetElementNum(inputs[3]) ||
+-      GetElementNum(inputs[2]) != 1 || GetElementNum(inputs[4]) != 1) {
++  if (TensorCGetElementNum(inputs[0]) != TensorCGetElementNum(inputs[1]) || TensorCGetElementNum(inputs[0]) !=
++        TensorCGetElementNum(inputs[3]) ||
++      TensorCGetElementNum(inputs[2]) != 1 || TensorCGetElementNum(inputs[4]) != 1) {
+     return NNACL_INPUT_TENSOR_ERROR;
+   }
+   if (outputs_size != 0) {
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/slice_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/slice_infer.c
+index 766a2cdd..31fc8051 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/slice_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/slice_infer.c
+@@ -35,7 +35,7 @@ static bool CheckInputsDataType(const TensorC *const *inputs, size_t inputs_size
+ 
+ int InitBeginAndSizeParam(const TensorC *const *inputs, SliceParameter *param) {
+   /* init begin parameter */
+-  int slice_begin_size = GetElementNum(inputs[1]);
++  int slice_begin_size = TensorCGetElementNum(inputs[1]);
+   int *begin_ptr = (int *)(inputs[1]->data_);
+   if (slice_begin_size != param->param_length_ || begin_ptr == NULL) {
+     return NNACL_INFER_INVALID;
+@@ -48,7 +48,7 @@ int InitBeginAndSizeParam(const TensorC *const *inputs, SliceParameter *param) {
+   }
+ 
+   /* init size parameter */
+-  int slice_size_size = GetElementNum(inputs[2]);
++  int slice_size_size = TensorCGetElementNum(inputs[2]);
+   int *size_ptr = (int *)(inputs[2]->data_);
+   if (slice_size_size != param->param_length_ || size_ptr == NULL) {
+     return NNACL_INFER_INVALID;
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/space_to_batch_nd_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/space_to_batch_nd_infer.c
+index dc76268d..32497fec 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/space_to_batch_nd_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/space_to_batch_nd_infer.c
+@@ -68,7 +68,7 @@ int SpaceSetOutputShapeFromInput(const TensorC *const *inputs, size_t inputs_siz
+   if (input->shape_size_ != 4) {
+     return NNACL_ERR;
+   }
+-  if (GetElementNum(inputs[2]) != 4) {
++  if (TensorCGetElementNum(inputs[2]) != 4) {
+     return NNACL_ERR;
+   }
+   int *block_shape = (int *)(inputs[1]->data_);
+@@ -76,7 +76,7 @@ int SpaceSetOutputShapeFromInput(const TensorC *const *inputs, size_t inputs_siz
+   int padding_left = 0;
+   int padding_right = 0;
+   int block_w = 1;
+-  if (GetElementNum(inputs[1]) == 2) {
++  if (TensorCGetElementNum(inputs[1]) == 2) {
+     padding_left = padding[2];
+     padding_right = padding[3];
+     block_w = block_shape[1];
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/sparse_to_dense_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/sparse_to_dense_infer.c
+index 9b513e68..342ebc67 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/sparse_to_dense_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/sparse_to_dense_infer.c
+@@ -35,7 +35,7 @@ int SparseToDenseInferShape(const TensorC *const *inputs, size_t inputs_size, Te
+     return NNACL_INFER_INVALID;
+   }
+   int *input1_data = (int *)(input1->data_);
+-  int data_num = GetElementNum(input1);
++  int data_num = TensorCGetElementNum(input1);
+   if (input1_data == 0 || data_num > MAX_SHAPE_SIZE) {
+     return NNACL_INPUT_TENSOR_ERROR;
+   }
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/squeeze_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/squeeze_infer.c
+index e0f78a13..3b85f3fc 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/squeeze_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/squeeze_infer.c
+@@ -41,7 +41,7 @@ int SqueezeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC
+                        NNACL_PARAM_INVALID);
+   int *axis_data = (int *)(inputs[1]->data_);
+   NNACL_CHECK_TRUE_RET(axis_data != NULL, NNACL_PARAM_INVALID);
+-  param->axis_size_ = GetElementNum(inputs[1]);
++  param->axis_size_ = TensorCGetElementNum(inputs[1]);
+   for (size_t i = 0; i < param->axis_size_; i++) {
+     param->axis_[i] = *(axis_data + i);
+   }
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/strided_slice_grad_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/strided_slice_grad_infer.c
+index bd8927ba..10d78a5f 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/strided_slice_grad_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/strided_slice_grad_infer.c
+@@ -24,10 +24,11 @@ bool StridedSliceCheckInputs(const TensorC *const *inputs, size_t inputs_size) {
+       return false;
+     }
+   }
+-  if (GetElementNum(inputs[2]) > MAX_SHAPE_SIZE) {
++  if (TensorCGetElementNum(inputs[2]) > MAX_SHAPE_SIZE) {
+     return false;
+   }
+-  if (GetElementNum(inputs[2]) != GetElementNum(inputs[3]) && GetElementNum(inputs[2]) != GetElementNum(inputs[4])) {
++  if (TensorCGetElementNum(inputs[2]) != TensorCGetElementNum(inputs[3]) && TensorCGetElementNum(inputs[2]) !=
++      TensorCGetElementNum(inputs[4])) {
+     return false;
+   }
+   return true;  // note: the original code is ndim_ <= in_shape_size
+@@ -86,7 +87,7 @@ int StridedSliceGradInferShape(const TensorC *const *inputs, size_t inputs_size,
+   int *end_data = (int *)(inputs[3]->data_);
+   int *stride_data = (int *)(inputs[4]->data_);
+ 
+-  size_t ndim_ = (size_t)GetElementNum(begin_tensor);
++  size_t ndim_ = (size_t)TensorCGetElementNum(begin_tensor);
+   for (size_t i = 0; i < ndim_; ++i) {
+     ShapePush(begins_, &begins_size, begin_data[i]);
+     ShapePush(ends_, &ends_size, end_data[i]);
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/strided_slice_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/strided_slice_infer.c
+index f4d25ac9..581774d4 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/strided_slice_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/strided_slice_infer.c
+@@ -71,7 +71,7 @@ int HandleAxesInputNotExist(const TensorC *const *inputs, struct StridedSliceTra
+   if (begin_data == NULL || end_data == NULL || stride_data == NULL) {
+     return NNACL_ERR;
+   }
+-  transfer_buffer->ndim_ = GetElementNum(begin_tensor);
++  transfer_buffer->ndim_ = TensorCGetElementNum(begin_tensor);
+   for (int i = 0; i < transfer_buffer->ndim_; ++i) {
+     ShapePush(transfer_buffer->begins_, &transfer_buffer->begins_size_, begin_data[i]);
+     ShapePush(transfer_buffer->ends_, &transfer_buffer->ends_size_, end_data[i]);
+@@ -82,8 +82,8 @@ int HandleAxesInputNotExist(const TensorC *const *inputs, struct StridedSliceTra
+ 
+ int GenerateAxes(const TensorC *axes_tensor, int *axes, int num, int ndim) {
+   int *axes_data = NULL;
+-  if (GetElementNum(axes_tensor) != 0) {
+-    if (GetElementNum(axes_tensor) != num) {
++  if (TensorCGetElementNum(axes_tensor) != 0) {
++    if (TensorCGetElementNum(axes_tensor) != num) {
+       return NNACL_ERR;
+     }
+     axes_data = (int *)(axes_tensor->data_);
+@@ -122,11 +122,11 @@ int HandleAxesInputExist(const TensorC *const *inputs, int *ndim, int *in_shape,
+ 
+   // when input contains axes, begins, ends, strides will be expand to the same length as input rank
+   *ndim = (int)(input_tensor->shape_size_);
+-  int begin_ndim = GetElementNum(begin_tensor);
++  int begin_ndim = TensorCGetElementNum(begin_tensor);
+ 
+   int *stride_data = NULL;
+   const TensorC *stride_tensor = inputs[4];
+-  int stride_data_num = GetElementNum(stride_tensor);
++  int stride_data_num = TensorCGetElementNum(stride_tensor);
+   if (stride_data_num != 0) {
+     NNACL_CHECK_TRUE_RET(stride_data_num == begin_ndim, NNACL_ERR);
+     stride_data = (int *)(stride_tensor->data_);
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/string/custom_normalize_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/string/custom_normalize_infer.c
+index e0afcccf..42f2d673 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/string/custom_normalize_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/string/custom_normalize_infer.c
+@@ -33,7 +33,7 @@ int CustomNormalizeInferShape(const TensorC *const *inputs, size_t inputs_size,
+   if (input->data_ == NULL) {
+     return NNACL_INFER_INVALID;
+   }
+-  if (GetElementNum(input) < 1) {
++  if (TensorCGetElementNum(input) < 1) {
+     return NNACL_ERR;
+   }
+   if (input->data_type_ != kNumberTypeUInt32 && input->data_type_ != kObjectTypeString) {
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/string/hashtable_lookup_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/string/hashtable_lookup_infer.c
+index 912a4063..09317a3e 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/string/hashtable_lookup_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/string/hashtable_lookup_infer.c
+@@ -37,7 +37,7 @@ int HashtableLoopupInferShape(const TensorC *const *inputs, size_t inputs_size,
+   output->data_type_ = values->data_type_;
+   output->format_ = input->format_;
+   hits->shape_size_ = 1;
+-  hits->shape_[0] = GetDimensionSize(input, 0);
++  hits->shape_[0] = TensorCGetDimensionSize(input, 0);
+   hits->data_type_ = kNumberTypeUInt8;
+   hits->format_ = input->format_;
+ 
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/string/lsh_projection_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/string/lsh_projection_infer.c
+index 818373b8..9a16256c 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/string/lsh_projection_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/string/lsh_projection_infer.c
+@@ -26,7 +26,7 @@ int LshProjectionInferShape(const TensorC *const *inputs, size_t inputs_size, Te
+   }
+ 
+   const TensorC *in_hash = inputs[0];
+-  if (in_hash->shape_size_ != 2 || GetDimensionSize(in_hash, 1) > 32) {
++  if (in_hash->shape_size_ != 2 || TensorCGetDimensionSize(in_hash, 1) > 32) {
+     return NNACL_ERR;
+   }
+   TensorC *out_tensor = outputs[0];
+@@ -38,10 +38,10 @@ int LshProjectionInferShape(const TensorC *const *inputs, size_t inputs_size, Te
+   LshProjectionParameter *param = (LshProjectionParameter *)parameter;
+   switch (param->lsh_type_) {
+     case LshProjectionType_SPARSE:
+-      ShapePush(out_shape, &out_shape_size, GetDimensionSize(in_hash, 0));
++      ShapePush(out_shape, &out_shape_size, TensorCGetDimensionSize(in_hash, 0));
+       break;
+     case LshProjectionType_DENSE:
+-      ShapePush(out_shape, &out_shape_size, GetDimensionSize(in_hash, 0) * GetDimensionSize(in_hash, 1));
++      ShapePush(out_shape, &out_shape_size, TensorCGetDimensionSize(in_hash, 0) * TensorCGetDimensionSize(in_hash, 1));
+       break;
+     default:
+       return NNACL_ERR;
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/tile_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/tile_infer.c
+index 953c3def..53cd3aac 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/tile_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/tile_infer.c
+@@ -64,7 +64,7 @@ int TileInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **o
+     return NNACL_INPUT_TENSOR_ERROR;
+   }
+   NNACL_CHECK_TRUE_RET(input1_shape_size <= MAX_SHAPE_SIZE, NNACL_ERR);
+-  int data_num = GetElementNum(inputs[1]);
++  int data_num = TensorCGetElementNum(inputs[1]);
+   multiples_size = (size_t)(data_num);
+   if (inputs[1]->data_type_ != kNumberTypeInt && inputs[1]->data_type_ != kNumberTypeInt32) {
+     return NNACL_INPUT_TENSOR_ERROR;
+@@ -81,7 +81,7 @@ int TileInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **o
+   int *dims = param->dims_;
+   size_t dims_size = param->dims_size_;
+   if (dims_size == 0) {
+-    int dim_num = GetElementNum(inputs[1]);
++    int dim_num = TensorCGetElementNum(inputs[1]);
+     NNACL_CHECK_TRUE_RET(dim_num <= MAX_SHAPE_SIZE, NNACL_ERR);
+     for (int dim = 0; dim < dim_num; ++dim) {
+       ShapePush(dims, &dims_size, dim);
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/uniform_real_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/uniform_real_infer.c
+index da8b08a4..8aa82814 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/uniform_real_infer.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/uniform_real_infer.c
+@@ -33,7 +33,7 @@ int UniformRealInferShape(const TensorC *const *inputs, size_t inputs_size, Tens
+   if (input_data == NULL) {
+     return NNACL_INFER_INVALID;
+   }
+-  int input_num = GetElementNum(inputs[0]);
++  int input_num = TensorCGetElementNum(inputs[0]);
+   if (input_num > MAX_SHAPE_SIZE || input_num < 0) {
+     return NNACL_INPUT_TENSOR_ERROR;
+   }
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/activation.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/activation.c
+index 438f5523..2ad44796 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/activation.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/activation.c
+@@ -34,7 +34,7 @@ int ActivationResize(struct KernelBase *self) {
+   ActivationStruct *activation = (ActivationStruct *)self;
+   NNACL_CHECK_NULL_RETURN_ERR(activation);
+   self->thread_nr_ = self->UpdateThread(TC_TYPE(PrimType_Activation, activation->act_type_), 1, 1,
+-                                        GetElementNum(self->out_[0]), self->thread_nr_);
++                                        TensorCGetElementNum(self->out_[0]), self->thread_nr_);
+   return NNACL_OK;
+ }
+ 
+@@ -132,7 +132,7 @@ int activation_fp16_run(ActivationStruct *activation, int task_id, int count, in
+ int ActivationImpl(void *cdata, int task_id, float l, float r) {
+   ActivationStruct *activation = (ActivationStruct *)cdata;
+ 
+-  int ele_num = GetElementNum(activation->base.in_[0]);
++  int ele_num = TensorCGetElementNum(activation->base.in_[0]);
+   NNACL_CHECK_ZERO_RETURN_ERR(activation->base.thread_nr_);
+   int stride = UP_DIV(ele_num, activation->base.thread_nr_);
+   NNACL_CHECK_INT_MUL_NOT_OVERFLOW(stride, task_id, NNACL_ERR);
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/addn.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/addn.c
+index 987db322..49596c69 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/addn.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/addn.c
+@@ -69,14 +69,14 @@ int AddNComputeNoParallel(AddNStruct *addn) {
+   NNACL_CHECK_NULL_RETURN_ERR(in0_tensor);
+   TensorC *in1_tensor = addn->base_.in_[SECOND_INPUT];
+   NNACL_CHECK_NULL_RETURN_ERR(in1_tensor);
+-  AddNCompute(addn, IsShapeSame(in0_tensor, in1_tensor), GetElementNum(in0_tensor) == 1);
++  AddNCompute(addn, TensorCIsShapeSame(in0_tensor, in1_tensor), TensorCGetElementNum(in0_tensor) == 1);
+ 
+   for (size_t i = Index2; i < addn->base_.in_size_; i++) {
+     TensorC *in_tensor = addn->base_.in_[i];
+     NNACL_CHECK_NULL_RETURN_ERR(in_tensor);
+     addn->in1_addr_ = in_tensor->data_;
+     addn->in2_addr_ = addn->out_addr_;
+-    AddNCompute(addn, IsShapeSame(in_tensor, addn->base_.out_[OUTPUT_INDEX]), GetElementNum(in_tensor) == 1);
++    AddNCompute(addn, TensorCIsShapeSame(in_tensor, addn->base_.out_[OUTPUT_INDEX]), TensorCGetElementNum(in_tensor) == 1);
+   }
+   return NNACL_OK;
+ }
+@@ -86,7 +86,7 @@ int AddnResize(struct KernelBase *self) {
+   NNACL_CHECK_NULL_RETURN_ERR(addn);
+ 
+   TensorC *out_tensor = self->out_[OUTPUT_INDEX];
+-  addn->elements_num_ = GetElementNum(out_tensor);
++  addn->elements_num_ = TensorCGetElementNum(out_tensor);
+   return NNACL_OK;
+ }
+ 
+@@ -107,7 +107,7 @@ int AddnCompute(struct KernelBase *self) {
+ 
+   for (int i = 0; i < self->in_size_; i++) {
+     TensorC *in_tensor = self->in_[i];
+-    if (!IsShapeSame(in_tensor, self->out_[OUTPUT_INDEX])) {
++    if (!TensorCIsShapeSame(in_tensor, self->out_[OUTPUT_INDEX])) {
+       return NNACL_ADDN_SHAPE_UNMATCH;
+     }
+   }
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/arithmetic.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/arithmetic.c
+index 7fd3d3c1..68b76698 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/arithmetic.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/arithmetic.c
+@@ -394,7 +394,7 @@ int ArithmeticBroadCastConstTensor(ArithmeticStruct *arithmetic) {
+     prefer_explicit_broadcast && (arithmetic->base_.in_[FIRST_INPUT]->data_type_ != kNumberTypeBool);
+ 
+   bool exist_broadcast_ = false;
+-  int buffer_size = GetElementNum(arithmetic->base_.out_[OUTPUT_INDEX]) * arithmetic->in_data_size_;
++  int buffer_size = TensorCGetElementNum(arithmetic->base_.out_[OUTPUT_INDEX]) * arithmetic->in_data_size_;
+   if (arithmetic->a_matrix_.is_const_) {
+     NNACL_CHECK_NULL_RETURN_ERR(arithmetic->base_.in_[FIRST_INPUT]->data_);
+     if (arithmetic->in_elements_num0_ != arithmetic->out_elements_num_ && prefer_explicit_broadcast) {
+@@ -516,7 +516,7 @@ int ArithmeticComputeOfflineInfo(ArithmeticStruct *arithmetic) {
+ }
+ 
+ int ArithmeticChooseThreadCuttingStrategy(ArithmeticStruct *arithmetic) {
+-  int total_num = GetElementNum(arithmetic->base_.out_[OUTPUT_INDEX]);
++  int total_num = TensorCGetElementNum(arithmetic->base_.out_[OUTPUT_INDEX]);
+   arithmetic->base_.thread_nr_ =
+     arithmetic->base_.UpdateThread(TC_TYPE(arithmetic->primitive_type_, arithmetic->functions_.activation_type_), 1, 1,
+                                    total_num, arithmetic->base_.thread_nr_);
+@@ -560,9 +560,9 @@ int ArithmeticResize(struct KernelBase *self) {
+ 
+   NNACL_CHECK_TRUE_RET(arithmetic->in_data_size_ != 0, NNACL_UNSUPPORTED_DATA_TYPE);
+   NNACL_CHECK_TRUE_RET(arithmetic->out_data_size_ != 0, NNACL_UNSUPPORTED_DATA_TYPE);
+-  arithmetic->in_elements_num0_ = GetElementNum(self->in_[FIRST_INPUT]);
+-  arithmetic->in_elements_num1_ = GetElementNum(self->in_[SECOND_INPUT]);
+-  arithmetic->out_elements_num_ = GetElementNum(self->in_[OUTPUT_INDEX]);
++  arithmetic->in_elements_num0_ = TensorCGetElementNum(self->in_[FIRST_INPUT]);
++  arithmetic->in_elements_num1_ = TensorCGetElementNum(self->in_[SECOND_INPUT]);
++  arithmetic->out_elements_num_ = TensorCGetElementNum(self->in_[OUTPUT_INDEX]);
+ 
+   int ret = ResetArithmeticStatus(arithmetic);
+   if (ret != NNACL_OK) {
+@@ -616,8 +616,8 @@ int ArithmeticPrepare(struct KernelBase *self) {
+   }
+   arithmetic->init_function_(self);
+ 
+-  arithmetic->a_matrix_.is_const_ = IsConst(self->in_[FIRST_INPUT]);
+-  arithmetic->b_matrix_.is_const_ = IsConst(self->in_[SECOND_INPUT]);
++  arithmetic->a_matrix_.is_const_ = TensorCIsConst(self->in_[FIRST_INPUT]);
++  arithmetic->b_matrix_.is_const_ = TensorCIsConst(self->in_[SECOND_INPUT]);
+   return NNACL_OK;
+ }
+ 
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/arithmetic_self.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/arithmetic_self.c
+index b97fb782..736ba459 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/arithmetic_self.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/arithmetic_self.c
+@@ -76,7 +76,7 @@ void ArithmeticSelfGetArithmeticSelfF16Function(ArithmeticSelfStruct *arithmetic
+ }
+ 
+ int ArithmeticSelfExecute(ArithmeticSelfStruct *arithmetic_self, int task_id) {
+-  int elements_num = GetElementNum(arithmetic_self->base_.in_[FIRST_INPUT]);
++  int elements_num = TensorCGetElementNum(arithmetic_self->base_.in_[FIRST_INPUT]);
+   NNACL_CHECK_TRUE_RET(arithmetic_self->base_.thread_nr_, NNACL_ERR);
+   int stride = UP_DIV(elements_num, arithmetic_self->base_.thread_nr_);
+   NNACL_CHECK_INT_MUL_NOT_OVERFLOW(task_id, stride, NNACL_ERR);
+@@ -132,7 +132,7 @@ int ArithmeticSelfResize(KernelBase *self) {
+   ArithmeticSelfStruct *arithmetic_self = (ArithmeticSelfStruct *)self;
+   NNACL_CHECK_NULL_RETURN_ERR(arithmetic_self);
+   self->thread_nr_ = arithmetic_self->base_.UpdateThread(TC_PTYPE(arithmetic_self->op_type_), 1, 1,
+-                                                         GetElementNum(self->out_[OUTPUT_INDEX]), self->thread_nr_);
++                                                         TensorCGetElementNum(self->out_[OUTPUT_INDEX]), self->thread_nr_);
+   return NNACL_OK;
+ }
+ 
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/cast.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/cast.c
+index a5496590..2feaf5a7 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/cast.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/cast.c
+@@ -166,7 +166,7 @@ int cast_resize(struct KernelBase *self) {
+   NNACL_CHECK_FALSE(self->in_size_ < ONE_TENSOR, NNACL_INPUT_TENSOR_ERROR);
+   TensorC *in_tensor = self->in_[FIRST_INPUT];
+   NNACL_CHECK_NULL_RETURN_ERR(in_tensor);
+-  int data_num = GetElementNum(in_tensor);
++  int data_num = TensorCGetElementNum(in_tensor);
+   if (data_num == 0) {
+     return NNACL_OK;
+   }
+@@ -175,7 +175,7 @@ int cast_resize(struct KernelBase *self) {
+   NNACL_CHECK_FALSE(self->out_size_ < ONE_TENSOR, NNACL_OUTPUT_TENSOR_ERROR);
+   // update thread num
+   cast->base_.thread_nr_ = cast->base_.UpdateThread(
+-    TC_PTYPE(PrimType_Cast), 1, 1, GetElementNum(cast->base_.out_[FIRST_INPUT]), cast->base_.thread_nr_);
++    TC_PTYPE(PrimType_Cast), 1, 1, TensorCGetElementNum(cast->base_.out_[FIRST_INPUT]), cast->base_.thread_nr_);
+   cast->stride_ = UP_DIV(data_num, cast->base_.thread_nr_);
+   return NNACL_OK;
+ }
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/clip.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/clip.c
+index ae8ac5d8..5ea371d2 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/clip.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/clip.c
+@@ -42,9 +42,9 @@ int ClipResize(struct KernelBase *self) {
+   ClipStruct *clip = (ClipStruct *)self;
+   NNACL_CHECK_NULL_RETURN_ERR(clip);
+   clip->base_.thread_nr_ = clip->base_.UpdateThread(
+-    TC_PTYPE(PrimType_Clip), 1, 1, GetElementNum(clip->base_.out_[FIRST_INPUT]), clip->base_.thread_nr_);
++    TC_PTYPE(PrimType_Clip), 1, 1, TensorCGetElementNum(clip->base_.out_[FIRST_INPUT]), clip->base_.thread_nr_);
+ 
+-  clip->length_ = GetElementNum(clip->base_.in_[FIRST_INPUT]);
++  clip->length_ = TensorCGetElementNum(clip->base_.in_[FIRST_INPUT]);
+   clip->stride_ = UP_DIV(clip->length_, clip->base_.thread_nr_);
+   NNACL_CHECK_INT_MUL_NOT_OVERFLOW(clip->stride_, clip->base_.thread_nr_, NNACL_ERR);
+   return NNACL_OK;
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/concat.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/concat.c
+index 04249c1f..0332dd47 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/concat.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/concat.c
+@@ -27,7 +27,7 @@ int DoConcat(ConcatStruct *concat, int task_id) {
+   NNACL_CHECK_FALSE(task_id < 0, NNACL_ERR);
+   NNACL_CHECK_FALSE(task_id > concat->block_size_, NNACL_ERR);
+ 
+-  int all_bytes = 
GetSize(concat->base_.out_[FIRST_INPUT]); ++ int all_bytes = TensorCGetSize(concat->base_.out_[FIRST_INPUT]); + int64_t start = concat->block_splits_[task_id]; + int64_t end = task_id < (concat->block_size_ - 1) ? concat->block_splits_[task_id + 1] : all_bytes; + int64_t start_row = start / concat->inner_sizes_[concat->base_.in_size_]; +@@ -154,7 +154,7 @@ void ComputeConcatUnitBoundary(ConcatStruct *concat, int64_t *pre_sum, int offse + int ChooseConcatThreadCuttingStrategy(ConcatStruct *concat) { + NNACL_CHECK_TRUE_RET(concat->base_.thread_nr_ > 0, NNACL_ERR); + +- int all_bytes = GetSize(concat->base_.out_[FIRST_INPUT]); ++ int all_bytes = TensorCGetSize(concat->base_.out_[FIRST_INPUT]); + int64_t thread_count = MSMAX(1, MSMIN(all_bytes / kConcatMinCostPerThread, concat->base_.thread_nr_)); + + NNACL_CHECK_ZERO_RETURN_ERR(thread_count); +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/convolution_base.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/convolution_base.c +index 8878bd9d..2effbe49 100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/convolution_base.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/convolution_base.c +@@ -48,30 +48,30 @@ int ConvBaseUpdateComputeInfo(ConvolutionBaseStruct *conv) { + TensorC *output = conv->base_.out_[OUTPUT_INDEX]; + NNACL_CHECK_NULL_RETURN_ERR(output); + +- conv_param->input_batch_ = GetBatch(input); +- conv_param->input_h_ = GetHeight(input); +- conv_param->input_w_ = GetWidth(input); +- conv_param->input_channel_ = GetChannel(input); +- conv_param->output_batch_ = GetBatch(output); +- conv_param->output_h_ = GetHeight(output); +- conv_param->output_w_ = GetWidth(output); +- conv_param->output_channel_ = GetChannel(output); ++ conv_param->input_batch_ = TensorCGetBatch(input); ++ conv_param->input_h_ = TensorCGetHeight(input); ++ conv_param->input_w_ = TensorCGetWidth(input); ++ conv_param->input_channel_ = TensorCGetChannel(input); ++ conv_param->output_batch_ = TensorCGetBatch(output); ++ conv_param->output_h_ = TensorCGetHeight(output); ++ conv_param->output_w_ = TensorCGetWidth(output); ++ conv_param->output_channel_ = TensorCGetChannel(output); + + ConvComputeParam *compute = &conv->compute_; +- compute->in_n_ = GetBatch(input); +- compute->in_h_ = GetHeight(input); +- compute->in_w_ = GetWidth(input); +- compute->in_c_ = GetChannel(input); ++ compute->in_n_ = TensorCGetBatch(input); ++ compute->in_h_ = TensorCGetHeight(input); ++ compute->in_w_ = TensorCGetWidth(input); ++ compute->in_c_ = TensorCGetChannel(input); + NNACL_CHECK_FALSE(compute->in_c_ != conv_param->input_channel_, NNACL_ERR); + NNACL_CHECK_INT_MUL_NOT_OVERFLOW(compute->in_h_, compute->in_w_, NNACL_ERR); + compute->in_hw_ = compute->in_h_ * compute->in_w_; + NNACL_CHECK_INT_MUL_NOT_OVERFLOW(compute->in_hw_, compute->in_n_, NNACL_ERR); + NNACL_CHECK_INT_MUL_NOT_OVERFLOW(compute->in_hw_ * compute->in_n_, compute->in_c_, NNACL_ERR); + +- compute->out_n_ = GetBatch(output); +- compute->out_h_ = GetHeight(output); +- compute->out_w_ = GetWidth(output); +- compute->out_c_ = GetChannel(output); ++ compute->out_n_ = TensorCGetBatch(output); ++ compute->out_h_ = TensorCGetHeight(output); ++ compute->out_w_ = TensorCGetWidth(output); ++ compute->out_c_ = TensorCGetChannel(output); + NNACL_CHECK_FALSE(compute->out_c_ != conv_param->output_channel_, NNACL_ERR); + NNACL_CHECK_INT_MUL_NOT_OVERFLOW(compute->out_h_, compute->out_w_, NNACL_ERR); + compute->out_hw_ = compute->out_h_ * compute->out_w_; +@@ -131,7 +131,7 @@ int 
ConvBaseInitConvWeightBias(ConvolutionBaseStruct *conv) { + } + + if (conv->base_.in_size_ == THREE_TENSOR) { +- memcpy(conv->bias_data_, conv->origin_bias_, GetSize(conv->base_.in_[THIRD_INPUT])); ++ memcpy(conv->bias_data_, conv->origin_bias_, TensorCGetSize(conv->base_.in_[THIRD_INPUT])); + } + + if (!conv->base_.train_session_) { +@@ -151,10 +151,10 @@ int ConvBaseCheckResizeValid(ConvolutionBaseStruct *conv) { + // ===============check in channel================= // + TensorC *input_tensor = conv->base_.in_[FIRST_INPUT]; + NNACL_CHECK_NULL_RETURN_ERR(input_tensor); +- int resize_in_channel = GetChannel(input_tensor); ++ int resize_in_channel = TensorCGetChannel(input_tensor); + TensorC *filter_tensor = conv->base_.in_[SECOND_INPUT]; + NNACL_CHECK_NULL_RETURN_ERR(filter_tensor); +- int filter_in_channel = GetChannel(filter_tensor); ++ int filter_in_channel = TensorCGetChannel(filter_tensor); + if (filter_in_channel != resize_in_channel) { + return NNACL_CONVOLUTION_INPUT_CHANNEL_UNMATCH; + } +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/convolution_delegate.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/convolution_delegate.c +index 4c9a6d41..7887de38 100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/convolution_delegate.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/convolution_delegate.c +@@ -45,10 +45,10 @@ float *ConvolutionDelegateCopyData(const TensorC *tensor) { + NNACL_CHECK_NULL_RETURN_NULL(tensor); + NNACL_CHECK_NULL_RETURN_NULL(tensor->data_); + +- float *data = (float *)malloc(GetSize(tensor)); ++ float *data = (float *)malloc(TensorCGetSize(tensor)); + NNACL_MALLOC_CHECK_NULL_RETURN_NULL(data); + +- (void)memcpy(data, tensor->data_, GetSize(tensor)); ++ (void)memcpy(data, tensor->data_, TensorCGetSize(tensor)); + return data; + } + +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/convolution_depthwise.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/convolution_depthwise.c +index dcee0856..573593df 100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/convolution_depthwise.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/convolution_depthwise.c +@@ -148,9 +148,9 @@ int ConvolutionDepthwisePrepare(KernelBase *self) { + NNACL_CHECK_NULL_RETURN_ERR(weight_tensor); + NNACL_CHECK_TRUE_RET(weight_tensor->shape_size_ == DIMENSION_4D, NNACL_CONVOLUTION_WEIGHT_SHAPE_INVALID); + +- int weight_size_hw = GetHeight(weight_tensor) * GetWidth(weight_tensor); +- NNACL_CHECK_INT_MUL_NOT_OVERFLOW(GetBatch(weight_tensor), weight_size_hw, NNACL_ERR); +- int pack_weight_size = GetBatch(weight_tensor) * weight_size_hw; ++ int weight_size_hw = TensorCGetHeight(weight_tensor) * TensorCGetWidth(weight_tensor); ++ NNACL_CHECK_INT_MUL_NOT_OVERFLOW(TensorCGetBatch(weight_tensor), weight_size_hw, NNACL_ERR); ++ int pack_weight_size = TensorCGetBatch(weight_tensor) * weight_size_hw; + NNACL_CHECK_INT_MUL_NOT_OVERFLOW(pack_weight_size, sizeof(float), NNACL_ERR); + self->work_size_ = pack_weight_size * sizeof(float); + } +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/convolution_slidewindow.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/convolution_slidewindow.c +index 050dfe23..18c35971 100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/convolution_slidewindow.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/convolution_slidewindow.c +@@ -83,10 +83,10 @@ void ConvSWPackWeight(ConvolutionBaseStruct *conv) { + TensorC 
*filter_tensor = conv->base_.in_[SECOND_INPUT]; + NNACL_CHECK_NULL_RETURN_VOID(filter_tensor); + +- int input_channel = GetChannel(filter_tensor); +- int output_channel = GetBatch(filter_tensor); +- int kernel_h = GetHeight(filter_tensor); +- int kernel_w = GetWidth(filter_tensor); ++ int input_channel = TensorCGetChannel(filter_tensor); ++ int output_channel = TensorCGetBatch(filter_tensor); ++ int kernel_h = TensorCGetHeight(filter_tensor); ++ int kernel_w = TensorCGetWidth(filter_tensor); + + int oc_block_num = UP_DIV(output_channel, conv_sw->oc_tile_); + void *origin_weight = (conv->base_.train_session_) ? filter_tensor->data_ : conv->origin_weight_; +@@ -103,10 +103,10 @@ int ConvSWMallocWeightBiasData(ConvolutionBaseStruct *conv) { + TensorC *filter_tensor = conv->base_.in_[SECOND_INPUT]; + NNACL_CHECK_NULL_RETURN_ERR(filter_tensor); + +- int input_channel = GetChannel(filter_tensor); +- int output_channel = GetBatch(filter_tensor); +- int kernel_h = GetHeight(filter_tensor); +- int kernel_w = GetWidth(filter_tensor); ++ int input_channel = TensorCGetChannel(filter_tensor); ++ int output_channel = TensorCGetBatch(filter_tensor); ++ int kernel_h = TensorCGetHeight(filter_tensor); ++ int kernel_w = TensorCGetWidth(filter_tensor); + + NNACL_CHECK_FALSE(input_channel <= 0, NNACL_ERR); + NNACL_CHECK_FALSE(output_channel <= 0, NNACL_ERR); +@@ -215,10 +215,10 @@ int ConvolutionSWPrepare(KernelBase *self) { + NNACL_CHECK_NULL_RETURN_ERR(filter_tensor); + NNACL_CHECK_FALSE(filter_tensor->shape_size_ != DIMENSION_4D, NNACL_CONVOLUTION_WEIGHT_SHAPE_INVALID); + +- int input_channel = GetChannel(filter_tensor); +- int output_channel = GetBatch(filter_tensor); +- int kernel_h = GetHeight(filter_tensor); +- int kernel_w = GetWidth(filter_tensor); ++ int input_channel = TensorCGetChannel(filter_tensor); ++ int output_channel = TensorCGetBatch(filter_tensor); ++ int kernel_h = TensorCGetHeight(filter_tensor); ++ int kernel_w = TensorCGetWidth(filter_tensor); + + NNACL_CHECK_INT_MUL_NOT_OVERFLOW(kernel_h, kernel_w, NNACL_ERR); + int kernel_hw = kernel_h * kernel_w; +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/deconvolution.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/deconvolution.c +index a7de406b..0d98e1b5 100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/deconvolution.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/deconvolution.c +@@ -161,8 +161,8 @@ int DeConvCheckvResizeValid(ConvolutionBaseStruct *conv) { + NNACL_CHECK_NULL_RETURN_ERR(input_tensor); + TensorC *filter_tensor = conv->base_.in_[SECOND_INPUT]; + +- int resize_out_channel = GetChannel(input_tensor); +- int filter_out_channel = GetBatch(filter_tensor); ++ int resize_out_channel = TensorCGetChannel(input_tensor); ++ int filter_out_channel = TensorCGetBatch(filter_tensor); + if (filter_out_channel != resize_out_channel) { + return NNACL_DECONV_RESIZE_OC_INVALID; + } +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/deconvolution_depthwise.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/deconvolution_depthwise.c +index 22e9cb5d..094d4a6c 100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/deconvolution_depthwise.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/deconvolution_depthwise.c +@@ -131,24 +131,24 @@ void DeConvDwUpdateParam(ConvolutionBaseStruct *conv) { + + ConvParameter *conv_param = (ConvParameter *)conv->base_.param_; + conv_param->thread_num_ = conv->base_.thread_nr_; +- conv_param->input_batch_ = 
GetBatch(output); +- conv_param->input_h_ = GetHeight(output); +- conv_param->input_w_ = GetWidth(output); +- conv_param->input_channel_ = GetChannel(output); +- conv_param->output_batch_ = GetBatch(input); +- conv_param->output_h_ = GetHeight(input); +- conv_param->output_w_ = GetWidth(input); +- conv_param->output_channel_ = GetChannel(input); ++ conv_param->input_batch_ = TensorCGetBatch(output); ++ conv_param->input_h_ = TensorCGetHeight(output); ++ conv_param->input_w_ = TensorCGetWidth(output); ++ conv_param->input_channel_ = TensorCGetChannel(output); ++ conv_param->output_batch_ = TensorCGetBatch(input); ++ conv_param->output_h_ = TensorCGetHeight(input); ++ conv_param->output_w_ = TensorCGetWidth(input); ++ conv_param->output_channel_ = TensorCGetChannel(input); + + ConvComputeParam *compute = &conv->compute_; +- compute->in_n_ = GetBatch(output); +- compute->in_h_ = GetHeight(output); +- compute->in_w_ = GetWidth(output); +- compute->in_c_ = GetChannel(output); +- compute->out_n_ = GetBatch(input); +- compute->out_h_ = GetHeight(input); +- compute->out_w_ = GetWidth(input); +- compute->out_c_ = GetChannel(input); ++ compute->in_n_ = TensorCGetBatch(output); ++ compute->in_h_ = TensorCGetHeight(output); ++ compute->in_w_ = TensorCGetWidth(output); ++ compute->in_c_ = TensorCGetChannel(output); ++ compute->out_n_ = TensorCGetBatch(input); ++ compute->out_h_ = TensorCGetHeight(input); ++ compute->out_w_ = TensorCGetWidth(input); ++ compute->out_c_ = TensorCGetChannel(input); + } + + int DeConvDwResize(KernelBase *self) { +@@ -205,7 +205,7 @@ int DeConvDwCompute(KernelBase *self) { + } else { + deconv_dw->packed_input_ = in_data; + deconv_dw->packed_output_ = out_data; +- memset(deconv_dw->packed_output_, 0, GetSize(out_tensor)); ++ memset(deconv_dw->packed_output_, 0, TensorCGetSize(out_tensor)); + } + + ret = self->env_->ParallelLaunch(self->env_->thread_pool_, DeconvDwRun, self, self->thread_nr_); +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/deconvolution_winograd.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/deconvolution_winograd.c +index 8596a127..90c01284 100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/deconvolution_winograd.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/deconvolution_winograd.c +@@ -319,7 +319,7 @@ int DeConvWinogradInitDataParam(DeConvWinogradStruct *deconv) { + + if (deconv->conv_.base_.in_size_ == THREE_TENSOR) { + TensorC *bias_tensor = deconv->conv_.base_.in_[THIRD_INPUT]; +- if (bias_tensor->shape_size_ == Num1 && GetElementNum(bias_tensor) == deconv->conv_.compute_.out_c_) { ++ if (bias_tensor->shape_size_ == Num1 && TensorCGetElementNum(bias_tensor) == deconv->conv_.compute_.out_c_) { + (void)memcpy(deconv->conv_.bias_data_, bias_tensor->data_, deconv->conv_.compute_.out_c_ * sizeof(float)); + } + } +@@ -417,10 +417,10 @@ int DeConvWinogradPrepare(KernelBase *self) { + // when input data is const tensor, save data in kernel + TensorC *input_tensor = self->in_[FIRST_INPUT]; + NNACL_CHECK_NULL_RETURN_ERR(input_tensor); +- if (IsConst(input_tensor)) { +- deconv->origin_input_ = (float *)malloc(GetSize(input_tensor)); ++ if (TensorCIsConst(input_tensor)) { ++ deconv->origin_input_ = (float *)malloc(TensorCGetSize(input_tensor)); + NNACL_MALLOC_CHECK_NULL_RETURN_ERR(deconv->origin_input_); +- (void)memcpy(deconv->origin_input_, input_tensor->data_, GetSize(input_tensor)); ++ (void)memcpy(deconv->origin_input_, input_tensor->data_, TensorCGetSize(input_tensor)); + } + return NNACL_OK; + 
} +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/exp.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/exp.c +index fb65fc72..a3768603 100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/exp.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/exp.c +@@ -35,7 +35,7 @@ int ExpResize(struct KernelBase *self) { + NNACL_CHECK_NULL_RETURN_ERR(exp); + ExpParameter *param = (ExpParameter *)exp->base_.param_; + NNACL_CHECK_NULL_RETURN_ERR(param); +- exp->element_num_ = GetElementNum(exp->base_.in_[FIRST_INPUT]); ++ exp->element_num_ = TensorCGetElementNum(exp->base_.in_[FIRST_INPUT]); + return NNACL_OK; + } + +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/f16/arithmetic_f16.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/f16/arithmetic_f16.c +index 8f69f6f0..6631aad6 100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/f16/arithmetic_f16.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/f16/arithmetic_f16.c +@@ -86,9 +86,9 @@ int ArithmeticF16Resize(KernelBase *self) { + NNACL_CHECK_NULL_RETURN_ERR(t->data_); + void *f32_data = t->data_; + t->data_type_ = kNumberTypeFloat16; +- t->data_ = self->env_->Alloc(self->env_->allocator_, GetSize(t)); ++ t->data_ = self->env_->Alloc(self->env_->allocator_, TensorCGetSize(t)); + NNACL_MALLOC_CHECK_NULL_RETURN_ERR(self->in_[FIRST_INPUT]->data_); +- Float32ToFloat16((float *)(f32_data), (float16_t *)(t->data_), GetElementNum(t)); ++ Float32ToFloat16((float *)(f32_data), (float16_t *)(t->data_), TensorCGetElementNum(t)); + self->env_->Free(self->env_->allocator_, f32_data); + } + if (arithmetic->b_matrix_.is_const_ && self->in_[SECOND_INPUT]->data_type_ == kNumberTypeFloat32) { +@@ -96,9 +96,9 @@ int ArithmeticF16Resize(KernelBase *self) { + NNACL_CHECK_NULL_RETURN_ERR(t->data_); + void *f32_data = t->data_; + t->data_type_ = kNumberTypeFloat16; +- t->data_ = self->env_->Alloc(self->env_->allocator_, GetSize(t)); ++ t->data_ = self->env_->Alloc(self->env_->allocator_, TensorCGetSize(t)); + NNACL_MALLOC_CHECK_NULL_RETURN_ERR(self->in_[FIRST_INPUT]->data_); +- Float32ToFloat16((float *)(f32_data), (float16_t *)(t->data_), GetElementNum(t)); ++ Float32ToFloat16((float *)(f32_data), (float16_t *)(t->data_), TensorCGetElementNum(t)); + self->env_->Free(self->env_->allocator_, f32_data); + } + } +@@ -149,7 +149,7 @@ int ArithmeticF16Compute(KernelBase *self) { + NNACL_CHECK_NULL_RETURN_ERR(arithmetic_f16->arithmetic_.c_matrix_.data_); + NNACL_CHECK_NULL_RETURN_ERR(self->out_[OUTPUT_INDEX]->data_); + Float16ToFloat32((float16_t *)(arithmetic_f16->arithmetic_.c_matrix_.data_), +- (float *)(self->out_[OUTPUT_INDEX]->data_), GetElementNum(self->out_[OUTPUT_INDEX])); ++ (float *)(self->out_[OUTPUT_INDEX]->data_), TensorCGetElementNum(self->out_[OUTPUT_INDEX])); + } + + FreeArithmeticF16Buffers(arithmetic_f16); +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/f16/concat_f16.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/f16/concat_f16.c +index 035a56ca..55d91992 100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/f16/concat_f16.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/f16/concat_f16.c +@@ -100,7 +100,7 @@ int ConcatF16Compute(KernelBase *self) { + if (output == NULL) { + ret = NNACL_CONCAT_F16_OUTPUT_DATA_INVALID; + } else { +- Float16ToFloat32((float16_t *)concat->output_, output, GetElementNum(output_tensor)); ++ Float16ToFloat32((float16_t *)concat->output_, output, 
TensorCGetElementNum(output_tensor)); + } + } } +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/f16/reduce_f16.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/f16/reduce_f16.c +index f6b09a93..908510b3 100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/f16/reduce_f16.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/f16/reduce_f16.c +@@ -66,7 +66,7 @@ void HandleReduceF16ASumAndSumSquare(KernelBase *base) { + float16_t *data = (float16_t *)in_tensor->data_; + NNACL_CHECK_NULL_RETURN_VOID(data); + +- int num = GetElementNum(in_tensor); ++ int num = TensorCGetElementNum(in_tensor); + + if (((ReduceParameter *)base->param_)->mode_ == Reduce_ASum) { + for (int i = 0; i < num; ++i) { +@@ -86,7 +86,7 @@ void HandleReduceF16ASumAndSumSquare(KernelBase *base) { + + int CalculateReduceF16CoeffOutput(KernelBase *base) { + TensorC *out_tensor = base->out_[OUTPUT_INDEX]; +- int num = GetElementNum(out_tensor); ++ int num = TensorCGetElementNum(out_tensor); + + float16_t *out_data = (float16_t *)out_tensor->data_; + for (int i = 0; i < num; ++i) { +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/f16/stack_f16.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/f16/stack_f16.c +index 63910abe..81ebb0ff 100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/f16/stack_f16.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/f16/stack_f16.c +@@ -23,7 +23,7 @@ void *StackF16InitBuffer(KernelBase *base, TensorC *t, bool init) { + return t->data_; + } + +- int ele_num = GetElementNum(t); ++ int ele_num = TensorCGetElementNum(t); + void *f16_buffer = base->env_->Alloc(base->env_->allocator_, ele_num * sizeof(float16_t)); + NNACL_MALLOC_CHECK_NULL_RETURN_NULL(f16_buffer); + Float32ToFloat16(t->data_, f16_buffer, ele_num); +@@ -52,7 +52,7 @@ void StackF16FreeBuffer(StackF16Struct *stack_f16) { + /* output transfer */ + Float16ToFloat32((float16_t *)stack_f16->stack_.buffers_[stack_f16->stack_.base_.in_size_], + (float *)stack_f16->stack_.base_.out_[OUTPUT_INDEX]->data_, +- GetElementNum(stack_f16->stack_.base_.out_[OUTPUT_INDEX])); ++ TensorCGetElementNum(stack_f16->stack_.base_.out_[OUTPUT_INDEX])); + } + + for (size_t i = 0; i < (stack_f16->stack_.base_.in_size_ + stack_f16->stack_.base_.out_size_); ++i) { +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/fill.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/fill.c +index db21e338..bc5d7549 100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/fill.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/fill.c +@@ -29,10 +29,10 @@ int FillResize(struct KernelBase *self) { + FillStruct *fill = (FillStruct *)self; + NNACL_CHECK_NULL_RETURN_ERR(fill); + fill->base_.thread_nr_ = fill->base_.UpdateThread(TC_PTYPE(PrimType_Fill), 0, 1, +- GetSize(fill->base_.out_[OUTPUT_INDEX]), fill->base_.thread_nr_); ++ TensorCGetSize(fill->base_.out_[OUTPUT_INDEX]), fill->base_.thread_nr_); + + NNACL_CHECK_NULL_RETURN_ERR(fill->base_.out_[OUTPUT_INDEX]); +- fill->data_size_ = (int)GetElementNum(fill->base_.out_[OUTPUT_INDEX]); ++ fill->data_size_ = (int)TensorCGetElementNum(fill->base_.out_[OUTPUT_INDEX]); + fill->thread_sz_count_ = MSMIN(fill->base_.thread_nr_, fill->data_size_); + if (fill->thread_sz_count_ != 0) { + fill->thread_sz_stride_ = UP_DIV(fill->data_size_, fill->thread_sz_count_); +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/gather.c 
b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/gather.c +index fd53b561..5e9158f4 100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/gather.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/gather.c +@@ -117,12 +117,12 @@ int InitGatherDynamicStatus(GatherStruct *gather) { + for (int i = gather->axis_ + 1; i < in_rank; ++i) { + gather->byte_inner_size_ *= in_shape[i]; + } +- gather->indices_size_ = GetElementNum(gather->base_.in_[SECOND_INPUT]); ++ gather->indices_size_ = TensorCGetElementNum(gather->base_.in_[SECOND_INPUT]); + return NNACL_OK; + } + + void GatherUpdateThreadNumProcess(GatherStruct *gather) { +- int all_bytes = GetSize(gather->base_.out_[OUTPUT_INDEX]); ++ int all_bytes = TensorCGetSize(gather->base_.out_[OUTPUT_INDEX]); + if (all_bytes <= kGatherMinCostPerThread) { + gather->base_.thread_nr_ = 1; + return; +@@ -130,7 +130,7 @@ void GatherUpdateThreadNumProcess(GatherStruct *gather) { + + gather->base_.thread_nr_ = + gather->base_.UpdateThread(TC_PTYPE(PrimType_Gather), 0, gather->byte_inner_size_, +- GetSize(gather->base_.out_[OUTPUT_INDEX]), gather->base_.thread_nr_); ++ TensorCGetSize(gather->base_.out_[OUTPUT_INDEX]), gather->base_.thread_nr_); + return; + } + +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/group_convolution.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/group_convolution.c +index 721c8f08..60b81636 100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/group_convolution.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/group_convolution.c +@@ -70,7 +70,7 @@ TensorC *CreateConstTensor(const TensorC *tensor, const int *shape, const int sh + new_tensor->shape_size_ = shape_size; + memcpy(new_tensor->shape_, shape, shape_size * sizeof(int)); + +- int size = GetSize(new_tensor); ++ int size = TensorCGetSize(new_tensor); + if (size <= 0) { + free(new_tensor); + return NULL; +@@ -90,7 +90,7 @@ TensorC *CreateConstTensor(const TensorC *tensor, const int *shape, const int sh + + int GroupConvCreatorNewConstTensor(GroupConvolutionStruct *group_conv, KernelBase *new_conv, int group_id) { + TensorC *origin_weight = group_conv->conv_base_.base_.in_[SECOND_INPUT]; +- int shape[] = {group_conv->sub_out_c_, GetHeight(origin_weight), GetWidth(origin_weight), group_conv->sub_in_c_}; ++ int shape[] = {group_conv->sub_out_c_, TensorCGetHeight(origin_weight), TensorCGetWidth(origin_weight), group_conv->sub_in_c_}; + TensorC *weight_tensor = CreateConstTensor(origin_weight, shape, DIMENSION_4D, group_id); + NNACL_MALLOC_CHECK_NULL_RETURN_ERR(weight_tensor); + new_conv->in_[SECOND_INPUT] = weight_tensor; +@@ -115,8 +115,8 @@ int GroupConvCreatorSetShapeOfTensors(GroupConvolutionStruct *group_conv) { + NNACL_CHECK_NULL_RETURN_ERR(weight_tensor); + NNACL_CHECK_FALSE(origin_conv_param->group_ == 0, NNACL_GROUP_CONVOLUTION_GROUP_INVALID); + NNACL_CHECK_FALSE(weight_tensor->shape_size_ != DIMENSION_4D, NNACL_CONVOLUTION_WEIGHT_SHAPE_INVALID); +- NNACL_CHECK_FALSE(origin_conv_param->kernel_h_ != GetHeight(weight_tensor), NNACL_CONVOLUTION_WEIGHT_SHAPE_INVALID); +- NNACL_CHECK_FALSE(origin_conv_param->kernel_w_ != GetWidth(weight_tensor), NNACL_CONVOLUTION_WEIGHT_SHAPE_INVALID); ++ NNACL_CHECK_FALSE(origin_conv_param->kernel_h_ != TensorCGetHeight(weight_tensor), NNACL_CONVOLUTION_WEIGHT_SHAPE_INVALID); ++ NNACL_CHECK_FALSE(origin_conv_param->kernel_w_ != TensorCGetWidth(weight_tensor), NNACL_CONVOLUTION_WEIGHT_SHAPE_INVALID); + + ConvComputeParam *compute = 
&group_conv->conv_base_.compute_; + group_conv->ori_in_c_ = compute->in_c_; +@@ -251,11 +251,11 @@ int GroupConvSeparateInput(GroupConvolutionStruct *group_conv, int group_id) { + void GroupConvUpdateShape(GroupConvolutionStruct *group_conv) { + for (int i = 0; i < group_conv->group_; i++) { + TensorC *in_tensor = group_conv->conv_base_.base_.in_[FIRST_INPUT]; +- int in_shape[] = {GetBatch(in_tensor), GetHeight(in_tensor), GetWidth(in_tensor), group_conv->sub_in_c_}; ++ int in_shape[] = {TensorCGetBatch(in_tensor), TensorCGetHeight(in_tensor), TensorCGetWidth(in_tensor), group_conv->sub_in_c_}; + memcpy(group_conv->group_convs_[i]->in_[FIRST_INPUT]->shape_, in_shape, DIMENSION_4D * sizeof(float)); + + TensorC *out_tensor = group_conv->conv_base_.base_.out_[OUTPUT_INDEX]; +- int out_shape[] = {GetBatch(out_tensor), GetHeight(out_tensor), GetWidth(out_tensor), group_conv->sub_out_c_}; ++ int out_shape[] = {TensorCGetBatch(out_tensor), TensorCGetHeight(out_tensor), TensorCGetWidth(out_tensor), group_conv->sub_out_c_}; + memcpy(group_conv->group_convs_[i]->out_[OUTPUT_INDEX]->shape_, out_shape, DIMENSION_4D * sizeof(float)); + } + return; +@@ -293,11 +293,11 @@ int GroupConvolutionCompute(KernelBase *self) { + for (int i = 0; i < group_conv->group_; ++i) { + // first, malloc data for sub_kernel's tensors. + TensorC *sub_kernel_in_tensor = group_conv->group_convs_[i]->in_[FIRST_INPUT]; +- sub_kernel_in_tensor->data_ = self->env_->Alloc(self->env_->allocator_, GetSize(sub_kernel_in_tensor)); ++ sub_kernel_in_tensor->data_ = self->env_->Alloc(self->env_->allocator_, TensorCGetSize(sub_kernel_in_tensor)); + NNACL_MALLOC_CHECK_NULL_RETURN_ERR(sub_kernel_in_tensor->data_); + + TensorC *sub_kernel_out_tensor = group_conv->group_convs_[i]->out_[OUTPUT_INDEX]; +- sub_kernel_out_tensor->data_ = self->env_->Alloc(self->env_->allocator_, GetSize(sub_kernel_out_tensor)); ++ sub_kernel_out_tensor->data_ = self->env_->Alloc(self->env_->allocator_, TensorCGetSize(sub_kernel_out_tensor)); + NNACL_MALLOC_CHECK_NULL_RETURN_ERR(sub_kernel_out_tensor->data_); + + // second, separate group conv input into several parts. This step must be in runtime stage. 
+@@ -358,7 +358,7 @@ void GroupConvReleaseSubConv(KernelBase *current_conv) { + + if (current_conv->in_ != NULL) { + for (int j = 0; j < current_conv->in_size_; j++) { +- if (IsConst(current_conv->in_[j])) { ++ if (TensorCIsConst(current_conv->in_[j])) { + free(current_conv->in_[j]->data_); + current_conv->in_[j]->data_ = NULL; + } +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/group_norm.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/group_norm.c +index 8facea24..7fdaa55a 100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/group_norm.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/group_norm.c +@@ -36,9 +36,9 @@ int GroupNormResize(struct KernelBase *self) { + NNACL_CHECK_FALSE(in0->shape_size_ < C1NUM, NNACL_GROUP_NORM_SHAPE_SIZE_INVALID); + NNACL_CHECK_FALSE(in0->format_ != Format_NCHW, NNACL_GROUP_NORM_FORMAT_INVALID); + +- param->unit_ = GetHeight(in0) * GetWidth(in0); +- param->batch_ = GetBatch(in0); +- param->channel_ = GetChannel(in0); ++ param->unit_ = TensorCGetHeight(in0) * TensorCGetWidth(in0); ++ param->batch_ = TensorCGetBatch(in0); ++ param->channel_ = TensorCGetChannel(in0); + return self->Prepare(self); + } + +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/layer_norm.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/layer_norm.c +index 1104abab..9456c2ba 100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/layer_norm.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/layer_norm.c +@@ -75,7 +75,7 @@ int LayerNormResize(KernelBase *self) { + compute->params_inner_size_ *= input->shape_[i]; + } + +- int out_num = GetElementNum(self->out_[OUTPUT_INDEX]); ++ int out_num = TensorCGetElementNum(self->out_[OUTPUT_INDEX]); + self->thread_nr_ = self->UpdateThread(TC_PTYPE(PrimType_LayerNormFusion), compute->norm_inner_size_, + compute->norm_inner_size_, out_num, self->thread_nr_); + self->thread_nr_ = NNACL_MIN(compute->norm_outer_size_, self->thread_nr_); +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/local_response_norm.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/local_response_norm.c +index 30e79de6..bb2a02b5 100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/local_response_norm.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/local_response_norm.c +@@ -37,10 +37,10 @@ int LocalResponseNormRun(void *cdata, int task_id, float l, float r) { + float *output_ptr = (float *)output->data_; + NNACL_CHECK_NULL_RETURN_ERR(output_ptr); + +- int batch = GetBatch(input); +- int height = GetHeight(input); +- int width = GetWidth(input); +- int channel = GetChannel(input); ++ int batch = TensorCGetBatch(input); ++ int height = TensorCGetHeight(input); ++ int width = TensorCGetWidth(input); ++ int channel = TensorCGetChannel(input); + + NNACL_CHECK_INT_MUL_NOT_OVERFLOW(batch, width, NNACL_ERR); + int size_bw = batch * width; +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/matmul_base.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/matmul_base.c +index f1c9eee4..883fc1ba 100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/matmul_base.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/matmul_base.c +@@ -244,7 +244,7 @@ int MatmulBasePackMatrixB(MatmulStruct *matmul) { + + int MatmulBaseBackupConstMatrix(MatmulStruct *matmul, MatrixInfo *matrix_info, int index) { + NNACL_CHECK_TRUE_RET(index < (int)matmul->base_.in_size_, NNACL_ERR); +- size_t backup_size = 
(size_t)GetElementNum(matmul->base_.in_[index]) * sizeof(float); ++ size_t backup_size = (size_t)TensorCGetElementNum(matmul->base_.in_[index]) * sizeof(float); + NNACL_CHECK_TRUE_RET(backup_size > 0, NNACL_ERR); + matrix_info->origin_ptr_ = (float *)(matmul->base_.env_->Alloc(matmul->base_.env_->allocator_, backup_size)); + NNACL_MALLOC_CHECK_NULL_RETURN_ERR(matrix_info->origin_ptr_); +@@ -419,7 +419,7 @@ int MatmulBasePackBiasMatrix(MatmulStruct *matmul) { + float *bias_src = matmul->matrix_c_.has_origin_ ? matmul->matrix_c_.origin_ptr_ : (float *)bias_tensor->data_; + NNACL_CHECK_NULL_RETURN_ERR(bias_src); + +- int bias_num = GetElementNum(bias_tensor); ++ int bias_num = TensorCGetElementNum(bias_tensor); + NNACL_CHECK_TRUE_RET(bias_num > 0 && matmul->compute_.col_align_ >= bias_num, NNACL_ERR); + + matmul->matrix_c_.pack_size_ = matmul->compute_.col_align_; +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/non_zero.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/non_zero.c +index cfaafaf7..d23cfc78 100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/non_zero.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/non_zero.c +@@ -37,7 +37,7 @@ int NonZeroCompute(KernelBase *self) { + int *coordiate_values = (int *)self->env_->Alloc(self->env_->allocator_, input->shape_size_ * sizeof(int)); + NNACL_MALLOC_CHECK_NULL_RETURN_ERR(coordiate_values); + +- for (int i = 0; i < GetElementNum(input); i += 1) { ++ for (int i = 0; i < TensorCGetElementNum(input); i += 1) { + if (input_data[i]) { + for (size_t j = 0; j < input->shape_size_; j++) { + output_data[non_zero_count + (int)j * non_zero_nums] = coordiate_values[j]; +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/one_hot.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/one_hot.c +index 579c42de..ce932782 100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/one_hot.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/one_hot.c +@@ -172,11 +172,11 @@ int OneHotResize(KernelBase *self) { + if (one_hot->outer_size_ == 0) { + return NNACL_ONE_HOT_OUTER_SIZE_INVALID; + } +- one_hot->inner_size_ = GetElementNum(indices) / one_hot->outer_size_; ++ one_hot->inner_size_ = TensorCGetElementNum(indices) / one_hot->outer_size_; + NNACL_CHECK_FALSE(one_hot->inner_size_ <= 0, NNACL_ONE_HOT_INNER_SIZE_INVALID); + + self->thread_nr_ = self->UpdateThread(TC_PTYPE(PrimType_OneHot), one_hot->inner_size_, one_hot->outer_size_, +- GetElementNum(self->out_[OUTPUT_INDEX]), self->thread_nr_); ++ TensorCGetElementNum(self->out_[OUTPUT_INDEX]), self->thread_nr_); + return NNACL_OK; + } + +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/ones_like.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/ones_like.c +index b5f20258..87edca6b 100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/ones_like.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/ones_like.c +@@ -29,7 +29,7 @@ int OnesLikeCompute(KernelBase *self) { + NNACL_CHECK_NULL_RETURN_ERR(output_tensor); + void *output_ptr = output_tensor->data_; + NNACL_CHECK_NULL_RETURN_ERR(output_ptr); +- size_t num = (size_t)GetElementNum(output_tensor); ++ size_t num = (size_t)TensorCGetElementNum(output_tensor); + + if (output_tensor->data_type_ == kNumberTypeFloat32) { + float *output = (float *)output_ptr; +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/pad.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/pad.c +index 3cbf6130..86fa0e75 
100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/pad.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/pad.c +@@ -219,9 +219,9 @@ int MirrorPadImpl(void *cdata, int task_id, float l, float r) { + NNACL_CHECK_NULL_RETURN_ERR(output_data); + + /* Common Mirror pad */ +- int unit = UP_DIV(GetElementNum(output), pad->base_.thread_nr_); ++ int unit = UP_DIV(TensorCGetElementNum(output), pad->base_.thread_nr_); + int begin = unit * task_id; +- int end = NNACL_MIN(begin + unit, GetElementNum(output)); ++ int end = NNACL_MIN(begin + unit, TensorCGetElementNum(output)); + if (pad->data_type_ == kNumberTypeFloat16) { + #ifdef ENABLE_FP16 + MirrorPadFp16((float16_t *)input_data, (float16_t *)output_data, pad->in_, pad->in_strides_, pad->out_strides_, +@@ -259,7 +259,7 @@ int PadCopyPaddingFromInput(PadStruct *pad) { + NNACL_CHECK_NULL_RETURN_ERR(padding_data); + + (void)PadExtendDims(pad->in_, input_tensor->shape_, DEFAULT_PAD_NDIMS, input_tensor->shape_size_, 1); +- (void)PadExtendDims(pad->paddings_, padding_data, MAX_PAD_SIZE, GetElementNum(padding_tensor), 0); ++ (void)PadExtendDims(pad->paddings_, padding_data, MAX_PAD_SIZE, TensorCGetElementNum(padding_tensor), 0); + pad->paddings_size_ = MAX_PAD_SIZE; + + return NNACL_OK; +@@ -293,7 +293,7 @@ int PadCompute(KernelBase *self) { + if (self->in_size_ == THREE_TENSOR) { + TensorC *pad_value_tensor = self->in_[THIRD_INPUT]; + NNACL_CHECK_NULL_RETURN_ERR(pad_value_tensor); +- NNACL_CHECK_FALSE(GetElementNum(pad_value_tensor) != 1, NNACL_PAD_PADDING_VALID_INVALID); ++ NNACL_CHECK_FALSE(TensorCGetElementNum(pad_value_tensor) != 1, NNACL_PAD_PADDING_VALID_INVALID); + void *pad_valud = pad_value_tensor->data_; + if (pad->data_type_ == kNumberTypeFloat16) { + #ifdef ENABLE_FP16 +@@ -312,7 +312,7 @@ int PadCompute(KernelBase *self) { + if (pad->pad_mode_ == PaddingMode_Constant) { + TensorC *output = self->out_[OUTPUT_INDEX]; + NNACL_CHECK_NULL_RETURN_ERR(output); +- size_t output_size = GetElementNum(output); ++ size_t output_size = TensorCGetElementNum(output); + void *output_data = output->data_; + if (fabsf(pad->constant_value_ - 0.0f) < 1e-5) { + memset(output_data, 0, output_size * (int)DataTypeCSize(pad->data_type_)); +@@ -357,7 +357,7 @@ int PadResize(KernelBase *self) { + + int rank = input->shape_size_; + NNACL_CHECK_FALSE(input->shape_size_ > DEFAULT_PAD_NDIMS, NNACL_PAD_SHAPE_INVALID); +- NNACL_CHECK_FALSE(GetElementNum(padding) != rank + rank, NNACL_PAD_SHAPE_INVALID); ++ NNACL_CHECK_FALSE(TensorCGetElementNum(padding) != rank + rank, NNACL_PAD_SHAPE_INVALID); + + if (pad->pad_mode_ == PaddingMode_Constant) { + (void)PadExtendDims(pad->in_, input->shape_, DEFAULT_PAD_NDIMS, rank, 1); +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/pooling.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/pooling.c +index 62a2232d..6d91f02e 100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/pooling.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/pooling.c +@@ -100,14 +100,14 @@ int PoolingResize(KernelBase *self) { + PoolingComputeParam *compute = &pooling->compute_; + PoolingParameter *param = (PoolingParameter *)self->param_; + +- compute->input_batch_ = GetBatch(in_tensor); +- compute->input_channel_ = GetChannel(in_tensor); +- compute->input_h_ = GetHeight(in_tensor); +- compute->input_w_ = GetWidth(in_tensor); +- compute->output_batch_ = GetBatch(out_tensor); +- compute->output_channel_ = GetChannel(out_tensor); +- compute->output_h_ = GetHeight(out_tensor); +- 
compute->output_w_ = GetWidth(out_tensor); ++ compute->input_batch_ = TensorCGetBatch(in_tensor); ++ compute->input_channel_ = TensorCGetChannel(in_tensor); ++ compute->input_h_ = TensorCGetHeight(in_tensor); ++ compute->input_w_ = TensorCGetWidth(in_tensor); ++ compute->output_batch_ = TensorCGetBatch(out_tensor); ++ compute->output_channel_ = TensorCGetChannel(out_tensor); ++ compute->output_h_ = TensorCGetHeight(out_tensor); ++ compute->output_w_ = TensorCGetWidth(out_tensor); + compute->window_h_ = param->window_h_; + compute->window_w_ = param->window_w_; + if (param->global_) { +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/prior_box.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/prior_box.c +index 2462b061..99de70d8 100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/prior_box.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/prior_box.c +@@ -71,7 +71,7 @@ int RunPriorBox(void *cdata, int task_id, float l, float r) { + NNACL_CHECK_NULL_RETURN_ERR(output_tensor); + float *output_data = output_tensor->data_; + NNACL_CHECK_NULL_RETURN_ERR(output_data); +- return PriorBox(prior_box->output_, output_data, GetSize(output_tensor), task_id, prior_box->base_.thread_nr_); ++ return PriorBox(prior_box->output_, output_data, TensorCGetSize(output_tensor), task_id, prior_box->base_.thread_nr_); + } + + int PriorBoxRelease(KernelBase *self) { +@@ -97,12 +97,12 @@ int PriorBoxResize(KernelBase *self) { + TensorC *output_tensor = prior_box->base_.out_[OUTPUT_INDEX]; + NNACL_CHECK_NULL_RETURN_ERR(output_tensor); + +- prior_box->fmap_w_ = GetWidth(input0_tensor); ++ prior_box->fmap_w_ = TensorCGetWidth(input0_tensor); + NNACL_CHECK_ZERO_RETURN_ERR(prior_box->fmap_w_); +- prior_box->fmap_h_ = GetHeight(input1_tensor); ++ prior_box->fmap_h_ = TensorCGetHeight(input1_tensor); + NNACL_CHECK_ZERO_RETURN_ERR(prior_box->fmap_h_); +- const int image_w = param->image_size_w > 0 ? param->image_size_w : GetWidth(input1_tensor); +- const int image_h = param->image_size_h > 0 ? param->image_size_h : GetHeight(input1_tensor); ++ const int image_w = param->image_size_w > 0 ? param->image_size_w : TensorCGetWidth(input1_tensor); ++ const int image_h = param->image_size_h > 0 ? param->image_size_h : TensorCGetHeight(input1_tensor); + + prior_box->step_w_ = param->step_w > 0.0f ? param->step_w : (float)(image_w) / prior_box->fmap_w_; + prior_box->step_h_ = param->step_h > 0.0f ? 
param->step_h : (float)(image_h) / prior_box->fmap_h_; +@@ -136,7 +136,7 @@ int PriorBoxResize(KernelBase *self) { + PriorBoxRelease(self); + int size = Num4 + Num4 + different_aspect_ratios_size; + size = size * prior_box->fmap_h_ * prior_box->fmap_w_ * param->min_sizes_size; +- size = size + UP_ROUND(GetHeight(output_tensor), COMM_SHAPE_SIZE); ++ size = size + UP_ROUND(TensorCGetHeight(output_tensor), COMM_SHAPE_SIZE); + size = size * sizeof(float); + NNACL_CHECK_MALLOC_SIZE(size); + prior_box->output_ = (float *)self->env_->Alloc(self->env_->allocator_, size); +@@ -162,7 +162,7 @@ int PriorBoxResize(KernelBase *self) { + } + + // variance +- for (int i = 0; i < GetHeight(output_tensor) / COMM_SHAPE_SIZE; i++) { ++ for (int i = 0; i < TensorCGetHeight(output_tensor) / COMM_SHAPE_SIZE; i++) { + for (int j = 0; j < COMM_SHAPE_SIZE; j++) { + prior_box->output_[prior_box->output_size_++] = param->variances[j]; + } +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/range.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/range.c +index b8aa7c08..bdda3f99 100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/range.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/range.c +@@ -28,7 +28,7 @@ int RangeCompute(KernelBase *self) { + NNACL_CHECK_NULL_RETURN_ERR(input); + TensorC *output = self->out_[OUTPUT_INDEX]; + NNACL_CHECK_NULL_RETURN_ERR(output); +- int output_num = GetElementNum(output); ++ int output_num = TensorCGetElementNum(output); + + if (self->in_size_ == THREE_TENSOR) { + TensorC *delta = self->in_[THIRD_INPUT]; +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/reduce.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/reduce.c +index 5e3d2f69..50a349fa 100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/reduce.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/reduce.c +@@ -87,7 +87,7 @@ int ReduceImpl(void *cdata, int task_id, float l, float r) { + } + + int CopyReduceyInputToOutput(ReduceStruct *reduce) { +- int total_size = GetSize(reduce->base_.in_[FIRST_INPUT]); ++ int total_size = TensorCGetSize(reduce->base_.in_[FIRST_INPUT]); + NNACL_CHECK_FALSE(total_size == 0, NNACL_REDUCE_INPUT_SHAPE_SIZE_INVALID); + int block_size = UP_DIV(total_size, reduce->base_.thread_nr_); + int tmp_thread_num = UP_DIV(total_size, block_size); +@@ -130,7 +130,7 @@ int CalculateReduceCoeffOutput(KernelBase *base) { + TensorC *out_tensor = reduce->base_.out_[OUTPUT_INDEX]; + NNACL_CHECK_NULL_RETURN_ERR(out_tensor); + NNACL_CHECK_NULL_RETURN_ERR(out_tensor->data_); +- int num = GetElementNum(out_tensor); ++ int num = TensorCGetElementNum(out_tensor); + + float *out_data = (float *)out_tensor->data_; + for (int i = 0; i < num; ++i) { +@@ -150,7 +150,7 @@ void HandleReduceASumAndSumSquare(KernelBase *base) { + float *data = (float *)in_tensor->data_; + NNACL_CHECK_NULL_RETURN_VOID(data); + +- int num = GetElementNum(in_tensor); ++ int num = TensorCGetElementNum(in_tensor); + + if (((ReduceParameter *)base->param_)->mode_ == Reduce_ASum) { + for (int i = 0; i < num; ++i) { +@@ -197,7 +197,7 @@ int ReduceCommonPrepare(ReduceStruct *reduce) { + NNACL_CHECK_FALSE(axes_tensor->data_type_ != kNumberTypeInt && axes_tensor->data_type_ != kNumberTypeInt32, + NNACL_REDUCE_AXES_TENSOR_ERROR); + +- reduce->num_axes_ = GetElementNum(axes_tensor); ++ reduce->num_axes_ = TensorCGetElementNum(axes_tensor); + + if (axes_tensor->data_ != NULL && (reduce->num_axes_ <= 0 || reduce->num_axes_ > MAX_SHAPE_SIZE)) { + return 
NNACL_REDUCE_AXES_TENSOR_ERROR; +@@ -208,8 +208,8 @@ int ReduceCommonPrepare(ReduceStruct *reduce) { + reduce->axes_[i] = i; + } + } else { +- NNACL_CHECK_FALSE(GetSize(axes_tensor) == 0, NNACL_REDUCE_AXES_TENSOR_ERROR); +- (void)memcpy(reduce->axes_, axes_tensor->data_, GetSize(axes_tensor)); ++ NNACL_CHECK_FALSE(TensorCGetSize(axes_tensor) == 0, NNACL_REDUCE_AXES_TENSOR_ERROR); ++ (void)memcpy(reduce->axes_, axes_tensor->data_, TensorCGetSize(axes_tensor)); + } + + return NNACL_OK; +@@ -349,7 +349,7 @@ int ReduceResize(struct KernelBase *self) { + reduce->inner_sizes_[Index0] * reduce->axis_sizes_[Index0], reduce->outer_sizes_[Index0], self->thread_nr_); + } else { + self->thread_nr_ = self->UpdateThread(TC_TYPE(PrimType_ReduceFusion, Reduce_Max + 1), 0, 0, +- GetElementNum(self->out_[OUTPUT_INDEX]), self->thread_nr_); ++ TensorCGetElementNum(self->out_[OUTPUT_INDEX]), self->thread_nr_); + } + return NNACL_OK; + } +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/reshape.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/reshape.c +index aebde094..e883e7fa 100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/reshape.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/reshape.c +@@ -37,7 +37,7 @@ int ParallelReshape(void *param, int task_id, float l, float r) { + int ReshapeResize(struct KernelBase *self) { + NNACL_CHECK_NULL_RETURN_ERR(self); + ReshapeStruct *reshape = (ReshapeStruct *)self; +- reshape->total_size_ = GetSize(self->in_[0]); ++ reshape->total_size_ = TensorCGetSize(self->in_[0]); + if (reshape->total_size_ == 0) { + return NNACL_OK; + } +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/scale.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/scale.c +index a5ee6687..3e867559 100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/scale.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/scale.c +@@ -148,7 +148,7 @@ int ScaleInitScaleOffset(ScaleStruct *scale) { + + if (scale->base_.in_size_ == TWO_TENSOR) { + scale->malloc_offset_ = true; +- int malloc_size = GetElementNum(scale_tensor) * data_type_size; ++ int malloc_size = TensorCGetElementNum(scale_tensor) * data_type_size; + NNACL_CHECK_MALLOC_SIZE(malloc_size); + scale->offset_ = scale->base_.env_->Alloc(scale->base_.env_->allocator_, malloc_size); + memset(scale->offset_, 0, malloc_size); +@@ -162,7 +162,7 @@ int ScaleInitScaleOffset(ScaleStruct *scale) { + + if (scale_tensor->data_ != NULL) { + scale->malloc_scale_ = true; +- int malloc_size = GetElementNum(scale_tensor) * data_type_size; ++ int malloc_size = TensorCGetElementNum(scale_tensor) * data_type_size; + NNACL_CHECK_MALLOC_SIZE(malloc_size); + scale->scale_ = scale->base_.env_->Alloc(scale->base_.env_->allocator_, malloc_size); + NNACL_MALLOC_CHECK_NULL_RETURN_ERR(scale->scale_); +@@ -181,7 +181,7 @@ int ScaleInitScaleOffset(ScaleStruct *scale) { + NNACL_CHECK_NULL_RETURN_ERR(offset_tensor); + if (offset_tensor->data_ != NULL) { + scale->malloc_offset_ = true; +- int malloc_size = GetElementNum(offset_tensor) * data_type_size; ++ int malloc_size = TensorCGetElementNum(offset_tensor) * data_type_size; + NNACL_CHECK_MALLOC_SIZE(malloc_size); + scale->offset_ = scale->base_.env_->Alloc(scale->base_.env_->allocator_, malloc_size); + NNACL_MALLOC_CHECK_NULL_RETURN_ERR(scale->scale_); +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/size.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/size.c +index f88a758a..b342c146 100644 +--- 
a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/size.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/size.c +@@ -25,7 +25,7 @@ int SizeCompute(KernelBase *self) { + NNACL_CHECK_NULL_RETURN_ERR(out_tensor); + int *out_data = (int *)out_tensor->data_; + NNACL_CHECK_NULL_RETURN_ERR(out_data); +- out_data[Index0] = GetElementNum(in_tensor); ++ out_data[Index0] = TensorCGetElementNum(in_tensor); + return NNACL_OK; + } + +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/softmax.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/softmax.c +index 24bb4de7..89b51cc3 100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/softmax.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/softmax.c +@@ -110,7 +110,7 @@ int SoftmaxResize(struct KernelBase *self) { + int *in_shape = in_tensor->shape_; + + self->thread_nr_ = self->UpdateThread(TC_PTYPE(PrimType_Softmax), in_shape[softmax->axis_], in_shape[softmax->axis_], +- GetElementNum(self->out_[OUTPUT_INDEX]), self->thread_nr_); ++ TensorCGetElementNum(self->out_[OUTPUT_INDEX]), self->thread_nr_); + return NNACL_OK; + } + +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/stack.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/stack.c +index 11ecd5ff..cbf85783 100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/stack.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/stack.c +@@ -74,8 +74,8 @@ int StackResize(KernelBase *self) { + stack->axis_ = origin_axis < 0 ? origin_axis + (int)input->shape_size_ + 1 : origin_axis; + + if (self->in_size_ == 1) { +- NNACL_CHECK_FALSE(GetElementNum(input) <= 0, NNACL_STACK_TENSOR_SHAPE_INVALID); +- stack->copy_size_ = (size_t)GetElementNum(input) * DataTypeCSize(stack->data_type_); ++ NNACL_CHECK_FALSE(TensorCGetElementNum(input) <= 0, NNACL_STACK_TENSOR_SHAPE_INVALID); ++ stack->copy_size_ = (size_t)TensorCGetElementNum(input) * DataTypeCSize(stack->data_type_); + stack->outer_size_ = 1; + } else { + NNACL_CHECK_FALSE((int)input->shape_size_ < stack->axis_, NNACL_STACK_TENSOR_SHAPE_INVALID); +@@ -85,7 +85,7 @@ int StackResize(KernelBase *self) { + } + + self->thread_nr_ = self->UpdateThread(TC_PTYPE(PrimType_Stack), stack->copy_size_, stack->copy_size_, +- GetElementNum(self->out_[OUTPUT_INDEX]), self->thread_nr_); ++ TensorCGetElementNum(self->out_[OUTPUT_INDEX]), self->thread_nr_); + self->thread_nr_ = NNACL_MIN(UP_DIV(stack->outer_size_, NNACL_STACK_STEP), self->thread_nr_); + return NNACL_OK; + } +diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/strided_slice.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/strided_slice.c +index 714bcaef..9004ba36 100644 +--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/strided_slice.c ++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/strided_slice.c +@@ -126,7 +126,7 @@ int StridedSliceSoftCopyInputToOutput(StridedSliceStruct *strided_slice) { + NNACL_CHECK_NULL_RETURN_ERR(out_tensor); + NNACL_CHECK_NULL_RETURN_ERR(out_tensor->data_); + +- int total_size = GetSize(in_tensor); ++ int total_size = TensorCGetSize(in_tensor); + NNACL_CHECK_FALSE(total_size == 0, NNACL_STRIDED_SLICE_INVALID_DATA_SIZE); + + strided_slice->base_.thread_nr_ = +@@ -208,8 +208,8 @@ void StridedSliceInitFastRunParam(StridedSliceStruct *strided_slice) { + } + + strided_slice->base_.thread_nr_ = strided_slice->base_.UpdateThread( +- TC_TYPE(PrimType_StridedSlice, strided_slice->parallel_on_outer_), 1, 1, +- 
++    TC_TYPE(PrimType_StridedSlice, strided_slice->parallel_on_outer_), 1, 1,
++    TensorCGetElementNum(strided_slice->base_.out_[OUTPUT_INDEX]), strided_slice->base_.thread_nr_);
+ 
+   strided_slice->cal_num_per_thread_ =
+     strided_slice->parallel_on_split_axis_
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/tile.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/tile.c
+index aac6cfc5..25de3b4a 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/tile.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/tile.c
+@@ -31,13 +31,13 @@ int TileDoubleInputScenes(TileStruct *tile) {
+     return NNACL_OK;
+   }
+ 
+-  NNACL_CHECK_FALSE(GetElementNum(t) > (int)tile->base_.in_[FIRST_INPUT]->shape_size_,
++  NNACL_CHECK_FALSE(TensorCGetElementNum(t) > (int)tile->base_.in_[FIRST_INPUT]->shape_size_,
+                     NNACL_TILE_SECOND_INPUT_NUM_INVALID);
+   NNACL_CHECK_FALSE(t->data_type_ != kNumberTypeInt && t->data_type_ != kNumberTypeInt32,
+                     NNACL_TILE_SECOND_INPUT_DATA_TYPE_INVALID);
+ 
+   int *input1_addr = (int *)(t->data_);
+-  for (int i = 0; i < GetElementNum(t); ++i) {
++  for (int i = 0; i < TensorCGetElementNum(t); ++i) {
+     NNACL_CHECK_FALSE(input1_addr[i] <= 0, NNACL_TILE_SECOND_INPUT_VALUE_INVALID);
+     tile->dims_[i] = i;
+     tile->multiples_[i] = input1_addr[i];
+@@ -83,7 +83,7 @@ int TileFillOneDimTileParam(TileStruct *tile) {
+     NNACL_CHECK_FALSE(INT_MUL_OVERFLOW(tile->in_shape_[mul_index], tile->in_strides_[mul_index]), NNACL_ERR);
+     tile->fast_stride_ = (size_t)(tile->in_shape_[mul_index] * tile->in_strides_[mul_index]);
+     NNACL_CHECK_FALSE(tile->fast_stride_ < 1, NNACL_TILE_INPUT_SHAPE_INVALID);
+-    tile->fast_outer_size_ = (size_t)GetElementNum(tile->base_.in_[FIRST_INPUT]) / tile->fast_stride_;
++    tile->fast_outer_size_ = (size_t)TensorCGetElementNum(tile->base_.in_[FIRST_INPUT]) / tile->fast_stride_;
+   }
+   tile->resize_done_ = true;
+   return NNACL_OK;
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/transpose.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/transpose.c
+index 4cee01f9..06feb8dd 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/transpose.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/transpose.c
+@@ -58,7 +58,7 @@ int TransposeComputeinSingleThread(TransposeStruct *transpose) {
+ int ResetTransposeStatus(TransposeStruct *transpose) {
+   transpose->num_axes_ = 0;
+   if (transpose->base_.in_size_ == C2NUM) {
+-    transpose->num_axes_ = GetElementNum(transpose->base_.in_[SECOND_INPUT]);
++    transpose->num_axes_ = TensorCGetElementNum(transpose->base_.in_[SECOND_INPUT]);
+   }
+ 
+   TensorC *in_tensor = transpose->base_.in_[FIRST_INPUT];
+@@ -87,7 +87,7 @@ int ResetTransposeStatus(TransposeStruct *transpose) {
+   }
+   perm_data = (int *)(perm_tensor->data_);
+   NNACL_CHECK_NULL_RETURN_ERR(perm_data);
+-  int ele_num = GetElementNum(perm_tensor);
++  int ele_num = TensorCGetElementNum(perm_tensor);
+   for (int i = 0; i < ele_num; i++) {
+     for (int j = 0; j < ele_num; j++) {
+       if (i == perm_data[j]) {
+@@ -231,7 +231,7 @@ int TransposeComputeOfflineInfo(TransposeStruct *transpose) {
+   }
+   transpose->strides_[transpose->num_axes_ - 1] = 1;
+   transpose->out_strides_[transpose->num_axes_ - 1] = 1;
+-  transpose->data_num_ = GetElementNum(transpose->base_.in_[FIRST_INPUT]);
++  transpose->data_num_ = TensorCGetElementNum(transpose->base_.in_[FIRST_INPUT]);
+   for (int i = transpose->num_axes_ - 2; i >= 0; i--) {
+     transpose->strides_[i] = transpose->in_shape_[i + 1] * transpose->strides_[i + 1];
+     transpose->out_strides_[i] = transpose->out_shape_[i + 1] * transpose->out_strides_[i + 1];
+@@ -247,9 +247,9 @@ int TransposeCopyInputToOutput(TransposeStruct *transpose) {
+   NNACL_CHECK_NULL_RETURN_ERR(out_tensor);
+   NNACL_CHECK_NULL_RETURN_ERR(out_tensor->data_);
+ 
+-  NNACL_CHECK_FALSE(GetSize(in_tensor) == 0, NNACL_TRANSPOSE_INPUT_TENSOR_VALUD_INVALID);
++  NNACL_CHECK_FALSE(TensorCGetSize(in_tensor) == 0, NNACL_TRANSPOSE_INPUT_TENSOR_VALUD_INVALID);
+   if (in_tensor->data_ != out_tensor->data_) {
+-    (void)memcpy(out_tensor->data_, in_tensor->data_, GetSize(in_tensor));
++    (void)memcpy(out_tensor->data_, in_tensor->data_, TensorCGetSize(in_tensor));
+   }
+   return NNACL_OK;
+ }
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/unique.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/unique.c
+index 3a25401d..88f39030 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/unique.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/unique.c
+@@ -30,7 +30,7 @@ int UniqueCompute(KernelBase *self) {
+   TensorC *output1 = self->out_[Index1];
+   NNACL_CHECK_NULL_RETURN_ERR(output1);
+ 
+-  int num = GetElementNum(input);
++  int num = TensorCGetElementNum(input);
+   int output0_len = 0;
+ 
+ #ifdef ENABLE_FP16
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/zeros_like.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/zeros_like.c
+index d4bfbc7d..7a4d7cf4 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/zeros_like.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/zeros_like.c
+@@ -24,7 +24,7 @@ int ZerosLikeCompute(KernelBase *self) {
+   TensorC *output = self->out_[OUTPUT_INDEX];
+   NNACL_CHECK_NULL_RETURN_ERR(output);
+   NNACL_CHECK_NULL_RETURN_ERR(output->data_);
+-  (void)memset(output->data_, 0, GetSize(output));
++  (void)memset(output->data_, 0, TensorCGetSize(output));
+   return NNACL_OK;
+ }
+ 
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/tensor_c_utils.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/tensor_c_utils.c
+index ca89cae4..5dc6b68c 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/tensor_c_utils.c
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/tensor_c_utils.c
+@@ -117,7 +117,7 @@ void SetDataTypeFormat(TensorC *dst, const TensorC *src) {
+   dst->data_type_ = src->data_type_;
+ }
+ 
+-int GetBatch(const TensorC *tensor) {
++int TensorCGetBatch(const TensorC *tensor) {
+   if (tensor->shape_size_ != DIMENSION_4D && tensor->shape_size_ != DIMENSION_2D) {
+     return -1;
+   }
+@@ -149,7 +149,7 @@ int GetBatch(const TensorC *tensor) {
+     return -1;
+   }
+ }
+-int GetHeight(const TensorC *tensor) {
++int TensorCGetHeight(const TensorC *tensor) {
+   if (tensor->shape_size_ != DIMENSION_4D && tensor->shape_size_ != DIMENSION_2D) {
+     return -1;
+   }
+@@ -177,7 +177,7 @@ int GetHeight(const TensorC *tensor) {
+     return -1;
+   }
+ }
+-int GetWidth(const TensorC *tensor) {
++int TensorCGetWidth(const TensorC *tensor) {
+   if (tensor->shape_size_ != DIMENSION_4D && tensor->shape_size_ != DIMENSION_2D) {
+     return -1;
+   }
+@@ -208,7 +208,7 @@ int GetWidth(const TensorC *tensor) {
+     return -1;
+   }
+ }
+-int GetChannel(const TensorC *tensor) {
++int TensorCGetChannel(const TensorC *tensor) {
+   if (tensor->shape_size_ != DIMENSION_4D && tensor->shape_size_ != DIMENSION_2D) {
+     return -1;
+   }
+@@ -241,7 +241,7 @@ int GetChannel(const TensorC *tensor) {
+   }
+ }
+ 
+-void SetBatch(TensorC *tensor, int batch) {
++void TensorCSetBatch(TensorC *tensor, int batch) {
+   if (tensor->shape_size_ != DIMENSION_4D && tensor->shape_size_ != DIMENSION_2D) {
+     return;
+   }
+@@ -278,7 +278,7 @@ void SetBatch(TensorC *tensor, int batch) {
+   }
+ }
+ 
+-void SetHeight(TensorC *tensor, int height) {
++void TensorCSetHeight(TensorC *tensor, int height) {
+   if (tensor->shape_size_ != DIMENSION_4D && tensor->shape_size_ != DIMENSION_2D) {
+     return;
+   }
+@@ -310,7 +310,7 @@ void SetHeight(TensorC *tensor, int height) {
+   }
+ }
+ 
+-void SetWidth(TensorC *tensor, int width) {
++void TensorCSetWidth(TensorC *tensor, int width) {
+   if (tensor->shape_size_ != DIMENSION_4D && tensor->shape_size_ != DIMENSION_2D) {
+     return;
+   }
+@@ -345,7 +345,7 @@ void SetWidth(TensorC *tensor, int width) {
+   }
+ }
+ 
+-void SetChannel(TensorC *tensor, int channel) {
++void TensorCSetChannel(TensorC *tensor, int channel) {
+   if (tensor->shape_size_ != DIMENSION_4D && tensor->shape_size_ != DIMENSION_2D) {
+     return;
+   }
+@@ -382,13 +382,13 @@ void SetChannel(TensorC *tensor, int channel) {
+   }
+ }
+ 
+-int GetSize(const TensorC *tensor) {
+-  int element_num = GetElementNum(tensor);
++int TensorCGetSize(const TensorC *tensor) {
++  int element_num = TensorCGetElementNum(tensor);
+   int data_type_size = (int)DataTypeCSize(tensor->data_type_);
+   return element_num * data_type_size;
+ }
+ 
+-int GetElementNum(const TensorC *tensor) {
++int TensorCGetElementNum(const TensorC *tensor) {
+   if (tensor == NULL) {
+     return -1;
+   }
+@@ -401,7 +401,7 @@ int GetElementNum(const TensorC *tensor) {
+     res = res * tensor->shape_[i];
+   }
+ 
+-  int c = GetChannel(tensor);
++  int c = TensorCGetChannel(tensor);
+   if (c == 0) {
+     return res;
+   }
+@@ -414,7 +414,7 @@ int GetElementNum(const TensorC *tensor) {
+   return res;
+ }
+ 
+-int GetDimensionSize(const TensorC *tensor, const size_t index) {
++int TensorCGetDimensionSize(const TensorC *tensor, const size_t index) {
+   int dim_size = -1;
+   if (index < tensor->shape_size_) {
+     dim_size = tensor->shape_[index];
+@@ -422,7 +422,7 @@ int GetDimensionSize(const TensorC *tensor, const size_t index) {
+   return dim_size;
+ }
+ 
+-bool IsShapeSame(const TensorC *tensor1, const TensorC *tensor2) {
++bool TensorCIsShapeSame(const TensorC *tensor1, const TensorC *tensor2) {
+   if (tensor1->shape_size_ != tensor2->shape_size_) {
+     return false;
+   }
+@@ -434,6 +434,6 @@ bool IsShapeSame(const TensorC *tensor1, const TensorC *tensor2) {
+   return true;
+ }
+ 
+-bool IsConst(const TensorC *tensor) {
++bool TensorCIsConst(const TensorC *tensor) {
+   return (tensor->category_ == ConstTensor || tensor->category_ == ConstScalar) && tensor->data_ != NULL;
+ }
+diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/tensor_c_utils.h b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/tensor_c_utils.h
+index f605f5c7..31b1a21d 100644
+--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/tensor_c_utils.h
++++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/tensor_c_utils.h
+@@ -26,19 +26,19 @@
+ extern "C" {
+ #endif
+ 
+-int GetBatch(const TensorC *tensor);
+-int GetHeight(const TensorC *tensor);
+-int GetWidth(const TensorC *tensor);
+-int GetChannel(const TensorC *tensor);
+-void SetBatch(TensorC *tensor, int batch);
+-void SetHeight(TensorC *tensor, int height);
+-void SetWidth(TensorC *tensor, int width);
+-void SetChannel(TensorC *tensor, int channel);
+-int GetElementNum(const TensorC *tensor);
+-int GetSize(const TensorC *tensor);
+-int GetDimensionSize(const TensorC *tensor, const size_t index);
+-bool IsShapeSame(const TensorC *tensor1, const TensorC *tensor2);
+-bool IsConst(const TensorC *tensor);
++int TensorCGetBatch(const TensorC *tensor);
++int TensorCGetHeight(const TensorC *tensor);
++int TensorCGetWidth(const TensorC *tensor);
++int TensorCGetChannel(const TensorC *tensor);
++void TensorCSetBatch(TensorC *tensor, int batch);
++void TensorCSetHeight(TensorC *tensor, int height);
++void TensorCSetWidth(TensorC *tensor, int width);
++void TensorCSetChannel(TensorC *tensor, int channel);
++int TensorCGetElementNum(const TensorC *tensor);
++int TensorCGetSize(const TensorC *tensor);
++int TensorCGetDimensionSize(const TensorC *tensor, const size_t index);
++bool TensorCIsShapeSame(const TensorC *tensor1, const TensorC *tensor2);
++bool TensorCIsConst(const TensorC *tensor);
+ 
+ #ifdef __cplusplus
+ }
+diff --git a/mindspore/lite/src/litert/kernel/cpu/fp32/where_fp32.cc b/mindspore/lite/src/litert/kernel/cpu/fp32/where_fp32.cc
+index a73fda7c..1d960fb0 100644
+--- a/mindspore/lite/src/litert/kernel/cpu/fp32/where_fp32.cc
++++ b/mindspore/lite/src/litert/kernel/cpu/fp32/where_fp32.cc
+@@ -162,10 +162,10 @@ int WhereCPUKernel::RunWithTripleInputs() {
+   CHECK_NULL_RETURN(y);
+   TensorC *output = out_tensors_.at(0)->ConvertToTensorC();
+   CHECK_NULL_RETURN(output);
+-  int condition_nums = GetElementNum(condition);
+-  int x_num = GetElementNum(x);
+-  int y_num = GetElementNum(y);
+-  int out_num = GetElementNum(output);
++  int condition_nums = TensorCGetElementNum(condition);
++  int x_num = TensorCGetElementNum(x);
++  int y_num = TensorCGetElementNum(y);
++  int out_num = TensorCGetElementNum(output);
+ 
+   condition_ = reinterpret_cast<bool *>(condition->data_);
+   CHECK_NULL_RETURN(condition_);
+@@ -186,14 +186,14 @@ int WhereCPUKernel::RunWithTripleInputs() {
+ 
+   if (((condition_nums != 1) && (condition_nums != num_max)) || ((x_num != 1) && (x_num != num_max)) ||
+       ((y_num != 1) && (y_num != num_max))) {
+-    if (condition_nums != GetElementNum(y)) {
++    if (condition_nums != TensorCGetElementNum(y)) {
+       int ret =
+         BroadcastForInput(condition, x, y, &condition_broadcast_buf, &x_broadcast_buf, &y_broadcast_buf, output);
+       if (ret != RET_OK) {
+         MS_LOG(ERROR) << "BroadcastForInput failed.";
+         return RET_ERROR;
+       }
+-      int max_num = GetElementNum(output);
++      int max_num = TensorCGetElementNum(output);
+       condition_ = reinterpret_cast<bool *>(condition_broadcast_buf);
+       x_ = x_broadcast_buf;
+       y_ = y_broadcast_buf;
+@@ -242,7 +242,7 @@ int WhereCPUKernel::Run() {
+ 
+ int WhereCPUKernel::BroadcastForInput(TensorC *condition, TensorC *x, TensorC *y, void **condition_broadcast_buf,
+                                       void **x_broadcast_buf, void **y_broadcast_buf, TensorC *output) {
+-  size_t broad_cast_buf_size = GetSize(output);
++  size_t broad_cast_buf_size = TensorCGetSize(output);
+   BroadcastShapeInfo condition_info;
+   condition_info.input_shape_size_ = condition->shape_size_;
+   condition_info.output_shape_size_ = output->shape_size_;
+diff --git a/mindspore/lite/src/litert/pass/format_pass/insert_transpose.cc b/mindspore/lite/src/litert/pass/format_pass/insert_transpose.cc
+index 2e4e8c01..52b27da3 100644
+--- a/mindspore/lite/src/litert/pass/format_pass/insert_transpose.cc
++++ b/mindspore/lite/src/litert/pass/format_pass/insert_transpose.cc
+@@ -83,7 +83,7 @@ int InsertTranspose::RunPass(kernel::SubGraphKernel *graph, std::vector<Tensor *
+-    if (kernel->in_tensors().at(index)->IsConst()) {
++    if (kernel->in_tensors().at(index)->TensorCIsConst()) {
+       TransposeConstData(kernel, index);
+       continue;
+     }
+diff --git a/mindspore/lite/src/litert/runtime_shape_fusion_pass.cc b/mindspore/lite/src/litert/runtime_shape_fusion_pass.cc
+index 1f343af8..d6385227 100644
+--- a/mindspore/lite/src/litert/runtime_shape_fusion_pass.cc
++++ b/mindspore/lite/src/litert/runtime_shape_fusion_pass.cc
+@@ -167,7 +167,7 @@ bool ShapeFusionPass::CheckArithmetic(const LiteGraph::Node *shape_fusion, const
+     post_node->input_indices_.at(0) == input_idx ? post_node->input_indices_.at(1) : post_node->input_indices_.at(0);
+   auto tensor = src_tensors_->at(input1_index);
+   MS_CHECK_TRUE_RET(tensor != nullptr, false);
+-  if (tensor->IsConst()) {
++  if (tensor->TensorCIsConst()) {
+     return true;
+   }
+   auto shape_fusion_outputs = shape_fusion->output_indices_;
+@@ -206,7 +206,7 @@ bool ShapeFusionPass::CheckCanFused(const LiteGraph::Node *shape_fusion, const L
+   bool is_supported =
+     std::all_of(post_node->input_indices_.begin(), post_node->input_indices_.end(), [&](uint32_t idx) {
+       auto tensor = src_tensors_->at(idx);
+-      return tensor->IsConst() ||
++      return tensor->TensorCIsConst() ||
+              std::find(shape_fusion_outputs.begin(), shape_fusion_outputs.end(), idx) != shape_fusion_outputs.end();
+     });
+   return is_supported;
+diff --git a/mindspore/lite/src/tensor.h b/mindspore/lite/src/tensor.h
+index 501e28e5..a8f09685 100644
+--- a/mindspore/lite/src/tensor.h
++++ b/mindspore/lite/src/tensor.h
+@@ -209,7 +209,7 @@ class MS_API Tensor {
+ 
+   void set_quant_clusters(const std::vector<float> &clusters);
+ 
+-  virtual bool IsConst() const { return ::IsConst(&tensor_c_); }
++  virtual bool IsConst() const { return ::TensorCIsConst(&tensor_c_); }
+ 
+   bool IsScalar() const { return this->tensor_c_.category_ == CONST_SCALAR && this->tensor_c_.data_ != nullptr; }
+ 
+diff --git a/mindspore/lite/tools/converter/micro/coder/opcoders/base/reshape_dynamic_base_coder.cc b/mindspore/lite/tools/converter/micro/coder/opcoders/base/reshape_dynamic_base_coder.cc
+index 108ba227..ef33f0ef 100644
+--- a/mindspore/lite/tools/converter/micro/coder/opcoders/base/reshape_dynamic_base_coder.cc
++++ b/mindspore/lite/tools/converter/micro/coder/opcoders/base/reshape_dynamic_base_coder.cc
+@@ -31,7 +31,7 @@ using mindspore::schema::PrimitiveType_Unsqueeze;
+ namespace mindspore::lite::micro {
+ int ReshapeDynamicBaseCoder::Prepare(CoderContext *const context) {
+   if (input_tensors_.size() == C2NUM) {
+-    MS_CHECK_TRUE_MSG(input_tensors_[SECOND_INPUT]->IsConst(), RET_NOT_SUPPORT,
++    MS_CHECK_TRUE_MSG(input_tensors_[SECOND_INPUT]->TensorCIsConst(), RET_NOT_SUPPORT,
+                       "Currently, only support the first input of reshape is non-const when shape is dynamical.");
+ 
+     MS_CHECK_TRUE_MSG(input_tensors_[SECOND_INPUT]->data_type() == kNumberTypeInt32 ||
+diff --git a/mindspore/lite/tools/converter/micro/coder/opcoders/nnacl/fp16/lstm_mindir_dynamic_fp16_coder.cc b/mindspore/lite/tools/converter/micro/coder/opcoders/nnacl/fp16/lstm_mindir_dynamic_fp16_coder.cc
+index 8c4cc31b..e1cace9a 100644
+--- a/mindspore/lite/tools/converter/micro/coder/opcoders/nnacl/fp16/lstm_mindir_dynamic_fp16_coder.cc
++++ b/mindspore/lite/tools/converter/micro/coder/opcoders/nnacl/fp16/lstm_mindir_dynamic_fp16_coder.cc
+@@ -38,7 +38,7 @@ int LstmMindirDynamicFP16Coder::Prepare(CoderContext *const context) {
+     MS_CHECK_TRUE_MSG(in->shape().size() == C3NUM, RET_INPUT_TENSOR_ERROR,
+                       "LstmMindirDynamicFP16Coder input must be 3D.");
+   }
+-  MS_CHECK_TRUE_MSG(input_tensors_[FOURTH_INPUT]->IsConst(), RET_INPUT_TENSOR_ERROR,
++  MS_CHECK_TRUE_MSG(input_tensors_[FOURTH_INPUT]->TensorCIsConst(), RET_INPUT_TENSOR_ERROR,
+                     "LstmMindirDynamicFP16Coder last three inputs must be all constant.");
all constant."); + lstm_param_ = reinterpret_cast(parameter_); + return InitParam(); +diff --git a/mindspore/lite/tools/converter/micro/coder/opcoders/nnacl/fp16/matmul_dynamic_fp16_coder.cc b/mindspore/lite/tools/converter/micro/coder/opcoders/nnacl/fp16/matmul_dynamic_fp16_coder.cc +index 24cf7120..a78a46e8 100644 +--- a/mindspore/lite/tools/converter/micro/coder/opcoders/nnacl/fp16/matmul_dynamic_fp16_coder.cc ++++ b/mindspore/lite/tools/converter/micro/coder/opcoders/nnacl/fp16/matmul_dynamic_fp16_coder.cc +@@ -72,10 +72,10 @@ int MatMulDynamicFP16Coder::Prepare(CoderContext *const context) { + "Input tensor data type is invalid."); + MS_CHECK_TRUE_MSG(input_tensors_.size() == C2NUM || input_tensors_.size() == C3NUM, RET_INPUT_PARAM_INVALID, + "MatMul's input-num must be 2 or 3."); +- MS_CHECK_TRUE_MSG(input_tensors_[SECOND_INPUT]->IsConst(), RET_NOT_SUPPORT, ++ MS_CHECK_TRUE_MSG(input_tensors_[SECOND_INPUT]->TensorCIsConst(), RET_NOT_SUPPORT, + "Currently, only support the first input of matmul is non-const when shape is dynamical."); + if (input_tensors_.size() == C3NUM) { +- MS_CHECK_TRUE_MSG(input_tensors_[THIRD_INPUT]->IsConst(), RET_NOT_SUPPORT, ++ MS_CHECK_TRUE_MSG(input_tensors_[THIRD_INPUT]->TensorCIsConst(), RET_NOT_SUPPORT, + "Currently, only support the first input of matmul is non-const when shape is dynamical."); + } + params_ = reinterpret_cast(parameter_); +diff --git a/mindspore/lite/tools/converter/micro/coder/opcoders/nnacl/fp16/slice_dynamic_fp16_coder.cc b/mindspore/lite/tools/converter/micro/coder/opcoders/nnacl/fp16/slice_dynamic_fp16_coder.cc +index 1c6969b2..09d80a21 100644 +--- a/mindspore/lite/tools/converter/micro/coder/opcoders/nnacl/fp16/slice_dynamic_fp16_coder.cc ++++ b/mindspore/lite/tools/converter/micro/coder/opcoders/nnacl/fp16/slice_dynamic_fp16_coder.cc +@@ -31,7 +31,7 @@ int SliceDynamicFP16Coder::Prepare(CoderContext *const context) { + CHECK_NULL_RETURN(output_tensor_); + param_ = reinterpret_cast(parameter_); + CHECK_NULL_RETURN(param_); +- MS_CHECK_TRUE_MSG(input_tensors_[SECOND_INPUT]->IsConst() && input_tensors_[THIRD_INPUT]->IsConst(), RET_NOT_SUPPORT, ++ MS_CHECK_TRUE_MSG(input_tensors_[SECOND_INPUT]->TensorCIsConst() && input_tensors_[THIRD_INPUT]->TensorCIsConst(), RET_NOT_SUPPORT, + "The second and third input of slice is non-const."); + MS_CHECK_TRUE_MSG(input_tensors_[SECOND_INPUT]->data_type() == kNumberTypeInt32 && + input_tensors_[THIRD_INPUT]->data_type() == kNumberTypeInt32, +diff --git a/mindspore/lite/tools/converter/micro/coder/opcoders/nnacl/fp16/transpose_dynamic_fp16_coder.cc b/mindspore/lite/tools/converter/micro/coder/opcoders/nnacl/fp16/transpose_dynamic_fp16_coder.cc +index 59c8d8b8..15a9ea2c 100644 +--- a/mindspore/lite/tools/converter/micro/coder/opcoders/nnacl/fp16/transpose_dynamic_fp16_coder.cc ++++ b/mindspore/lite/tools/converter/micro/coder/opcoders/nnacl/fp16/transpose_dynamic_fp16_coder.cc +@@ -30,7 +30,7 @@ int TransposeDynamicFp16Coder::Prepare(CoderContext *const context) { + MS_CHECK_TRUE_MSG( + output_tensor_->data_type() == kNumberTypeInt32 || output_tensor_->data_type() == kNumberTypeFloat16, + RET_INPUT_PARAM_INVALID, "Output tensor data type is invalid."); +- MS_CHECK_TRUE_MSG(input_tensors_[SECOND_INPUT]->IsConst(), RET_NOT_SUPPORT, ++ MS_CHECK_TRUE_MSG(input_tensors_[SECOND_INPUT]->TensorCIsConst(), RET_NOT_SUPPORT, + "The second input of transpose is non-const."); + thread_num_ = 1; + MS_CHECK_RET_CODE(Init(), "init failed"); +diff --git 
+index 57d7a5dd..47eacd43 100644
+--- a/mindspore/lite/tools/converter/micro/coder/opcoders/nnacl/fp32/gather_dynamic_fp32_coder.cc
++++ b/mindspore/lite/tools/converter/micro/coder/opcoders/nnacl/fp32/gather_dynamic_fp32_coder.cc
+@@ -27,7 +27,7 @@ using mindspore::schema::PrimitiveType_Gather;
+ namespace mindspore::lite::micro::nnacl {
+ int GatherDynamicFP32Coder::Prepare(CoderContext *const context) {
+   MS_CHECK_TRUE_MSG(input_tensors_.size() == C3NUM, RET_ERROR, "Gather's input-num must be 3.");
+-  MS_CHECK_TRUE_MSG(input_tensors_[FIRST_INPUT]->IsConst() && input_tensors_[THIRD_INPUT]->IsConst(), RET_NOT_SUPPORT,
++  MS_CHECK_TRUE_MSG(input_tensors_[FIRST_INPUT]->TensorCIsConst() && input_tensors_[THIRD_INPUT]->TensorCIsConst(), RET_NOT_SUPPORT,
+                     "Currently, only support the second input of gather is non-const when shape is dynamical.");
+   MS_CHECK_TRUE_MSG(input_tensors_[THIRD_INPUT]->data_type() == kNumberTypeInt32 ||
+                       input_tensors_[THIRD_INPUT]->data_type() == kNumberTypeInt,
+diff --git a/mindspore/lite/tools/converter/micro/coder/opcoders/nnacl/fp32/transpose_dynamic_fp32_coder.cc b/mindspore/lite/tools/converter/micro/coder/opcoders/nnacl/fp32/transpose_dynamic_fp32_coder.cc
+index 7fb160d5..4d3059ee 100644
+--- a/mindspore/lite/tools/converter/micro/coder/opcoders/nnacl/fp32/transpose_dynamic_fp32_coder.cc
++++ b/mindspore/lite/tools/converter/micro/coder/opcoders/nnacl/fp32/transpose_dynamic_fp32_coder.cc
+@@ -32,7 +32,7 @@ int TransposeDynamicFp32Coder::Prepare(CoderContext *const context) {
+   MS_CHECK_TRUE_MSG(
+     output_tensor_->data_type() == kNumberTypeInt32 || output_tensor_->data_type() == kNumberTypeFloat32,
+     RET_INPUT_PARAM_INVALID, "Output tensor data type is invalid.");
+-  MS_CHECK_TRUE_MSG(input_tensors_[SECOND_INPUT]->IsConst(), RET_NOT_SUPPORT,
++  MS_CHECK_TRUE_MSG(input_tensors_[SECOND_INPUT]->TensorCIsConst(), RET_NOT_SUPPORT,
+                     "The second input of transpose is non-const.");
+   thread_num_ = 1;
+   MS_CHECK_RET_CODE(Init(), "init failed");
+-- 
+2.34.1
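
For reference, a minimal sketch of what a call site looks like once this patch is applied. The helper function below is hypothetical and not part of the patch; the include paths are assumed from the nnacl source layout, while the TensorC function signatures are taken directly from the tensor_c_utils.h hunk above.

#include "nnacl/tensor_c.h"        /* TensorC definition (path assumed) */
#include "nnacl/tensor_c_utils.h"  /* TensorC-prefixed helpers declared in the hunk above */

/* Hypothetical helper: compute the byte size of a tensor's payload using
 * the renamed accessors. Before this patch the same code would have called
 * GetElementNum()/GetSize(); after it, the names carry a TensorC prefix so
 * they no longer collide with C++ members such as Tensor::IsConst(). */
static int TensorPayloadBytes(const TensorC *t) {
  int elems = TensorCGetElementNum(t); /* returns -1 for a NULL tensor */
  if (elems < 0) {
    return -1;
  }
  return TensorCGetSize(t); /* element count times the data-type size */
}

The rename is purely mechanical at C call sites: only the free functions in tensor_c_utils change their spelling, and C++ wrappers like Tensor::IsConst() keep their public names while delegating to the prefixed C functions.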