From b6e8a21cd33955ecbe7d979637f11e5c891bc2f8 Mon Sep 17 00:00:00 2001 From: gupengcheng0401 Date: Wed, 10 Sep 2025 12:45:10 +0800 Subject: [PATCH] remove redundant micro coders Remove the redundant CMSIS-NN int8 opcoders, the custom opcoder, the nnacl fp32_grad opcoders, the micro train session and generator, and the NNIE micro provider, along with their build entries. --- .../micro/cmake/cortex-m/CMakeLists.txt | 41 --- .../converter/micro/cmake/file_list.cmake | 11 - .../opcoders/cmsis-nn/int8/add_int8_coder.cc | 128 ---------- .../opcoders/cmsis-nn/int8/add_int8_coder.h | 54 ---- .../cmsis-nn/int8/conv2d_base_coder.cc | 60 ----- .../cmsis-nn/int8/conv2d_base_coder.h | 44 ---- .../cmsis-nn/int8/conv2d_int8_coder.cc | 188 -------------- .../cmsis-nn/int8/conv2d_int8_coder.h | 70 ------ .../cmsis-nn/int8/dwconv_int8_coder.cc | 163 ------------ .../cmsis-nn/int8/dwconv_int8_coder.h | 78 ------ .../int8/fullconnection_int8_coder.cc | 80 ------ .../cmsis-nn/int8/fullconnection_int8_coder.h | 54 ---- .../opcoders/cmsis-nn/int8/mul_int8_coder.cc | 78 ------ .../opcoders/cmsis-nn/int8/mul_int8_coder.h | 48 ---- .../cmsis-nn/int8/pooling_int8_coder.cc | 109 -------- .../cmsis-nn/int8/pooling_int8_coder.h | 60 ----- .../cmsis-nn/int8/reshape_int8_coder.cc | 49 ---- .../cmsis-nn/int8/reshape_int8_coder.h | 36 --- .../cmsis-nn/int8/softmax_int8_coder.cc | 84 ------- .../cmsis-nn/int8/softmax_int8_coder.h | 46 ---- .../coder/opcoders/custom/custom_coder.cc | 170 ------------- .../coder/opcoders/custom/custom_coder.h | 49 ---- .../nnacl/fp32_grad/activation_grad_coder.cc | 59 ----- .../nnacl/fp32_grad/activation_grad_coder.h | 38 --- .../opcoders/nnacl/fp32_grad/adam_coder.cc | 69 ----- .../opcoders/nnacl/fp32_grad/adam_coder.h | 38 --- .../opcoders/nnacl/fp32_grad/assign_coder.cc | 48 ---- .../opcoders/nnacl/fp32_grad/assign_coder.h | 37 --- .../nnacl/fp32_grad/biasadd_grad_coder.cc | 68 ----- .../nnacl/fp32_grad/biasadd_grad_coder.h | 37 --- ...softmax_cross_entropy_with_logits_coder.cc | 82 ------ .../softmax_cross_entropy_with_logits_coder.h | 46 ---- .../micro/coder/train/train_generator.cc | 89 ------- .../micro/coder/train/train_generator.h | 43 ---- .../micro/coder/train/train_session.cc | 143 ----------- .../micro/coder/train/train_session.h | 48 ---- .../micro/providers/nnie/CMakeLists.txt | 35 --- .../micro/providers/nnie/nnie_interfaces.cc | 237 ------------------ .../micro/providers/nnie/nnie_interfaces.h | 49 ---- .../micro/providers/nnie/nnie_micro.cc | 220 ---------------- .../micro/providers/nnie/nnie_micro.h | 33 --- 41 files changed, 3119 deletions(-) delete mode 100644 mindspore-lite/tools/converter/micro/cmake/cortex-m/CMakeLists.txt delete mode 100644 mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/add_int8_coder.cc delete mode 100644 mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/add_int8_coder.h delete mode 100644 mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/conv2d_base_coder.cc delete mode 100644 mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/conv2d_base_coder.h delete mode 100644 mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/conv2d_int8_coder.cc delete mode 100644 mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/conv2d_int8_coder.h delete mode 100644 mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/dwconv_int8_coder.cc delete mode 100644 mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/dwconv_int8_coder.h delete mode 100644 mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/fullconnection_int8_coder.cc delete mode 100644 
mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/fullconnection_int8_coder.h delete mode 100644 mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/mul_int8_coder.cc delete mode 100644 mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/mul_int8_coder.h delete mode 100644 mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/pooling_int8_coder.cc delete mode 100644 mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/pooling_int8_coder.h delete mode 100644 mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/reshape_int8_coder.cc delete mode 100644 mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/reshape_int8_coder.h delete mode 100644 mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/softmax_int8_coder.cc delete mode 100644 mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/softmax_int8_coder.h delete mode 100644 mindspore-lite/tools/converter/micro/coder/opcoders/custom/custom_coder.cc delete mode 100644 mindspore-lite/tools/converter/micro/coder/opcoders/custom/custom_coder.h delete mode 100644 mindspore-lite/tools/converter/micro/coder/opcoders/nnacl/fp32_grad/activation_grad_coder.cc delete mode 100644 mindspore-lite/tools/converter/micro/coder/opcoders/nnacl/fp32_grad/activation_grad_coder.h delete mode 100644 mindspore-lite/tools/converter/micro/coder/opcoders/nnacl/fp32_grad/adam_coder.cc delete mode 100644 mindspore-lite/tools/converter/micro/coder/opcoders/nnacl/fp32_grad/adam_coder.h delete mode 100644 mindspore-lite/tools/converter/micro/coder/opcoders/nnacl/fp32_grad/assign_coder.cc delete mode 100644 mindspore-lite/tools/converter/micro/coder/opcoders/nnacl/fp32_grad/assign_coder.h delete mode 100644 mindspore-lite/tools/converter/micro/coder/opcoders/nnacl/fp32_grad/biasadd_grad_coder.cc delete mode 100644 mindspore-lite/tools/converter/micro/coder/opcoders/nnacl/fp32_grad/biasadd_grad_coder.h delete mode 100644 mindspore-lite/tools/converter/micro/coder/opcoders/nnacl/fp32_grad/softmax_cross_entropy_with_logits_coder.cc delete mode 100644 mindspore-lite/tools/converter/micro/coder/opcoders/nnacl/fp32_grad/softmax_cross_entropy_with_logits_coder.h delete mode 100644 mindspore-lite/tools/converter/micro/coder/train/train_generator.cc delete mode 100644 mindspore-lite/tools/converter/micro/coder/train/train_generator.h delete mode 100644 mindspore-lite/tools/converter/micro/coder/train/train_session.cc delete mode 100644 mindspore-lite/tools/converter/micro/coder/train/train_session.h delete mode 100644 mindspore-lite/tools/converter/micro/providers/nnie/CMakeLists.txt delete mode 100644 mindspore-lite/tools/converter/micro/providers/nnie/nnie_interfaces.cc delete mode 100644 mindspore-lite/tools/converter/micro/providers/nnie/nnie_interfaces.h delete mode 100644 mindspore-lite/tools/converter/micro/providers/nnie/nnie_micro.cc delete mode 100644 mindspore-lite/tools/converter/micro/providers/nnie/nnie_micro.h diff --git a/mindspore-lite/tools/converter/micro/cmake/cortex-m/CMakeLists.txt b/mindspore-lite/tools/converter/micro/cmake/cortex-m/CMakeLists.txt deleted file mode 100644 index 07dbb8f6..00000000 --- a/mindspore-lite/tools/converter/micro/cmake/cortex-m/CMakeLists.txt +++ /dev/null @@ -1,41 +0,0 @@ -cmake_minimum_required(VERSION 3.12) -project(Micro) -set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fstack-protector-strong -Wno-strict-aliasing") - -set(CMAKE_VERBOSE_MAKEFILE on) -set(MICRO_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../..) 
-include_directories(${NNACL_DIR}/..) - -include(${TOP_DIR}/cmake/utils.cmake) -include(${TOP_DIR}/cmake/external_libs/cmsis.cmake) - -set(CMSIS_DIR ${CMAKE_BINARY_DIR}/cmsis) -message("build cmsis kernels") -include_directories(${CMSIS_DIR}/CMSIS/Core/Include) -include_directories(${CMSIS_DIR}/CMSIS/DSP/Include) -include_directories(${CMSIS_DIR}/CMSIS/NN/Include) - -file(GLOB CMSIS_OPS - ${CMSIS_DIR}/CMSIS/NN/Source/BasicMathFunctions/*.c - ${CMSIS_DIR}/CMSIS/NN/Source/ActivationFunctions/*.c - ${CMSIS_DIR}/CMSIS/NN/Source/ConcatenationFunctions/*.c - ${CMSIS_DIR}/CMSIS/NN/Source/ConvolutionFunctions/*.c - ${CMSIS_DIR}/CMSIS/NN/Source/FullyConnectedFunctions/*.c - ${CMSIS_DIR}/CMSIS/NN/Source/NNSupportFunctions/*.c - ${CMSIS_DIR}/CMSIS/NN/Source/PoolingFunctions/*.c - ${CMSIS_DIR}/CMSIS/NN/Source/ReshapeFunctions/*.c - ${CMSIS_DIR}/CMSIS/NN/Source/SoftmaxFunctions/*.c - ) - -add_library(cmsis_nn STATIC ${CMSIS_OPS}) - -include_directories(${MICRO_DIR}/coder/) -set(WRAPPER_DIR ${MICRO_DIR}/coder/wrapper/) -file(GLOB WRAPPER_SRC - ${WRAPPER_DIR}/base/*.c - ${WRAPPER_DIR}/fp32/*.c - ${WRAPPER_DIR}/int8/*.c - ) - -# generate static library -add_library(wrapper STATIC ${WRAPPER_SRC}) diff --git a/mindspore-lite/tools/converter/micro/cmake/file_list.cmake b/mindspore-lite/tools/converter/micro/cmake/file_list.cmake index b2f15bdd..7eb06a1a 100644 --- a/mindspore-lite/tools/converter/micro/cmake/file_list.cmake +++ b/mindspore-lite/tools/converter/micro/cmake/file_list.cmake @@ -12,11 +12,6 @@ set(CODER_SRC ${MICRO_DIR}/coder/utils/type_cast.cc ) -set(CODER_SRC ${CODER_SRC} - ${MICRO_DIR}/coder/train/train_session.cc - ${MICRO_DIR}/coder/train/train_generator.cc - ) - set(CODER_ALLOCATOR_SRC ${MICRO_DIR}/coder/allocator/allocator.cc ${MICRO_DIR}/coder/allocator/memory_manager.cc @@ -36,20 +31,14 @@ file(GLOB CODER_OPCODERS_SRC ${MICRO_DIR}/coder/opcoders/serializers/nnacl_serializer/*.cc #### base coder ${MICRO_DIR}/coder/opcoders/base/*.cc - #### cmsis int8 coder - ${MICRO_DIR}/coder/opcoders/cmsis-nn/int8/*.cc #### nnacl fp16 coder ${MICRO_DIR}/coder/opcoders/nnacl/fp16/*.cc #### nnacl fp32 coder ${MICRO_DIR}/coder/opcoders/nnacl/fp32/*.cc - #### nnacl fp32_grad coder - ${MICRO_DIR}/coder/opcoders/nnacl/fp32_grad/*.cc #### nnacl int8 coder ${MICRO_DIR}/coder/opcoders/nnacl/int8/*.cc #### nnacl dequant coder ${MICRO_DIR}/coder/opcoders/nnacl/dequant/*.cc - #### custom - ${MICRO_DIR}/coder/opcoders/custom/*.cc ) set(REGISTRY_SRC diff --git a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/add_int8_coder.cc b/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/add_int8_coder.cc deleted file mode 100644 index 31fbde7a..00000000 --- a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/add_int8_coder.cc +++ /dev/null @@ -1,128 +0,0 @@ -/** - * Copyright 2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "coder/opcoders/cmsis-nn/int8/add_int8_coder.h" -#include -#include -#include "coder/log.h" -#include "coder/opcoders/file_collector.h" -#include "coder/opcoders/nnacl/int8/add_int8_coder.h" -#include "coder/opcoders/serializers/serializer.h" -#include "coder/utils/common.h" -#include "mindspore/ops/op_def/array_ops.h" -#include "nnacl/arithmetic_parameter.h" -#include "nnacl/int8/quantize.h" - -using mindspore::schema::PrimitiveType_AddFusion; - -namespace mindspore::lite::micro::cmsis { -int AddInt8Coder::Prepare(CoderContext *const context) { - MS_CHECK_GE(input_tensors_.size(), 2, RET_ERROR); - input1_ = input_tensors_.at(0); - input2 = input_tensors_.at(1); - - MS_CHECK_PTR(input1_); - MS_CHECK_PTR(input2); - - MS_CHECK_TRUE(!input1_->quant_params().empty(), "input1_ quant_params is empty"); - MS_CHECK_TRUE(!input2->quant_params().empty(), "input2_ quant_params is empty"); - MS_CHECK_TRUE(!output_tensor_->quant_params().empty(), "output quant_params is empty"); - - input_1_offset_ = -input1_->quant_params().at(0).zeroPoint; - input_2_offset_ = -input2->quant_params().at(0).zeroPoint; - out_offset_ = output_tensor_->quant_params().at(0).zeroPoint; - const double input1_scale = input1_->quant_params().at(0).scale; - const double input2_scale = input2->quant_params().at(0).scale; - const double output_scale = output_tensor_->quant_params().at(0).scale; - - const double twice_max_input_scale = 2 * std::max(input1_scale, input2_scale); - MS_CHECK_TRUE(twice_max_input_scale > 0, "twice_max_input_scale should larger than 0."); - MS_CHECK_TRUE(output_scale > 0, "output_scale should larger than 0."); - const double real_input1_multiplier = static_cast(input1_scale) / twice_max_input_scale; - const double real_input2_multiplier = static_cast(input2_scale) / twice_max_input_scale; - const double real_output_multiplier = - twice_max_input_scale / ((1 << static_cast(kLeftShift)) * static_cast(output_scale)); - - MS_CHECK_TRUE((real_input1_multiplier >= 0) && (real_input1_multiplier <= 1), - "real_input1_multiplier should be in (0, 1)"); - QuantizeMultiplier(real_input1_multiplier, &input_1_mult_, &input_1_shift_); - MS_CHECK_TRUE((real_input2_multiplier >= 0) && (real_input2_multiplier <= 1), - "real_input2_multiplier should be in (0, 1)"); - QuantizeMultiplier(real_input2_multiplier, &input_2_mult_, &input_2_shift_); - MS_CHECK_TRUE((real_output_multiplier >= 0) && (real_output_multiplier <= 1), - "real_output_multiplier should be in (0, 1)"); - QuantizeMultiplier(real_output_multiplier, &out_mult_, &out_shift_); - - out_activation_min_ = std::numeric_limits::min(); - out_activation_max_ = std::numeric_limits::max(); - - MS_CHECK_TRUE(input1_->ElementsNum() == input2->ElementsNum(), "tensor length not match"); - - block_size_ = input1_->ElementsNum(); - - return RET_OK; -} - -int AddInt8Coder::DoCode(CoderContext *const context) { - Serializer code; - code.precision(kPrecision); - - Collect(context, - { - "CMSIS/NN/Include/arm_nnfunctions.h", - }, - { - "arm_elementwise_add_s8.c", - }); - - code.CodeFunction("arm_elementwise_add_s8", input1_, input2, input_1_offset_, input_1_mult_, input_1_shift_, - input_2_offset_, input_2_mult_, input_2_shift_, kLeftShift, output_tensor_, out_offset_, out_mult_, - out_shift_, out_activation_min_, out_activation_max_, block_size_); - - MS_LOG(INFO) << "AddInt8Coder has been called"; - context->AppendCode(code.str()); - return RET_OK; -} - -std::unique_ptr AddFusionInt8CoderCreator(const std::vector &in_tensors, - const std::vector &out_tensors, 
- const LiteGraph::Node *node, size_t node_index, Target target, - int schema_version) { - if (node == nullptr) { - MS_LOG(ERROR) << "node is null"; - return nullptr; - } - if (in_tensors.size() != kTwo) { - MS_LOG(ERROR) << "in_tensors size error."; - return nullptr; - } - std::unique_ptr coder; - if (in_tensors[0]->ElementsNum() == in_tensors[1]->ElementsNum()) { - coder = std::make_unique(in_tensors, out_tensors, node, node_index, target); - } else { - coder = - std::make_unique(in_tensors, out_tensors, node, node_index, target); - } - if (coder == nullptr) { - return nullptr; - } - - coder->SetSchemaVersion(schema_version); - return coder; -} - -REG_OPERATOR_CODER(kCortex_M, kNumberTypeInt8, PrimitiveType_AddFusion, AddFusionInt8CoderCreator) -} // namespace mindspore::lite::micro::cmsis diff --git a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/add_int8_coder.h b/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/add_int8_coder.h deleted file mode 100644 index 13e5a1c2..00000000 --- a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/add_int8_coder.h +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_CMSIS_NN_INT8_ADD_INT8_CODER_H_ -#define MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_CMSIS_NN_INT8_ADD_INT8_CODER_H_ - -#include -#include "coder/opcoders/op_coder.h" - -namespace mindspore::lite::micro::cmsis { -class AddInt8Coder final : public OperatorCoder { - public: - AddInt8Coder(const std::vector &in_tensors, const std::vector &out_tensors, - const LiteGraph::Node *node, size_t node_index, Target target) - : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {} - - ~AddInt8Coder() override = default; - int Prepare(CoderContext *const context) override; - - int DoCode(CoderContext *const context) override; - - private: - Tensor *input1_{nullptr}; - Tensor *input2{nullptr}; - - int32_t input_1_offset_{0}; - int32_t input_1_mult_{0}; - int32_t input_1_shift_{0}; - int32_t input_2_offset_{0}; - int32_t input_2_mult_{0}; - int32_t input_2_shift_{0}; - int32_t left_shift_{0}; - int32_t out_offset_{0}; - int32_t out_mult_{0}; - int32_t out_shift_{0}; - int32_t out_activation_min_{0}; - int32_t out_activation_max_{0}; - uint32_t block_size_{0}; -}; -} // namespace mindspore::lite::micro::cmsis -#endif // MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_CMSIS_NN_INT8_ADD_INT8_CODER_H_ diff --git a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/conv2d_base_coder.cc b/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/conv2d_base_coder.cc deleted file mode 100644 index 431e7842..00000000 --- a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/conv2d_base_coder.cc +++ /dev/null @@ -1,60 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the 
"License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "coder/opcoders/cmsis-nn/int8/conv2d_base_coder.h" -#include "nnacl/int8/quantize.h" - -namespace mindspore::lite::micro::cmsis { -int Conv2DBaseCoder::SetQuantArgs() { - int channel = output_tensor_->Channel(); - size_t channel_data_size = static_cast(channel) * sizeof(int32_t); - output_mult_ = reinterpret_cast(malloc(channel_data_size)); - MS_CHECK_PTR(output_mult_); - output_shift_ = reinterpret_cast(malloc(channel_data_size)); - MS_CHECK_PTR(output_shift_); - - const ::QuantArg *filter_quant_args = conv_quant_arg_->filter_quant_args_; - auto input_scale = static_cast(conv_quant_arg_->input_quant_args_[0].scale_); - auto output_scale = static_cast(conv_quant_arg_->output_quant_args_[0].scale_); - int32_t significand; - int channel_shift; - if (conv_quant_arg_->filter_arg_num_ > 1) { - for (int i = 0; i < channel; ++i) { - // If per-tensor quantization parameter is specified, broadcast it along the - // quantization dimension (channels_out). - MS_CHECK_TRUE(conv_quant_arg_->filter_arg_num_ == static_cast(channel), "quant num not match"); - const auto filter_scale = static_cast(filter_quant_args[i].scale_); - MS_CHECK_TRUE(output_scale > 0, "output_scale should larger than 0."); - const double effective_output_scale = input_scale * filter_scale / output_scale; - QuantizeMultiplier(effective_output_scale, &significand, &channel_shift); - output_mult_[i] = significand; - output_shift_[i] = channel_shift; - } - } else { - // broadcast multiplier and shift to all array if per-tensor - const auto filter_scale = static_cast(filter_quant_args[0].scale_); - MS_CHECK_TRUE(output_scale > 0, "output_scale should larger than 0."); - const double effective_output_scale = input_scale * filter_scale / output_scale; - QuantizeMultiplier(effective_output_scale, &significand, &channel_shift); - for (int i = 0; i < channel; ++i) { - output_mult_[i] = significand; - output_shift_[i] = channel_shift; - } - } - - return RET_OK; -} -} // namespace mindspore::lite::micro::cmsis diff --git a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/conv2d_base_coder.h b/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/conv2d_base_coder.h deleted file mode 100644 index 7fb49373..00000000 --- a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/conv2d_base_coder.h +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_CMSIS_NN_INT8_CONV2D_BASE_CODER_H_ -#define MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_CMSIS_NN_INT8_CONV2D_BASE_CODER_H_ - -#include -#include -#include "coder/opcoders/base/conv2d_base_coder.h" -#include "nnacl/conv_parameter.h" - -namespace mindspore::lite::micro::cmsis { -class Conv2DBaseCoder : public micro::Conv2DBaseCoder { - public: - explicit Conv2DBaseCoder(const std::vector &in_tensors, const std::vector &out_tensors, - const LiteGraph::Node *node, size_t node_index, Target target) - : micro::Conv2DBaseCoder(in_tensors, out_tensors, node, node_index, target) {} - - ~Conv2DBaseCoder() override { - free(output_mult_); - free(output_shift_); - } - - protected: - int SetQuantArgs(); - - int32_t *output_mult_{nullptr}; - int32_t *output_shift_{nullptr}; -}; -} // namespace mindspore::lite::micro::cmsis -#endif // MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_CMSIS_NN_INT8_CONV2D_BASE_CODER_H_ diff --git a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/conv2d_int8_coder.cc b/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/conv2d_int8_coder.cc deleted file mode 100644 index 66bd94e2..00000000 --- a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/conv2d_int8_coder.cc +++ /dev/null @@ -1,188 +0,0 @@ -/** - * Copyright 2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "coder/opcoders/cmsis-nn/int8/conv2d_int8_coder.h" -#include -#include -#include -#include "coder/opcoders/serializers/serializer.h" -#include "coder/opcoders/file_collector.h" -#include "coder/utils/common.h" -#include "src/common/prim_util.h" - -using mindspore::schema::PrimitiveType_Conv2DFusion; - -namespace mindspore::lite::micro::cmsis { -int Conv2DInt8Coder::Prepare(CoderContext *const context) { - Conv2DBaseCoder::Init(); - MS_CHECK_RET_CODE(micro::Conv2DBaseCoder::CheckLayout(input_tensor_), "CheckLayout failed"); - MS_CHECK_RET_CODE(micro::Conv2DBaseCoder::SetQuantParam(), "SetQuantParam failed"); - MS_CHECK_RET_CODE(Conv2DBaseCoder::SetQuantArgs(), "SetQuantArgs failed"); - MS_CHECK_RET_CODE(SetParameters(), "SetParameters failed"); - CheckSupportOptimize(); - MS_CHECK_RET_CODE(InitTmpBuffer(), "InitTmpBuffer failed"); - return RET_OK; -} - -int Conv2DInt8Coder::DoCode(CoderContext *const context) { - Serializer code; - code.precision(kPrecision); - Collect(context, - { - "CMSIS/NN/Include/arm_nnfunctions.h", - }, - {}); - if (opt_ != Convolve_1x1_fast) { - code.CodeFunction("memset", buffer_, 0, buffer_size_); - } - code.CodeArray("output_shift", output_shift_, output_ch_); - code.CodeArray("output_mult", output_mult_, output_ch_); - switch (opt_) { - case Basic: - Collect(context, {}, - { - "arm_convolve_s8.c", - "arm_nn_mat_mult_kernel_s8_s16.c", - "arm_q7_to_q15_with_offset.c", - }); - code.CodeFunction("arm_convolve_s8", input_tensor_, input_x_, input_y_, input_ch_, input_batches_, filter_tensor_, - output_ch_, kernel_x_, kernel_y_, pad_x_, pad_y_, stride_x_, stride_y_, bias_tensor_, - output_tensor_, "output_shift", "output_mult", out_offset_, input_offset_, out_activation_min_, - out_activation_max_, output_x_, output_y_, buffer_); - break; - case Convolve_1_x_n: - Collect(context, {}, - { - "arm_convolve_1_x_n_s8.c", - "arm_nn_mat_mul_core_1x_s8.c", - }); - code.CodeFunction("arm_convolve_1_x_n_s8", input_tensor_, input_x_, input_ch_, input_batches_, filter_tensor_, - output_ch_, kernel_x_, pad_x_, stride_x_, bias_tensor_, output_tensor_, "output_shift", - "output_mult", out_offset_, input_offset_, out_activation_min_, out_activation_max_, output_x_, - buffer_); - break; - case Convolve_1x1_fast: - Collect(context, {}, - { - "arm_convolve_1x1_s8_fast.c", - "arm_nn_mat_mult_nt_t_s8.c", - "arm_nn_mat_mul_core_4x_s8.c", - "arm_nn_mat_mul_core_1x_s8.c", - }); - code.CodeFunction("arm_convolve_1x1_s8_fast", input_tensor_, input_x_, input_y_, input_ch_, input_batches_, - filter_tensor_, output_ch_, pad_x_, pad_y_, stride_x_, stride_y_, bias_tensor_, output_tensor_, - "output_shift", "output_mult", out_offset_, input_offset_, out_activation_min_, - out_activation_max_, output_x_, output_y_, buffer_); - break; - default: - MS_LOG(ERROR) << "opt enum value is not defined"; - return RET_ERROR; - } - - context->AppendCode(code.str()); - return RET_OK; -} - -int Conv2DInt8Coder::SetParameters() { - MS_CHECK_TRUE(input_tensor_->Channel() == filter_tensor_->DimensionSize(kNHWC_C), - "input Channel and filter size not match!"); - MS_CHECK_TRUE(output_tensor_->Channel() == filter_tensor_->DimensionSize(kNHWC_N), - "output Channel and filter size not match!"); - - input_x_ = input_tensor_->Width(); - input_y_ = input_tensor_->Height(); - input_ch_ = input_tensor_->Channel(); - input_batches_ = input_tensor_->Batch(); - - kernel_x_ = filter_tensor_->DimensionSize(kNHWC_W); - kernel_y_ = filter_tensor_->DimensionSize(kNHWC_H); - pad_x_ = conv_param_->pad_l_; - pad_y_ = 
conv_param_->pad_u_; - - stride_x_ = conv_param_->stride_w_; - stride_y_ = conv_param_->stride_h_; - - MS_CHECK_TRUE(!input_tensor_->quant_params().empty(), "input quant_params is empty"); - MS_CHECK_TRUE(!output_tensor_->quant_params().empty(), "output quant_params is empty"); - LiteQuantParam input_quant_arg = input_tensor_->quant_params().at(0); - LiteQuantParam output_quant_arg = output_tensor_->quant_params().at(0); - - input_offset_ = -input_quant_arg.zeroPoint; - out_offset_ = output_quant_arg.zeroPoint; - - output_x_ = output_tensor_->DimensionSize(kNHWC_W); - output_y_ = output_tensor_->DimensionSize(kNHWC_H); - output_ch_ = output_tensor_->Channel(); - - CalculateActivationRangeQuantized(conv_param_->act_type_ == ActType_Relu, conv_param_->act_type_ == ActType_Relu6, - output_quant_arg.zeroPoint, static_cast(output_quant_arg.scale), - &out_activation_min_, &out_activation_max_); - return RET_OK; -} - -void Conv2DInt8Coder::CheckSupportOptimize() { - if ((pad_x_ == 0) && (pad_y_ == 0) && (input_ch_ % kTensorChannelBaseLengthFour == 0) && (stride_x_ == 1) && - (stride_y_ == 1) && (kernel_x_ == 1) && (kernel_y_ == 1)) { - opt_ = Convolve_1x1_fast; - return; - } - - if ((output_x_ == 1) && (input_x_ == 1) && (kernel_y_ == 1) && (output_x_ % kTensorChannelBaseLengthFour == 0) && - (input_batches_ == 1)) { - opt_ = Convolve_1_x_n; - return; - } - opt_ = Basic; -} - -int Conv2DInt8Coder::InitTmpBuffer() { - const size_t kPartial = 2; - switch (opt_) { - case Basic: - buffer_size_ = - static_cast(kPartial * input_tensor_->Channel() * filter_tensor_->Width() * filter_tensor_->Height()) * - sizeof(int16_t); - break; - case Convolve_1_x_n: - buffer_size_ = - static_cast(kPartial * input_tensor_->Channel() * filter_tensor_->Width() * filter_tensor_->Height()) * - sizeof(int16_t); - break; - case Convolve_1x1_fast: - // do nothing - buffer_size_ = 0; - return RET_OK; - default: - MS_LOG(ERROR) << "opt enum value is not defined"; - return RET_ERROR; - } - buffer_ = static_cast(allocator_->Malloc(kNumberTypeInt16, buffer_size_, kWorkspace)); - MS_CHECK_PTR(buffer_); - return RET_OK; -} - -std::unique_ptr CmsisConv2DInt8OpCoderCreator(const std::vector &in_tensors, - const std::vector &out_tensors, - const LiteGraph::Node *node, size_t node_index, - Target target, int schema_version) { - MS_CHECK_PTR_RET_NULL(node); - std::unique_ptr coder = - std::make_unique(in_tensors, out_tensors, node, node_index, target); - return coder; -} - -REG_OPERATOR_CODER(kCortex_M, kNumberTypeInt8, PrimitiveType_Conv2DFusion, CPUOpCoderCreator) -} // namespace mindspore::lite::micro::cmsis diff --git a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/conv2d_int8_coder.h b/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/conv2d_int8_coder.h deleted file mode 100644 index c0409d7c..00000000 --- a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/conv2d_int8_coder.h +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_CMSIS_NN_INT8_CONV2D_INT8_CODER_H_ -#define MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_CMSIS_NN_INT8_CONV2D_INT8_CODER_H_ - -#include -#include -#include "coder/opcoders/cmsis-nn/int8/conv2d_base_coder.h" -#include "nnacl/conv_parameter.h" - -namespace mindspore::lite::micro::cmsis { -class Conv2DInt8Coder final : public Conv2DBaseCoder { - public: - explicit Conv2DInt8Coder(const std::vector &in_tensors, const std::vector &out_tensors, - const LiteGraph::Node *node, size_t node_index, Target target) - : Conv2DBaseCoder(in_tensors, out_tensors, node, node_index, target) {} - int Prepare(CoderContext *context) override; - - int DoCode(CoderContext *ctx) override; - - ~Conv2DInt8Coder() override = default; - - private: - enum ConvOpt { Basic = 0, Convolve_1_x_n = 1, Convolve_1x1_fast = 2 }; - - void CheckSupportOptimize(); - - int SetParameters(); - - int InitTmpBuffer(); - - uint16_t input_x_{0}; - uint16_t input_y_{0}; - uint16_t input_ch_{0}; - uint16_t input_batches_{0}; - uint16_t output_ch_{0}; - uint16_t kernel_x_{0}; - uint16_t kernel_y_{0}; - uint16_t pad_x_{0}; - uint16_t pad_y_{0}; - uint16_t stride_x_{0}; - uint16_t stride_y_{0}; - int32_t out_offset_{0}; - int32_t input_offset_{0}; - int32_t out_activation_min_{0}; - int32_t out_activation_max_{0}; - uint16_t output_x_{0}; - uint16_t output_y_{0}; - - int16_t *buffer_{nullptr}; - size_t buffer_size_{0}; - - ConvOpt opt_{ConvOpt::Basic}; -}; -} // namespace mindspore::lite::micro::cmsis -#endif // MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_CMSIS_NN_INT8_CONV2D_INT8_CODER_H_ diff --git a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/dwconv_int8_coder.cc b/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/dwconv_int8_coder.cc deleted file mode 100644 index bdfb10e8..00000000 --- a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/dwconv_int8_coder.cc +++ /dev/null @@ -1,163 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "coder/opcoders/cmsis-nn/int8/dwconv_int8_coder.h" -#include -#include "coder/opcoders/serializers/serializer.h" -#include "coder/opcoders/file_collector.h" -#include "coder/log.h" -#include "coder/utils/common.h" - -namespace mindspore::lite::micro::cmsis { -int DWConvInt8Coder::Prepare(CoderContext *const context) { - Conv2DBaseCoder::Init(); - MS_CHECK_RET_CODE(micro::Conv2DBaseCoder::CheckLayout(input_tensor_), "Check layout failed."); - MS_CHECK_RET_CODE(micro::Conv2DBaseCoder::SetQuantParam(), "SetQuantParam failed"); - MS_CHECK_RET_CODE(Conv2DBaseCoder::SetQuantArgs(), "SetQuantArgs failed"); - MS_CHECK_RET_CODE(InitWeightBias(), "InitWeightBias failed"); - MS_CHECK_RET_CODE(SetParameters(), "SetParameters failed"); - CheckSupportOptimize(); - MS_CHECK_RET_CODE(InitTmpBuffer(), "InitTmpBuffer failed"); - return RET_OK; -} - -int DWConvInt8Coder::DoCode(CoderContext *const context) { - Serializer code; - code.precision(kPrecision); - - Collect(context, - { - "CMSIS/NN/Include/arm_nnfunctions.h", - }, - {}); - code.CodeArray("output_shift", output_shift_, output_ch_); - code.CodeArray("output_mult", output_mult_, output_ch_); - switch (optimize_) { - case Conv_3x3: - Collect(context, {}, - { - "arm_depthwise_conv_3x3_s8.c", - }); - code.CodeFunction("arm_depthwise_conv_3x3_s8", input_tensor_, input_x_, input_y_, input_ch_, filter_tensor_, - output_ch_, pad_x_, pad_y_, stride_x_, stride_y_, bias_tensor_, output_tensor_, "output_shift", - "output_mult", output_x_, output_y_, output_offset_, input_offset_, output_activation_min_, - output_activation_max_, dilation_x_, dilation_y_, "NULL"); - break; - case Conv_opt: - // arm_depthwise_conv_s8_opt also depends on arm_depthwise_conv_s8 - Collect(context, {}, - { - "arm_depthwise_conv_s8.c", - "arm_depthwise_conv_s8_opt.c", - }); - code.CodeFunction("arm_depthwise_conv_s8_opt", input_tensor_, input_x_, input_y_, input_ch_, filter_tensor_, - output_ch_, kernel_x_, kernel_y_, pad_x_, pad_y_, stride_x_, stride_y_, bias_tensor_, - output_tensor_, "output_shift", "output_mult", output_x_, output_y_, output_offset_, - input_offset_, output_activation_min_, output_activation_max_, dilation_x_, dilation_y_, - "NULL"); - break; - case Basic: - Collect(context, {}, - { - "arm_depthwise_conv_s8.c", - }); - code.CodeFunction("arm_depthwise_conv_s8", input_tensor_, input_x_, input_y_, input_ch_, filter_tensor_, - output_ch_, ch_mult_, kernel_x_, kernel_y_, pad_x_, pad_y_, stride_x_, stride_y_, bias_tensor_, - output_tensor_, "output_shift", "output_mult", output_x_, output_y_, output_offset_, - input_offset_, output_activation_min_, output_activation_max_, dilation_x_, dilation_y_, - "NULL"); - break; - default: - MS_LOG(ERROR) << "unsupported optimize_r"; - break; - } - context->AppendCode(code.str()); - return RET_OK; -} - -int DWConvInt8Coder::InitWeightBias() { - auto *origin_weight = reinterpret_cast(filter_tensor_->data()); - MS_CHECK_PTR(origin_weight); - auto pack_weight_size = - static_cast(filter_tensor_->Batch() * filter_tensor_->Height() * filter_tensor_->Width()); - packed_weight_ = - static_cast(allocator_->Malloc(kNumberTypeInt8, pack_weight_size * sizeof(int8_t), kOfflinePackWeight)); - MS_ASSERT(packed_weight_); - PackNCHWToNHWCInt8(origin_weight, packed_weight_, 1, filter_tensor_->Height() * filter_tensor_->Width(), - filter_tensor_->Batch()); - return RET_OK; -} - -int DWConvInt8Coder::SetParameters() { - input_x_ = input_tensor_->Width(); - input_y_ = input_tensor_->Height(); - input_ch_ = input_tensor_->Channel(); - 
output_ch_ = output_tensor_->Channel(); - - // depth_multiplier - MS_CHECK_TRUE(input_tensor_->Channel() > 0, "input_tensor_->Channel() should larger than 0."); - ch_mult_ = output_tensor_->Channel() / input_tensor_->Channel(); - - kernel_x_ = filter_tensor_->Width(); - kernel_y_ = filter_tensor_->Height(); - - pad_y_ = conv_param_->pad_u_; - pad_x_ = conv_param_->pad_l_; - - stride_y_ = conv_param_->stride_h_; - stride_x_ = conv_param_->stride_w_; - - MS_CHECK_TRUE(!input_tensor_->quant_params().empty(), "input quant params shouldn't be empty"); - LiteQuantParam input_quant_arg = input_tensor_->quant_params().at(0); - MS_CHECK_TRUE(!output_tensor_->quant_params().empty(), "output quant params shouldn't be empty"); - LiteQuantParam output_quant_arg = output_tensor_->quant_params().at(0); - - output_x_ = output_tensor_->Width(); - output_y_ = output_tensor_->Height(); - input_offset_ = -input_quant_arg.zeroPoint; - output_offset_ = output_quant_arg.zeroPoint; - - CalculateActivationRangeQuantized(conv_param_->act_type_ == ActType_Relu, conv_param_->act_type_ == ActType_Relu6, - output_quant_arg.zeroPoint, output_quant_arg.scale, &output_activation_min_, - &output_activation_max_); - return RET_OK; -} - -void DWConvInt8Coder::CheckSupportOptimize() { - if (ch_mult_ == 1) { - if ((kernel_x_ == kKernelWHSizeThree) && (kernel_y_ == kKernelWHSizeThree) && (pad_y_ <= 1)) { - optimize_ = Conv_3x3; - buffer_size_ = 0; - } else { - optimize_ = Conv_opt; - buffer_size_ = input_ch_ * kernel_x_ * kernel_y_ * sizeof(int16_t); - } - } else { - optimize_ = Basic; - buffer_size_ = 0; - } -} - -int DWConvInt8Coder::InitTmpBuffer() { - if (buffer_size_ != 0) { - buffer = static_cast(allocator_->Malloc(kNumberTypeInt16, buffer_size_, kWorkspace)); - MS_CHECK_PTR(buffer); - } else { - buffer = nullptr; - } - return 0; -} -} // namespace mindspore::lite::micro::cmsis diff --git a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/dwconv_int8_coder.h b/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/dwconv_int8_coder.h deleted file mode 100644 index dc5ffdb5..00000000 --- a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/dwconv_int8_coder.h +++ /dev/null @@ -1,78 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_CMSIS_NN_INT8_DWCONV_INT8_CODER_H_ -#define MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_CMSIS_NN_INT8_DWCONV_INT8_CODER_H_ - -#include -#include "coder/opcoders/cmsis-nn/int8/conv2d_base_coder.h" -#include "src/litert/kernel/cpu/int8/convolution_depthwise_int8.h" - -namespace mindspore::lite::micro::cmsis { -class DWConvInt8Coder final : public Conv2DBaseCoder { - public: - DWConvInt8Coder(const std::vector &in_tensors, const std::vector &out_tensors, - const LiteGraph::Node *node, size_t node_index, Target target) - : Conv2DBaseCoder(in_tensors, out_tensors, node, node_index, target) {} - - ~DWConvInt8Coder() override = default; - - int Prepare(CoderContext *context) override; - - int DoCode(CoderContext *context) override; - - private: - enum DwConvOpt { - Basic = 0, - Conv_3x3 = 1, - Conv_opt = 2, - }; - - int SetParameters(); - - void CheckSupportOptimize(); - - int InitTmpBuffer(); - - int InitWeightBias(); - - int32_t input_x_{0}; - int32_t input_y_{0}; - int32_t input_ch_{0}; - int32_t output_ch_{0}; - int32_t ch_mult_{0}; - int32_t kernel_x_{0}; - int32_t kernel_y_{0}; - int32_t pad_x_{0}; - int32_t pad_y_{0}; - int32_t stride_x_{0}; - int32_t stride_y_{0}; - int32_t output_x_{0}; - int32_t output_y_{0}; - int32_t output_offset_{0}; - int32_t input_offset_{0}; - int32_t output_activation_min_{0}; - int32_t output_activation_max_{0}; - uint16_t dilation_x_{0}; - uint16_t dilation_y_{0}; - - int8_t *packed_weight_{nullptr}; - DwConvOpt optimize_{Basic}; - size_t buffer_size_{0}; - int16_t *buffer{nullptr}; -}; -} // namespace mindspore::lite::micro::cmsis -#endif // MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_CMSIS_NN_INT8_DWCONV_INT8_CODER_H_ diff --git a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/fullconnection_int8_coder.cc b/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/fullconnection_int8_coder.cc deleted file mode 100644 index fe2717c8..00000000 --- a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/fullconnection_int8_coder.cc +++ /dev/null @@ -1,80 +0,0 @@ -/** - * Copyright 2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "coder/opcoders/cmsis-nn/int8/fullconnection_int8_coder.h" -#include "coder/opcoders/serializers/serializer.h" -#include "coder/opcoders/file_collector.h" -#include "coder/utils/common.h" - -using mindspore::schema::PrimitiveType_FullConnection; - -namespace mindspore::lite::micro::cmsis { -int FullConnectionInt8Coder::Prepare(CoderContext *const context) { - FullConnectionBaseCoder::Init(); - ConfigInputOutput(); - MS_CHECK_RET_CODE(SetParameters(), "SetParameters failed"); - return RET_OK; -} - -void FullConnectionInt8Coder::ConfigInputOutput() { output_tensor_->set_format(mindspore::NHWC); } - -int FullConnectionInt8Coder::DoCode(CoderContext *const context) { - Serializer code; - code.precision(kPrecision); - - Collect(context, - { - "CMSIS/NN/Include/arm_nnfunctions.h", - }, - { - "arm_fully_connected_s8.c", - "arm_nn_vec_mat_mult_t_s8.c", - }); - - code.CodeFunction("arm_fully_connected_s8", input_tensor_, filter_tensor_, col_dim_, row_dim_, nb_batches_, - input_offset_, filter_offset_, out_multiplier_, out_shift_, output_offset_, bias_tensor_, - output_tensor_, output_activation_min_, output_activation_max_, "NULL"); - context->AppendCode(code.str()); - return RET_OK; -} - -int FullConnectionInt8Coder::SetParameters() { - MS_CHECK_TRUE(output_tensor_->shape().size() == kTensorDimensionSizeTwo, "output tensor size should be 2"); - MS_CHECK_TRUE(!input_tensor_->quant_params().empty(), "input quant_params is empty"); - MS_CHECK_TRUE(!filter_tensor_->quant_params().empty(), "filter quant_params is empty"); - MS_CHECK_TRUE(!output_tensor_->quant_params().empty(), "output quant_params is empty"); - LiteQuantParam input_quant_arg = input_tensor_->quant_params().at(0); - LiteQuantParam filter_quant_arg = filter_tensor_->quant_params().at(0); - LiteQuantParam output_quant_arg = output_tensor_->quant_params().at(0); - - double real_multiplier = input_quant_arg.scale * filter_quant_arg.scale / output_quant_arg.scale; - QuantizeMultiplier(real_multiplier, &out_multiplier_, &out_shift_); - CalculateActivationRangeQuantized(fc_param_->act_type_ == ActType_Relu, fc_param_->act_type_ == ActType_Relu6, - output_quant_arg.zeroPoint, output_quant_arg.scale, &output_activation_min_, - &output_activation_max_); - - input_offset_ = -input_quant_arg.zeroPoint; - filter_offset_ = -filter_quant_arg.zeroPoint; - output_offset_ = output_quant_arg.zeroPoint; - - col_dim_ = filter_tensor_->DimensionSize(filter_tensor_->shape().size() - 1); - row_dim_ = output_tensor_->DimensionSize(1); - nb_batches_ = input_tensor_->Batch(); - return RET_OK; -} - -REG_OPERATOR_CODER(kCortex_M, kNumberTypeInt8, PrimitiveType_FullConnection, CPUOpCoderCreator) -} // namespace mindspore::lite::micro::cmsis diff --git a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/fullconnection_int8_coder.h b/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/fullconnection_int8_coder.h deleted file mode 100644 index 8e4bdec4..00000000 --- a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/fullconnection_int8_coder.h +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_CMSIS_NN_INT8_FULLCONNECTION_INT8_CODER_H_ -#define MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_CMSIS_NN_INT8_FULLCONNECTION_INT8_CODER_H_ - -#include -#include -#include "coder/opcoders/op_coder.h" -#include "coder/opcoders/base/full_connection_base_coder.h" -#include "nnacl/int8/quantize.h" -namespace mindspore::lite::micro::cmsis { -class FullConnectionInt8Coder final : public FullConnectionBaseCoder { - public: - FullConnectionInt8Coder(const std::vector &in_tensors, const std::vector &out_tensors, - const LiteGraph::Node *node, size_t node_index, Target target) - : FullConnectionBaseCoder(in_tensors, out_tensors, node, node_index, target) {} - - int Prepare(CoderContext *const context) override; - - int DoCode(CoderContext *const context) override; - - ~FullConnectionInt8Coder() override = default; - - private: - int SetParameters(); - void ConfigInputOutput(); - - uint16_t col_dim_{0}; - uint16_t row_dim_{0}; - uint16_t nb_batches_{0}; - int32_t input_offset_{0}; - int32_t filter_offset_{0}; - int32_t out_multiplier_{0}; - int32_t out_shift_{0}; - int32_t output_offset_{0}; - int32_t output_activation_min_{0}; - int32_t output_activation_max_{0}; -}; -} // namespace mindspore::lite::micro::cmsis -#endif // MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_CMSIS_NN_INT8_FULLCONNECTION_INT8_CODER_H_ diff --git a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/mul_int8_coder.cc b/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/mul_int8_coder.cc deleted file mode 100644 index 5f0fe01b..00000000 --- a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/mul_int8_coder.cc +++ /dev/null @@ -1,78 +0,0 @@ -/** - * Copyright 2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "coder/opcoders/cmsis-nn/int8/mul_int8_coder.h" -#include -#include "coder/opcoders/serializers/serializer.h" -#include "nnacl/int8/quantize.h" -#include "coder/opcoders/file_collector.h" - -using mindspore::schema::PrimitiveType_MulFusion; - -namespace mindspore::lite::micro::cmsis { -int MulInt8Coder::Prepare(CoderContext *const context) { - input1_ = OperatorCoder::input_tensors().at(0); - input2_ = OperatorCoder::input_tensors().at(1); - - MS_CHECK_PTR(input1_); - MS_CHECK_PTR(input2_); - - MS_CHECK_TRUE(!input1_->quant_params().empty(), "input1_ quant_params is empty"); - MS_CHECK_TRUE(!input2_->quant_params().empty(), "input2_ quant_params is empty"); - MS_CHECK_TRUE(!output_tensor_->quant_params().empty(), "output quant_params is empty"); - - input_1_offset_ = -input1_->quant_params().at(0).zeroPoint; - input_2_offset_ = -input2_->quant_params().at(0).zeroPoint; - out_offset_ = output_tensor_->quant_params().at(0).zeroPoint; - const double input1_scale = input1_->quant_params().at(0).scale; - const double input2_scale = input2_->quant_params().at(0).scale; - const double output_scale = output_tensor_->quant_params().at(0).scale; - - const double real_multiplier = input1_scale * input2_scale / output_scale; - - QuantizeMultiplier(real_multiplier, &out_mult_, &out_shift_); - - CalculateActivationRangeQuantized(false, false, out_offset_, output_scale, &out_activation_min_, - &out_activation_max_); - - MS_CHECK_TRUE(input1_->ElementsNum() == input2_->ElementsNum(), "tensor length not match"); - - block_size_ = input1_->ElementsNum(); - - return RET_OK; -} - -int MulInt8Coder::DoCode(CoderContext *const context) { - Serializer code; - code.precision(kPrecision); - - Collect(context, - { - "CMSIS/NN/Include/arm_nnfunctions.h", - }, - { - "arm_elementwise_mul_s8.c", - }); - - code.CodeFunction("arm_elementwise_mul_s8", input1_, input2_, input_1_offset_, input_2_offset_, output_tensor_, - out_offset_, out_mult_, out_shift_, out_activation_min_, out_activation_max_, block_size_); - - MS_LOG(INFO) << "MulInt8Coder has been called"; - context->AppendCode(code.str()); - return RET_OK; -} -REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt8, PrimitiveType_MulFusion, CPUOpCoderCreator) -} // namespace mindspore::lite::micro::cmsis diff --git a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/mul_int8_coder.h b/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/mul_int8_coder.h deleted file mode 100644 index a16d6956..00000000 --- a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/mul_int8_coder.h +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_CMSIS_NN_INT8_MUL_INT8_CODER_H_ -#define MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_CMSIS_NN_INT8_MUL_INT8_CODER_H_ - -#include -#include "coder/opcoders/op_coder.h" - -namespace mindspore::lite::micro::cmsis { -class MulInt8Coder final : public OperatorCoder { - public: - MulInt8Coder(const std::vector &in_tensors, const std::vector &out_tensors, - const LiteGraph::Node *node, size_t node_index, Target target) - : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {} - ~MulInt8Coder() override = default; - int Prepare(CoderContext *const context) override; - - int DoCode(CoderContext *const context) override; - - private: - Tensor *input1_{nullptr}; - Tensor *input2_{nullptr}; - - int32_t input_1_offset_{0}; - int32_t input_2_offset_{0}; - int32_t out_offset_{0}; - int32_t out_mult_{0}; - int32_t out_shift_{0}; - int32_t out_activation_min_{0}; - int32_t out_activation_max_{0}; - uint32_t block_size_{0}; -}; -} // namespace mindspore::lite::micro::cmsis -#endif // MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_CMSIS_NN_INT8_MUL_INT8_CODER_H_ diff --git a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/pooling_int8_coder.cc b/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/pooling_int8_coder.cc deleted file mode 100644 index 2c1f8d25..00000000 --- a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/pooling_int8_coder.cc +++ /dev/null @@ -1,109 +0,0 @@ -/** - * Copyright 2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include -#include -#include "coder/opcoders/cmsis-nn/int8/pooling_int8_coder.h" -#include "coder/opcoders/serializers/serializer.h" -#include "coder/opcoders/file_collector.h" -#include "coder/utils/common.h" - -using mindspore::schema::PrimitiveType_AvgPoolFusion; -using mindspore::schema::PrimitiveType_MaxPoolFusion; - -namespace mindspore::lite::micro::cmsis { -int PoolingInt8Coder::Prepare(CoderContext *const context) { - this->pooling_parameter_ = reinterpret_cast(parameter_); - // get tensors - MS_CHECK_RET_CODE(SetParameters(), "SetParameters failed"); - - if (pooling_parameter_->pool_mode_ == PoolMode_AvgPool) { - buffer_size_ = input_tensor_->Channel() * sizeof(int32_t); - buffer_ = static_cast(allocator_->Malloc(kNumberTypeInt32, buffer_size_, kWorkspace)); - MS_CHECK_PTR(buffer_); - } - - return RET_OK; -} - -int PoolingInt8Coder::DoCode(CoderContext *const context) { - // init struct PoolingParameters - std::string pooling_func; - - if (pooling_parameter_->pool_mode_ == PoolMode_AvgPool) { - Collect(context, {}, - { - "arm_avgpool_s8.c", - }); - pooling_func = "arm_avgpool_s8"; - } else if (pooling_parameter_->pool_mode_ == PoolMode_MaxPool) { - Collect(context, {}, - { - "arm_max_pool_s8.c", - }); - pooling_func = "arm_max_pool_s8"; - } else { - MS_LOG(ERROR) << "unsupported pad mode"; - return RET_ERROR; - } - Collect(context, - { - "CMSIS/NN/Include/arm_nnfunctions.h", - }, - {}); - - Serializer code; - code.precision(kPrecision); - - code.CodeFunction(pooling_func, dim_src_height_, dim_src_width_, dim_dst_height_, dim_dst_width_, stride_height_, - stride_width_, dim_kernel_height_, dim_kernel_width_, padding_height_, padding_width_, act_min_, - act_max_, ch_src_, input_tensor_, buffer_, output_tensor_); - context->AppendCode(code.str()); - return RET_OK; -} - -int PoolingInt8Coder::SetParameters() { - dim_src_height_ = input_tensor_->Height(); - dim_src_width_ = input_tensor_->Width(); - dim_dst_height_ = output_tensor_->DimensionSize(1); - dim_dst_width_ = output_tensor_->DimensionSize(kTensorDimensionSizeTwo); - ch_src_ = input_tensor_->Channel(); - - stride_height_ = pooling_parameter_->stride_h_; - stride_width_ = pooling_parameter_->stride_w_; - - dim_kernel_height_ = pooling_parameter_->window_h_; - dim_kernel_width_ = pooling_parameter_->window_w_; - - // only use pad_u_ and pad_l_ because their value is consistent with tf - // ref: src/ops/conv2d.cc:ConvInferShape - padding_height_ = pooling_parameter_->pad_u_; - padding_width_ = pooling_parameter_->pad_l_; - - MS_CHECK_TRUE(!output_tensor_->quant_params().empty(), "output quant_params is empty"); - LiteQuantParam output_quant_arg = output_tensor_->quant_params().at(0); - CalculateActivationRangeQuantized(pooling_parameter_->act_type_ == ActType_Relu, - pooling_parameter_->act_type_ == ActType_Relu6, output_quant_arg.zeroPoint, - output_quant_arg.scale, &act_min_, &act_max_); - - MS_CHECK_TRUE(input_tensor_->Channel() == output_tensor_->Channel(), - "input Channel and output Channel size not match!"); - return RET_OK; -} - -REG_OPERATOR_CODER(kCortex_M, kNumberTypeInt8, PrimitiveType_AvgPoolFusion, CPUOpCoderCreator) -REG_OPERATOR_CODER(kCortex_M, kNumberTypeInt8, PrimitiveType_MaxPoolFusion, CPUOpCoderCreator) -} // namespace mindspore::lite::micro::cmsis diff --git a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/pooling_int8_coder.h b/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/pooling_int8_coder.h deleted file mode 100644 index 05800968..00000000 --- 
diff --git a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/pooling_int8_coder.h b/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/pooling_int8_coder.h
deleted file mode 100644
index 05800968..00000000
--- a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/pooling_int8_coder.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_CMSIS_NN_INT8_POOLING_INT8_CODER_H_
-#define MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_CMSIS_NN_INT8_POOLING_INT8_CODER_H_
-
-#include <string>
-#include <memory>
-#include <vector>
-#include "coder/opcoders/op_coder.h"
-#include "nnacl/int8/pooling_int8.h"
-
-namespace mindspore::lite::micro::cmsis {
-class PoolingInt8Coder final : public OperatorCoder {
- public:
-  PoolingInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                   const LiteGraph::Node *node, size_t node_index, Target target)
-      : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
-  ~PoolingInt8Coder() override = default;
-
-  int Prepare(CoderContext *const context) override;
-
-  int DoCode(CoderContext *const context) override;
-
- private:
-  int SetParameters();
-
-  int dim_src_height_{0};
-  int dim_src_width_{0};
-  int dim_dst_height_{0};
-  int dim_dst_width_{0};
-  int stride_height_{0};
-  int stride_width_{0};
-  int dim_kernel_height_{0};
-  int dim_kernel_width_{0};
-  int padding_height_{0};
-  int padding_width_{0};
-  int act_min_{0};
-  int act_max_{0};
-  int ch_src_{0};
-
-  int32_t *buffer_{nullptr};
-  size_t buffer_size_{0};
-  PoolingParameter *pooling_parameter_{nullptr};
-};
-}  // namespace mindspore::lite::micro::cmsis
-#endif  // MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_CMSIS_NN_INT8_POOLING_INT8_CODER_H_
diff --git a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/reshape_int8_coder.cc b/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/reshape_int8_coder.cc
deleted file mode 100644
index 3522d57f..00000000
--- a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/reshape_int8_coder.cc
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Copyright 2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "coder/opcoders/cmsis-nn/int8/reshape_int8_coder.h"
-#include <string>
-#include <vector>
-#include "coder/opcoders/serializers/serializer.h"
-
-using mindspore::schema::PrimitiveType_Reshape;
-
-namespace mindspore::lite::micro::cmsis {
-int ReshapeInt8Coder::DoCode(CoderContext *const context) {
-  int elements_num = input_tensor_->ElementsNum();
-
-  std::vector<LiteQuantParam> input_quant_args = input_tensor_->quant_params();
-  std::vector<LiteQuantParam> output_quant_args = output_tensor_->quant_params();
-  MS_CHECK_TRUE(!input_quant_args.empty(), "input quant_params is empty");
-  MS_CHECK_TRUE(!output_quant_args.empty(), "output quant_params is empty");
-  // In Int8Reshape the following values are checked; when they match, the op is a plain memory copy:
-  // para.in_args_.scale_ == para.out_args_.scale_ && para.in_args_.zp_ == para.out_args_.zp_
-  MS_CHECK_TRUE((input_quant_args.at(0).scale == output_quant_args.at(0).scale &&
-                 input_quant_args.at(0).zeroPoint == output_quant_args.at(0).zeroPoint),
-                "the quant arg of input and output should be the same!");
-
-  Serializer code;
-  code.precision(kPrecision);
-
-  code.CodeFunction("memcpy", output_tensor_, input_tensor_, elements_num);
-
-  MS_LOG(INFO) << "ReshapeInt8Coder has been called";
-  context->AppendCode(code.str());
-  return RET_OK;
-}
-
-REG_OPERATOR_CODER(kCortex_M, kNumberTypeInt8, PrimitiveType_Reshape, CPUOpCoderCreator<ReshapeInt8Coder>)
-}  // namespace mindspore::lite::micro::cmsis
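Editor's note: because the input and output quant params must be identical, the whole op reduces to the single emitted memcpy above. One subtlety worth recording: CodeFunction passes ElementsNum() as the byte count, which is correct only because an int8 element is exactly one byte. A sketch with hypothetical buffer names:

#include <stdint.h>
#include <string.h>

void reshape_example(int8_t *g_output0, const int8_t *g_input0, int elements_num) {
  /* valid only for 1-byte elements; a wider dtype would need
   * elements_num * sizeof(element) here */
  memcpy(g_output0, g_input0, (size_t)elements_num);
}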
- */
-
-#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_CMSIS_NN_INT8_RESHAPE_INT8_CODER_H_
-#define MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_CMSIS_NN_INT8_RESHAPE_INT8_CODER_H_
-
-#include <vector>
-#include "coder/opcoders/op_coder.h"
-
-namespace mindspore::lite::micro::cmsis {
-class ReshapeInt8Coder final : public OperatorCoder {
- public:
-  ReshapeInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                   const LiteGraph::Node *node, size_t node_index, Target target)
-      : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
-
-  ~ReshapeInt8Coder() override = default;
-  int Prepare(CoderContext *const context) override { return RET_OK; }
-
-  int DoCode(CoderContext *const context) override;
-};
-}  // namespace mindspore::lite::micro::cmsis
-#endif  // MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_CMSIS_NN_INT8_RESHAPE_INT8_CODER_H_
diff --git a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/softmax_int8_coder.cc b/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/softmax_int8_coder.cc
deleted file mode 100644
index 739d24c8..00000000
--- a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/softmax_int8_coder.cc
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * Copyright 2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "coder/opcoders/cmsis-nn/int8/softmax_int8_coder.h"
-#include <limits>
-#include "coder/opcoders/serializers/serializer.h"
-#include "coder/opcoders/file_collector.h"
-
-using mindspore::schema::PrimitiveType_Softmax;
-namespace mindspore::lite::micro::cmsis {
-int SoftMaxInt8Coder::Prepare(CoderContext *const context) {
-  SoftmaxBaseCoder::Init();
-
-  MS_CHECK_TRUE(!input_tensor_->quant_params().empty(), "input quant_params is empty");
-  LiteQuantParam in_quant_arg = input_tensor_->quant_params().at(0);
-  quant_params_.in_quant_args_.zp_ = -in_quant_arg.zeroPoint;
-
-  std::vector<LiteQuantParam> out_quant_args = output_tensor_->quant_params();
-  MS_CHECK_TRUE(!out_quant_args.empty(), "output quant_params is empty");
-  quant_params_.out_quant_arg_.scale_ = static_cast<float>(out_quant_args.at(0).scale);
-  quant_params_.out_quant_arg_.zp_ = out_quant_args.at(0).zeroPoint;
-  quant_params_.output_activation_min_ = std::numeric_limits<int8_t>::min();
-  quant_params_.output_activation_max_ = std::numeric_limits<int8_t>::max();
-
-  const int total_signed_bits = 31;
-  const int input_integer_bits = 5;
-  const double input_real_multiplier =
-    MSMIN(in_quant_arg.scale * (1 << (unsigned int)(total_signed_bits - input_integer_bits)),
-          (1ll << total_signed_bits) - 1.0);
-  // mult, shift
-  QuantizeMultiplier(input_real_multiplier, &mult_, &shift_);
-  // Calculate Input Radius
-  const double max_input_rescaled = 1.0 * ((1 << input_integer_bits) - 1) *
-                                    (1ll << static_cast<unsigned int>((total_signed_bits - input_integer_bits))) /
-                                    (1ll << static_cast<unsigned int>(shift_));
-  diff_min_ = -1.0 * static_cast<int>(std::floor(max_input_rescaled));
-
-  const int trailing_dim = static_cast<int>(input_tensor_->shape().size()) - 1;
-  const int dims_count = input_tensor_->shape().size();
-  MS_CHECK_TRUE(trailing_dim >= 0 && trailing_dim < dims_count, "trailing_dim should be in [0, dims_count)");
-  num_rows_ = 1;
-  for (int i = 0; i < dims_count; ++i) {
-    num_rows_ *= (i == trailing_dim) ? 1 : input_tensor_->DimensionSize(i);
-  }
-
-  MS_CHECK_TRUE(input_tensor_->DimensionSize(trailing_dim) == output_tensor_->DimensionSize(trailing_dim),
-                "input and output DimensionSize mismatch");
-  row_size_ = input_tensor_->DimensionSize(trailing_dim);
-
-  ReSize();
-  return RET_OK;
-}
-
-int SoftMaxInt8Coder::DoCode(CoderContext *const context) {
-  Serializer code;
-  code.precision(kPrecision);
-
-  Collect(context,
-          {
-            "CMSIS/NN/Include/arm_nnfunctions.h",
-          },
-          {
-            "arm_softmax_s8.c",
-          });
-  code.CodeFunction("arm_softmax_s8", input_tensor_, num_rows_, row_size_, mult_, shift_, diff_min_, output_tensor_);
-  MS_LOG(INFO) << "SoftMaxInt8Coder has been called";
-  context->AppendCode(code.str());
-  return RET_OK;
-}
-REG_OPERATOR_CODER(kCortex_M, kNumberTypeInt8, PrimitiveType_Softmax, CPUOpCoderCreator<SoftMaxInt8Coder>)
-}  // namespace mindspore::lite::micro::cmsis
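Editor's note: Prepare() above leans on QuantizeMultiplier to split a real-valued multiplier into a normalized int32 significand and a shift. The helper lives elsewhere in the converter; for readers tracing the arithmetic, here is a minimal self-contained sketch of the standard decomposition (d == mult * 2^(shift - 31), with mult in [2^30, 2^31)), not a copy of the real implementation:

#include <math.h>
#include <stdint.h>

static void quantize_multiplier_sketch(double d, int32_t *mult, int32_t *shift) {
  if (d == 0.0) {
    *mult = 0;
    *shift = 0;
    return;
  }
  int exp = 0;
  const double q = frexp(d, &exp); /* d = q * 2^exp, q in [0.5, 1) */
  int64_t q_fixed = (int64_t)llround(q * (double)(1ll << 31));
  if (q_fixed == (1ll << 31)) { /* rounding overflowed: renormalize */
    q_fixed /= 2;
    ++exp;
  }
  *mult = (int32_t)q_fixed;
  *shift = exp;
}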
diff --git a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/softmax_int8_coder.h b/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/softmax_int8_coder.h
deleted file mode 100644
index 514e3e49..00000000
--- a/mindspore-lite/tools/converter/micro/coder/opcoders/cmsis-nn/int8/softmax_int8_coder.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_CMSIS_NN_INT8_SOFTMAX_INT8_CODER_H_
-#define MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_CMSIS_NN_INT8_SOFTMAX_INT8_CODER_H_
-
-#include <string>
-#include <memory>
-#include <vector>
-#include "coder/opcoders/base/softmax_base_coder.h"
-
-namespace mindspore::lite::micro::cmsis {
-class SoftMaxInt8Coder final : public SoftmaxBaseCoder {
- public:
-  SoftMaxInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                   const LiteGraph::Node *node, size_t node_index, Target target)
-      : SoftmaxBaseCoder(in_tensors, out_tensors, node, node_index, target) {}
-
-  ~SoftMaxInt8Coder() override = default;
-
-  int Prepare(CoderContext *const context) override;
-
-  int DoCode(CoderContext *const context) override;
-
- private:
-  int32_t num_rows_{0};
-  int32_t row_size_{0};
-  int32_t mult_{0};
-  int32_t shift_{0};
-  int32_t diff_min_{0};
-};
-}  // namespace mindspore::lite::micro::cmsis
-#endif  // MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_CMSIS_NN_INT8_SOFTMAX_INT8_CODER_H_
diff --git a/mindspore-lite/tools/converter/micro/coder/opcoders/custom/custom_coder.cc b/mindspore-lite/tools/converter/micro/coder/opcoders/custom/custom_coder.cc
deleted file mode 100644
index 2a40de01..00000000
--- a/mindspore-lite/tools/converter/micro/coder/opcoders/custom/custom_coder.cc
+++ /dev/null
@@ -1,170 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <map>
-#include <string>
-#include "tools/converter/micro/coder/opcoders/op_coder.h"
-#include "tools/converter/micro/coder/opcoders/file_collector.h"
-#include "tools/converter/micro/coder/opcoders/serializers/serializer.h"
-#include "tools/converter/micro/coder/opcoders/custom/custom_coder.h"
-#include "tools/converter/micro/coder/opcoders/op_coder_register.h"
-#include "tools/converter/micro/coder/opcoders/kernel_registry.h"
-#include "src/common/prim_util.h"
-#include "nnacl/custom_parameter.h"
-
-using mindspore::schema::PrimitiveType_Custom;
-
-namespace mindspore::lite::micro {
-std::map<Tensor *, void *> CustomCoder::const_tensor_map_;
-
-void CustomCoder::Populate(const void *prim) {
-  auto op = static_cast<const schema::Primitive *>(prim)->value_as_Custom();
-  type_ = op->type()->str();
-  for (size_t i = 0; i < op->attr()->size(); ++i) {
-    auto attr = op->attr()->Get(i);
-    std::string data;
-    for (size_t j = 0; j < attr->data()->size(); ++j) {
-      data.push_back(static_cast<char>(attr->data()->Get(j)));
-    }
-    attrs_[attr->name()->str()] = data;
-  }
-}
-
-int CustomCoder::Prepare(CoderContext *const context) {
-  if (GetPrimitiveType(node_->primitive_, schema_version_) != PrimitiveType_Custom) {
-    MS_LOG(ERROR) << "Primitive type should be custom";
-    return RET_ERROR;
-  }
-  Populate(node_->primitive_);
-  for (const auto &tensor : input_tensors_) {
-    if (tensor->category() == lite::Category::CONST_TENSOR) {
-      if (!const_tensor_map_.count(tensor)) {
-        auto buff = allocator_->Malloc(kNumberTypeUInt8, tensor->Size(), kOfflinePackWeight);
-        auto ret = memcpy_s(buff, tensor->Size(), tensor->data(), tensor->Size());
-        if (ret != EOK) {
-          MS_LOG(ERROR) << "memcpy_s failed, src_len = " << tensor->Size() << ", dst_len = " << tensor->Size()
-                        << ", ret = " << ret;
-          return RET_ERROR;
-        }
-        const_tensor_map_[tensor] = buff;
-      }
-    }
-  }
-
-  return RET_OK;
-}
-
-int CustomCoder::TransformTensors(Serializer *code, std::string array_name, const std::vector<Tensor *> &tensors) {
-  if (tensors.size() > 16) {
-    MS_LOG(ERROR) << "The number of tensors is too large";
-    return RET_ERROR;
-  }
-  (*code) << "\t\tTensorC " << array_name << "[" << tensors.size() << "];\n";
-  for (size_t i = 0; i < tensors.size(); ++i) {
-    if (tensors[i]->category() == lite::Category::CONST_TENSOR) {
-      if (!const_tensor_map_.count(tensors[i])) {
-        MS_LOG(ERROR) << "can't find the const tensor's runtime address";
-        return RET_ERROR;
-      }
-      (*code) << "\t\t" << array_name << "[" << i
-              << "].data_ = " << allocator_->GetRuntimeAddr(const_tensor_map_[tensors[i]]) << ";\n";
-    } else {
-      (*code) << "\t\t" << array_name << "[" << i << "].data_ = " << allocator_->GetRuntimeAddr(tensors[i]) << ";\n";
-    }
-    for (size_t j = 0; j < tensors[i]->shape().size(); ++j) {
-      (*code) << "\t\t" << array_name << "[" << i << "].shape_[" << j << "] = " << tensors[i]->shape()[j] << ";\n";
-    }
-    (*code) << "\t\t" << array_name << "[" << i << "].shape_size_ = " << tensors[i]->shape().size() << ";\n";
-    (*code) << "\t\t" << array_name << "[" << i << "].data_type_ = " << tensors[i]->data_type() << ";\n";
-    (*code) << "\t\t" << array_name << "[" << i << "].format_ = " << tensors[i]->format() << ";\n";
-    if (tensors[i]->tensor_name().size() > MAX_STR_LEN) {
-      MS_LOG(ERROR) << "tensor name is too long: " << tensors[i]->tensor_name();
-      return RET_ERROR;
-    }
-    (*code) << "\t\t" << array_name << "[" << i << "].name_ = "
-            << "malloc(" << tensors[i]->tensor_name().length() + 1 << ");\n";
-    (*code) << "\t\tstrcpy(" << array_name << "[" << i << "].name_, "
-            << "\"" << tensors[i]->tensor_name() << "\""
-            << ");\n";
-  }
-
-  return RET_OK;
-}
-
-int CustomCoder::TransformParams(Serializer *code, std::string var_name) {
-  if (attrs_.size() > MAX_ATTR_NUM) {
-    MS_LOG(ERROR) << "Attrs's number exceeds the maximum";
-    return RET_ERROR;
-  }
-
-  (*code) << "\t\tCustomParameter " << var_name << ";\n";
-  if (type_.size() > MAX_STR_LEN) {
-    MS_LOG(ERROR) << "type name is too long: " << type_;
-    return RET_ERROR;
-  }
-  (*code) << "\t\tstrcpy(" << var_name << ".type, "
-          << "\"" << type_ << "\""
-          << ");\n";
-  int i = 0;
-  for (auto iter = attrs_.begin(); iter != attrs_.end(); ++iter) {
-    if (iter->first.size() > MAX_STR_LEN) {
-      MS_LOG(ERROR) << "attr name is too long: " << iter->first;
-      return RET_ERROR;
-    }
-    (*code) << "\t\tstrcpy(" << var_name << ".attr_name[" << i << "], "
-            << "\"" << iter->first << "\""
-            << ");\n";
-    (*code) << "\t\t" << var_name << ".attr_data[" << i << "] = "
-            << "malloc(" << iter->second.size() + 1 << ");\n";
-    (*code) << "\t\tstrcpy(" << var_name << ".attr_data[" << i++ << "], "
-            << "\"" << iter->second << "\""
-            << ");\n";
-  }
-  (*code) << "\t\t" << var_name << ".attr_num = " << attrs_.size() << ";\n";
-  return RET_OK;
-}
-
-void CustomCoder::FreeParams(Serializer *code, std::string var_name) {
-  int i = 0;
-  for (auto iter = attrs_.begin(); iter != attrs_.end(); ++iter) {
-    (*code) << "\t\tfree(" << var_name << ".attr_data[" << i++ << "]);\n";
-  }
-}
-
-void CustomCoder::FreeTensors(Serializer *code, std::string array_name, size_t tensors_num) {
-  for (size_t i = 0; i < tensors_num; i++) {
-    (*code) << "\t\tfree(" << array_name << "[" << i << "].name_);\n";
-  }
-}
-
-int CustomCoder::DoCode(CoderContext *const context) {
-  Collect(context, {"nnacl/custom_parameter.h", "nnacl/tensor_c.h", "src/registered_kernel.h"}, {});
-  Serializer code;
-  MS_CHECK_RET_CODE(TransformTensors(&code, "inputs", input_tensors_), "Transform input tensors error!");
-  MS_CHECK_RET_CODE(TransformTensors(&code, "outputs", output_tensors_), "Transform output tensors error!");
-  MS_CHECK_RET_CODE(TransformParams(&code, "param"), "Transform params error!");
-  code.CodeFunction(kCustomKernelName, "inputs", input_tensors_.size(), "outputs", output_tensors_.size(), "&param");
-  FreeParams(&code, "param");
-  FreeTensors(&code, "inputs", input_tensors_.size());
-  FreeTensors(&code, "outputs", output_tensors_.size());
-  context->AppendCode(code.str());
-  return 0;
-}
-
-REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt8, PrimitiveType_Custom, CPUOpCoderCreator<CustomCoder>)
-REG_OPERATOR_CODER(kAllTargets, kNumberTypeUInt8, PrimitiveType_Custom, CPUOpCoderCreator<CustomCoder>)
-REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Custom, CPUOpCoderCreator<CustomCoder>)
-}  // namespace mindspore::lite::micro
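Editor's note: the string-building above is easier to follow next to the C it produces. A sketch of the emitted block for one custom node with a single input and output; CustomKernel stands in for whatever kCustomKernelName expands to, g_input0/g_output0 are hypothetical allocator symbols, and the shapes/attrs are made up for illustration (TensorC and CustomParameter come from nnacl/tensor_c.h and nnacl/custom_parameter.h):

TensorC inputs[1];
inputs[0].data_ = g_input0; /* hypothetical runtime address */
inputs[0].shape_[0] = 1;
inputs[0].shape_[1] = 128;
inputs[0].shape_size_ = 2;
inputs[0].data_type_ = 43; /* the coder prints the raw TypeId number */
inputs[0].format_ = 0;
inputs[0].name_ = malloc(5);
strcpy(inputs[0].name_, "in_0");
TensorC outputs[1];
outputs[0].data_ = g_output0;
/* ... same per-tensor pattern ... */
CustomParameter param;
strcpy(param.type, "MyCustomOp");       /* hypothetical custom type */
strcpy(param.attr_name[0], "scale");    /* hypothetical attr */
param.attr_data[0] = malloc(4);
strcpy(param.attr_data[0], "2.0");
param.attr_num = 1;
CustomKernel(inputs, 1, outputs, 1, &param);
free(param.attr_data[0]);
free(inputs[0].name_);
free(outputs[0].name_);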
diff --git a/mindspore-lite/tools/converter/micro/coder/opcoders/custom/custom_coder.h b/mindspore-lite/tools/converter/micro/coder/opcoders/custom/custom_coder.h
deleted file mode 100644
index c5e7b246..00000000
--- a/mindspore-lite/tools/converter/micro/coder/opcoders/custom/custom_coder.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_CUSTOM_CUSTOM_CODER_H_
-#define MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_CUSTOM_CUSTOM_CODER_H_
-
-#include <map>
-#include <string>
-#include <vector>
-#include "tools/converter/micro/coder/opcoders/op_coder.h"
-
-namespace mindspore::lite::micro {
-class CustomCoder final : public OperatorCoder {
- public:
-  CustomCoder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-              const LiteGraph::Node *node, size_t node_index, Target target)
-      : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
-
-  ~CustomCoder() override = default;
-
-  int Prepare(CoderContext *const context) override;
-
-  int DoCode(CoderContext *const context) override;
-
- private:
-  void Populate(const void *prim);
-  int TransformTensors(Serializer *code, std::string array_name, const std::vector<Tensor *> &tensors);
-  int TransformParams(Serializer *code, std::string var_name);
-  void FreeParams(Serializer *code, std::string var_name);
-  void FreeTensors(Serializer *code, std::string array_name, size_t tensors_num);
-
-  std::string type_;
-  std::map<std::string, std::string> attrs_;
-  static std::map<Tensor *, void *> const_tensor_map_;
-};
-}  // namespace mindspore::lite::micro
-#endif  // MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_CUSTOM_CUSTOM_CODER_H_
diff --git a/mindspore-lite/tools/converter/micro/coder/opcoders/nnacl/fp32_grad/activation_grad_coder.cc b/mindspore-lite/tools/converter/micro/coder/opcoders/nnacl/fp32_grad/activation_grad_coder.cc
deleted file mode 100644
index b9d940d3..00000000
--- a/mindspore-lite/tools/converter/micro/coder/opcoders/nnacl/fp32_grad/activation_grad_coder.cc
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Copyright 2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "coder/opcoders/nnacl/fp32_grad/activation_grad_coder.h"
-#include "nnacl/fp32_grad/activation_grad_fp32.h"
-#include "coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
-#include "coder/opcoders/file_collector.h"
-
-using mindspore::schema::PrimitiveType_ActivationGrad;
-
-namespace mindspore::lite::micro::nnacl {
-int ActivationGradCoder::DoCode(CoderContext *const context) {
-  MS_CHECK_TRUE(input_tensors_.size() == DIMENSION_2D, "inputs size is not equal to two");
-  Tensor *input0 = input_tensors_.at(0);
-  Tensor *input1 = input_tensors_.at(1);
-  // attribute
-  auto *activation_parameter = reinterpret_cast<ActivationParameter *>(parameter_);
-  int count = input_tensor_->ElementsNum();
-  Collect(context,
-          {
-            "nnacl/fp32_grad/activation_grad_fp32.h",
-          },
-          {
-            "activation_grad_fp32.c",
-          });
-  NNaclFp32Serializer code;
-
-  switch (activation_parameter->type_) {
-    case schema::ActivationType_RELU:
-      code.CodeFunction("ReluGrad", input0, input1, count, output_tensor_);
-      break;
-    case schema::ActivationType_ELU:
-      code.CodeFunction("EluGrad", input0, input1, count, output_tensor_, activation_parameter->alpha_);
-      break;
-    default:
-      MS_LOG(ERROR) << "Activation type error";
-      return RET_ERROR;
-  }
-  MS_LOG(DEBUG) << "ActivationGradCode has been called";
-  context->AppendCode(code.str());
-  return lite::RET_OK;
-}
-
-REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_ActivationGrad,
-                   CPUOpCoderCreator<ActivationGradCoder>)
-}  // namespace mindspore::lite::micro::nnacl
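Editor's note: the kernel sources are collected rather than inlined, so the gradient semantics are not visible here. For reference, a sketch of what the RELU branch computes, assuming the usual nnacl convention that input0 is the incoming gradient and input1 the forward activation (an assumption; the actual kernel is activation_grad_fp32.c):

static void relu_grad_sketch(const float *dy, const float *y, int count, float *dx) {
  for (int i = 0; i < count; ++i) {
    /* gradient passes through only where the activation fired */
    dx[i] = (y[i] > 0.0f) ? dy[i] : 0.0f;
  }
}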
- */
-
-#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_NNACL_FP32_GRAD_ACTIVATION_GRAD_CODER_H_
-#define MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_NNACL_FP32_GRAD_ACTIVATION_GRAD_CODER_H_
-
-#include <vector>
-#include "coder/opcoders/op_coder.h"
-
-namespace mindspore::lite::micro::nnacl {
-class ActivationGradCoder final : public OperatorCoder {
- public:
-  ActivationGradCoder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                      const LiteGraph::Node *node, size_t node_index, Target target)
-      : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
-
-  ~ActivationGradCoder() override = default;
-
-  int Prepare(CoderContext *const context) override { return RET_OK; }
-
-  int DoCode(CoderContext *const context) override;
-};
-}  // namespace mindspore::lite::micro::nnacl
-
-#endif  // MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_NNACL_FP32_GRAD_ACTIVATION_GRAD_CODER_H_
diff --git a/mindspore-lite/tools/converter/micro/coder/opcoders/nnacl/fp32_grad/adam_coder.cc b/mindspore-lite/tools/converter/micro/coder/opcoders/nnacl/fp32_grad/adam_coder.cc
deleted file mode 100644
index c4427cf9..00000000
--- a/mindspore-lite/tools/converter/micro/coder/opcoders/nnacl/fp32_grad/adam_coder.cc
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Copyright 2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "coder/opcoders/nnacl/fp32_grad/adam_coder.h"
-#include "nnacl/fp32_grad/optimizer.h"
-#include "coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
-#include "coder/opcoders/file_collector.h"
-
-using mindspore::schema::PrimitiveType_Adam;
-
-namespace mindspore::lite::micro::nnacl {
-namespace {
-constexpr int kWeightIdx = 0;
-constexpr int kMomentVector1stIdx = 1;
-constexpr int kMomentVector2stIdx = 2;
-constexpr int kBeta1PowerIdx = 3;
-constexpr int kBeta2PowerIdx = 4;
-constexpr int kLearningRateIdx = 5;
-constexpr int kBeta1Idx = 6;
-constexpr int kBeta2Idx = 7;
-constexpr int kEpsilonIdx = 8;
-constexpr int kGradientIdx = 9;
-}  // namespace
-int AdamCoder::DoCode(CoderContext *const context) {
-  MS_CHECK_TRUE(input_tensors_.size() >= DIMENSION_10D, "inputs size is less than 10");
-  auto weight = input_tensors_.at(kWeightIdx);
-  auto m = input_tensors_.at(kMomentVector1stIdx);
-  auto v = input_tensors_.at(kMomentVector2stIdx);
-  auto beta1_power = input_tensors_.at(kBeta1PowerIdx);
-  auto beta2_power = input_tensors_.at(kBeta2PowerIdx);
-  auto learning_rate = reinterpret_cast<float *>(
-    input_tensors_.at(kLearningRateIdx)->MutableData())[0];  // use the model's original lr; configuring it is unsupported
-  auto beta1 = reinterpret_cast<float *>(input_tensors_.at(kBeta1Idx)->MutableData())[0];
-  auto beta2 = reinterpret_cast<float *>(input_tensors_.at(kBeta2Idx)->MutableData())[0];
-  auto eps = reinterpret_cast<float *>(input_tensors_.at(kEpsilonIdx)->MutableData())[0];
-  auto gradient = input_tensors_.at(kGradientIdx);
-  int length = input_tensors_.at(kWeightIdx)->ElementsNum();
-
-  // attribute
-  auto *adam_param = reinterpret_cast<AdamParameter *>(parameter_);
-  Collect(context,
-          {
-            "nnacl/fp32/adam_fp32.h",
-          },
-          {
-            "adam_fp32.c",
-          });
-  NNaclFp32Serializer code;
-  code.CodeFunction("DoAdam", m, v, gradient, weight, beta1, beta2, beta1_power, beta2_power, eps, learning_rate,
-                    adam_param->use_nesterov_, 0, length);
-  context->AppendCode(code.str());
-  return lite::RET_OK;
-}
-
-REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Adam, CPUOpCoderCreator<AdamCoder>)
-}  // namespace mindspore::lite::micro::nnacl
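Editor's note: DoAdam itself lives in adam_fp32.c and is only referenced here. A compact sketch of the standard (non-nesterov) update it is expected to perform, with the bias-corrected step size folded into lr_t; this is an assumption from the textbook algorithm, not a copy of the kernel:

#include <math.h>

static void adam_step_sketch(float *m, float *v, const float *g, float *w, float beta1,
                             float beta2, float beta1_power, float beta2_power, float eps,
                             float lr, int length) {
  const float lr_t = lr * sqrtf(1.0f - beta2_power) / (1.0f - beta1_power);
  for (int i = 0; i < length; ++i) {
    m[i] += (1.0f - beta1) * (g[i] - m[i]);        /* first-moment EMA */
    v[i] += (1.0f - beta2) * (g[i] * g[i] - v[i]); /* second-moment EMA */
    w[i] -= lr_t * m[i] / (sqrtf(v[i]) + eps);     /* bias-corrected update */
  }
}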
- */
-
-#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_NNACL_FP32_GRAD_ADAM_CODER_H_
-#define MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_NNACL_FP32_GRAD_ADAM_CODER_H_
-
-#include <vector>
-#include "coder/opcoders/op_coder.h"
-
-namespace mindspore::lite::micro::nnacl {
-class AdamCoder final : public OperatorCoder {
- public:
-  AdamCoder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-            const LiteGraph::Node *node, size_t node_index, Target target)
-      : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
-
-  ~AdamCoder() override = default;
-
-  int Prepare(CoderContext *const context) override { return RET_OK; }
-
-  int DoCode(CoderContext *const context) override;
-};
-}  // namespace mindspore::lite::micro::nnacl
-
-#endif  // MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_NNACL_FP32_GRAD_ADAM_CODER_H_
diff --git a/mindspore-lite/tools/converter/micro/coder/opcoders/nnacl/fp32_grad/assign_coder.cc b/mindspore-lite/tools/converter/micro/coder/opcoders/nnacl/fp32_grad/assign_coder.cc
deleted file mode 100644
index e84c2e4d..00000000
--- a/mindspore-lite/tools/converter/micro/coder/opcoders/nnacl/fp32_grad/assign_coder.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Copyright 2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "coder/opcoders/nnacl/fp32_grad/assign_coder.h"
-#include <string>
-#include "schema/inner/ops_generated.h"
-#include "coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
-
-namespace mindspore::lite::micro::nnacl {
-using mindspore::schema::PrimitiveType_Assign;
-
-int AssignCoder::Prepare(CoderContext *const context) { return RET_OK; }
-
-int AssignCoder::DoCode(CoderContext *const context) {
-  MS_CHECK_TRUE(input_tensors_.size() == DIMENSION_2D, "inputs size is not equal to two");
-  Tensor *input0 = input_tensors_.at(0);
-  Tensor *input1 = input_tensors_.at(1);
-  if (input0->Size() != input1->Size()) {
-    MS_LOG(ERROR) << "input0 size: " << input0->Size() << ", input1 size: " << input1->Size();
-    return RET_ERROR;
-  }
-
-  NNaclFp32Serializer code;
-  // Get Tensor Pointer
-  std::string input0_str = allocator_->GetRuntimeAddr(input0);
-  std::string input1_str = allocator_->GetRuntimeAddr(input1);
-  size_t data_size = input0->Size();
-  // Assign: copy input1's data into input0
-  code.CodeFunction("memcpy", input0_str, input1_str, data_size);
-  context->AppendCode(code.str());
-  return RET_OK;
-}
-
-REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Assign, CPUOpCoderCreator<AssignCoder>)
-}  // namespace mindspore::lite::micro::nnacl
diff --git a/mindspore-lite/tools/converter/micro/coder/opcoders/nnacl/fp32_grad/assign_coder.h b/mindspore-lite/tools/converter/micro/coder/opcoders/nnacl/fp32_grad/assign_coder.h
deleted file mode 100644
index 9588dc25..00000000
--- a/mindspore-lite/tools/converter/micro/coder/opcoders/nnacl/fp32_grad/assign_coder.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Copyright 2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_NNACL_FP32_GRAD_ASSIGN_CODER_H_
-#define MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_NNACL_FP32_GRAD_ASSIGN_CODER_H_
-
-#include <vector>
-#include "coder/opcoders/op_coder.h"
-
-namespace mindspore::lite::micro::nnacl {
-class AssignCoder final : public OperatorCoder {
- public:
-  AssignCoder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-              const LiteGraph::Node *node, size_t node_index, Target target)
-      : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
-  ~AssignCoder() override = default;
-
-  int Prepare(CoderContext *const context) override;
-
-  int DoCode(CoderContext *const context) override;
-};
-}  // namespace mindspore::lite::micro::nnacl
-
-#endif  // MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_NNACL_FP32_GRAD_ASSIGN_CODER_H_
diff --git a/mindspore-lite/tools/converter/micro/coder/opcoders/nnacl/fp32_grad/biasadd_grad_coder.cc b/mindspore-lite/tools/converter/micro/coder/opcoders/nnacl/fp32_grad/biasadd_grad_coder.cc
deleted file mode 100644
index 77617886..00000000
--- a/mindspore-lite/tools/converter/micro/coder/opcoders/nnacl/fp32_grad/biasadd_grad_coder.cc
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Copyright 2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "coder/opcoders/nnacl/fp32_grad/biasadd_grad_coder.h"
-#include <string>
-#include "schema/inner/ops_generated.h"
-#include "coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
-
-namespace mindspore::lite::micro::nnacl {
-using mindspore::schema::PrimitiveType_BiasAddGrad;
-
-int BiasAddGradCoder::Prepare(CoderContext *const context) {
-  auto dims = input_tensor_->shape();
-  auto *bias_param = reinterpret_cast<ArithmeticParameter *>(parameter_);
-  bias_param->ndim_ = dims.size();
-  for (unsigned int i = 0; i < bias_param->ndim_; i++) {
-    bias_param->in_shape0_[i] = dims[i];
-    bias_param->out_shape_[i] = 1;  // collapse the N, H, W dims to 1
-  }
-  bias_param->out_shape_[bias_param->ndim_ - 1] = dims[bias_param->ndim_ - 1];
-  for (auto i = bias_param->ndim_; i < DIMENSION_4D; i++) {
-    bias_param->in_shape0_[i] = 0;
-    bias_param->out_shape_[i] = 0;
-  }
-  return RET_OK;
-}
-
-int BiasAddGradCoder::DoCode(CoderContext *const context) {
-  auto *bias_param = reinterpret_cast<ArithmeticParameter *>(parameter_);
-  size_t nhw_size = 1;
-  size_t channels = bias_param->in_shape0_[bias_param->ndim_ - 1];  // C in NHWC
-  for (size_t i = 0; i < bias_param->ndim_ - 1; i++) {
-    nhw_size *= static_cast<size_t>(bias_param->in_shape0_[i]);
-  }
-
-  size_t total_size = channels * nhw_size;
-
-  NNaclFp32Serializer code;
-  // Get Tensor Pointer
-  std::string input_str = allocator_->GetRuntimeAddr(input_tensor_);
-  std::string output_str = allocator_->GetRuntimeAddr(output_tensor_);
-
-  code << "\t\tfor (size_t c = 0; c < " << channels << "; ++c) {\n";
-  code << "\t\t\t(" << output_str << ")[c] = 0;\n";
-  code << "\t\t\tfor (size_t offset = 0; offset < " << total_size << "; offset += " << channels << ") {\n";
-  code << "\t\t\t\t(" << output_str << ")[c] += (" << input_str << ")[offset + c];\n";
-  code << "\t\t\t}\n";
-  code << "\t\t}\n";
-
-  context->AppendCode(code.str());
-  return RET_OK;
-}
-
-REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_BiasAddGrad, CPUOpCoderCreator<BiasAddGradCoder>)
-}  // namespace mindspore::lite::micro::nnacl
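Editor's note: unlike most coders, this one emits its loop inline instead of calling a kernel. For readers checking the reduction, the equivalent plain C: each channel's bias gradient is the sum of the incoming NHWC gradient over all N*H*W positions.

static void bias_add_grad_sketch(const float *din, float *dbias, size_t channels,
                                 size_t total_size) {
  for (size_t c = 0; c < channels; ++c) {
    dbias[c] = 0.0f;
    for (size_t offset = 0; offset < total_size; offset += channels) {
      dbias[c] += din[offset + c]; /* stride over the channel dimension */
    }
  }
}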
- */
-
-#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_NNACL_FP32_GRAD_BIASADD_GRAD_CODER_H_
-#define MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_NNACL_FP32_GRAD_BIASADD_GRAD_CODER_H_
-
-#include <vector>
-#include "coder/opcoders/op_coder.h"
-
-namespace mindspore::lite::micro::nnacl {
-class BiasAddGradCoder final : public OperatorCoder {
- public:
-  BiasAddGradCoder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                   const LiteGraph::Node *node, size_t node_index, Target target)
-      : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
-  ~BiasAddGradCoder() override = default;
-
-  int Prepare(CoderContext *const context) override;
-
-  int DoCode(CoderContext *const context) override;
-};
-}  // namespace mindspore::lite::micro::nnacl
-
-#endif  // MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_NNACL_FP32_GRAD_BIASADD_GRAD_CODER_H_
diff --git a/mindspore-lite/tools/converter/micro/coder/opcoders/nnacl/fp32_grad/softmax_cross_entropy_with_logits_coder.cc b/mindspore-lite/tools/converter/micro/coder/opcoders/nnacl/fp32_grad/softmax_cross_entropy_with_logits_coder.cc
deleted file mode 100644
index c194f0ca..00000000
--- a/mindspore-lite/tools/converter/micro/coder/opcoders/nnacl/fp32_grad/softmax_cross_entropy_with_logits_coder.cc
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Copyright 2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "coder/opcoders/nnacl/fp32_grad/softmax_cross_entropy_with_logits_coder.h"
-#include <string>
-#include "nnacl/fp32_grad/softmax_crossentropy_parameter.h"
-#include "coder/opcoders/file_collector.h"
-#include "schema/inner/ops_generated.h"
-#include "coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
-
-namespace mindspore::lite::micro::nnacl {
-using mindspore::schema::PrimitiveType_SoftmaxCrossEntropyWithLogits;
-
-int SoftmaxCrossEntropyWithLogitsCoder::Prepare(CoderContext *const context) {
-  MS_CHECK_TRUE(input_tensor_ != nullptr, "input_tensor is nullptr.");
-  size_t data_size = input_tensor_->ElementsNum();
-  auto dims = input_tensor_->shape();
-  auto *softmax_cross_entropy_param = reinterpret_cast<SoftmaxCrossEntropyParameter *>(parameter_);
-  softmax_cross_entropy_param->n_dim_ = DIMENSION_2D;
-  CHECK_LESS_RETURN(dims.size(), DIMENSION_2D);
-  softmax_cross_entropy_param->number_of_classes_ = dims.at(1);
-  softmax_cross_entropy_param->batch_size_ = dims.at(0);
-
-  losses_ = reinterpret_cast<float *>(allocator_->Malloc(kNumberTypeFloat32, data_size * sizeof(float), kWorkspace));
-  sum_data_ = reinterpret_cast<float *>(allocator_->Malloc(kNumberTypeFloat32, dims[0] * sizeof(float), kWorkspace));
-  n_dim_ = DIMENSION_2D;
-  element_size_ = data_size;
-  softmax_params_.axis_ = 1;
-  for (size_t i = 0; i < dims.size(); i++) {
-    input_shape_[i] = dims.at(i);
-  }
-  return RET_OK;
-}
-
-int SoftmaxCrossEntropyWithLogitsCoder::DoCode(CoderContext *const context) {
-  MS_CHECK_TRUE(input_tensors_.size() == DIMENSION_2D, "inputs size is not equal to two");
-  Collect(context,
-          {
-            "nnacl/fp32/softmax_fp32.h",
-            "nnacl/fp32_grad/softmax_cross_entropy_with_logits.h",
-          },
-          {
-            "softmax_fp32.c",
-            "softmax_cross_entropy_with_logits.c",
-          });
-  NNaclFp32Serializer code, init_code;
-  code.CodeStruct("softmax_params", softmax_params_);
-  code.CodeStruct("input_shape", input_shape_, DIMENSION_5D);
-
-  // Get Tensor Pointer
-  std::string in_str = allocator_->GetRuntimeAddr(input_tensor_);
-  std::string labels_str = allocator_->GetRuntimeAddr(input_tensors_.at(1));
-  std::string out_str = allocator_->GetRuntimeAddr(output_tensor_);
-  std::string grad_str = "NULL";
-  if (output_tensors_.size() > 1) {
-    grad_str = allocator_->GetRuntimeAddr(output_tensors_.at(1));
-  }
-  auto *softmax_cross_entropy_param = reinterpret_cast<SoftmaxCrossEntropyParameter *>(parameter_);
-  code.CodeFunction("Softmax", in_str, losses_, sum_data_, "softmax_params.axis_", n_dim_, "input_shape");
-  code.CodeFunction("ForwardPostExecute", labels_str, losses_, grad_str, out_str,
-                    softmax_cross_entropy_param->number_of_classes_, softmax_cross_entropy_param->batch_size_);
-
-  context->AppendCode(code.str());
-  return RET_OK;
-}
-
-REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_SoftmaxCrossEntropyWithLogits,
-                   CPUOpCoderCreator<SoftmaxCrossEntropyWithLogitsCoder>)
-}  // namespace mindspore::lite::micro::nnacl
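Editor's note: ForwardPostExecute is collected from softmax_cross_entropy_with_logits.c and not shown here. As a reading aid, a sketch of the quantities it plausibly produces from the softmax output: the mean cross-entropy over the batch and, when a gradient output exists, (probs - labels) / batch. Treat this as an assumption about the kernel's contract, not its code:

#include <math.h>
#include <stddef.h>

static void softmax_xent_sketch(const float *labels, const float *probs, float *grads,
                                float *out_loss, int classes, int batch) {
  float total = 0.0f;
  for (int b = 0; b < batch; ++b) {
    for (int c = 0; c < classes; ++c) {
      const int i = b * classes + c;
      const float p = probs[i] > 1e-30f ? probs[i] : 1e-30f; /* guard log(0) */
      total -= labels[i] * logf(p);
      if (grads != NULL) {
        grads[i] = (probs[i] - labels[i]) / (float)batch;
      }
    }
  }
  *out_loss = total / (float)batch;
}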
diff --git a/mindspore-lite/tools/converter/micro/coder/opcoders/nnacl/fp32_grad/softmax_cross_entropy_with_logits_coder.h b/mindspore-lite/tools/converter/micro/coder/opcoders/nnacl/fp32_grad/softmax_cross_entropy_with_logits_coder.h
deleted file mode 100644
index 3aea3e4d..00000000
--- a/mindspore-lite/tools/converter/micro/coder/opcoders/nnacl/fp32_grad/softmax_cross_entropy_with_logits_coder.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Copyright 2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_NNACL_FP32_GRAD_SOFTMAX_CROSS_ENTROPY_WITH_LOGITS_CODER_H_
-#define MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_NNACL_FP32_GRAD_SOFTMAX_CROSS_ENTROPY_WITH_LOGITS_CODER_H_
-
-#include <vector>
-#include "nnacl/softmax_parameter.h"
-#include "coder/opcoders/op_coder.h"
-
-namespace mindspore::lite::micro::nnacl {
-class SoftmaxCrossEntropyWithLogitsCoder final : public OperatorCoder {
- public:
-  SoftmaxCrossEntropyWithLogitsCoder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                                     const LiteGraph::Node *node, size_t node_index, Target target)
-      : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
-  ~SoftmaxCrossEntropyWithLogitsCoder() override = default;
-
-  int Prepare(CoderContext *const context) override;
-
-  int DoCode(CoderContext *const context) override;
-
- private:
-  int n_dim_{0};
-  int element_size_{0};
-  int input_shape_[DIMENSION_5D] = {0};
-  SoftmaxParameter softmax_params_;
-  float *losses_{nullptr};
-  float *sum_data_{nullptr};
-};
-}  // namespace mindspore::lite::micro::nnacl
-
-#endif  // MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_OPCODERS_NNACL_FP32_GRAD_SOFTMAX_CROSS_ENTROPY_WITH_LOGITS_CODER_H_
diff --git a/mindspore-lite/tools/converter/micro/coder/train/train_generator.cc b/mindspore-lite/tools/converter/micro/coder/train/train_generator.cc
deleted file mode 100644
index 80b7a9fd..00000000
--- a/mindspore-lite/tools/converter/micro/coder/train/train_generator.cc
+++ /dev/null
@@ -1,89 +0,0 @@
-/**
- * Copyright 2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "coder/train/train_generator.h"
-#include <string>
-#include "coder/generator/component/train_component.h"
-#include "coder/opcoders/parallel.h"
-#include "coder/generator/component/component.h"
-#include "tools/common/string_util.h"
-
-namespace mindspore::lite::micro {
-void TrainGenerator::CodeTrainAndEvalFunc(std::ofstream &ofs) {
-  size_t i = 0;
-  size_t code_blocks_size = code_blocks_with_flag_.size();
-  while (i < code_blocks_size) {
-    bool is_train_only = code_blocks_with_flag_.at(i).second;
-    if (!is_train_only) {
-      ofs << "  {\n" << code_blocks_with_flag_.at(i).first << "  }\n";
-      i++;
-      continue;
-    }
-
-    size_t j = i;
-    while (j < code_blocks_size && code_blocks_with_flag_.at(j).second) {  // is loss or grad op
-      j++;
-    }
-    ofs << "  if (train_mode) {\n";
-    for (; i < j; i++) {
-      auto code_block = code_blocks_with_flag_.at(i).first;
-      (void)FindAndReplaceAll(&code_block, "  ", "    ");
-      ofs << "    {\n" << code_block << "    }\n";
-    }
-    ofs << "  }\n";
-  }
-}
-
-void TrainGenerator::CodeNetExecuteFunc(std::ofstream &ofs) {
-  ofs << "void Execute" << ctx_->GetCurModelIndex() << "(bool train_mode) {\n";
-  if (config_->support_parallel()) {
-    ofs << "  " << gThreadNum << " = GetCurrentThreadNum();\n";
-    ofs << "  SetSpinCountMaxValue();\n";
-  }
-
-  CodeTrainAndEvalFunc(ofs);
-
-  if (config_->support_parallel()) {
-    ofs << "  SetSpinCountMinValue();\n";
-  }
-
-  ofs << "}\n";
-}
-
-int TrainGenerator::CodeNetHFile() {
-  std::string net_include_file = model_dir_ + net_inc_hfile_;
-  std::ofstream ofs(net_include_file);
-  MS_CHECK_TRUE(!ofs.bad(), "failed to open file");
-  MS_LOG(INFO) << "write " << net_include_file;
-  CodeCommonNetH(ofs);
-  CodeCopyTrainOutputsState(ofs, ctx_->GetCurModelIndex());
-  ofs << kEndExternCpp;
-  ofs.close();
-  return RET_OK;
-}
-
-int TrainGenerator::CodeNetCFile() {
-  std::string net_impl_file = net_src_file_path_ + net_src_cfile_;
-  std::ofstream ofs(net_impl_file);
-  MS_CHECK_TRUE(!ofs.bad(), "failed to open file");
-  MS_LOG(INFO) << "write " << net_impl_file;
-  CodeCommonNetC(ofs);
-  CodeCopyTrainOutputsImplement(ofs, ctx_);
-  CodeNetExecuteFunc(ofs);
-  ofs.close();
-  return RET_OK;
-}
-}  // namespace mindspore::lite::micro
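Editor's note: CodeTrainAndEvalFunc is clearer next to the C it emits. For model index 0 the generated net.c ends up shaped like this (block bodies elided; the braces-per-op and the train_mode fence around consecutive loss/grad blocks come straight from the loop above):

#include <stdbool.h>

void Execute0(bool train_mode) {
  { /* forward op block */ }
  { /* forward op block */ }
  if (train_mode) {
    { /* loss op block (re-indented by FindAndReplaceAll) */ }
    { /* gradient/optimizer op block */ }
  }
}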
- */ - -#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_TRAIN_TRAIN_GENERATOR_H_ -#define MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_TRAIN_TRAIN_GENERATOR_H_ - -#include -#include -#include -#include -#include "tools/converter/micro/coder/generator/generator.h" - -namespace mindspore::lite::micro { -class TrainGenerator : public Generator { - public: - TrainGenerator(std::unique_ptr ctx, std::vector> code_blocks_with_flag) - : Generator(std::move(ctx)), code_blocks_with_flag_(std::move(code_blocks_with_flag)) {} - ~TrainGenerator() override = default; - - private: - void CodeTrainAndEvalFunc(std::ofstream &ofs); - void CodeNetExecuteFunc(std::ofstream &ofs) override; - int CodeNetHFile() override; - int CodeNetCFile() override; - - private: - std::vector> code_blocks_with_flag_; // -}; -} // namespace mindspore::lite::micro -#endif // MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_TRAIN_TRAIN_GENERATOR_H_ diff --git a/mindspore-lite/tools/converter/micro/coder/train/train_session.cc b/mindspore-lite/tools/converter/micro/coder/train/train_session.cc deleted file mode 100644 index 7def5724..00000000 --- a/mindspore-lite/tools/converter/micro/coder/train/train_session.cc +++ /dev/null @@ -1,143 +0,0 @@ -/** - * Copyright 2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "tools/converter/micro/coder/train/train_session.h" -#include -#include -#include -#include -#include -#include -#include "include/errorcode.h" -#include "tools/converter/micro/coder/utils/train_utils.h" -#include "tools/converter/micro/coder/train/train_generator.h" - -namespace mindspore::lite::micro { -int CoderTrainSession::Build() { - int ret = CoderSession::Build(); - MS_CHECK_RET_CODE(ret, "code session build failed."); - MS_CHECK_RET_CODE(CompileTrainCoders(), "CompileTrainCoders failed"); - MS_CHECK_RET_CODE(coder_graph_->CompileTrainOutputs(train_op_coders_), "CompileTrainOutputs failed!"); - MS_CHECK_RET_CODE(coder_graph_->CompileEvalOutputs(train_op_coders_), "CompileEvalOutputs failed!"); - MS_CHECK_RET_CODE(CompileEvalCoders(coder_graph_->GetEvalOutputsMap()), "CompileTrainCoders failed."); - return RET_OK; -} - -int CoderTrainSession::Run(const std::string &model_name) { - MS_LOG(INFO) << "start run op coders"; - int ret = Preprocess(); - MS_CHECK_RET_CODE(ret, "preprocess failed"); - - ret = DoCode(); - MS_CHECK_RET_CODE(ret, "do code failed"); - - PassArgsToContext(model_name); - MS_LOG(INFO) << "run op coders success"; - return RET_OK; -} - -int CoderTrainSession::GenerateCode() { - MS_LOG(INFO) << "CoderSession::GenerateCode start"; - auto generator = std::make_shared(std::move(context_), code_blocks_with_flag_); - MS_CHECK_PTR(generator); - - int ret = generator->GenerateCode(); - if (ret != RET_OK) { - MS_LOG(ERROR) << "generate code failed"; - } - MS_LOG(INFO) << "CoderSession::GenerateCode done"; - return ret; -} -int CoderTrainSession::DoCode() { - int ret = RET_OK; - size_t last_idx = context_->code_blocks().size(); - for (const auto &op_coder : op_coders_) { - MS_CHECK_PTR(op_coder); - MS_LOG(DEBUG) << "code: " << op_coder->name(); - ret = op_coder->DoCode(this->context_.get()); - MS_CHECK_RET_CODE(ret, "do coder " << op_coder->name() << " failed"); - auto code_blocks = context_->code_blocks(); - auto cur_indx = code_blocks.size(); - MS_CHECK_TRUE_MSG(cur_indx > last_idx, RET_ERROR, "append code failed."); - bool is_train_only = - std::find(eval_op_coders_.begin(), eval_op_coders_.end(), op_coder.get()) == eval_op_coders_.end(); - for (; last_idx < cur_indx; last_idx++) { - code_blocks_with_flag_.emplace_back(code_blocks.at(last_idx), is_train_only); - } - } - return ret; -} - -int CoderTrainSession::UpdateCodeBlocksWithFlag() { - auto code_blocks = context_->code_blocks(); - MS_CHECK_TRUE_MSG(code_blocks.size() == code_blocks_with_flag_.size(), RET_ERROR, "code blocks size is unmatched."); - for (size_t i = 0; i < code_blocks.size(); i++) { - code_blocks_with_flag_.at(i).first = code_blocks.at(i); - } - return RET_OK; -} - -int CoderTrainSession::PassArgsToContext(const std::string &model_name) { - int ret = CoderSession::PassArgsToContext(model_name); - MS_CHECK_RET_CODE(ret, "PassArgsToContext failed"); - if (Configurator::GetInstance()->debug_mode()) { - ret = UpdateCodeBlocksWithFlag(); - MS_CHECK_RET_CODE(ret, "update code_blocks_with_flag_ failed."); - } - context_->set_graph_train_outputs(coder_graph_->train_output_tensors()); - context_->set_graph_eval_outputs(coder_graph_->eval_output_tensors()); - context_->set_model_name(model_name); - return ret; -} - -void CoderTrainSession::FindEvalCoders(OperatorCoder *coder) { - if (coder == nullptr) { - return; - } - if (std::find(eval_op_coders_.begin(), eval_op_coders_.end(), coder) == - eval_op_coders_.end()) { // kernel is not already in vector - for (auto in_coder : coder->input_ops()) { - 
FindEvalCoders(in_coder); - } - if (!IsLossCoder(coder)) { - eval_op_coders_.emplace_back(coder); - } - } -} - -int CoderTrainSession::CompileTrainCoders() { - train_op_coders_.clear(); - (void)std::transform(op_coders_.begin(), op_coders_.end(), std::back_inserter(train_op_coders_), - [](const std::unique_ptr &coder) { return coder.get(); }); - return RET_OK; -} - -int CoderTrainSession::CompileEvalCoders(const std::map> &eval_outputs_map) { - eval_op_coders_.clear(); - for (const auto &item : eval_outputs_map) { - std::string kernel_name = item.first; - auto iter = std::find_if(train_op_coders_.begin(), train_op_coders_.end(), - [&kernel_name](const OperatorCoder *coder) { return (coder->name() == kernel_name); }); - MS_CHECK_TRUE_MSG(iter != train_op_coders_.end(), RET_ERROR, "can't find output coder in Eval mode."); - MS_CHECK_TRUE_MSG(*iter != nullptr, RET_ERROR, "find output coder in Eval mode."); - (void)FindEvalCoders(*iter); - } - if (eval_op_coders_.empty()) { - eval_op_coders_ = train_op_coders_; - } - return RET_OK; -} -} // namespace mindspore::lite::micro diff --git a/mindspore-lite/tools/converter/micro/coder/train/train_session.h b/mindspore-lite/tools/converter/micro/coder/train/train_session.h deleted file mode 100644 index 7501dce5..00000000 --- a/mindspore-lite/tools/converter/micro/coder/train/train_session.h +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Copyright 2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_TRAIN_TRAIN_SESSION_H_
-#define MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_TRAIN_TRAIN_SESSION_H_
-
-#include <map>
-#include <string>
-#include <utility>
-#include <vector>
-#include "tools/converter/micro/coder/session.h"
-namespace mindspore::lite::micro {
-class CoderTrainSession : public CoderSession {
- public:
-  int Build() override;
-
-  int Run(const std::string &model_name) override;
-
-  int GenerateCode() override;
-
- private:
-  int DoCode() override;
-  int UpdateCodeBlocksWithFlag();
-  int PassArgsToContext(const std::string &model_name) override;
-  void FindEvalCoders(OperatorCoder *coder);
-  int CompileTrainCoders();
-  int CompileEvalCoders(const std::map<std::string, std::vector<Tensor *>> &eval_outputs_map);
-
- private:
-  std::vector<std::pair<std::string, bool>> code_blocks_with_flag_;  //
-  std::vector<OperatorCoder *> train_op_coders_;
-  std::vector<OperatorCoder *> eval_op_coders_;
-};
-}  // namespace mindspore::lite::micro
-#endif  // MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_CODER_TRAIN_TRAIN_SESSION_H_
diff --git a/mindspore-lite/tools/converter/micro/providers/nnie/CMakeLists.txt b/mindspore-lite/tools/converter/micro/providers/nnie/CMakeLists.txt
deleted file mode 100644
index 699d8695..00000000
--- a/mindspore-lite/tools/converter/micro/providers/nnie/CMakeLists.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-cmake_minimum_required(VERSION 3.14)
-project(micro_nnie_kernel)
-if(NOT DEFINED PKG_PATH)
-    message(FATAL_ERROR "PKG_PATH not set")
-endif()
-message("PKG_PATH:${PKG_PATH}")
-include_directories(${CMAKE_CURRENT_SOURCE_DIR})
-include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../)
-set(THIRD_PATRY_PATH ${LITE_DIR}/providers/nnie/third_patry/)
-if(${MSLITE_REGISTRY_DEVICE} STREQUAL "Hi3516D")
-    include_directories(${THIRD_PATRY_PATH}/hi3516_sdk/)
-    link_directories(${THIRD_PATRY_PATH}/hi3516_sdk/lib)
-elseif(${MSLITE_REGISTRY_DEVICE} STREQUAL "Hi3519A")
-    include_directories(${THIRD_PATRY_PATH}/hi3519_sdk/)
-    link_directories(${THIRD_PATRY_PATH}/hi3519_sdk/lib)
-elseif(${MSLITE_REGISTRY_DEVICE} STREQUAL "Hi3559A")
-    include_directories(${THIRD_PATRY_PATH}/hi3559_sdk/)
-    link_directories(${THIRD_PATRY_PATH}/hi3559_sdk/lib)
-endif()
-
-include_directories(${PKG_PATH}/tools/codegen/include)
-include_directories(${PKG_PATH}/runtime/)
-include_directories(${LITE_DIR}/providers/nnie)
-include_directories(${LITE_DIR}/providers/nnie_proposal)
-
-set(SRC
-    ${CMAKE_CURRENT_SOURCE_DIR}/nnie_micro.cc
-    ${CMAKE_CURRENT_SOURCE_DIR}/nnie_interfaces.cc
-    ${LITE_DIR}/providers/nnie/src/nnie_common.cc
-    ${LITE_DIR}/providers/nnie/src/nnie_memory.cc
-    ${LITE_DIR}/providers/nnie/src/nnie_print.cc
-    ${LITE_DIR}/providers/nnie_proposal/src/proposal.cc)
-
-add_library(micro_nnie SHARED ${SRC})
-target_link_libraries(micro_nnie nnie mpi VoiceEngine upvqe dnvqe mindspore::securec)
diff --git a/mindspore-lite/tools/converter/micro/providers/nnie/nnie_interfaces.cc b/mindspore-lite/tools/converter/micro/providers/nnie/nnie_interfaces.cc
deleted file mode 100644
index 636e06e6..00000000
--- a/mindspore-lite/tools/converter/micro/providers/nnie/nnie_interfaces.cc
+++ /dev/null
@@ -1,237 +0,0 @@
-/**
- * Copyright 2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "nnie/nnie_interfaces.h"
-#include <vector>
-#include "src/nnie_print.h"
-#include "include/hi_common.h"
-#include "include/hi_comm_vb.h"
-#include "include/mpi_sys.h"
-#include "include/mpi_vb.h"
-
-namespace mindspore {
-namespace nnie {
-constexpr int kNNIEMaxNameLen = 128;
-
-static int FillRoiPooling(NnieRunCfg *cfg, NnieTensors *inputs, int idx) {
-  int *roi_shape = inputs->shape_[idx];
-  if (roi_shape[1] != NNIE_COORDI_NUM) {
-    LOGE("Roi shape err!");
-    return HI_FAILURE;
-  }
-
-  if (roi_shape[0] > (int64_t)(cfg->cfg_.max_roi_num_)) {
-    LOGE("NNIE_RUNTIME_CONFIG_PATH: The maximum [max_roi_num] value set is less than the actual value: %d < %d.",
-         cfg->cfg_.max_roi_num_, (int)(roi_shape[0]));
-    return HI_FAILURE;
-  }
-  cfg->param_.rpn_bbox_.unShape.stWhc.u32Height = roi_shape[0];
-  HI_U32 dst_stride = cfg->param_.rpn_bbox_.u32Stride;
-  HI_S32 *proposal_result = NNIE_CONVERT_64BIT_ADDR(HI_S32, cfg->param_.rpn_bbox_.u64VirAddr);
-  float *float_src_data = reinterpret_cast<float *>(inputs->data_[idx]);
-  constexpr int kIndexLeft = 0;
-  constexpr int kIndexRight = 1;
-  constexpr int kIndexWidth = 2;
-  constexpr int kIndexHeight = 3;
-  for (size_t j = 0; j < cfg->param_.rpn_bbox_.unShape.stWhc.u32Height; j++) {
-    proposal_result[dst_stride / sizeof(HI_U32) * j + kIndexLeft] = *(float_src_data++) * NNIE_QUANT_BASE;
-    proposal_result[dst_stride / sizeof(HI_U32) * j + kIndexRight] = *(float_src_data++) * NNIE_QUANT_BASE;
-    proposal_result[dst_stride / sizeof(HI_U32) * j + kIndexWidth] = *(float_src_data++) * NNIE_QUANT_BASE;
-    proposal_result[dst_stride / sizeof(HI_U32) * j + kIndexHeight] = *(float_src_data++) * NNIE_QUANT_BASE;
-  }
-  NnieMemFlushCache(cfg->param_.rpn_bbox_.u64PhyAddr,
-                    NNIE_CONVERT_64BIT_ADDR(HI_VOID, cfg->param_.rpn_bbox_.u64VirAddr),
-                    dst_stride * cfg->param_.rpn_bbox_.unShape.stWhc.u32Height);
-
-  return HI_SUCCESS;
-}
-
-int NnieInit(NnieHandle *h, NnieTensors *inputs) {
-  NnieModel *model = &(h->cfg_.model_);
-
-  if (inputs->size_ <= 0) {
-    LOGE("inputs size must be greater than 0!");
-    return HI_FAILURE;
-  }
-  if (NnieLoadModel(h->model_buf_, h->buf_size_, model) != HI_SUCCESS) {
-    LOGE("NnieLoadModel failed!");
-    return HI_FAILURE;
-  }
-
-  std::vector<int> input_shape;
-  for (int i = 0; i < inputs->shape_len_[0]; i++) {
-    input_shape.push_back(inputs->shape_[0][i]);
-  }
-  if (NnieCommCreate(&h->cfg_, input_shape) != HI_SUCCESS) {
-    NnieCommDelete(&h->cfg_.param_, &h->cfg_.model_);
-    return HI_FAILURE;
-  }
-  return HI_SUCCESS;
-}
-
-static size_t GetFillIndex(char **input_name, size_t input_size, const HI_CHAR *name) {
-  char prefix[kNNIEMaxNameLen];
-  size_t i;
-  for (i = 0; i < input_size; ++i) {
-    char *post = strrchr(input_name[i], '_');
-    if (post && (!strcmp(post + 1, "pre") || !strcmp(post + 1, "post"))) {
-      HI_U32 prefix_len = (HI_U32)(post - input_name[i]);
-      if (prefix_len >= kNNIEMaxNameLen) return input_size;
-      strncpy(prefix, input_name[i], prefix_len);
-      prefix[prefix_len] = '\0';
-      if (strcmp(prefix, name) == 0) break;
-    } else {
-      if (strcmp(input_name[i], name) == 0) break;
-    }
-  }
-  if (i == input_size) {
-    for (i = 0; i < input_size; ++i) {
-      char *post = strrchr(input_name[i], '_');
-      if (post && (!strcmp(post + 1, "pre") || !strcmp(post + 1, "post"))) {
-        HI_U32 prefix_len = (HI_U32)(post - input_name[i]);
-        if (prefix_len >= kNNIEMaxNameLen) return input_size;
-        strncpy(prefix, input_name[i], prefix_len);
-        prefix[prefix_len] = '\0';
-        if (strncmp(prefix, name, prefix_len) == 0) break;
-      } else {
-        if (strncmp(input_name[i], name, strlen(input_name[i])) == 0) break;
-      }
-    }
-  }
-  return i;
-}
-
-int NnieFillData(NnieHandle *h, NnieTensors *inputs) {
-  SVP_NNIE_MODEL_S *model = h->cfg_.param_.model_;
-  unsigned int seg_id = h->cfg_.run_idx_.seg_idx_;
-  bool run_box = false;
-  size_t i, j;
-  if (model->astSeg[seg_id].enNetType == SVP_NNIE_NET_TYPE_ROI) {
-    run_box = true;
-    for (i = 0; i < static_cast<size_t>(inputs->size_); i++) {
-      if (!strcmp(inputs->name_[i], "proposal")) {
-        if (FillRoiPooling(&h->cfg_, inputs, i)) {
-          return HI_FAILURE;
-        }
-        break;
-      }
-    }
-    if (i == static_cast<size_t>(inputs->size_)) {
-      LOGE("Can't find proposal out!");
-      return HI_FAILURE;
-    }
-  } else if (inputs->size_ != model->astSeg[seg_id].u16SrcNum) {
-    LOGE("Input Size Err!");
-    return HI_FAILURE;
-  }
-
-  for (i = 0; i < model->astSeg[seg_id].u16SrcNum; i++) {
-    if (h->cfg_.param_.mem_cfg_.seg_[seg_id].src_node_[i]) {
-      continue;
-    }
-    j = GetFillIndex(inputs->name_, inputs->size_, model->astSeg[seg_id].astSrcNode[i].szName);
-    if (j == static_cast<size_t>(inputs->size_)) {
-      if (run_box && !strcmp(inputs->name_[i], "proposal")) {
-        continue;
-      } else {
-        j = i;
-        LOGW("input tensor name(%s) can't match wk node name(%s).", inputs->name_[i],
-             model->astSeg[seg_id].astSrcNode[i].szName);
-      }
-    }
-
-    auto input_data_type = inputs->dtype_[j];
-    SVP_BLOB_TYPE_E src_type = h->cfg_.param_.seg_data_[seg_id].src_[i].enType;
-    if (SVP_BLOB_TYPE_U8 <= src_type && src_type <= SVP_BLOB_TYPE_YVU422SP) {
-      if (!(input_data_type == NnieDataType::NNIE_INT8 || input_data_type == NnieDataType::NNIE_UINT8)) {
-        LOGE("Nnie input node type error!");
-        return HI_FAILURE;
-      }
-    } else {
-      if (input_data_type != NnieDataType::NNIE_FLOAT32) {
-        LOGE("Nnie input node type error!");
-        return HI_FAILURE;
-      }
-    }
-    HI_U32 input_size = CalcInputSize(inputs, &j);
-    if (NnieCommFillData(&h->cfg_, inputs->data_[j], input_size, i) != HI_SUCCESS) {
-      LOGE("FillData failed!");
-      return HI_FAILURE;
-    }
-  }
-  return HI_SUCCESS;
-}
-
-HI_U32 CalcInputSize(const NnieTensors *inputs, const size_t *j) {
-  HI_U32 input_size = 1;
-  for (int n = 0; n < inputs->shape_len_[*j]; n++) {
-    input_size *= inputs->shape_[*j][n];
-  }
-  return input_size;
-}
-
-int NnieRun(NnieHandle *h, NnieTensors *outputs) {
-  SVP_NNIE_MODEL_S *model = h->cfg_.param_.model_;
-  unsigned int seg_id = h->cfg_.run_idx_.seg_idx_;
-  bool run_box = false;
-  int i, j;
-  if (model->astSeg[seg_id].enNetType == SVP_NNIE_NET_TYPE_ROI) {
-    run_box = true;
-  }
-
-  if (NnieCommRun(&h->cfg_, run_box)) {
-    LOGE("Nnie Run Fail!");
-    return HI_FAILURE;
-  }
-
-  // Get output data
-  if (outputs->size_ != model->astSeg[seg_id].u16DstNum) {
-    LOGE("seg%d: %d output tensors are required, but there are %d outputs.", seg_id, model->astSeg[seg_id].u16DstNum,
-         outputs->size_);
-    return HI_FAILURE;
-  }
-  for (i = 0; i < model->astSeg[seg_id].u16DstNum; i++) {
-    if (h->cfg_.param_.mem_cfg_.seg_[seg_id].dst_node_[i]) {
-      continue;
-    }
-    j = GetFillIndex(outputs->name_, outputs->size_, model->astSeg[seg_id].astDstNode[i].szName);
-    if (j == outputs->size_) {
-      j = i;
-      LOGW("output tensor name(%s) can't match wk node name(%s).", outputs->name_[j],
-           model->astSeg[seg_id].astDstNode[i].szName);
-    }
-    if (outputs->dtype_[j] == NNIE_FLOAT32) {
-      HI_U32 output_size = 1;
-      for (int n = 0; n < outputs->shape_len_[j]; n++) {
-        output_size *= outputs->shape_[j][n];
-      }
-      if (NnieCommGetOutputData(&h->cfg_, reinterpret_cast<float *>(outputs->data_[j]), output_size, i) != HI_SUCCESS) {
-        return HI_FAILURE;
-      }
-    } else {
-      LOGE("Unsupported DataType!");
-      return HI_FAILURE;
-    }
-  }
-  return HI_SUCCESS;
-}
-
-void NnieClose(NnieHandle *h) {
-  NnieCommDelete(&h->cfg_.param_, &h->cfg_.model_);
-  h->load_model_ = 0;
-}
-}  // namespace nnie
-}  // namespace mindspore
diff --git a/mindspore-lite/tools/converter/micro/providers/nnie/nnie_interfaces.h b/mindspore-lite/tools/converter/micro/providers/nnie/nnie_interfaces.h
deleted file mode 100644
index 39db6d25..00000000
--- a/mindspore-lite/tools/converter/micro/providers/nnie/nnie_interfaces.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Copyright 2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_PROVIDERS_NNIE_NNIE_INTERFACES_H_
-#define MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_PROVIDERS_NNIE_NNIE_INTERFACES_H_
-
-#include "src/nnie_common.h"
-
-namespace mindspore {
-namespace nnie {
-typedef struct {
-  int load_model_;
-  int roi_used_;
-  char *model_buf_;
-  int buf_size_;
-  NnieRunCfg cfg_;
-} NnieHandle;
-
-typedef enum { NNIE_INT8, NNIE_UINT8, NNIE_FLOAT32 } NnieDataType;
-
-typedef struct {
-  void *data_[SVP_NNIE_MAX_INPUT_NUM];
-  char *name_[SVP_NNIE_MAX_INPUT_NUM];
-  int *shape_[SVP_NNIE_MAX_INPUT_NUM];
-  int shape_len_[SVP_NNIE_MAX_INPUT_NUM];
-  NnieDataType dtype_[SVP_NNIE_MAX_INPUT_NUM];
-  int size_;
-} NnieTensors;
-
-int NnieInit(NnieHandle *h, NnieTensors *inputs);
-int NnieFillData(NnieHandle *h, NnieTensors *inputs);
-int NnieRun(NnieHandle *h, NnieTensors *outputs);
-void NnieClose(NnieHandle *h);
-HI_U32 CalcInputSize(const NnieTensors *inputs, const size_t *j);
-}  // namespace nnie
-}  // namespace mindspore
-#endif  // MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_PROVIDERS_NNIE_NNIE_INTERFACES_H_
diff --git a/mindspore-lite/tools/converter/micro/providers/nnie/nnie_micro.cc b/mindspore-lite/tools/converter/micro/providers/nnie/nnie_micro.cc
deleted file mode 100644
index bd1ff24c..00000000
--- a/mindspore-lite/tools/converter/micro/providers/nnie/nnie_micro.cc
+++ /dev/null
@@ -1,220 +0,0 @@
-/**
- * Copyright 2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "nnie/nnie_micro.h"
-#include <algorithm>
-#include <cstdlib>
-#include <cstring>
-#include <functional>
-#include <map>
-#include <numeric>
-#include <string>
-#include <vector>
-#include "nnie/nnie_interfaces.h"
-#include "src/proposal.h"
-#include "include/ir/dtype/type_id.h"
-#include "include/c_api/status_c.h"
-
-namespace mindspore {
-namespace {
-using nnie::NnieDataType;
-using nnie::NnieTensors;
-constexpr auto ENV_TIME_STEP = "TIME_STEP";
-constexpr auto ENV_MAX_ROI_NUM = "MAX_ROI_NUM";
-constexpr int kNumInput2 = 2;
-constexpr int kDefaultROINum = 300;
-constexpr int kNCHWDims = 4;
-constexpr int kNCHWFormatH = 2;
-constexpr int kNCHWFormatW = 3;
-constexpr int kNCHWFormatC = 1;
-static std::map<TypeId, size_t> data_type_size_map = {{mindspore::kNumberTypeInt8, sizeof(int8_t)},
-                                                      {mindspore::kNumberTypeUInt8, sizeof(uint8_t)},
-                                                      {mindspore::kNumberTypeFloat32, sizeof(float)}};
-
-int MakeTensorList(TensorC *tensors, int tensor_num, NnieTensors *tensor_list) {
-  if (tensor_num > SVP_NNIE_MAX_INPUT_NUM) {
-    printf("tensors' number is larger than 16\n");
-    return kMSStatusLiteError;
-  }
-  tensor_list->size_ = tensor_num;
-  for (int i = 0; i < tensor_num; ++i) {
-    tensor_list->data_[i] = tensors[i].data_;
-    tensor_list->shape_[i] = tensors[i].shape_;
-    tensor_list->shape_len_[i] = tensors[i].shape_size_;
-    tensor_list->name_[i] = tensors[i].name_;
-    switch (tensors[i].data_type_) {
-      case mindspore::kNumberTypeInt8:
-        tensor_list->dtype_[i] = NnieDataType::NNIE_INT8;
-        break;
-      case mindspore::kNumberTypeUInt8:
-        tensor_list->dtype_[i] = NnieDataType::NNIE_UINT8;
-        break;
-      case mindspore::kNumberTypeFloat32:
-        tensor_list->dtype_[i] = NnieDataType::NNIE_FLOAT32;
-        break;
-      default:
-        printf("The tensor's data type is unsupported, %d\n", tensors[i].data_type_);
-        return kMSStatusLiteError;
-    }
-  }
-  return 0;
-}
-
-static bool GetIntCustomAttr(const char *key, int *value, CustomParameter *param) {
-  for (int i = 0; i < param->attr_num; ++i) {
-    if (!strcmp(param->attr_name[i], key)) {
-      *value = atoi(param->attr_data[i]);
-      return true;
-    }
-  }
-  return false;
-}
-
-static int GetIntEnv(const char *env_key, int default_data) {
-  auto *env_data = std::getenv(env_key);
-  int result = default_data;
-  if (env_data != nullptr) {
-    auto iter = std::find_if(env_data, env_data + strlen(env_data), [](char val) { return val < '0' || val > '9'; });
-    if (iter != env_data) {
-      *iter = '\0';
-      result = atoi(env_data);
-    } else {
-      printf("%s ENV is invalid, now set to default value %d", env_key, default_data);
-    }
-  } else {
-    printf("%s ENV is invalid, now set to default value %d", env_key, default_data);
-  }
-  return result;
-}
-}  // namespace
-namespace nnie {
-static int NnieKernel(TensorC *inputs, int input_num, TensorC *outputs, int output_num, CustomParameter *param) {
-  int id;
-  if (!GetIntCustomAttr("id", &id, param)) {
-    printf("Can't find the id attr!\n");
-    return kMSStatusLiteError;
-  }
-
-  static NnieHandle handle = {
-    .load_model_ = 0,
-    .roi_used_ = 0,
-  };
-  handle.model_buf_ = reinterpret_cast<char *>(inputs[input_num - 1].data_);
-  if (data_type_size_map.find(inputs[input_num - 1].data_type_) == data_type_size_map.end()) {
-    printf("Unsupported data type: %d\n", inputs[input_num - 1].data_type_);
-    return kMSStatusLiteError;
-  }
-  size_t data_type_size = data_type_size_map.at(inputs[input_num - 1].data_type_);
-  handle.buf_size_ =
-    std::accumulate(inputs[input_num - 1].shape_, inputs[input_num - 1].shape_ + inputs[input_num - 1].shape_size_,
-                    data_type_size, std::multiplies<size_t>());
-  handle.cfg_.run_idx_.seg_idx_ = id;
-  NnieTensors input_list;
-  if (MakeTensorList(inputs, input_num - 1, &input_list)) return kMSStatusLiteError;
-  if (!handle.load_model_) {
-    handle.cfg_.cfg_.max_roi_num_ = GetIntEnv(ENV_MAX_ROI_NUM, kDefaultROINum);
-    handle.cfg_.cfg_.step_ = GetIntEnv(ENV_TIME_STEP, 1);
-    if (NnieInit(&handle, &input_list) != HI_SUCCESS) return kMSStatusLiteError;
-    handle.load_model_ = 1;
-  }
-  if (NnieFillData(&handle, &input_list) != HI_SUCCESS) return kMSStatusLiteError;
-  NnieTensors output_list;
-  if (MakeTensorList(outputs, output_num, &output_list)) return kMSStatusLiteError;
-  if (NnieRun(&handle, &output_list) != HI_SUCCESS) return kMSStatusLiteError;
-  return 0;
-}
-}  // namespace nnie
-
-namespace proposal {
-static int ProposalKernel(TensorC *inputs, int input_num, TensorC *outputs, int output_num, CustomParameter *param) {
-  int ndims, image_height, image_width;
-  if (input_num != kNumInput2) {
-    printf("inputs tensor num error.\n");
-    return kMSStatusLiteError;
-  }
-  if (output_num != 1) {
-    LOGE("outputs tensor num error.");
-    return kMSStatusLiteError;
-  }
-  if (!GetIntCustomAttr("proposal_id", &ndims, param)) {
-    printf("Can't find the proposal_id attr!\n");
-    return kMSStatusLiteError;
-  }
-  if (!GetIntCustomAttr("image_height", &image_height, param)) {
-    printf("Can't find the image_height attr!\n");
-    return kMSStatusLiteError;
-  }
-  if (!GetIntCustomAttr("image_width", &image_width, param)) {
-    printf("Can't find the image_width attr!\n");
-    return kMSStatusLiteError;
-  }
-  int max_roi_num_int = GetIntEnv(ENV_MAX_ROI_NUM, kDefaultROINum);
-  ProposalParam pparam;
-  memset(&pparam, 0, sizeof(ProposalParam));
-
-  std::vector<std::string> proposal_input{"rpn_cls_score", "rpn_bbox_pred"};
-  TensorC *reorder_inputs[kNumInput2];
-  for (size_t i = 0; i < proposal_input.size(); ++i) {
-    for (int j = 0; j < input_num; ++j) {
-      if (proposal_input[i] == inputs[j].name_) {
-        reorder_inputs[i] = &inputs[j];
-        break;
-      }
-    }
-  }
-  for (int i = 0; i < input_num; i++) {
-    auto ptr_shape = reorder_inputs[i]->shape_;
-    if ((reorder_inputs[i]->shape_size_ == kNCHWDims)) {
-      pparam.inputs_height_[i] = ptr_shape[kNCHWFormatH];
-      pparam.inputs_width_[i] = ptr_shape[kNCHWFormatW];
-      pparam.inputs_channel_[i] = ptr_shape[kNCHWFormatC];
-      if (i == 0) {
-        pparam.inputs_stride_ = ptr_shape[kNCHWFormatW] * sizeof(float);
-      }
-    } else {
-      printf("proposal only support input shape size == 4.\n");
-      return kMSStatusLiteError;
-    }
-  }
-  if (ProposalInit(&pparam, max_roi_num_int, image_height, image_width)) {
-    printf("proposal init failed!\n");
-    return kMSStatusLiteError;
-  }
-  for (int i = 0; i < kNumInput2; i++) {
-    pparam.inputs_[i] = reinterpret_cast<float *>(reorder_inputs[i]->data_);
-  }
-  pparam.rpn_bounding_box_.data_ = outputs[0].data_;
-  if (ProposalRun(&pparam)) {
-    printf("proposal run failed!\n");
-    return kMSStatusLiteError;
-  }
-
-  ProposalDeInit(&pparam);
-  return 0;
-}
-}  // namespace proposal
-}  // namespace mindspore
-
-int CustomKernel(TensorC *inputs, int input_num, TensorC *outputs, int output_num, CustomParameter *param) {
-  if (!strcmp(param->type, "NNIE")) {
-    return mindspore::nnie::NnieKernel(inputs, input_num, outputs, output_num, param);
-  } else if (!strcmp(param->type, "Proposal")) {
-    return mindspore::proposal::ProposalKernel(inputs, input_num, outputs, output_num, param);
-  } else {
-    printf("Unknown custom op type: %s\n", param->type);
-    return kMSStatusLiteError;
-  }
-}
diff --git a/mindspore-lite/tools/converter/micro/providers/nnie/nnie_micro.h b/mindspore-lite/tools/converter/micro/providers/nnie/nnie_micro.h
deleted file mode 100644
index b4698a2f..00000000
--- a/mindspore-lite/tools/converter/micro/providers/nnie/nnie_micro.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Copyright 2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_PROVIDERS_NNIE_NNIE_MICRO_H_
-#define MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_PROVIDERS_NNIE_NNIE_MICRO_H_
-
-#include "nnacl/custom_parameter.h"
-#include "nnacl/tensor_c.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-int CustomKernel(TensorC *inputs, int input_num, TensorC *outputs, int output_num, CustomParameter *param);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif  // MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_PROVIDERS_NNIE_NNIE_MICRO_H_
-- 
Gitee