From e0fda00f663ee418c8b58a8aabaef32433f148e8 Mon Sep 17 00:00:00 2001
From: TonyWang222
Date: Fri, 21 Oct 2022 19:34:56 +0800
Subject: [PATCH] add lite graph, change nnrt delegate to new nnrt interface.

Signed-off-by: TonyWang222
Change-Id: If9910bb8a2e57d6e0f6a8e27f48b782c3d0e4bef
---
 .../optimizer/trt_pass/graph_partitioner.cc   | 10 ++--
 .../optimizer/trt_pass/graph_partitioner.h    |  2 +-
 .../irpass/less_batch_normalization.cc        |  6 +--
 mindspore/lite/include/lite_utils.h           |  1 +
 mindspore/lite/micro/coder/graph.cc           | 53 ++++++++++---------
 .../coder/opcoders/base/conv2d_base_coder.h   |  2 +-
 .../base/detection_post_process_base_coder.h  |  2 +-
 .../coder/opcoders/base/dtype_cast_coder.h    |  2 +-
 .../base/full_connection_base_coder.h         |  2 +-
 .../opcoders/base/quant_dtype_cast_coder.h    |  2 +-
 .../coder/opcoders/base/reduce_base_coder.h   |  2 +-
 .../coder/opcoders/base/reshape_base_coder.h  |  2 +-
 .../coder/opcoders/base/resize_base_coder.h   |  2 +-
 .../coder/opcoders/base/softmax_base_coder.h  |  2 +-
 .../opcoders/base/strided_slice_base_coder.h  |  2 +-
 .../opcoders/cmsis-nn/int8/add_int8_coder.h   |  2 +-
 .../cmsis-nn/int8/conv2d_base_coder.h         |  2 +-
 .../cmsis-nn/int8/conv2d_int8_coder.cc        |  4 +-
 .../cmsis-nn/int8/conv2d_int8_coder.h         |  2 +-
 .../cmsis-nn/int8/dwconv_int8_coder.h         |  2 +-
 .../cmsis-nn/int8/fullconnection_int8_coder.h |  2 +-
 .../opcoders/cmsis-nn/int8/mul_int8_coder.h   |  2 +-
 .../cmsis-nn/int8/pooling_int8_coder.h        |  2 +-
 .../cmsis-nn/int8/reshape_int8_coder.h        |  2 +-
 .../cmsis-nn/int8/softmax_int8_coder.h        |  2 +-
 .../coder/opcoders/custom/custom_coder.h      |  2 +-
 .../nnacl/fp32/activation_fp32_coder.h        |  2 +-
 .../opcoders/nnacl/fp32/addn_fp32_coder.h     |  2 +-
 .../nnacl/fp32/arithmetic_fp32_coder.h        |  2 +-
 .../nnacl/fp32/arithmetic_self_fp32_coder.h   |  2 +-
 .../nnacl/fp32/assign_add_fp32_coder.h        |  2 +-
 .../nnacl/fp32/batchnorm_fp32_coder.h         |  2 +-
 .../opcoders/nnacl/fp32/biasadd_fp32_coder.h  |  2 +-
 .../opcoders/nnacl/fp32/concat_fp32_coder.h   |  2 +-
 .../nnacl/fp32/conv2d_delegate_fp32_coder.cc  | 10 ++--
 .../nnacl/fp32/conv2d_delegate_fp32_coder.h   | 12 ++---
 .../fp32/convolution_depthwise_fp32_coder.h   |  2 +-
 .../nnacl/fp32/convolution_fp32_coder.h       |  2 +-
 .../fp32/convolution_winograd_fp32_coder.h    |  2 +-
 .../opcoders/nnacl/fp32/deconv2d_fp32_coder.h |  2 +-
 .../opcoders/nnacl/fp32/exp_fp32_coder.h      |  2 +-
 .../nnacl/fp32/full_connection_fp32_coder.h   |  2 +-
 .../opcoders/nnacl/fp32/gather_fp32_coder.h   |  2 +-
 .../opcoders/nnacl/fp32/lstm_fp32_coder.h     |  2 +-
 .../nnacl/fp32/matmul_fp32_base_coder.h       |  2 +-
 .../opcoders/nnacl/fp32/matmul_fp32_coder.h   |  2 +-
 .../opcoders/nnacl/fp32/pad_fp32_coder.h      |  2 +-
 .../opcoders/nnacl/fp32/pooling_fp32_coder.h  |  2 +-
 .../opcoders/nnacl/fp32/power_fp32_coder.h    |  2 +-
 .../opcoders/nnacl/fp32/reduce_fp32_coder.h   |  2 +-
 .../opcoders/nnacl/fp32/resize_fp32_coder.h   |  2 +-
 .../opcoders/nnacl/fp32/scale_fp32_coder.h    |  2 +-
 .../opcoders/nnacl/fp32/softmax_fp32_coder.h  |  2 +-
 .../opcoders/nnacl/fp32/splice_fp32_coder.h   |  2 +-
 .../opcoders/nnacl/fp32/tile_fp32_coder.h     |  2 +-
 .../nnacl/fp32/transpose_fp32_coder.h         |  2 +-
 .../nnacl/int8/activation_int8_coder.cc       |  4 +-
 .../opcoders/nnacl/int8/add_int8_coder.h      |  2 +-
 .../nnacl/int8/batchnorm_int8_coder.h         |  2 +-
 .../opcoders/nnacl/int8/concat_int8_coder.h   |  2 +-
 .../nnacl/int8/conv2d_1x1_int8_coder.h        |  2 +-
 .../nnacl/int8/conv2d_3x3_int8_coder.h        |  2 +-
 .../opcoders/nnacl/int8/conv2d_int8_coder.cc  |  4 +-
 .../opcoders/nnacl/int8/conv2d_int8_coder.h   |  2 +-
 .../int8/convolution_depthwise_int8_coder.h   |  2 +-
 .../nnacl/int8/deconvolution_int8_coder.h     |  2 +-
 .../int8/detection_post_process_int8_coder.h  |  2 +-
 .../opcoders/nnacl/int8/div_int8_coder.h      |  2 +-
 .../nnacl/int8/fullconnection_int8_coder.h    |  2 +-
 .../nnacl/int8/matmul_base_int8_coder.h       |  2 +-
 .../opcoders/nnacl/int8/matmul_int8_coder.h   |  2 +-
 .../opcoders/nnacl/int8/pooling_int8_coder.h  |  2 +-
 .../opcoders/nnacl/int8/reduce_int8_coder.h   |  2 +-
 .../opcoders/nnacl/int8/relux_int8_coder.h    |  6 +--
 .../opcoders/nnacl/int8/reshape_int8_coder.h  |  2 +-
 .../opcoders/nnacl/int8/resize_int8_coder.h   |  2 +-
 .../opcoders/nnacl/int8/sigmoid_int8_coder.h  |  2 +-
 .../opcoders/nnacl/int8/softmax_int8_coder.h  |  2 +-
 .../opcoders/nnacl/int8/sub_int8_coder.h      |  2 +-
 .../nnacl/int8/transpose_int8_coder.h         |  2 +-
 .../lite/micro/coder/opcoders/op_coder.h      |  8 +--
 .../micro/coder/opcoders/op_coder_builder.cc  |  2 +-
 .../micro/coder/opcoders/op_coder_builder.h   |  4 +-
 .../micro/coder/opcoders/op_coder_register.h  |  2 +-
 mindspore/lite/micro/coder/session.cc         |  6 +--
 mindspore/lite/micro/coder/session.h          |  2 +-
 mindspore/lite/micro/coder/train.cc           |  2 +-
 .../delegate/nnrt/checker/primitive_check.cc  |  1 -
 mindspore/lite/src/train/train_export.cc      | 19 +++----
 mindspore/lite/src/train/train_export.h       |  2 +-
 mindspore/lite/src/train/train_session.cc     | 12 +++--
 91 files changed, 157 insertions(+), 153 deletions(-)

diff --git a/mindspore/ccsrc/backend/optimizer/trt_pass/graph_partitioner.cc b/mindspore/ccsrc/backend/optimizer/trt_pass/graph_partitioner.cc
index afa7498641..fa61ef726a 100644
--- a/mindspore/ccsrc/backend/optimizer/trt_pass/graph_partitioner.cc
+++ b/mindspore/ccsrc/backend/optimizer/trt_pass/graph_partitioner.cc
@@ -72,7 +72,7 @@ std::unordered_map<AnfNodePtr, NodeInfo> CollectNodeInfo(const FuncGraphPtr &func_graph) {
     }
 
     if (!AnfAlgo::IsRealKernel(node)) {
-      res[node] = NodeInfo(NodeType::kSupport, i);
+      res[node] = NodeInfo(MSNodeType::kSupport, i);
       continue;
     }
 
@@ -80,12 +80,12 @@ std::unordered_map<AnfNodePtr, NodeInfo> CollectNodeInfo(const FuncGraphPtr &func_graph) {
     const auto &converter_factory = TrtOpFactory::GetInstance();
     ConvertFunc convert_func = converter_factory.GetConvertFunc(op_name);
     if (!convert_func) {
-      res[node] = NodeInfo(NodeType::kUnsupported, i);
+      res[node] = NodeInfo(MSNodeType::kUnsupported, i);
       continue;
     }
 
     // Trt requires certain input to be weight.
-    res[node] = WeightCheck(node) ? NodeInfo(NodeType::kSupport, i) : NodeInfo(NodeType::kUnsupported, i);
+    res[node] = WeightCheck(node) ? NodeInfo(MSNodeType::kSupport, i) : NodeInfo(MSNodeType::kUnsupported, i);
   }
 
   return res;
@@ -128,8 +128,8 @@ void GraphPartitioner::NewSubGraph(NodeInfo *node_info) {
   static size_t trt_id = 0;
   static size_t native_id = 0;
 
-  node_info->graph_id_ = node_info->type() == NodeType::kSupport ? std::string("T_") + std::to_string(trt_id++)
-                                                                 : std::string("N_") + std::to_string(native_id++);
+  node_info->graph_id_ = node_info->type() == MSNodeType::kSupport ? std::string("T_") + std::to_string(trt_id++)
+                                                                   : std::string("N_") + std::to_string(native_id++);
 }
 
 bool GraphPartitioner::ExistCycleAfterMerge(const AnfNodePtr &node, const std::string &target_graph_id) {
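Note: the NodeType -> MSNodeType rename above appears intended to avoid a clash with the schema-level node type that the new lite graph uses (see the MSNodeType::NodeType_ValueNode comparison in graph.cc further down). The enum's definition is not part of this patch; the sketch below only collects what the hunks imply, with the underlying type taken from the old `enum class NodeType : char` declaration in graph_partitioner.h:

  // Sketch only -- value names are the ones these hunks reference.
  enum class MSNodeType : char {
    kInvalid,      // default-constructed NodeInfo
    kSupport,      // node convertible to a TRT subgraph
    kUnsupported,  // node kept on the native backend
  };
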
diff --git a/mindspore/ccsrc/backend/optimizer/trt_pass/graph_partitioner.h b/mindspore/ccsrc/backend/optimizer/trt_pass/graph_partitioner.h
index 0eb38f4abc..de1a3d29c8 100644
--- a/mindspore/ccsrc/backend/optimizer/trt_pass/graph_partitioner.h
+++ b/mindspore/ccsrc/backend/optimizer/trt_pass/graph_partitioner.h
@@ -34,7 +34,7 @@ enum class NodeType : char {
 // Class keep node information about type, topo index, and sub graph id.
 struct NodeInfo {
-  NodeInfo() : topo_index_(0), type_(NodeType::kInvalid), graph_id_(""), final_(false) {}
+  NodeInfo() : topo_index_(0), type_(MSNodeType::kInvalid), graph_id_(""), final_(false) {}
   explicit NodeInfo(const NodeType &t, const size_t &i) : topo_index_(i), type_(t), graph_id_(""), final_(false) {}
 
   const size_t topo_index() const { return topo_index_; }
diff --git a/mindspore/ccsrc/frontend/optimizer/irpass/less_batch_normalization.cc b/mindspore/ccsrc/frontend/optimizer/irpass/less_batch_normalization.cc
index 8fd894b408..e7e6298e59 100644
--- a/mindspore/ccsrc/frontend/optimizer/irpass/less_batch_normalization.cc
+++ b/mindspore/ccsrc/frontend/optimizer/irpass/less_batch_normalization.cc
@@ -251,7 +251,7 @@ const std::set<PrimitivePtr> kNeedRemoveNodeSet{
   prim::kPrimLoad,      prim::kPrimRefToEmbed, prim::kPrimApplyMomentum, prim::kPrimMomentum,
   prim::kPrimApplyFtrl, prim::kPrimSGD,        prim::kPrimApplyRMSProp,  prim::kPrimAdam};
 static std::unordered_map<RemoveNodeType, std::unordered_set<size_t>> kRemoveIndex{
-  {RemoveNodeType::kOtherNode, {2}}, {RemoveNodeType::kOptimizerNode, {3, 5, 6}}};
+  {RemoveMSNodeType::kOtherNode, {2}}, {RemoveMSNodeType::kOptimizerNode, {3, 5, 6}}};
 
 bool NeedRemove(const ParameterPtr &a, const std::vector<AnfNodePtr> &parameter_list) {
   if (a == nullptr) {
@@ -276,9 +276,9 @@ CNodePtr ConvertRemoveNodeToVirtualNode(const CNodePtr &cnode) {
   std::vector<AnfNodePtr> args;
   size_t index = 0;
   const auto &inputs = cnode->inputs();
-  auto remove_index = kRemoveIndex[RemoveNodeType::kOptimizerNode];
+  auto remove_index = kRemoveIndex[RemoveMSNodeType::kOptimizerNode];
   if (IsPrimitiveCNode(cnode, prim::kPrimLoad) || IsPrimitiveCNode(cnode, prim::kPrimRefToEmbed)) {
-    remove_index = kRemoveIndex[RemoveNodeType::kOtherNode];
+    remove_index = kRemoveIndex[RemoveMSNodeType::kOtherNode];
   }
 
   (void)std::copy_if(
diff --git a/mindspore/lite/include/lite_utils.h b/mindspore/lite/include/lite_utils.h
index 0c34601f1c..0b6d1a2337 100644
--- a/mindspore/lite/include/lite_utils.h
+++ b/mindspore/lite/include/lite_utils.h
@@ -619,6 +619,7 @@ using AllocatorPtr = std::shared_ptr<Allocator>;
 
 class Delegate;
 using DelegatePtr = std::shared_ptr<Delegate>;
+// using TensorPtrVector = std::vector<mindspore::schema::Tensor *>;
 using TensorPtrVector = std::vector<mindspore::schema::Tensor *>;
 using Uint32Vector = std::vector<uint32_t>;
 template
diff --git a/mindspore/lite/micro/coder/graph.cc b/mindspore/lite/micro/coder/graph.cc
index 350df107b9..5b272e3fdb 100644
--- a/mindspore/lite/micro/coder/graph.cc
+++ b/mindspore/lite/micro/coder/graph.cc
@@ -60,9 +60,9 @@ int CoderGraph::ConvertTensors() {
   };
 
   // deal with allTensors
-  uint32_t tensorCount = model_->all_tensors_.size();
+  uint32_t tensorCount = model_->graph_.all_tensors_.size();
   for (uint32_t i = 0; i < tensorCount; ++i) {
-    schema::Tensor *origin_tensor = model_->all_tensors_.at(i);
+    schema::Tensor *origin_tensor = static_cast<schema::Tensor *>(model_->graph_.all_tensors_.at(i));
     MS_CHECK_PTR_WITH_EXE(origin_tensor, clear_tensors());
     // tensor dims
     std::vector<int> shape;
@@ -83,7 +83,7 @@ int CoderGraph::ConvertTensors() {
       lite::Tensor(TypeId(origin_data_type), shape, static_cast<mindspore::Format>(origin_tensor->format()),
                    TensorCategory(origin_tensor));
     MS_CHECK_PTR(dstTensor);
-    if (origin_tensor->nodeType() == NodeType_ValueNode && origin_tensor->data() != nullptr &&
+    if ((MSNodeType)origin_tensor->nodeType() == MSNodeType::NodeType_ValueNode && origin_tensor->data() != nullptr &&
         origin_tensor->data()->size() > 0) {
       // copy data, this is weight && bias
       MS_CHECK_TRUE_WITH_EXE(origin_tensor->data()->size() > 0, "invalid meta_tensor data size.", delete dstTensor);
@@ -132,13 +132,13 @@ int CoderGraph::InitGraphInOutTensors() {
   std::vector<uint32_t> input_indices;
   for (auto in_node_index : graph_input_node_indexes) {
     in_node_index = static_cast<uint32_t>(in_node_index);
-    auto in_node = model_->all_nodes_.at(in_node_index);
+    auto in_node = model_->graph_.all_nodes_.at(in_node_index);
     MS_CHECK_PTR(in_node);
     for (uint32_t i = 0; i < in_node->input_indices_.size(); i++) {
       auto in_tensor_index = size_t(in_node->input_indices_.at(i));
       bool is_graph_input = false;
-      for (uint32_t j = 0; j < model_->input_indices_.size(); j++) {
-        if (in_tensor_index == size_t(model_->input_indices_.at(j))) {
+      for (uint32_t j = 0; j < model_->graph_.input_indices_.size(); j++) {
+        if (in_tensor_index == size_t(model_->graph_.input_indices_.at(j))) {
           input_indices.push_back(static_cast<uint32_t>(in_tensor_index));
           is_graph_input = true;
           break;
@@ -158,12 +158,12 @@ int CoderGraph::InitGraphInOutTensors() {
   auto graph_output_node_indexes = lite::GetGraphOutputNodes(model_);
   for (auto out_node_index : graph_output_node_indexes) {
     out_node_index = static_cast<uint32_t>(out_node_index);
-    auto *out_node = model_->all_nodes_.at(out_node_index);
+    auto *out_node = model_->graph_.all_nodes_.at(out_node_index);
     for (uint32_t i = 0; i < out_node->output_indices_.size(); i++) {
       auto out_tensor_index = size_t(out_node->output_indices_.at(i));
       bool is_graph_output = false;
-      for (uint32_t j = 0; j < model_->output_indices_.size(); j++) {
-        if (out_tensor_index == size_t(model_->output_indices_.at(j))) {
+      for (uint32_t j = 0; j < model_->graph_.output_indices_.size(); j++) {
+        if (out_tensor_index == size_t(model_->graph_.output_indices_.at(j))) {
           output_indices.push_back(static_cast<uint32_t>(out_tensor_index));
           is_graph_output = true;
           break;
@@ -247,22 +247,23 @@ std::vector<uint32_t> CoderGraph::output_indices() const { return this->output_indices_; }
 
 void CoderGraph::DumpUnSupportLayer(Target target) {
   std::cerr << "==========dump all unsupported layer for codegen=====" << std::endl;
-  std::for_each(model_->all_nodes_.begin(), model_->all_nodes_.end(), [this, target](const Model::Node *node) {
-    if (node->primitive_ == nullptr) {
-      return;
-    }
-    // fake create opcoders
-    uint32_t input_idx = node->input_indices_.at(0);
-    Tensor *t = all_tensors_.at(input_idx);
-    TypeId dtype = t->data_type();
-    int pt = GetPrimitiveType(node->primitive_, reinterpret_cast<LiteModel *>(model_)->GetSchemaVersion());
-    CoderKey key(target, dtype, pt);
-    // search from the opcoder registry
-    if (OpCoderFactory::GetInstance()->FindOpCoder(key) == nullptr) {
-      std::cerr << node->name_ << ", primitive type: "
-                << mindspore::schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(pt))
-                << ", data_type: " << EnumNameDataType(dtype) << std::endl;
-    }
-  });
+  std::for_each(
+    model_->graph_.all_nodes_.begin(), model_->graph_.all_nodes_.end(), [this, target](const LiteGraph::Node *node) {
+      if (node->primitive_ == nullptr) {
+        return;
+      }
+      // fake create opcoders
+      uint32_t input_idx = node->input_indices_.at(0);
+      Tensor *t = all_tensors_.at(input_idx);
+      TypeId dtype = t->data_type();
+      int pt = GetPrimitiveType(node->primitive_, reinterpret_cast<LiteModel *>(model_)->GetSchemaVersion());
+      CoderKey key(target, dtype, pt);
+      // search from the opcoder registry
+      if (OpCoderFactory::GetInstance()->FindOpCoder(key) == nullptr) {
+        std::cerr << node->name_ << ", primitive type: "
+                  << mindspore::schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(pt))
+                  << ", data_type: " << EnumNameDataType(dtype) << std::endl;
+      }
+    });
 }
 }  // namespace mindspore::lite::micro
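Note: every graph.cc hunk above makes the same move: fields that used to sit directly on Model (all_tensors_, all_nodes_, input_indices_, output_indices_) are now reached through a nested graph_ member, and all_tensors_ now hands back a type-erased pointer (hence the new static_cast). A compilable sketch of the access change, using hypothetical skeleton types that mirror only the members these hunks dereference (the real LiteGraph definition is outside this patch):

  #include <cstdint>
  #include <vector>

  struct LiteGraph {                    // skeleton, not the real declaration
    struct Node {};
    std::vector<void *> all_tensors_;   // elements are schema::Tensor * in graph.cc
    std::vector<Node *> all_nodes_;
    std::vector<uint32_t> input_indices_;
    std::vector<uint32_t> output_indices_;
  };
  struct Model {
    LiteGraph graph_;                   // new layer of indirection introduced by this patch
  };

  // old: model->all_tensors_.size()
  inline uint32_t TensorCount(const Model *model) {
    return static_cast<uint32_t>(model->graph_.all_tensors_.size());
  }
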
diff --git a/mindspore/lite/micro/coder/opcoders/base/conv2d_base_coder.h b/mindspore/lite/micro/coder/opcoders/base/conv2d_base_coder.h
index e226c1b9e2..ff5ea1871b 100644
--- a/mindspore/lite/micro/coder/opcoders/base/conv2d_base_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/base/conv2d_base_coder.h
@@ -29,7 +29,7 @@ namespace mindspore::lite::micro {
 class Conv2DBaseCoder : public OperatorCoder {
  public:
   Conv2DBaseCoder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                  const Model::Node *node, size_t node_index, Target target)
+                  const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~Conv2DBaseCoder() override;
diff --git a/mindspore/lite/micro/coder/opcoders/base/detection_post_process_base_coder.h b/mindspore/lite/micro/coder/opcoders/base/detection_post_process_base_coder.h
index dceaaf562e..f0c8c669ce 100644
--- a/mindspore/lite/micro/coder/opcoders/base/detection_post_process_base_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/base/detection_post_process_base_coder.h
@@ -30,7 +30,7 @@ namespace mindspore::lite::micro {
 class DetectionPostProcessBaseCoder : public OperatorCoder {
  public:
   DetectionPostProcessBaseCoder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                                const Model::Node *node, size_t node_index, Target target)
+                                const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~DetectionPostProcessBaseCoder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/base/dtype_cast_coder.h b/mindspore/lite/micro/coder/opcoders/base/dtype_cast_coder.h
index 86087e61dc..2affc4c722 100644
--- a/mindspore/lite/micro/coder/opcoders/base/dtype_cast_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/base/dtype_cast_coder.h
@@ -26,7 +26,7 @@ namespace mindspore::lite::micro {
 class DTypeCastCoder final : public OperatorCoder {
  public:
   DTypeCastCoder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                 const Model::Node *node, size_t node_index, Target target)
+                 const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~DTypeCastCoder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/base/full_connection_base_coder.h b/mindspore/lite/micro/coder/opcoders/base/full_connection_base_coder.h
index 41a2f2e25f..99fe4edb06 100644
--- a/mindspore/lite/micro/coder/opcoders/base/full_connection_base_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/base/full_connection_base_coder.h
@@ -25,7 +25,7 @@ namespace mindspore::lite::micro {
 class FullConnectionBaseCoder : public OperatorCoder {
  public:
   FullConnectionBaseCoder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                          const Model::Node *node, size_t node_index, Target target)
+                          const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~FullConnectionBaseCoder() override;
diff --git a/mindspore/lite/micro/coder/opcoders/base/quant_dtype_cast_coder.h b/mindspore/lite/micro/coder/opcoders/base/quant_dtype_cast_coder.h
index 276f6c967d..8f74f51453 100644
--- a/mindspore/lite/micro/coder/opcoders/base/quant_dtype_cast_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/base/quant_dtype_cast_coder.h
@@ -26,7 +26,7 @@ namespace mindspore::lite::micro {
 class QuantDTypeCastCoder final : public OperatorCoder {
  public:
   QuantDTypeCastCoder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                      const Model::Node *node, size_t node_index, Target target)
+                      const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~QuantDTypeCastCoder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/base/reduce_base_coder.h b/mindspore/lite/micro/coder/opcoders/base/reduce_base_coder.h
index b942c31bb9..11f7887f8f 100644
--- a/mindspore/lite/micro/coder/opcoders/base/reduce_base_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/base/reduce_base_coder.h
@@ -26,7 +26,7 @@ namespace mindspore::lite::micro {
 class ReduceBaseCoder : public OperatorCoder {
  public:
   ReduceBaseCoder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                  const Model::Node *node, size_t node_index, Target target)
+                  const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~ReduceBaseCoder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/base/reshape_base_coder.h b/mindspore/lite/micro/coder/opcoders/base/reshape_base_coder.h
index 5bb1dfcd80..767363e969 100644
--- a/mindspore/lite/micro/coder/opcoders/base/reshape_base_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/base/reshape_base_coder.h
@@ -24,7 +24,7 @@ namespace mindspore::lite::micro {
 class ReshapeBaseCoder final : public OperatorCoder {
  public:
   ReshapeBaseCoder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                   const Model::Node *node, size_t node_index, Target target)
+                   const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~ReshapeBaseCoder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/base/resize_base_coder.h b/mindspore/lite/micro/coder/opcoders/base/resize_base_coder.h
index 1d2ccdc05a..dac371de9e 100644
--- a/mindspore/lite/micro/coder/opcoders/base/resize_base_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/base/resize_base_coder.h
@@ -26,7 +26,7 @@ namespace mindspore::lite::micro {
 class ResizeBaseCoder : public OperatorCoder {
  public:
   ResizeBaseCoder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                  const Model::Node *node, size_t node_index, Target target)
+                  const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~ResizeBaseCoder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/base/softmax_base_coder.h b/mindspore/lite/micro/coder/opcoders/base/softmax_base_coder.h
index 79941dfbfc..bfd84f7bf1 100644
--- a/mindspore/lite/micro/coder/opcoders/base/softmax_base_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/base/softmax_base_coder.h
@@ -28,7 +28,7 @@ namespace mindspore::lite::micro {
 class SoftmaxBaseCoder : public OperatorCoder {
  public:
   SoftmaxBaseCoder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                   const Model::Node *node, size_t node_index, Target target)
+                   const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~SoftmaxBaseCoder() override { softmax_param_ = nullptr; }
diff --git a/mindspore/lite/micro/coder/opcoders/base/strided_slice_base_coder.h b/mindspore/lite/micro/coder/opcoders/base/strided_slice_base_coder.h
index 761f00aea8..9873d217d7 100644
--- a/mindspore/lite/micro/coder/opcoders/base/strided_slice_base_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/base/strided_slice_base_coder.h
@@ -22,7 +22,7 @@ namespace mindspore::lite::micro {
 class StridedSliceBaseCoder final : public OperatorCoder {
  public:
   StridedSliceBaseCoder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                        const Model::Node *node, size_t node_index, Target target)
+                        const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~StridedSliceBaseCoder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/add_int8_coder.h b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/add_int8_coder.h
index 5fc681725f..91c0d97f74 100644
--- a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/add_int8_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/add_int8_coder.h
@@ -25,7 +25,7 @@ namespace mindspore::lite::micro::cmsis {
 class AddInt8Coder final : public OperatorCoder {
  public:
   AddInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-               const Model::Node *node, size_t node_index, Target target)
+               const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~AddInt8Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/conv2d_base_coder.h b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/conv2d_base_coder.h
index 588d50b32b..74d1aa5378 100644
--- a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/conv2d_base_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/conv2d_base_coder.h
@@ -27,7 +27,7 @@ namespace mindspore::lite::micro::cmsis {
 class Conv2DBaseCoder : public micro::Conv2DBaseCoder {
  public:
   explicit Conv2DBaseCoder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                           const Model::Node *node, size_t node_index, Target target)
+                           const LiteGraph::Node *node, size_t node_index, Target target)
       : micro::Conv2DBaseCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~Conv2DBaseCoder() override {
diff --git a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/conv2d_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/conv2d_int8_coder.cc
index aa5d1aa86e..c0ed63c5c5 100644
--- a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/conv2d_int8_coder.cc
+++ b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/conv2d_int8_coder.cc
@@ -174,8 +174,8 @@ int Conv2DInt8Coder::InitTmpBuffer() {
 
 std::unique_ptr<OperatorCoder> CmsisConv2DInt8OpCoderCreator(const std::vector<Tensor *> &in_tensors,
                                                              const std::vector<Tensor *> &out_tensors,
-                                                             const Model::Node *node, size_t node_index, Target target,
-                                                             int schema_version) {
+                                                             const LiteGraph::Node *node, size_t node_index,
+                                                             Target target, int schema_version) {
   MS_CHECK_PTR_RET_NULL(node);
   std::unique_ptr<Conv2DInt8Coder> coder =
     std::make_unique<Conv2DInt8Coder>(in_tensors, out_tensors, node, node_index, target);
diff --git a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/conv2d_int8_coder.h b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/conv2d_int8_coder.h
index 7d4ad6eeb9..f7a470e0ad 100644
--- a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/conv2d_int8_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/conv2d_int8_coder.h
@@ -27,7 +27,7 @@ namespace mindspore::lite::micro::cmsis {
 class Conv2DInt8Coder final : public Conv2DBaseCoder {
  public:
   explicit Conv2DInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                           const Model::Node *node, size_t node_index, Target target)
+                           const LiteGraph::Node *node, size_t node_index, Target target)
       : Conv2DBaseCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   int Prepare(CoderContext *context) override;
diff --git a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/dwconv_int8_coder.h b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/dwconv_int8_coder.h
index 7bfd7fa0d4..18b61628d5 100644
--- a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/dwconv_int8_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/dwconv_int8_coder.h
@@ -25,7 +25,7 @@ namespace mindspore::lite::micro::cmsis {
 class DWConvInt8Coder final : public Conv2DBaseCoder {
  public:
   DWConvInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                  const Model::Node *node, size_t node_index, Target target)
+                  const LiteGraph::Node *node, size_t node_index, Target target)
       : Conv2DBaseCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~DWConvInt8Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/fullconnection_int8_coder.h b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/fullconnection_int8_coder.h
index e0370dc4d7..afd5254c6c 100644
--- a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/fullconnection_int8_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/fullconnection_int8_coder.h
@@ -26,7 +26,7 @@ namespace mindspore::lite::micro::cmsis {
 class FullConnectionInt8Coder final : public FullConnectionBaseCoder {
  public:
   FullConnectionInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                          const Model::Node *node, size_t node_index, Target target)
+                          const LiteGraph::Node *node, size_t node_index, Target target)
       : FullConnectionBaseCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   int Prepare(CoderContext *const context) override;
diff --git a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/mul_int8_coder.h b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/mul_int8_coder.h
index 5e9f8cd41e..f6a59f00a2 100644
--- a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/mul_int8_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/mul_int8_coder.h
@@ -24,7 +24,7 @@ namespace mindspore::lite::micro::cmsis {
 class MulInt8Coder final : public OperatorCoder {
  public:
   MulInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-               const Model::Node *node, size_t node_index, Target target)
+               const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
   ~MulInt8Coder() override = default;
 
   int Prepare(CoderContext *const context) override;
diff --git a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/pooling_int8_coder.h b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/pooling_int8_coder.h
index f955cb0358..6890d0d766 100644
--- a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/pooling_int8_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/pooling_int8_coder.h
@@ -28,7 +28,7 @@ namespace mindspore::lite::micro::cmsis {
 class PoolingInt8Coder final : public OperatorCoder {
  public:
   PoolingInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                   const Model::Node *node, size_t node_index, Target target)
+                   const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~PoolingInt8Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/reshape_int8_coder.h b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/reshape_int8_coder.h
index 714daf2a9f..68ab9c0727 100644
--- a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/reshape_int8_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/reshape_int8_coder.h
@@ -24,7 +24,7 @@ namespace mindspore::lite::micro::cmsis {
 class ReshapeInt8Coder final : public OperatorCoder {
  public:
   ReshapeInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                   const Model::Node *node, size_t node_index, Target target)
+                   const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~ReshapeInt8Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/softmax_int8_coder.h b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/softmax_int8_coder.h
index c9e502c753..bb774a6cdd 100644
--- a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/softmax_int8_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/softmax_int8_coder.h
@@ -27,7 +27,7 @@ namespace mindspore::lite::micro::cmsis {
 class SoftMaxInt8Coder final : public SoftmaxBaseCoder {
  public:
   SoftMaxInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                   const Model::Node *node, size_t node_index, Target target)
+                   const LiteGraph::Node *node, size_t node_index, Target target)
       : SoftmaxBaseCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~SoftMaxInt8Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/custom/custom_coder.h b/mindspore/lite/micro/coder/opcoders/custom/custom_coder.h
index 5b2ce86ed9..0772b55ccd 100644
--- a/mindspore/lite/micro/coder/opcoders/custom/custom_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/custom/custom_coder.h
@@ -25,7 +25,7 @@ namespace mindspore::lite::micro {
 class CustomCoder final : public OperatorCoder {
  public:
   CustomCoder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-              const Model::Node *node, size_t node_index, Target target)
+              const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~CustomCoder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/activation_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/activation_fp32_coder.h
index ae38c3bca9..dadd4959da 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/activation_fp32_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/activation_fp32_coder.h
@@ -25,7 +25,7 @@ namespace mindspore::lite::micro::nnacl {
 class ActivationFP32Coder final : public OperatorCoder {
  public:
   ActivationFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                      const Model::Node *node, size_t node_index, Target target)
+                      const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~ActivationFP32Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/addn_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/addn_fp32_coder.h
index 6441d1c8cf..418a144890 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/addn_fp32_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/addn_fp32_coder.h
@@ -23,7 +23,7 @@ namespace mindspore::lite::micro::nnacl {
 class AddNFP32Coder final : public OperatorCoder {
  public:
   AddNFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                const Model::Node *node, size_t node_index, Target target)
+                const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~AddNFP32Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/arithmetic_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/arithmetic_fp32_coder.h
index 35ec95050a..f92ecbed46 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/arithmetic_fp32_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/arithmetic_fp32_coder.h
@@ -80,7 +80,7 @@ class ArithmeticFP32Coder final : public OperatorCoder {
  public:
   ArithmeticFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                      const Model::Node *node, size_t node_index, Target target)
+                      const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~ArithmeticFP32Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/arithmetic_self_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/arithmetic_self_fp32_coder.h
index a60b18deab..cc897b9d16 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/arithmetic_self_fp32_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/arithmetic_self_fp32_coder.h
@@ -92,7 +92,7 @@ using mindspore::schema::PrimitiveType_Erf;
 class ArithmeticSelfFP32Coder final : public OperatorCoder {
  public:
   ArithmeticSelfFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                          const Model::Node *node, size_t node_index, Target target)
+                          const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
   int Prepare(CoderContext *const context) override;
 
   int DoCode(CoderContext *const context) override;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/assign_add_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/assign_add_fp32_coder.h
index e817e7db2d..aa897ed823 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/assign_add_fp32_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/assign_add_fp32_coder.h
@@ -25,7 +25,7 @@ namespace mindspore::lite::micro::nnacl {
 class AssignAddFP32Coder final : public OperatorCoder {
  public:
   AssignAddFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                     const Model::Node *node, size_t node_index, Target target)
+                     const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~AssignAddFP32Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/batchnorm_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/batchnorm_fp32_coder.h
index 5d222c487f..4806fa2cbe 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/batchnorm_fp32_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/batchnorm_fp32_coder.h
@@ -25,7 +25,7 @@ namespace mindspore::lite::micro::nnacl {
 class BatchnormFP32Coder final : public OperatorCoder {
  public:
   BatchnormFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                     const Model::Node *node, size_t node_index, Target target)
+                     const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~BatchnormFP32Coder() override = default;
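Note: the header hunks throughout this patch are one mechanical change repeated: the node parameter of each coder constructor switches from Model::Node to LiteGraph::Node. Reduced to a sketch, every affected class has this shape (FooFP32Coder is an illustrative name; OperatorCoder, Tensor, Target, and CoderContext come from op_coder.h in this tree):

  class FooFP32Coder final : public OperatorCoder {
   public:
    FooFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
                 const LiteGraph::Node *node, size_t node_index, Target target)
        : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
    ~FooFP32Coder() override = default;

    int Prepare(CoderContext *const context) override;
    int DoCode(CoderContext *const context) override;
  };
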
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/biasadd_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/biasadd_fp32_coder.h
index 60064f2af8..f9e3abcffd 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/biasadd_fp32_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/biasadd_fp32_coder.h
@@ -25,7 +25,7 @@ namespace mindspore::lite::micro::nnacl {
 class BiasAddFP32Coder final : public OperatorCoder {
  public:
   BiasAddFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                   const Model::Node *node, size_t node_index, Target target)
+                   const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~BiasAddFP32Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/concat_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/concat_fp32_coder.h
index 302bb0e5dc..a9567f9bc0 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/concat_fp32_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/concat_fp32_coder.h
@@ -25,7 +25,7 @@ namespace mindspore::lite::micro::nnacl {
 class ConcatFP32Coder final : public OperatorCoder {
  public:
   ConcatFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                  const Model::Node *node, size_t node_index, Target target)
+                  const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~ConcatFP32Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/conv2d_delegate_fp32_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/conv2d_delegate_fp32_coder.cc
index 3205a5b143..6356d31739 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/conv2d_delegate_fp32_coder.cc
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/conv2d_delegate_fp32_coder.cc
@@ -63,8 +63,8 @@ void SetInputOutputShapeInfo(ConvParameter *conv_param, const lite::Tensor *input,
 std::unique_ptr<OperatorCoder> CPUConvolutionFP32CoderSelect(const std::vector<Tensor *> &in_tensors,
                                                              const std::vector<Tensor *> &out_tensors,
-                                                             const Model::Node *node, size_t node_index, Target target,
-                                                             int schema_version) {
+                                                             const LiteGraph::Node *node, size_t node_index,
+                                                             Target target, int schema_version) {
   const void *primitive = node->primitive_;
   if (primitive == nullptr) {
     return nullptr;
@@ -101,14 +101,14 @@ std::unique_ptr<OperatorCoder> CPUConvolutionFP32CoderSelect(const std::vector<Tensor *>
 std::unique_ptr<OperatorCoder> CreateDelegateConv(const std::vector<Tensor *> &in_tensors,
-                                                  const std::vector<Tensor *> &out_tensors, const Model::Node *node,
+                                                  const std::vector<Tensor *> &out_tensors, const LiteGraph::Node *node,
                                                   size_t node_index, Target target, int schema_version) {
   return CPUOpCoderCreator<ConvDelegateCoder>(in_tensors, out_tensors, node, node_index, target, schema_version);
 }
 
 std::unique_ptr<OperatorCoder> CPUConvDwFp32CoderCreator(const std::vector<Tensor *> &in_tensors,
                                                          const std::vector<Tensor *> &out_tensors,
-                                                         const Model::Node *node, size_t node_index, Target target,
+                                                         const LiteGraph::Node *node, size_t node_index, Target target,
                                                          int schema_version) {
   return CPUOpCoderCreator<ConvolutionDepthwiseFP32Coder>(in_tensors, out_tensors, node, node_index, target,
                                                           schema_version);
@@ -116,7 +116,7 @@ std::unique_ptr<OperatorCoder> CPUConvDwFp32CoderCreator(const std::vector<Tensor *>
 std::unique_ptr<OperatorCoder> CPUConv2DFusionFP32CoderCreator(const std::vector<Tensor *> &in_tensors,
                                                                const std::vector<Tensor *> &out_tensors,
-                                                               const Model::Node *node, size_t node_index,
+                                                               const LiteGraph::Node *node, size_t node_index,
                                                                Target target, int schema_version) {
   const void *primitive = node->primitive_;
   if (primitive == nullptr) {
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/conv2d_delegate_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/conv2d_delegate_fp32_coder.h
index de80050c8b..401f533a46 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/conv2d_delegate_fp32_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/conv2d_delegate_fp32_coder.h
@@ -24,7 +24,7 @@ namespace mindspore::lite::micro::nnacl {
 class ConvDelegateCoder : public OperatorCoder {
  public:
   ConvDelegateCoder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                    const Model::Node *node, size_t node_index, Target target)
+                    const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~ConvDelegateCoder() override = default;
@@ -38,21 +38,21 @@ class ConvDelegateCoder : public OperatorCoder {
 void SetInputOutputShapeInfo(ConvParameter *conv_param, const lite::Tensor *input, const lite::Tensor *output);
 std::unique_ptr<OperatorCoder> CPUConvolutionFP32CoderSelect(const std::vector<Tensor *> &in_tensors,
                                                              const std::vector<Tensor *> &out_tensors,
-                                                             const Model::Node *node, size_t node_index, Target target,
-                                                             int schema_version);
+                                                             const LiteGraph::Node *node, size_t node_index,
+                                                             Target target, int schema_version);
 
 std::unique_ptr<OperatorCoder> CreateDelegateConv(const std::vector<Tensor *> &in_tensors,
-                                                  const std::vector<Tensor *> &out_tensors, const Model::Node *node,
+                                                  const std::vector<Tensor *> &out_tensors, const LiteGraph::Node *node,
                                                   size_t node_index, Target target, int schema_version);
 
 std::unique_ptr<OperatorCoder> CPUConvDwFp32CoderCreator(const std::vector<Tensor *> &in_tensors,
                                                          const std::vector<Tensor *> &out_tensors,
-                                                         const Model::Node *node, size_t node_index, Target target,
+                                                         const LiteGraph::Node *node, size_t node_index, Target target,
                                                          int schema_version);
 
 std::unique_ptr<OperatorCoder> CPUConv2DFusionFP32CoderCreator(const std::vector<Tensor *> &in_tensors,
                                                                const std::vector<Tensor *> &out_tensors,
-                                                               const Model::Node *node, size_t node_index,
+                                                               const LiteGraph::Node *node, size_t node_index,
                                                                Target target, int schema_version);
 }  // namespace mindspore::lite::micro::nnacl
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_depthwise_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_depthwise_fp32_coder.h
index f38a25b24a..fbc321524f 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_depthwise_fp32_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_depthwise_fp32_coder.h
@@ -25,7 +25,7 @@ namespace mindspore::lite::micro::nnacl {
 class ConvolutionDepthwiseFP32Coder final : public Conv2DBaseCoder {
  public:
   ConvolutionDepthwiseFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                                const Model::Node *node, size_t node_index, Target target)
+                                const LiteGraph::Node *node, size_t node_index, Target target)
       : Conv2DBaseCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~ConvolutionDepthwiseFP32Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_fp32_coder.h
index 42c2ee583b..a3e8bae29f 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_fp32_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_fp32_coder.h
@@ -27,7 +27,7 @@ namespace mindspore::lite::micro::nnacl {
 class ConvolutionFP32Coder final : public Conv2DBaseCoder {
  public:
   ConvolutionFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                       const Model::Node *node, size_t node_index, Target target)
+                       const LiteGraph::Node *node, size_t node_index, Target target)
       : Conv2DBaseCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   int Prepare(CoderContext *const context) override;
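Note: the creator functions above all funnel into the CPUOpCoderCreator<T> helper; its definition is not in this patch. A hypothetical sketch inferred from the call sites and from the MS_CHECK_PTR_RET_NULL guard used by the cmsis creator earlier — only the signature is implied by the patch:

  template <typename T>
  std::unique_ptr<OperatorCoder> CPUOpCoderCreator(const std::vector<Tensor *> &in_tensors,
                                                   const std::vector<Tensor *> &out_tensors,
                                                   const LiteGraph::Node *node, size_t node_index, Target target,
                                                   int schema_version) {
    MS_CHECK_PTR_RET_NULL(node);
    // schema_version is part of the uniform creator signature; whether T
    // consumes it during Init is not visible in this patch.
    return std::make_unique<T>(in_tensors, out_tensors, node, node_index, target);
  }
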
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_winograd_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_winograd_fp32_coder.h
index 1e236ae622..a55c0a718c 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_winograd_fp32_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_winograd_fp32_coder.h
@@ -27,7 +27,7 @@ namespace mindspore::lite::micro::nnacl {
 class ConvolutionWinogradFP32Coder : public Conv2DBaseCoder {
  public:
   ConvolutionWinogradFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                               const Model::Node *node, size_t node_index, Target target, int output_unit)
+                               const LiteGraph::Node *node, size_t node_index, Target target, int output_unit)
       : Conv2DBaseCoder(in_tensors, out_tensors, node, node_index, target), output_unit_(output_unit) {}
 
   int Prepare(CoderContext *const context) override;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/deconv2d_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/deconv2d_fp32_coder.h
index 3c3d7e442a..d962df15ab 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/deconv2d_fp32_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/deconv2d_fp32_coder.h
@@ -29,7 +29,7 @@ namespace mindspore::lite::micro::nnacl {
 class DeConvolutionFP32Coder final : public Conv2DBaseCoder {
  public:
   DeConvolutionFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                         const Model::Node *node, size_t node_index, Target target)
+                         const LiteGraph::Node *node, size_t node_index, Target target)
       : Conv2DBaseCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   int Prepare(CoderContext *const context) override;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/exp_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/exp_fp32_coder.h
index 85715cc44c..b518e271f0 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/exp_fp32_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/exp_fp32_coder.h
@@ -23,7 +23,7 @@ namespace mindspore::lite::micro::nnacl {
 class ExpFP32Coder final : public OperatorCoder {
  public:
   ExpFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-               const Model::Node *node, size_t node_index, Target target)
+               const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~ExpFP32Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/full_connection_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/full_connection_fp32_coder.h
index 1f39ecd57a..d2566a9fdc 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/full_connection_fp32_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/full_connection_fp32_coder.h
@@ -28,7 +28,7 @@ namespace mindspore::lite::micro::nnacl {
 class FullConnectionFP32Coder final : public MatMulFP32BaseCoder {
  public:
   FullConnectionFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                          const Model::Node *node, size_t node_index, Target target)
+                          const LiteGraph::Node *node, size_t node_index, Target target)
       : MatMulFP32BaseCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   int Prepare(CoderContext *const context) override;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/gather_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/gather_fp32_coder.h
index 616b14c255..357c5687c6 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/gather_fp32_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/gather_fp32_coder.h
@@ -25,7 +25,7 @@ namespace mindspore::lite::micro::nnacl {
 class GatherFP32Coder final : public OperatorCoder {
  public:
   GatherFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                  const Model::Node *node, size_t node_index, Target target)
+                  const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~GatherFP32Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/lstm_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/lstm_fp32_coder.h
index 875abf8bea..5d7072fc78 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/lstm_fp32_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/lstm_fp32_coder.h
@@ -25,7 +25,7 @@ namespace mindspore::lite::micro::nnacl {
 class LstmFP32Coder final : public OperatorCoder {
  public:
   LstmFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                const Model::Node *node, size_t node_index, Target target)
+                const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~LstmFP32Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/matmul_fp32_base_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/matmul_fp32_base_coder.h
index c03c5f3372..202bde2198 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/matmul_fp32_base_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/matmul_fp32_base_coder.h
@@ -25,7 +25,7 @@ namespace mindspore::lite::micro::nnacl {
 class MatMulFP32BaseCoder : public OperatorCoder {
  public:
   MatMulFP32BaseCoder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                      const Model::Node *node, size_t node_index, Target target)
+                      const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~MatMulFP32BaseCoder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/matmul_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/matmul_fp32_coder.h
index 79f6f611ee..22979b8510 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/matmul_fp32_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/matmul_fp32_coder.h
@@ -25,7 +25,7 @@ namespace mindspore::lite::micro::nnacl {
 class MatMulFP32Coder final : public MatMulFP32BaseCoder {
  public:
   MatMulFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                  const Model::Node *node, size_t node_index, Target target)
+                  const LiteGraph::Node *node, size_t node_index, Target target)
       : MatMulFP32BaseCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~MatMulFP32Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/pad_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/pad_fp32_coder.h
index 19aff13921..a2e7bc5333 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/pad_fp32_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/pad_fp32_coder.h
@@ -25,7 +25,7 @@ namespace mindspore::lite::micro::nnacl {
 class PadFP32Coder final : public OperatorCoder {
  public:
   PadFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-               const Model::Node *node, size_t node_index, Target target)
+               const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~PadFP32Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/pooling_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/pooling_fp32_coder.h
index ae66ea8a0f..29e34ab430 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/pooling_fp32_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/pooling_fp32_coder.h
@@ -25,7 +25,7 @@ namespace mindspore::lite::micro::nnacl {
 class PoolingFP32Coder final : public OperatorCoder {
  public:
   PoolingFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                   const Model::Node *node, size_t node_index, Target target)
+                   const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~PoolingFP32Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/power_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/power_fp32_coder.h
index 07ccb7f601..4506a5a387 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/power_fp32_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/power_fp32_coder.h
@@ -25,7 +25,7 @@ namespace mindspore::lite::micro::nnacl {
 class PowerFP32Coder final : public OperatorCoder {
  public:
   PowerFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                 const Model::Node *node, size_t node_index, Target target)
+                 const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~PowerFP32Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/reduce_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/reduce_fp32_coder.h
index 7fea26c0e4..307efb8828 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/reduce_fp32_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/reduce_fp32_coder.h
@@ -26,7 +26,7 @@ namespace mindspore::lite::micro::nnacl {
 class ReduceFP32Coder final : public ReduceBaseCoder {
  public:
   ReduceFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                  const Model::Node *node, size_t node_index, Target target)
+                  const LiteGraph::Node *node, size_t node_index, Target target)
       : ReduceBaseCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~ReduceFP32Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/resize_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/resize_fp32_coder.h
index ac1da325d3..583093db2c 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/resize_fp32_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/resize_fp32_coder.h
@@ -31,7 +31,7 @@ namespace mindspore::lite::micro::nnacl {
 class ResizeFP32Coder final : public ResizeBaseCoder {
  public:
   ResizeFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                  const Model::Node *node, size_t node_index, Target target)
+                  const LiteGraph::Node *node, size_t node_index, Target target)
       : ResizeBaseCoder(in_tensors, out_tensors, node, node_index, target) {}
   ~ResizeFP32Coder() override { FreeTmpBuffer(); };
   int Prepare(CoderContext *const context) override;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/scale_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/scale_fp32_coder.h
index 8c3bd65e29..a9ad5743b7 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/scale_fp32_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/scale_fp32_coder.h
@@ -26,7 +26,7 @@ namespace mindspore::lite::micro::nnacl {
 class ScaleFP32Coder final : public OperatorCoder {
  public:
   ScaleFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                 const Model::Node *node, size_t node_index, Target target)
+                 const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
   ~ScaleFP32Coder() override = default;
 
   int Prepare(CoderContext *const context) override;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/softmax_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/softmax_fp32_coder.h
index 322d4d1c51..136d3f7936 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/softmax_fp32_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/softmax_fp32_coder.h
@@ -23,7 +23,7 @@ namespace mindspore::lite::micro::nnacl {
 class SoftMaxFP32Coder final : public SoftmaxBaseCoder {
  public:
   SoftMaxFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                   const Model::Node *node, size_t node_index, Target target)
+                   const LiteGraph::Node *node, size_t node_index, Target target)
       : SoftmaxBaseCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   int Prepare(CoderContext *const context) override;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/splice_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/splice_fp32_coder.h
index a60b58fae6..f5f09dbca4 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/splice_fp32_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/splice_fp32_coder.h
@@ -22,7 +22,7 @@ namespace mindspore::lite::micro::nnacl {
 class SpliceFP32Coder final : public OperatorCoder {
  public:
   SpliceFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                  const Model::Node *node, size_t node_index, Target target)
+                  const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~SpliceFP32Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/tile_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/tile_fp32_coder.h
index 3b370fc3da..19f9b1e6b0 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/tile_fp32_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/tile_fp32_coder.h
@@ -25,7 +25,7 @@ namespace mindspore::lite::micro::nnacl {
 class TileFP32Coder final : public OperatorCoder {
  public:
   TileFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                const Model::Node *node, size_t node_index, Target target)
+                const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~TileFP32Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/transpose_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/transpose_fp32_coder.h
index 47519700a6..5efe7a300a 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/transpose_fp32_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/transpose_fp32_coder.h
@@ -25,7 +25,7 @@ namespace mindspore::lite::micro::nnacl {
 class TransposeFp32Coder final : public OperatorCoder {
  public:
   TransposeFp32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                     const Model::Node *node, size_t node_index, Target target)
+                     const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
 
   ~TransposeFp32Coder() override = default;
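Note: the int8 creators that follow (CPUActivationINT8CoderCreator, CPUConv2DINT8CoderCreator, ...) all begin by reading node->primitive_ and dispatching on GetPrimitiveType. The same primitive/dtype/target triple drives the opcoder-registry lookup shown in DumpUnSupportLayer near the top of this patch; condensed into a sketch (the wrapper function itself is hypothetical, the calls inside it appear verbatim in the patch):

  bool HasRegisteredCoder(const LiteGraph::Node *node, Target target, TypeId dtype, int schema_version) {
    if (node == nullptr || node->primitive_ == nullptr) {
      return false;
    }
    int pt = GetPrimitiveType(node->primitive_, schema_version);
    CoderKey key(target, dtype, pt);
    // FindOpCoder returns nullptr when nothing is registered for this key.
    return OpCoderFactory::GetInstance()->FindOpCoder(key) != nullptr;
  }
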
a/mindspore/lite/micro/coder/opcoders/nnacl/int8/activation_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/int8/activation_int8_coder.cc index ef83ef0286..862d43e122 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/activation_int8_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/activation_int8_coder.cc @@ -26,8 +26,8 @@ using mindspore::schema::PrimitiveType_Activation; namespace mindspore::lite::micro::nnacl { std::unique_ptr CPUActivationINT8CoderCreator(const std::vector &in_tensors, const std::vector &out_tensors, - const Model::Node *node, size_t node_index, Target target, - int schema_version) { + const LiteGraph::Node *node, size_t node_index, + Target target, int schema_version) { const void *primitive_c = node->primitive_; if (primitive_c == nullptr) { return nullptr; diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/add_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/add_int8_coder.h index d6c4d6415f..6bb931a364 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/add_int8_coder.h +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/add_int8_coder.h @@ -25,7 +25,7 @@ namespace mindspore::lite::micro::nnacl { class AddInt8Coder final : public OperatorCoder { public: AddInt8Coder(const std::vector &in_tensors, const std::vector &out_tensors, - const Model::Node *node, size_t node_index, Target target) + const LiteGraph::Node *node, size_t node_index, Target target) : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {} ~AddInt8Coder() override = default; diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/batchnorm_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/batchnorm_int8_coder.h index 0fdd560824..b8137e1703 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/batchnorm_int8_coder.h +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/batchnorm_int8_coder.h @@ -26,7 +26,7 @@ namespace mindspore::lite::micro::nnacl { class BatchNormInt8Coder final : public OperatorCoder { public: BatchNormInt8Coder(const std::vector &in_tensors, const std::vector &out_tensors, - const Model::Node *node, size_t node_index, Target target) + const LiteGraph::Node *node, size_t node_index, Target target) : OperatorCoder(in_tensors, out_tensors, node, node_index, target) { batchnorm_param_ = reinterpret_cast(parameter_); } diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/concat_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/concat_int8_coder.h index bc666683e8..1229e51a5a 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/concat_int8_coder.h +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/concat_int8_coder.h @@ -26,7 +26,7 @@ namespace mindspore::lite::micro::nnacl { class ConcatInt8Coder final : public OperatorCoder { public: ConcatInt8Coder(const std::vector &in_tensors, const std::vector &out_tensors, - const Model::Node *node, size_t node_index, Target target) + const LiteGraph::Node *node, size_t node_index, Target target) : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {} ~ConcatInt8Coder() { diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_1x1_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_1x1_int8_coder.h index 29185d6273..c5984f27bc 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_1x1_int8_coder.h +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_1x1_int8_coder.h @@ -26,7 +26,7 @@ namespace mindspore::lite::micro::nnacl { class Conv2D1x1Int8Coder final : 
 class Conv2D1x1Int8Coder final : public Conv2DBaseCoder {
  public:
   Conv2D1x1Int8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                     const Model::Node *node, size_t node_index, Target target)
+                     const LiteGraph::Node *node, size_t node_index, Target target)
       : Conv2DBaseCoder(in_tensors, out_tensors, node, node_index, target) {}

   int Prepare(CoderContext *const context) override;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.h
index 7bcea0b5f3..826e82c903 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.h
@@ -26,7 +26,7 @@ namespace mindspore::lite::micro::nnacl {
 class Conv2D3x3Int8Coder final : public Conv2DBaseCoder {
  public:
   Conv2D3x3Int8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                     const Model::Node *node, size_t node_index, Target target)
+                     const LiteGraph::Node *node, size_t node_index, Target target)
       : Conv2DBaseCoder(in_tensors, out_tensors, node, node_index, target) {}

   int Prepare(CoderContext *const context) override;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_int8_coder.cc
index 8bdd00e6b3..447c548d81 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_int8_coder.cc
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_int8_coder.cc
@@ -247,7 +247,7 @@ int Conv2DINT8Coder::DoCode(CoderContext *const context) {
 std::unique_ptr<OperatorCoder> CPUConv2DINT8CoderCreator(const std::vector<Tensor *> &in_tensors,
                                                          const std::vector<Tensor *> &out_tensors,
-                                                         const Model::Node *node, size_t node_index, Target target,
+                                                         const LiteGraph::Node *node, size_t node_index, Target target,
                                                          int schema_version) {
   const void *primitive = node->primitive_;
   if (primitive == nullptr) {
@@ -284,7 +284,7 @@ std::unique_ptr CPUConv2DINT8CoderCreator(const std::vector
 std::unique_ptr<OperatorCoder> CPUConv2DFusionINT8CoderCreator(const std::vector<Tensor *> &in_tensors,
                                                                const std::vector<Tensor *> &out_tensors,
-                                                               const Model::Node *node, size_t node_index,
+                                                               const LiteGraph::Node *node, size_t node_index,
                                                                Target target, int schema_version) {
   const void *primitive = node->primitive_;
   if (primitive == nullptr) {
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_int8_coder.h
index c34ccf670b..dff69d6e4f 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_int8_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_int8_coder.h
@@ -27,7 +27,7 @@ namespace mindspore::lite::micro::nnacl {
 class Conv2DINT8Coder final : public Conv2DBaseCoder {
  public:
   explicit Conv2DINT8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                           const Model::Node *node, size_t node_index, Target target)
+                           const LiteGraph::Node *node, size_t node_index, Target target)
       : Conv2DBaseCoder(in_tensors, out_tensors, node, node_index, target) {}

   int Prepare(CoderContext *const context) override;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/convolution_depthwise_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/convolution_depthwise_int8_coder.h
index 08464a462a..1045289f13 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/convolution_depthwise_int8_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/convolution_depthwise_int8_coder.h
@@ -25,7 +25,7 @@ namespace mindspore::lite::micro {
 class ConvolutionDepthwiseINT8Coder : public Conv2DBaseCoder {
  public:
   ConvolutionDepthwiseINT8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                                const Model::Node *node, size_t node_index, Target target)
+                                const LiteGraph::Node *node, size_t node_index, Target target)
       : Conv2DBaseCoder(in_tensors, out_tensors, node, node_index, target) {}

   ~ConvolutionDepthwiseINT8Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/deconvolution_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/deconvolution_int8_coder.h
index f404e398ad..a25b5d90d8 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/deconvolution_int8_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/deconvolution_int8_coder.h
@@ -26,7 +26,7 @@ namespace mindspore::lite::micro::nnacl {
 class DeconvolutionInt8Coder final : public Conv2DBaseCoder {
  public:
   DeconvolutionInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                         const Model::Node *node, size_t node_index, Target target)
+                         const LiteGraph::Node *node, size_t node_index, Target target)
       : Conv2DBaseCoder(in_tensors, out_tensors, node, node_index, target) {}

   ~DeconvolutionInt8Coder() override { delete matmul_param_; }
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/detection_post_process_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/detection_post_process_int8_coder.h
index 97f86a8ba2..7ebdf66390 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/detection_post_process_int8_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/detection_post_process_int8_coder.h
@@ -26,7 +26,7 @@ namespace mindspore::lite::micro::nnacl {
 class DetectionPostProcessInt8Coder final : public DetectionPostProcessBaseCoder {
  public:
   DetectionPostProcessInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                                const Model::Node *node, size_t node_index, Target target)
+                                const LiteGraph::Node *node, size_t node_index, Target target)
       : DetectionPostProcessBaseCoder(in_tensors, out_tensors, node, node_index, target) {}

   ~DetectionPostProcessInt8Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/div_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/div_int8_coder.h
index ceccad806b..3d01808cfc 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/div_int8_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/div_int8_coder.h
@@ -27,7 +27,7 @@ namespace mindspore::lite::micro::nnacl {
 class DivInt8Coder final : public OperatorCoder {
  public:
   DivInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-               const Model::Node *node, size_t node_index, Target target)
+               const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}

   ~DivInt8Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/fullconnection_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/fullconnection_int8_coder.h
index dd12382f20..18aa387b9d 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/fullconnection_int8_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/fullconnection_int8_coder.h
@@ -28,7 +28,7 @@ namespace mindspore::lite::micro::nnacl {
 class FullConnectionInt8Coder final : public MatMulBaseInt8Coder {
  public:
   FullConnectionInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                          const Model::Node *node, size_t node_index, Target target)
+                          const LiteGraph::Node *node, size_t node_index, Target target)
       : MatMulBaseInt8Coder(in_tensors, out_tensors, node, node_index, target) {}

   ~FullConnectionInt8Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/matmul_base_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/matmul_base_int8_coder.h
index c893324df3..6b66d263c1 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/matmul_base_int8_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/matmul_base_int8_coder.h
@@ -23,7 +23,7 @@ namespace mindspore::lite::micro::nnacl {
 class MatMulBaseInt8Coder : public OperatorCoder {
  public:
   MatMulBaseInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                      const Model::Node *node, size_t node_index, Target target)
+                      const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}

   ~MatMulBaseInt8Coder() override;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/matmul_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/matmul_int8_coder.h
index 482c86c95f..80ba36c89a 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/matmul_int8_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/matmul_int8_coder.h
@@ -25,7 +25,7 @@ namespace mindspore::lite::micro::nnacl {
 class MatMulInt8Coder final : public MatMulBaseInt8Coder {
  public:
   MatMulInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                  const Model::Node *node, size_t node_index, Target target)
+                  const LiteGraph::Node *node, size_t node_index, Target target)
       : MatMulBaseInt8Coder(in_tensors, out_tensors, node, node_index, target) {}

   ~MatMulInt8Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/pooling_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/pooling_int8_coder.h
index dd068c8468..92455d6a2b 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/pooling_int8_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/pooling_int8_coder.h
@@ -27,7 +27,7 @@ namespace mindspore::lite::micro::nnacl {
 class PoolingInt8Coder final : public OperatorCoder {
  public:
   PoolingInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                   const Model::Node *node, size_t node_index, Target target)
+                   const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}

   ~PoolingInt8Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/reduce_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/reduce_int8_coder.h
index bd9d05dfb9..1e40cb03d8 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/reduce_int8_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/reduce_int8_coder.h
@@ -27,7 +27,7 @@ namespace mindspore::lite::micro::nnacl {
 class ReduceInt8Coder final : public ReduceBaseCoder {
  public:
   ReduceInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                  const Model::Node *node, size_t node_index, Target target)
+                  const LiteGraph::Node *node, size_t node_index, Target target)
       : ReduceBaseCoder(in_tensors, out_tensors, node, node_index, target) {}

   ~ReduceInt8Coder() override {
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/relux_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/relux_int8_coder.h
index f42db86a03..b0195105c6 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/relux_int8_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/relux_int8_coder.h
@@ -29,7 +29,7 @@ namespace mindspore::lite::micro::nnacl {
 class ReluxInt8Coder : public OperatorCoder {
  public:
   ReluxInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                 const Model::Node *node, size_t node_index, Target target)
+                 const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}

   ~ReluxInt8Coder() override = default;
@@ -48,7 +48,7 @@ class ReluxInt8Coder : public OperatorCoder {
 class ReluInt8Coder final : public ReluxInt8Coder {
  public:
   ReluInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                const Model::Node *node, size_t node_index, Target target)
+                const LiteGraph::Node *node, size_t node_index, Target target)
       : ReluxInt8Coder(in_tensors, out_tensors, node, node_index, target) {}

   ~ReluInt8Coder() override = default;
@@ -64,7 +64,7 @@ class ReluInt8Coder final : public ReluxInt8Coder {
 class Relu6Int8Coder final : public ReluxInt8Coder {
  public:
   Relu6Int8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                 const Model::Node *node, size_t node_index, Target target)
+                 const LiteGraph::Node *node, size_t node_index, Target target)
       : ReluxInt8Coder(in_tensors, out_tensors, node, node_index, target) {}

   ~Relu6Int8Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/reshape_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/reshape_int8_coder.h
index 4053f4b02b..92bd9ff4c9 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/reshape_int8_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/reshape_int8_coder.h
@@ -24,7 +24,7 @@ namespace mindspore::lite::micro::nnacl {
 class ReshapeInt8Coder final : public OperatorCoder {
  public:
   ReshapeInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                   const Model::Node *node, size_t node_index, Target target)
+                   const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}

   ~ReshapeInt8Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/resize_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/resize_int8_coder.h
index bb280639b1..e23b430a5e 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/resize_int8_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/resize_int8_coder.h
@@ -26,7 +26,7 @@ namespace mindspore::lite::micro::nnacl {
 class ResizeInt8Coder final : public ResizeBaseCoder {
  public:
   ResizeInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                  const Model::Node *node, size_t node_index, Target target)
+                  const LiteGraph::Node *node, size_t node_index, Target target)
       : ResizeBaseCoder(in_tensors, out_tensors, node, node_index, target) {}

   ~ResizeInt8Coder() override;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/sigmoid_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/sigmoid_int8_coder.h
index f6d015a197..86211d04d5 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/sigmoid_int8_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/sigmoid_int8_coder.h
@@ -26,7 +26,7 @@ namespace mindspore::lite::micro::nnacl {
 class SigmodInt8Coder final : public OperatorCoder {
  public:
   SigmodInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                  const Model::Node *node, size_t node_index, Target target)
+                  const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}

   ~SigmodInt8Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/softmax_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/softmax_int8_coder.h
index c8ce497469..7722b5c3d5 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/softmax_int8_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/softmax_int8_coder.h
@@ -26,7 +26,7 @@ namespace mindspore::lite::micro::nnacl {
 class SoftMaxInt8Coder final : public SoftmaxBaseCoder {
  public:
   SoftMaxInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                   const Model::Node *node, size_t node_index, Target target)
+                   const LiteGraph::Node *node, size_t node_index, Target target)
       : SoftmaxBaseCoder(in_tensors, out_tensors, node, node_index, target) {}

   ~SoftMaxInt8Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/sub_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/sub_int8_coder.h
index fa34895bc5..5c771452a4 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/sub_int8_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/sub_int8_coder.h
@@ -27,7 +27,7 @@ namespace mindspore::lite::micro::nnacl {
 class SubInt8Coder final : public OperatorCoder {
  public:
   SubInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-               const Model::Node *node, size_t node_index, Target target)
+               const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}

   ~SubInt8Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/transpose_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/transpose_int8_coder.h
index e15dac0b7f..29c0f49451 100644
--- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/transpose_int8_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/transpose_int8_coder.h
@@ -24,7 +24,7 @@ namespace mindspore::lite::micro::nnacl {
 class TransposeInt8Coder final : public OperatorCoder {
  public:
   TransposeInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                     const Model::Node *node, size_t node_index, Target target)
+                     const LiteGraph::Node *node, size_t node_index, Target target)
       : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}

   ~TransposeInt8Coder() override = default;
diff --git a/mindspore/lite/micro/coder/opcoders/op_coder.h b/mindspore/lite/micro/coder/opcoders/op_coder.h
index a3044c7d47..025c20ac39 100644
--- a/mindspore/lite/micro/coder/opcoders/op_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/op_coder.h
@@ -36,7 +36,7 @@ constexpr int kPrecision = 19;
 class OperatorCoder {
  public:
   OperatorCoder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
-                const Model::Node *node, size_t node_index, Target target)
+                const LiteGraph::Node *node, size_t node_index, Target target)
       : input_tensors_(in_tensors),
         output_tensors_(out_tensors),
         target_(target),
@@ -70,7 +70,7 @@ class OperatorCoder {

   void set_parameter(OpParameter *parameter);

-  const Model::Node *node() const { return this->node_; }
+  const LiteGraph::Node *node() const { return this->node_; }

   void AddInitialParameters(Tensor *parameter) { initial_parameters_.push_back(parameter); }

@@ -91,7 +91,7 @@ class OperatorCoder {
   std::vector<Tensor *> input_tensors_;
   std::vector<Tensor *> output_tensors_;
   Target target_{kTargetUnknown};
-  const Model::Node *node_{nullptr};
+  const LiteGraph::Node *node_{nullptr};
   Tensor *input_tensor_{nullptr};
   Tensor *output_tensor_{nullptr};
@@ -117,7 +117,7 @@ class OperatorCoder {
 // a template func for normal op_coder creator
 template <typename T>
 std::unique_ptr<OperatorCoder> CPUOpCoderCreator(const std::vector<Tensor *> &in_tensors,
-                                                 const std::vector<Tensor *> &out_tensors, const Model::Node *node,
+                                                 const std::vector<Tensor *> &out_tensors, const LiteGraph::Node *node,
                                                  size_t node_index, Target target, int schema_version) {
   if (node == nullptr) {
     MS_LOG(ERROR) << "node is null";
diff --git a/mindspore/lite/micro/coder/opcoders/op_coder_builder.cc b/mindspore/lite/micro/coder/opcoders/op_coder_builder.cc
index 571fbdc739..6c897f103a 100644
--- a/mindspore/lite/micro/coder/opcoders/op_coder_builder.cc
+++ b/mindspore/lite/micro/coder/opcoders/op_coder_builder.cc
@@ -69,7 +69,7 @@ OpCoderBuilder &OpCoderBuilder::outputs(const std::vector &outputs) {
   return *this;
 }

-OpCoderBuilder &OpCoderBuilder::node(const Model::Node *node) {
+OpCoderBuilder &OpCoderBuilder::node(const LiteGraph::Node *node) {
   this->node_ = node;
   return *this;
 }
diff --git a/mindspore/lite/micro/coder/opcoders/op_coder_builder.h b/mindspore/lite/micro/coder/opcoders/op_coder_builder.h
index 2da028a2f7..cd566aeab6 100644
--- a/mindspore/lite/micro/coder/opcoders/op_coder_builder.h
+++ b/mindspore/lite/micro/coder/opcoders/op_coder_builder.h
@@ -31,7 +31,7 @@ class OpCoderBuilder {

   OpCoderBuilder &outputs(const std::vector<Tensor *> &outputs);

-  OpCoderBuilder &node(const Model::Node *node);
+  OpCoderBuilder &node(const LiteGraph::Node *node);

   OpCoderBuilder &parameter(OpParameter *parameter);

@@ -54,7 +54,7 @@ class OpCoderBuilder {

   std::vector<Tensor *> outputs_;

-  const mindspore::lite::Model::Node *node_ = nullptr;
+  const mindspore::lite::LiteGraph::Node *node_ = nullptr;

   OpParameter *parameter_{nullptr};

diff --git a/mindspore/lite/micro/coder/opcoders/op_coder_register.h b/mindspore/lite/micro/coder/opcoders/op_coder_register.h
index 19d0c5fd39..9b15c34464 100644
--- a/mindspore/lite/micro/coder/opcoders/op_coder_register.h
+++ b/mindspore/lite/micro/coder/opcoders/op_coder_register.h
@@ -27,7 +27,7 @@ namespace mindspore::lite::micro {
 class OperatorCoder;
 using CoderCreatorFunc = std::function<std::unique_ptr<OperatorCoder>(
-  const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors, const Model::Node *node,
+  const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors, const LiteGraph::Node *node,
   size_t node_index, Target target, int schema_version)>;

 class CoderKey {
diff --git a/mindspore/lite/micro/coder/session.cc b/mindspore/lite/micro/coder/session.cc
index 3e4c27ef16..8c70f9fcc7 100644
--- a/mindspore/lite/micro/coder/session.cc
+++ b/mindspore/lite/micro/coder/session.cc
@@ -202,7 +202,7 @@ int CoderSession::InitTensorsRef() {
   return RET_OK;
 }

-OpParameter *CoderSession::GenParameterAndInfer(const Model::Node *node, const std::vector<Tensor *> &inputs,
+OpParameter *CoderSession::GenParameterAndInfer(const LiteGraph::Node *node, const std::vector<Tensor *> &inputs,
                                                 std::vector<Tensor *> *outputs) const {
   auto primitive = node->primitive_;
   MS_CHECK_PTR_RET_NULL(primitive);
@@ -234,10 +234,10 @@ int CoderSession::CreateOpCoders() {
   Target code_target = config->target();
   CodeMode code_mode = config->code_mode();
   bool support_parallel = config->support_parallel();
-  uint32_t nodes_size = model->all_nodes_.size();
+  uint32_t nodes_size = model->graph_.all_nodes_.size();
   OpCoderBuilder builder;
   for (uint32_t i = 0; i < nodes_size; ++i) {
-    const auto *node = model->all_nodes_.at(i);
+    const auto *node = model->graph_.all_nodes_.at(i);
     if (node == nullptr) {
       MS_LOG(ERROR) << "node is nullptr";
       return RET_ERROR;
diff --git a/mindspore/lite/micro/coder/session.h b/mindspore/lite/micro/coder/session.h
index 2dd8c3b39b..909d5481b8 100644
--- a/mindspore/lite/micro/coder/session.h
+++ b/mindspore/lite/micro/coder/session.h
@@ -43,7 +43,7 @@ class CoderSession {
   int GenerateCode();

  private:
-  OpParameter *GenParameterAndInfer(const Model::Node *node, const std::vector<Tensor *> &inputs,
+  OpParameter *GenParameterAndInfer(const LiteGraph::Node *node, const std::vector<Tensor *> &inputs,
                                     std::vector<Tensor *> *outputs) const;
   int InitOpcodersInputsAndOutputs();
   int InitTensorsRef();
diff --git a/mindspore/lite/micro/coder/train.cc b/mindspore/lite/micro/coder/train.cc
index 6532285d2e..83c5ed176a 100644
--- a/mindspore/lite/micro/coder/train.cc
+++ b/mindspore/lite/micro/coder/train.cc
@@ -68,7 +68,7 @@ int Train::TransformGraphForTrain(CoderContext *context, const std::vector
-      const Model::Node *node = opcoder->node();
+      const LiteGraph::Node *node = opcoder->node();
       int primitive_type = GetPrimitiveType(node->primitive_, schema_version);
       auto item = std::find(loss_types.begin(), loss_types.end(), primitive_type);
       if (item != loss_types.end()) {
diff --git a/mindspore/lite/src/delegate/nnrt/checker/primitive_check.cc b/mindspore/lite/src/delegate/nnrt/checker/primitive_check.cc
index 8df160ba7e..4e6868c76b 100644
--- a/mindspore/lite/src/delegate/nnrt/checker/primitive_check.cc
+++ b/mindspore/lite/src/delegate/nnrt/checker/primitive_check.cc
@@ -181,7 +181,6 @@ Status CheckTensorSupported(const schema::Tensor *primitive) {
     }
   }
   return mindspore::kSuccess;
-  ;
 }
 }  // namespace lite
 }  // namespace mindspore
diff --git a/mindspore/lite/src/train/train_export.cc b/mindspore/lite/src/train/train_export.cc
index 7559b45846..ea401c5878 100644
--- a/mindspore/lite/src/train/train_export.cc
+++ b/mindspore/lite/src/train/train_export.cc
@@ -93,7 +93,7 @@ void TrainExport::TagQuantizedNodes() {
   for (auto &node : meta_graph_->nodes) {
     if (node->quantType != schema::QuantType_QUANT_WEIGHT) {
       for (auto t_idx : node->inputIndex) {
-        if ((meta_graph_->allTensors.at(t_idx)->nodeType == NodeType_ValueNode) &&
+        if (((MSNodeType)meta_graph_->allTensors.at(t_idx)->nodeType == MSNodeType::NodeType_ValueNode) &&
             (meta_graph_->allTensors.at(t_idx)->quantParams.size() > 0)) {
           node->quantType = schema::QuantType_QUANT_WEIGHT;
         }
@@ -163,7 +163,8 @@ std::unique_ptr TrainExport::CreateTensor(const mindspore::lite
   tensorT->offset = 0;
   tensorT->dataType = tensor->data_type();
   tensorT->enableHuffmanCode = false;
-  if ((tensorT->nodeType == NodeType_ValueNode) && (scTensor->data() != nullptr) && (scTensor->data()->size() > 0)) {
+  if (((MSNodeType)tensorT->nodeType == MSNodeType::NodeType_ValueNode) && (scTensor->data() != nullptr) &&
+      (scTensor->data()->size() > 0)) {
     if (NeedQuantization(tensor)) {
       QuantTensorData(tensorT.get(), tensor);
     } else {
@@ -174,10 +175,10 @@ std::unique_ptr TrainExport::CreateTensor(const mindspore::lite
   return tensorT;
 }

-Model::Node *TrainExport::FindNode(const mindspore::kernel::LiteKernel *kernel, const Model *model) {
-  auto nodes = model->all_nodes_;
+LiteGraph::Node *TrainExport::FindNode(const mindspore::kernel::LiteKernel *kernel, const Model *model) {
+  auto nodes = model->graph_.all_nodes_;
   auto it = std::find_if(nodes.begin(), nodes.end(),
-                         [&kernel](mindspore::lite::Model::Node *n) { return (kernel->name() == n->name_); });
+                         [&kernel](mindspore::lite::LiteGraph::Node *n) { return (kernel->name() == n->name_); });
   if (it == nodes.end()) {
     return nullptr;
   }
@@ -268,7 +269,7 @@ std::unique_ptr TrainExport::CreateTransformConst(size_t last_i
"Could not create tensor "; return nullptr; } - tensorT->nodeType = lite::NodeType_ValueNode; + tensorT->nodeType = (int)lite::MSNodeType::NodeType_ValueNode; tensorT->dataType = TypeId::kNumberTypeInt32; tensorT->dims = {kTransformTensorDim}; tensorT->format = schema::Format_NCHW; @@ -354,7 +355,7 @@ int TrainExport::ExportNet(const std::vector &k int tensor_idx = offset; quant_type_ = quant_type; if (meta_graph_ == nullptr) { - int status = ExportInit(model->name_, model->version_); + int status = ExportInit(model->graph_.name_, model->graph_.version_); if (status != RET_OK) { return status; } @@ -406,7 +407,7 @@ int TrainExport::ExportNet(const std::vector &k for (auto id : map_index) { size_t pid = id - offset; mindspore::lite::Tensor *tensor = tensors.at(pid); - schema::Tensor *scTensor = model->all_tensors_.at(pid); + schema::Tensor *scTensor = static_cast(model->graph_.all_tensors_.at(pid)); auto tensorT = CreateTensor(tensor, scTensor); if (tensorT == nullptr) { MS_LOG(ERROR) << "error in tensor creation"; @@ -451,7 +452,7 @@ int TrainExport::TopologicalSort() { } // put all const tensor index into sinked_tensor_idxes for (size_t i = 0; i < meta_graph_->allTensors.size(); i++) { - if (meta_graph_->allTensors.at(i)->nodeType == NodeType_ValueNode) { + if (static_cast(meta_graph_->allTensors.at(i)->nodeType) == MSNodeType::NodeType_ValueNode) { sinked_tensor_idxes.push_back(i); } } diff --git a/mindspore/lite/src/train/train_export.h b/mindspore/lite/src/train/train_export.h index 477d71a5d5..cadb3329b1 100644 --- a/mindspore/lite/src/train/train_export.h +++ b/mindspore/lite/src/train/train_export.h @@ -60,7 +60,7 @@ class TrainExport { bool IsNodeNonDepend(const std::unique_ptr &node, const std::vector &sinked_tensor_idxes); int TopologicalSort(); void PrepareRemap(int offset); - Model::Node *FindNode(const mindspore::kernel::LiteKernel *kernel, const Model *model); + LiteGraph::Node *FindNode(const mindspore::kernel::LiteKernel *kernel, const Model *model); std::unique_ptr CreateTensor(const Tensor *tensor, schema::Tensor *scTensor); std::unique_ptr CreateCNode(const mindspore::kernel::LiteKernel *kernel, std::vector inputIndex, std::vector outputIndex, diff --git a/mindspore/lite/src/train/train_session.cc b/mindspore/lite/src/train/train_session.cc index 6aecca175b..be5bf671f0 100644 --- a/mindspore/lite/src/train/train_session.cc +++ b/mindspore/lite/src/train/train_session.cc @@ -121,7 +121,7 @@ void TrainSession::FreeWorkSpace() { } int TrainSession::InitCallBack() { - sched_mix_precision_callback_ = [&](const Model::Node *node) { + sched_mix_precision_callback_ = [&](const LiteGraph::Node *node) { if (!context_->IsCpuFloat16Enabled()) { return false; } @@ -131,7 +131,8 @@ int TrainSession::InitCallBack() { MS_LOG(DEBUG) << "Debug: " << node->name_ << " fp32"; return false; } - auto is_fp16 = model_->all_tensors_.at(out_tensor_indexs[0])->dataType() == kNumberTypeFloat16; + auto is_fp16 = static_cast(model_->graph_.all_tensors_.at(out_tensor_indexs[0]))->dataType() == + kNumberTypeFloat16; MS_LOG(DEBUG) << "Debug: " << node->name_ << ((is_fp16) ? 
" fp16" : " fp32"); return is_fp16; } @@ -142,8 +143,9 @@ int TrainSession::InitCallBack() { auto in_size = node->input_indices_.size(); bool force_fp16 = false; for (std::size_t k = 0; k < in_size; k++) { - schema::Tensor *tensor = model_->all_tensors_.at(node->input_indices_[k]); - if ((tensor->dataType() == kNumberTypeFloat16) && (tensor->nodeType() == NodeType_ValueNode)) { + schema::Tensor *tensor = static_cast(model_->graph_.all_tensors_.at(node->input_indices_[k])); + if ((tensor->dataType() == kNumberTypeFloat16) && + (static_cast(tensor->nodeType()) == MSNodeType::NodeType_ValueNode)) { force_fp16 = true; break; } @@ -1012,7 +1014,7 @@ int TrainSession::Export(const std::string &file_name, ModelType model_type, Qua bool orig_train_state = IsTrain(); Eval(); TrainExport texport(file_name); - int status = texport.ExportInit(model_.get()->name_, model_.get()->version_); + int status = texport.ExportInit(model_.get()->graph_.name_, model_.get()->graph_.version_); if (status != RET_OK) { MS_LOG(ERROR) << "cannot init export"; return status; -- Gitee