diff --git a/tf_adapter_2.x/npu_device/core/optimizers/meta/npu_hcom_tailing_optimizer.cpp b/tf_adapter_2.x/npu_device/core/optimizers/meta/npu_hcom_tailing_optimizer.cpp index afee34778e6d2666c62cc4fbad8ad09e02f83254..50634d80207431e08c239c7818d9585df2954b93 100644 --- a/tf_adapter_2.x/npu_device/core/optimizers/meta/npu_hcom_tailing_optimizer.cpp +++ b/tf_adapter_2.x/npu_device/core/optimizers/meta/npu_hcom_tailing_optimizer.cpp @@ -56,7 +56,7 @@ tensorflow::Status TailingOptimizeInner(tensorflow::FunctionLibraryDefinition *l } } } - if (node->type_string() == kNpuAllocFloatStatusOp && node->attrs().Find(kNpuLossScaleAttr) != nullptr) { + if ((node->type_string() == kNpuAllocFloatStatusOp) && (node->attrs().Find(kNpuLossScaleAttr) != nullptr)) { std::unordered_set edges_to_remove; tensorflow::Node *last_allreduce = nullptr; for (auto in_edge : node->in_edges()) { diff --git a/tf_adapter_2.x/npu_device/core/optimizers/meta/npu_weight_update_grouping_optimizer.cpp b/tf_adapter_2.x/npu_device/core/optimizers/meta/npu_weight_update_grouping_optimizer.cpp index d36a3ca41bc63785867b010d1b63ecb13b544e16..d906256666a6820affc7807a6c9600849daff988 100644 --- a/tf_adapter_2.x/npu_device/core/optimizers/meta/npu_weight_update_grouping_optimizer.cpp +++ b/tf_adapter_2.x/npu_device/core/optimizers/meta/npu_weight_update_grouping_optimizer.cpp @@ -57,7 +57,7 @@ tensorflow::Status WeightUpdateGroupingOptimizeInner(tensorflow::FunctionLibrary } } - if (node->type_string() == kHcomBroadcast && node->attrs().Find(kWeightUpdateGroupingAttr) != nullptr) { + if ((node->type_string() == kHcomBroadcast) && (node->attrs().Find(kWeightUpdateGroupingAttr) != nullptr)) { std::unordered_set edges_to_remove; tensorflow::Node *read_var_node = nullptr; for (auto in_edge : node->in_edges()) { @@ -114,7 +114,7 @@ tensorflow::Status WeightUpdateGroupingOptimizeInner(tensorflow::FunctionLibrary (void)graph->AddEdge(var_node, 0, new_read_var_node, 0); (void)graph->AddEdge(new_read_var_node, 0, node, 
0); for (auto var_edge : var_node->out_edges()) { - if (var_edge->dst() != new_read_var_node && var_edge->dst() != assign_node) { + if ((var_edge->dst() != new_read_var_node) && (var_edge->dst() != assign_node)) { (void)graph->AddControlEdge(assign_node, var_edge->dst()); } } diff --git a/tf_adapter_2.x/npu_device/core/optimizers/runtime/node_placer.cpp b/tf_adapter_2.x/npu_device/core/optimizers/runtime/node_placer.cpp index 92d8c163bd83a959e3b0f7f83b44bf0e78b83dfd..5549135b7ed3aeb4ae474ad3a1cc59a771473909 100644 --- a/tf_adapter_2.x/npu_device/core/optimizers/runtime/node_placer.cpp +++ b/tf_adapter_2.x/npu_device/core/optimizers/runtime/node_placer.cpp @@ -316,7 +316,7 @@ tensorflow::Status NodePlacer::PlaceCpuNodeSubgraphs(size_t depth) const { bool NodePlacer::IsClusterMustPlaceOnNpu(const Cluster &cluster) { for (auto node : cluster.nodes) { auto iter = node_placement_.find(node); - if (iter != node_placement_.end() && iter->second == Placement::NPU) { + if ((iter != node_placement_.end()) && (iter->second == Placement::NPU)) { DLOG() << cluster.name << " must place on npu as has determined npu node " << node->name(); return true; } @@ -405,7 +405,7 @@ void NodePlacer::Concrete(tensorflow::Node *src, tensorflow::Node *dst) { DLOG() << "Concrete node " << src->name() << " with " << dst->name() << " to cluster " << target->name; auto iter = concrete_clusters_.find(src); - if (iter != concrete_clusters_.end() && iter->second == target) { + if ((iter != concrete_clusters_.end()) && (iter->second == target)) { DLOG() << "Node " << src->name() << " has already concrete with " << dst->name() << " in cluster " << target->name; return; } @@ -468,7 +468,7 @@ tensorflow::Status NodePlacer::BuildConcreteCluster() { std::queue> q; for (auto &node : cluster->nodes) { auto iter = concrete_clusters_.find(node); - if (iter != concrete_clusters_.end() && iter->second != cluster) { + if ((iter != concrete_clusters_.end()) && (iter->second != cluster)) { q.push(iter->second); } 
} diff --git a/tf_adapter_2.x/npu_device/core/optimizers/runtime/npu_build_npu_op_optimizer.cpp b/tf_adapter_2.x/npu_device/core/optimizers/runtime/npu_build_npu_op_optimizer.cpp index 5abc96bfe1936fcf04cc732018042f6bf2a90c87..dd26e7985310e913d92b2df735a89f7c70380d15 100644 --- a/tf_adapter_2.x/npu_device/core/optimizers/runtime/npu_build_npu_op_optimizer.cpp +++ b/tf_adapter_2.x/npu_device/core/optimizers/runtime/npu_build_npu_op_optimizer.cpp @@ -113,7 +113,7 @@ tensorflow::Status TryToBuildShapeForDynDims(const std::mapMutableGraph()), key) || key != nullptr) { + if (IsGraphNeedLoop(*(graph->MutableGraph()), key) || (key != nullptr)) { graph->SetLoopType(NpuConcreteGraph::LoopType::BUILTIN_LOOP); } graph->SetExecutionType(NpuConcreteGraph::ExecutionType::MIX); diff --git a/tf_adapter_2.x/npu_device/core/optimizers/runtime/npu_trans_resource_input_to_node_optimizer.cpp b/tf_adapter_2.x/npu_device/core/optimizers/runtime/npu_trans_resource_input_to_node_optimizer.cpp index 2bd9a99c4d7cde50b25f9dbd7934c45244a1419a..6cb0a2bc50730936039a863ad82f596033690ea1 100644 --- a/tf_adapter_2.x/npu_device/core/optimizers/runtime/npu_trans_resource_input_to_node_optimizer.cpp +++ b/tf_adapter_2.x/npu_device/core/optimizers/runtime/npu_trans_resource_input_to_node_optimizer.cpp @@ -180,7 +180,8 @@ tensorflow::Status TransHasSubgraphNode(TFE_Context *context, tensorflow::Graph functions.emplace_back(const_cast(node->attrs().Find("then_branch"))->mutable_func()); functions.emplace_back(const_cast(node->attrs().Find("else_branch"))->mutable_func()); } else if (node->IsCaseNode()) { - for (auto &f : *const_cast(node->attrs().Find("branches"))->mutable_list()->mutable_func()) { + for (auto &f : + *const_cast(node->attrs().Find("branches"))->mutable_list()->mutable_func()) { functions.emplace_back(&f); } } else {