From 5e7b9f8243b89ff014d8aff967e81906d5509f4a Mon Sep 17 00:00:00 2001
From: Cruz Zhao
Date: Wed, 17 Dec 2025 16:22:33 +0800
Subject: [PATCH] anolis: sched: correct nr_tasks/nr_high_running/nr_under_running

ANBZ: #27433

cfs_rq->nr_tasks indicates the number of tasks directly enqueued on this
cfs_rq. When group identity flips, we should account the se into
cfs_rq->nr_tasks only when the se is a task.

cfs_rq->nr_tasks will influence nr_high_running and nr_under_running,
which will influence ID_ABSOLUTE_EXPEL. When group identity flips, there
may be some cgroups with non-zero identity, so we recount nr_high_running
and nr_under_running.

Signed-off-by: Cruz Zhao
---
 kernel/sched/fair.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 924ddef6348a..e2fbf1264e22 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1325,7 +1325,6 @@ static int tg_clear_counters_down(struct task_group *tg, void *data)
 	struct rq *rq = data;
 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
 
-	WARN_ONCE(!RB_EMPTY_ROOT(&cfs_rq->under_timeline.rb_root), "cfs_rq->undertimeline is not empty\n");
 #ifdef CONFIG_SCHED_SMT
 	cfs_rq->h_nr_expel_immune = 0;
 #endif
@@ -1334,6 +1333,7 @@ static int tg_clear_counters_down(struct task_group *tg, void *data)
 	return 0;
 }
 
+static int task_is_throttled_fair(struct task_struct *p, int cpu);
 static int __group_identity_flip(void *data)
 {
 	struct rq *rq;
@@ -1346,24 +1346,30 @@ static int __group_identity_flip(void *data)
 	rq = this_rq();
 	rq_lock(rq, &rf);
 	rq->nr_expel_immune = 0;
+	rq->nr_high_running = 0;
+	rq->nr_under_running = 0;
+	rq->nr_absolute_expeller = 0;
+	rcu_read_lock();
 	walk_tg_tree_from(&root_task_group, tg_clear_counters_down, tg_nop, (void *)rq);
+	rcu_read_unlock();
 
 	if (!enable)
 		goto out;
 
 	list_for_each_entry(p, &rq->cfs_tasks, se.group_node) {
 		se = &p->se;
-		for_each_sched_entity(se) {
-			if (!se->on_rq)
-				break;
-			cfs_rq = cfs_rq_of(se);
+		cfs_rq = cfs_rq_of(se);
+		if (cfs_rq)
 			cfs_rq->nr_tasks++;
-
-			if (cfs_rq_throttled(cfs_rq))
-				break;
+		if (!task_is_throttled_fair(p, cpu_of(rq))) {
+			if (is_highclass_task(p))
+				rq->nr_high_running++;
+			else if (is_underclass_task(p))
+				rq->nr_under_running++;
+			if (task_is_absolute_expeller(p))
+				rq->nr_absolute_expeller++;
 		}
-		se = &p->se;
 		hierarchy_update_nr_expel_immune(se, 1);
 	}
-- 
Gitee