From 9e25fd0895ab9a254848ea27b3e42601f32fdab6 Mon Sep 17 00:00:00 2001
From: Cruz Zhao
Date: Mon, 8 Dec 2025 20:34:42 +0800
Subject: [PATCH] anolis: sched: fix the panic after group balancer disabled

ANBZ: #8765

After the group balancer is disabled, if a task group changes cfs_quota
or cfs_period, there will be a panic. This is caused by two problems:
 - There is a path that isn't controlled by the group_balancer_enabled()
   switch.
 - When a gb_sd is freed, the gb_sd pointers of the task groups attached
   to it aren't redirected to the parent.

Besides, preferred_gb_sd should also be cleared if it points to a dying
gb_sd.

Fixes: 4e229f827a67 ("anolis: sched: introduce group balancer sched domain")
Fixes: 4bb9c29b1c1a ("anolis: sched: modify the interface of attach/detach_tg_to/from_group_balancer_sched_domain")
Signed-off-by: Cruz Zhao
---
 kernel/sched/group_balancer.c | 26 ++++++++++++++++++++++----
 1 file changed, 23 insertions(+), 3 deletions(-)

diff --git a/kernel/sched/group_balancer.c b/kernel/sched/group_balancer.c
index 5ea4fd24731b..1011ffeb0df3 100644
--- a/kernel/sched/group_balancer.c
+++ b/kernel/sched/group_balancer.c
@@ -818,6 +818,20 @@ static int tg_unset_gb_tg_down(struct task_group *tg, void *data)
 	return 0;
 }
 
+static int tg_unset_preferred_gb_sd_down(struct task_group *tg, void *data)
+{
+	struct group_balancer_sched_domain *gb_sd = data;
+
+	/*
+	 * We hold the group_balancer_sched_domain_lock write lock, so no
+	 * one competes with us.
+	 */
+	if (tg->preferred_gb_sd == gb_sd)
+		tg->preferred_gb_sd = NULL;
+
+	return 0;
+}
+
 static void free_group_balancer_sched_domain(struct group_balancer_sched_domain *gb_sd)
 {
 	int cpu;
@@ -846,9 +860,10 @@ static void free_group_balancer_sched_domain(struct group_balancer_sched_domain
 		while (!RB_EMPTY_ROOT(root)) {
 			node = root->rb_node;
 			tg = __gb_node_2_tg(node);
-			rb_erase(node, root);
-			rb_add(node, &parent->task_groups, tg_specs_less);
-			walk_tg_tree_from(tg, tg_set_gb_tg_down, tg_nop, tg);
+			raw_spin_lock(&gb_sd->lock);
+			remove_tg_from_group_balancer_sched_domain_locked(tg, gb_sd, false);
+			raw_spin_unlock(&gb_sd->lock);
+			add_tg_to_group_balancer_sched_domain(tg, parent, false);
 		}
 	}
 }
@@ -867,6 +882,8 @@ static void free_group_balancer_sched_domain(struct group_balancer_sched_domain
 	if (gb_sd->kn)
 		kernfs_remove(gb_sd->kn);
 
+	walk_tg_tree_from(&root_task_group, tg_unset_preferred_gb_sd_down, tg_nop, gb_sd);
+
 	kfree(gb_sd);
 }
 
@@ -1994,6 +2011,9 @@ void tg_specs_change(struct task_group *tg, u64 specs_before)
 	struct group_balancer_sched_domain *gb_sd;
 	int specs = tg->specs_ratio;
 
+	if (!group_balancer_enabled())
+		return;
+
 	gb_sd = tg->gb_sd;
 	if (!gb_sd)
 		/* tg->group_balancer is always true here, so find a gb_sd to attach. */
-- 
Gitee