From af82b820b67790f7ece65ab692a036c50b80e68a Mon Sep 17 00:00:00 2001 From: zhangwei123171 Date: Thu, 17 Jul 2025 14:15:33 +0800 Subject: [PATCH 1/4] sched/fair: reset idlest_cpu if the conditions for the preferred CPU are not met jingdong inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I8PGAS ----------------------------------------------------- We have set the preferred CPU, but when the load is too high, the conditions for the preferred CPU are not met. At this time, idlest_cpu needs to be reset. This will be corrected in the subsequent fallback process. Dynamic affinity should not break this logic. Fixes: 2e1dfc02d115 ("sched: Adjust wakeup cpu range according CPU util dynamicly") Signed-off-by: zhangwei123171 Reviewed-by: zhaoxiaoqiang11 Signed-off-by: Cheng Yu --- kernel/sched/fair.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index c4c3afa6e7b4f..fdb352c870ea1 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -9179,6 +9179,8 @@ static void set_task_select_cpus(struct task_struct *p, int *idlest_cpu, p->select_cpus = p->prefer_cpus; if (sd_flag & SD_BALANCE_WAKE) schedstat_inc(p->stats.nr_wakeups_preferred_cpus); + } else if (idlest_cpu) { + *idlest_cpu = -1; } } #endif -- Gitee From 5ebce7edb721538e99a6dc732dada83e3206646c Mon Sep 17 00:00:00 2001 From: He Yujie Date: Thu, 17 Jul 2025 14:15:34 +0800 Subject: [PATCH 2/4] sched/dynamic_affinity: fix preffered_cpu offline problem hulk inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IB42TC -------------------------------- After the preferred_cpu goes offline, the core selection of dynamic affinity does not check whether the preferred_cpu is valid. As a result, related dynamic affinity processes are concentrated on a shared_cpu. This patch resolves this problem by checking whether the preferred_cpu is valid and comparing only the usage threshold of the valid preferred_cpu. 
Fixes: 2e1dfc02d115 ("sched: Adjust wakeup cpu range according CPU util dynamicly") Signed-off-by: He Yujie Signed-off-by: Cheng Yu --- kernel/sched/fair.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index fdb352c870ea1..f83094121207c 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -9149,7 +9149,7 @@ static void set_task_select_cpus(struct task_struct *p, int *idlest_cpu, /* manual mode */ tg = task_group(p); - for_each_cpu(cpu, p->prefer_cpus) { + for_each_cpu_and(cpu, p->prefer_cpus, cpu_online_mask) { if (idlest_cpu && (available_idle_cpu(cpu) || sched_idle_cpu(cpu))) { *idlest_cpu = cpu; } else if (idlest_cpu) { @@ -9174,8 +9174,7 @@ static void set_task_select_cpus(struct task_struct *p, int *idlest_cpu, } rcu_read_unlock(); - if (tg_capacity > cpumask_weight(p->prefer_cpus) && - util_avg_sum * 100 <= tg_capacity * sysctl_sched_util_low_pct) { + if (util_avg_sum * 100 < tg_capacity * sysctl_sched_util_low_pct) { p->select_cpus = p->prefer_cpus; if (sd_flag & SD_BALANCE_WAKE) schedstat_inc(p->stats.nr_wakeups_preferred_cpus); -- Gitee From 947622601c1c9456590b57518a81489a5063316b Mon Sep 17 00:00:00 2001 From: He Yujie Date: Thu, 17 Jul 2025 14:15:35 +0800 Subject: [PATCH 3/4] sched/dynamic_affinity: fix CI test always selecting prefer_cpu problem hulk inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IB42TC -------------------------------- Commit aed539da59ad fixed the preferred_cpu offline problem, but introduced a CI problem. When one cpu usage is 100% occupied by RT tasks, CFS tasks util_avg is 0 but cpu capacity is not 0. During core selection, commit aed539da59ad deleted the judgment of the prefer_cpu set capacity and the prefer_cpumask weight. As a result, dynamic_affinity tasks always select prefer_cpu as the idlest_cpu. This patch resolves this problem by adding a check of the valid prefer_cpu set capacity and the valid prefer_cpu weight. 
Fixes: 56bfe5272963 ("sched/dynamic_affinity: fix preffered_cpu offline problem") Signed-off-by: He Yujie Signed-off-by: Cheng Yu --- kernel/sched/fair.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index f83094121207c..d2ad18d4b554f 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -9128,6 +9128,7 @@ static void set_task_select_cpus(struct task_struct *p, int *idlest_cpu, struct task_group *tg; long spare; int cpu, mode; + int nr_cpus_valid = 0; p->select_cpus = p->cpus_ptr; if (!prefer_cpus_valid(p)) @@ -9171,10 +9172,18 @@ static void set_task_select_cpus(struct task_struct *p, int *idlest_cpu, util_avg_sum += taskgroup_cpu_util(tg, cpu); tg_capacity += capacity_of(cpu); + nr_cpus_valid++; } rcu_read_unlock(); - if (util_avg_sum * 100 < tg_capacity * sysctl_sched_util_low_pct) { + /* + * The following cases should select cpus_ptr, checked by the condition + * tg_capacity > nr_cpus_valid: + * 1. all prefer_cpus offline; + * 2. all prefer_cpus have no cfs capacity (tg_capacity = nr_cpus_valid * 1) + */ + if (tg_capacity > nr_cpus_valid && + util_avg_sum * 100 <= tg_capacity * sysctl_sched_util_low_pct) { p->select_cpus = p->prefer_cpus; if (sd_flag & SD_BALANCE_WAKE) schedstat_inc(p->stats.nr_wakeups_preferred_cpus); -- Gitee From 8aea64193ebaccbb1b227c77dba8f63e2ab36235 Mon Sep 17 00:00:00 2001 From: He Yujie Date: Thu, 17 Jul 2025 14:15:36 +0800 Subject: [PATCH 4/4] sched/dynamic_affinity: Calculate cpu capacity in real time when realtime tasks are running on this cpu hulk inclusion category: bugfix bugzilla: https://gitee.com/src-openeuler/kernel/issues/IBBVWN -------------------------------- The task_rq selection of dynamic affinity uses cpu capacity to determine the select_cpus range. When realtime tasks are running on the cpu all the time, cfs tasks and the softirq thread are suppressed, and the cpu capacity is not updated in time. 
As a result, the select_cpus range is always set to preferred_cpus; then cfs tasks will never be able to run because realtime tasks have been running. Therefore, if realtime tasks are running during the task_rq selection of dynamic affinity, the cpu capacity should be calculated to solve such a problem. Fixes: 2e1dfc02d115 ("sched: Adjust wakeup cpu range according CPU util dynamicly") Signed-off-by: He Yujie Signed-off-by: Cheng Yu --- kernel/sched/fair.c | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index d2ad18d4b554f..71661d6c5b548 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -9106,6 +9106,19 @@ static inline unsigned long taskgroup_cpu_util(struct task_group *tg, return cpu_util_cfs(cpu); } +static unsigned long scale_rt_capacity(int cpu); + +static inline unsigned long calc_cpu_capacity(int cpu) +{ + unsigned long capacity; + + capacity = scale_rt_capacity(cpu); + if (!capacity) + capacity = 1; + + return capacity; +} + /* * set_task_select_cpus: select the cpu range for task * @p: the task whose available cpu range will to set @@ -9171,8 +9184,12 @@ static void set_task_select_cpus(struct task_struct *p, int *idlest_cpu, } util_avg_sum += taskgroup_cpu_util(tg, cpu); - tg_capacity += capacity_of(cpu); nr_cpus_valid++; + + if (cpu_rq(cpu)->rt.rt_nr_running) + tg_capacity += calc_cpu_capacity(cpu); + else + tg_capacity += capacity_of(cpu); } rcu_read_unlock(); -- Gitee