From f26554f2fc33dea3e66ee1b8da7fa2feb4ad6f92 Mon Sep 17 00:00:00 2001
From: "Rafael J. Wysocki"
Date: Sun, 28 Apr 2024 15:04:46 +0800
Subject: [PATCH] x86: PM: Register syscore_ops for scale invariance

mainline inclusion
from mainline-v5.11-rc5
commit 9c7d9017a49fb8516c13b7bff59b7da2abed23e1
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I9JNPZ
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=9c7d9017a49fb8516c13b7bff59b7da2abed23e1

------------------------------------------------------

On x86, scale invariance tends to be disabled during resume from
suspend-to-RAM, because the MPERF or APERF MSR values are not as
expected then due to updates taking place after the platform firmware
has been invoked to complete the suspend transition.

That, of course, is not desirable, especially if the schedutil scaling
governor is in use, because the lack of scale invariance causes it to
be less reliable.

To counter that effect, modify init_freq_invariance() to register a
syscore_ops object for scale invariance with the ->resume callback
pointing to init_counter_refs(), which will run on the CPU starting
the resume transition (the other CPUs will be taken care of by the
"online" operations taking place later).

Fixes: e2b0d619b400 ("x86, sched: check for counters overflow in frequency invariant accounting")
Signed-off-by: Rafael J. Wysocki
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Giovanni Gherdovich
Link: https://lkml.kernel.org/r/1803209.Mvru99baaF@kreacher

Conflicts:
	arch/x86/kernel/smpboot.c

Signed-off-by: liwei
---
 arch/x86/kernel/smpboot.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index df96e3f81d85..ab3ad40b2ce8 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -56,7 +56,8 @@
 #include
 #include
 #include
 #include
+#include <linux/syscore_ops.h>
 
 #include
 #include
@@ -2115,6 +2116,23 @@ static void init_counter_refs(void)
 	this_cpu_write(arch_prev_mperf, mperf);
 }
 
+#ifdef CONFIG_PM_SLEEP
+static struct syscore_ops freq_invariance_syscore_ops = {
+	.resume = init_counter_refs,
+};
+
+static void register_freq_invariance_syscore_ops(void)
+{
+	/* Bail out if registered already. */
+	if (freq_invariance_syscore_ops.node.prev)
+		return;
+
+	register_syscore_ops(&freq_invariance_syscore_ops);
+}
+#else
+static inline void register_freq_invariance_syscore_ops(void) {}
+#endif
+
 static void init_freq_invariance(bool secondary)
 {
 	bool ret = false;
@@ -2135,6 +2153,7 @@ static void init_freq_invariance(bool secondary)
 	if (ret) {
 		init_counter_refs();
 		static_branch_enable(&arch_scale_freq_key);
+		register_freq_invariance_syscore_ops();
 	} else {
 		pr_debug("Couldn't determine max cpu frequency, necessary for scale-invariant accounting.\n");
 	}
-- 
Gitee
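
Note (not part of the patch): for readers unfamiliar with the syscore mechanism
the change relies on, below is a minimal, illustrative sketch of a generic user
of <linux/syscore_ops.h>. The demo_* names are made up for illustration only;
struct syscore_ops, register_syscore_ops() and the "callbacks run on one CPU
with interrupts disabled, late in suspend / early in resume" behaviour are the
actual kernel interface, which is exactly why hooking ->resume is a good place
to refresh the APERF/MPERF snapshots that the firmware transition invalidates.

/*
 * Illustrative sketch only: a minimal syscore_ops user.  The demo_*
 * identifiers are hypothetical; the API itself comes from
 * <linux/syscore_ops.h>.
 */
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/syscore_ops.h>

/* Runs on the CPU driving the transition, IRQs off, late in suspend. */
static int demo_syscore_suspend(void)
{
	pr_debug("demo: save state before platform firmware is invoked\n");
	return 0;	/* a non-zero return aborts the suspend transition */
}

/* Runs on the CPU starting the resume transition, IRQs off. */
static void demo_syscore_resume(void)
{
	pr_debug("demo: re-init state the firmware may have clobbered\n");
}

static struct syscore_ops demo_syscore_ops = {
	.suspend = demo_syscore_suspend,
	.resume  = demo_syscore_resume,
};

static int __init demo_syscore_init(void)
{
	/* The list node is embedded, so the object must stay allocated. */
	register_syscore_ops(&demo_syscore_ops);
	return 0;
}
late_initcall(demo_syscore_init);

The same embedded-list-node property explains the guard in the patch above:
init_freq_invariance() runs for every CPU brought online, and registering the
same syscore_ops object twice would corrupt the syscore list, so the
freq_invariance_syscore_ops.node.prev check is a cheap "already registered"
test before calling register_syscore_ops().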