From 860874bd1cca5142eb23d123a0aeac7ec9d73d75 Mon Sep 17 00:00:00 2001 From: zhaolichang Date: Fri, 21 Mar 2025 01:08:48 +0800 Subject: [PATCH 1/3] PINCTRL: Fix the issue that CONFIG_PINCTRL_AMD does not support the m option huawei inclusion category: bugfix bugzilla: https://gitee.com/src-openeuler/calamares/issues/IBS0LG CVE: NA -------------------------------------- Fix the issue that the CONFIG_PINCTRL_AMD configuration does not support the 'm' (module) option, and change it to 'y' (built-in). Fixes: cbba3eb02aa9 ("PINCTRL:ENABLE_CONFIG_PINCTRL_AMD") Signed-off-by: zhaolichang --- arch/arm64/configs/openeuler_defconfig | 2 +- arch/x86/configs/openeuler_defconfig | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig index 531f5f04d8e8..39729694001b 100644 --- a/arch/arm64/configs/openeuler_defconfig +++ b/arch/arm64/configs/openeuler_defconfig @@ -3945,7 +3945,7 @@ CONFIG_PINMUX=y CONFIG_PINCONF=y CONFIG_GENERIC_PINCONF=y # CONFIG_DEBUG_PINCTRL is not set -CONFIG_PINCTRL_AMD=m +CONFIG_PINCTRL_AMD=y # CONFIG_PINCTRL_CY8C95X0 is not set # CONFIG_PINCTRL_MCP23S08 is not set # CONFIG_PINCTRL_MICROCHIP_SGPIO is not set diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig index 7f37a9e8b75b..6ece34678378 100644 --- a/arch/x86/configs/openeuler_defconfig +++ b/arch/x86/configs/openeuler_defconfig @@ -4157,7 +4157,7 @@ CONFIG_PINMUX=y CONFIG_PINCONF=y CONFIG_GENERIC_PINCONF=y # CONFIG_DEBUG_PINCTRL is not set -CONFIG_PINCTRL_AMD=m +CONFIG_PINCTRL_AMD=y # CONFIG_PINCTRL_CY8C95X0 is not set # CONFIG_PINCTRL_MCP23S08 is not set # CONFIG_PINCTRL_SX150X is not set -- Gitee From 056f91a0c5bb0e9afe9cbfc843fd83033d1a9d43 Mon Sep 17 00:00:00 2001 From: zhaolichang Date: Tue, 18 Mar 2025 15:24:52 +0800 Subject: [PATCH 2/3] tlbi: fix the problem of incorrect TLB flushing kunpeng inclusion category: bugfix bugzilla: 
https://gitee.com/openeuler/kernel/issues/IBU2Y1 -------------------------------- fix the problem of incorrect TLB flushing Fixes: ebfca9b4d3c1 ("tlbi: Do not force the broadcasting of TLBI and ICache, and add TLB flush helpers based on IPI.") Signed-off-by: zhaolichang --- arch/arm64/Kconfig | 6 +- arch/arm64/include/asm/mmu_context.h | 14 +-- arch/arm64/include/asm/pgtable.h | 4 - arch/arm64/include/asm/tlbflush.h | 164 +++++++++++---------------- arch/arm64/kernel/Makefile | 1 - arch/arm64/kernel/smp.c | 13 +-- arch/arm64/kernel/tlbflush.c | 41 ------- arch/arm64/mm/context.c | 6 - 8 files changed, 80 insertions(+), 169 deletions(-) delete mode 100644 arch/arm64/kernel/tlbflush.c diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 6f72be31061b..d3ce44c166ce 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -2679,10 +2679,8 @@ config ARM64_TLBI_IPI depends on ARM64 default n help - adds new boot parameter 'disable_tlbflush_is' to disable TLB flush - within the same inner shareable domain for performance tuning. - - When this new parameter is specified, TLB entry is invalidated by + Disable TLB flush within the same inner shareable domain for performance + tuning. When this new parameter is specified, TLB entry is invalidated by __tlbi(aside1, asid) only on the CPUs specified by mm_cpumask(mm). By using TLB.IS, all CPUs within the same inner shareable domain diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index 29c0a86629e2..411c3800bd11 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -249,6 +249,11 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) static inline void __switch_mm(struct mm_struct *next) { +#ifdef CONFIG_ARM64_TLBI_IPI + unsigned int cpu = smp_processor_id(); + + cpumask_set_cpu(cpu, mm_cpumask(next)); +#endif /* * init_mm.pgd does not contain any user mappings and it is always * active for kernel addresses in TTBR1. 
Just set the reserved TTBR0. @@ -265,15 +270,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { - if (prev != next) { + if (prev != next) __switch_mm(next); -#ifdef CONFIG_ARM64_TLBI_IPI - if (unlikely(test_tlbi_ipi_switch())) { - cpumask_clear_cpu(smp_processor_id(), mm_cpumask(prev)); - local_flush_tlb_mm(prev); - } -#endif - } /* * Update the saved TTBR0_EL1 of the scheduled-in task as the previous diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index c7d494d76c36..b30e43b84a64 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -946,11 +946,7 @@ static inline int __ptep_clear_flush_young(struct vm_area_struct *vma, * context-switch, which provides a DSB to complete the TLB * invalidation. */ -#ifdef CONFIG_ARM64_TLBI_IPI - flush_tlb_page_nosync_ipi(vma, address); -#else flush_tlb_page_nosync(vma, address); -#endif } return young; diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index 66ac917641d1..af5e6681b0d2 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -16,8 +16,6 @@ #include #include #include -#include -#include /* * Raw TLBI operations. 
@@ -253,99 +251,17 @@ static inline void flush_tlb_all(void) } #ifdef CONFIG_ARM64_TLBI_IPI -static unsigned int disable_tlbflush_is; - -#define FLAG_TLBFLUSH_PAGE 0x0002 -#define FLAG_TLBFLUSH_SWITCH 0x0004 -#define FLAG_TLBFLUSH_MM 0x0008 - -#define TEST_TLBFLUSH_FLAG_EXTERN(flag, FLAG) \ -bool test_tlbi_ipi_##flag(void) \ -{ \ - return !!(disable_tlbflush_is & FLAG_TLBFLUSH_##FLAG); \ -} - -#define TEST_TLBFLUSH_FLAG(flag, FLAG) \ -static __always_inline TEST_TLBFLUSH_FLAG_EXTERN(flag, FLAG) - -TEST_TLBFLUSH_FLAG(mm, MM) -TEST_TLBFLUSH_FLAG(page, PAGE) -TEST_TLBFLUSH_FLAG(switch, SWITCH) - -static inline void local_flush_tlb_mm(struct mm_struct *mm) +static inline void flush_tlb_mm_nosync(struct mm_struct *mm) { - unsigned long asid = __TLBI_VADDR(0, ASID(mm)); - - dsb(nshst); - __tlbi(aside1, asid); - __tlbi_user(aside1, asid); - dsb(nsh); -} - -static inline void __flush_tlb_mm(struct mm_struct *mm) -{ - unsigned long asid = __TLBI_VADDR(0, ASID(mm)); - - dsb(ishst); - __tlbi(aside1is, asid); - __tlbi_user(aside1is, asid); - dsb(ish); -} - -static inline void ipi_flush_tlb_mm(void *arg) -{ - struct mm_struct *mm = arg; - - local_flush_tlb_mm(mm); -} - -static inline void flush_tlb_mm(struct mm_struct *mm) -{ - if (unlikely(test_tlbi_ipi_mm())) - on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, - (void *)mm, true); - else - __flush_tlb_mm(mm); - mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL); -} - -static inline void __flush_tlb_page_nosync_ipi(unsigned long addr) -{ - dsb(ishst); - __tlbi(vale1is, addr); - __tlbi_user(vale1is, addr); -} - -static inline void __local_flush_tlb_page_nosync(unsigned long addr) -{ - dsb(nshst); - __tlbi(vale1, addr); - __tlbi_user(vale1, addr); - dsb(nsh); -} - -static inline void ipi_flush_tlb_page_nosync(void *arg) -{ - unsigned long addr = *(unsigned long *)arg; - - __local_flush_tlb_page_nosync(addr); -} + unsigned long asid; -static inline void flush_tlb_page_nosync_ipi(struct vm_area_struct *vma, unsigned long 
uaddr) -{ - unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm)); - - if (unlikely(test_tlbi_ipi_page())) - on_each_cpu_mask(mm_cpumask(vma->vm_mm), - ipi_flush_tlb_page_nosync, &addr, true); - else - __flush_tlb_page_nosync_ipi(addr); - mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, uaddr & PAGE_MASK, - (uaddr & PAGE_MASK) + PAGE_SIZE); + dsb(ishst); + asid = __TLBI_VADDR(0, ASID(mm)); + __tlbi(aside1is, asid); + __tlbi_user(aside1is, asid); + mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL); } - -#else /* CONFIG_ARM64_TLBI_IPI */ - +#else static inline void flush_tlb_mm(struct mm_struct *mm) { unsigned long asid; @@ -357,7 +273,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm) dsb(ish); mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL); } -#endif /* CONFIG_ARM64_TLBI_IPI */ +#endif static inline void __flush_tlb_page_nosync(struct mm_struct *mm, unsigned long uaddr) @@ -372,6 +288,54 @@ static inline void __flush_tlb_page_nosync(struct mm_struct *mm, (uaddr & PAGE_MASK) + PAGE_SIZE); } +#ifdef CONFIG_ARM64_TLBI_IPI +static inline void local_flush_tlb_mm(struct mm_struct *mm) +{ + unsigned long asid = __TLBI_VADDR(0, ASID(mm)); + + dsb(nshst); + __tlbi(aside1, asid); + dsb(nsh); + mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL); +} + +static inline void flush_tlb_mm(struct mm_struct *mm) +{ + if (unlikely(cpumask_full(mm_cpumask(mm)))) { + flush_tlb_mm_nosync(mm); + dsb(ish); + } else { + on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm, mm, 1); + } +} + +struct tlb_args { + struct vm_area_struct *ta_vma; + unsigned long ta_start; + unsigned long ta_end; +}; + +static inline void local_flush_tlb_page(struct vm_area_struct *vma, + unsigned long uaddr) +{ + unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm)); + + dsb(nshst); + __tlbi(vale1, addr); + __tlbi_user(vale1, addr); + dsb(nsh); + mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, uaddr & PAGE_MASK, + (uaddr & PAGE_MASK) + 
PAGE_SIZE); +} + +static inline void ipi_flush_tlb_page(void *arg) +{ + struct tlb_args *ta = arg; + + local_flush_tlb_page(ta->ta_vma, ta->ta_start); +} +#endif /* CONFIG_ARM64_TLBI_IPI */ + static inline void flush_tlb_page_nosync(struct vm_area_struct *vma, unsigned long uaddr) { @@ -382,11 +346,21 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) { #ifdef CONFIG_ARM64_TLBI_IPI - flush_tlb_page_nosync_ipi(vma, uaddr); + struct tlb_args ta = { + .ta_vma = vma, + .ta_start = uaddr + }; + + if (unlikely(cpumask_full(mm_cpumask(vma->vm_mm)))) { + flush_tlb_page_nosync(vma, uaddr); + dsb(ish); + } else { + on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1); + } #else flush_tlb_page_nosync(vma, uaddr); -#endif dsb(ish); +#endif } static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm) diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index f96459f1be83..8c5ac2b7ab2e 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -36,7 +36,6 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ syscall.o proton-pack.o idreg-override.o idle.o \ patching.o -obj-$(CONFIG_ARM64_TLBI_IPI) += tlbflush.o obj-$(CONFIG_IEE) += haoc/ obj-$(CONFIG_AARCH32_EL0) += binfmt_elf32.o sys32.o signal32.o \ sys_compat.o diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 49cf07d171d2..440701144f8f 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -213,9 +213,6 @@ asmlinkage notrace void secondary_start_kernel(void) mmgrab(mm); current->active_mm = mm; -#ifdef CONFIG_ARM64_TLBI_IPI - cpumask_set_cpu(cpu, mm_cpumask(mm)); -#endif #ifdef CONFIG_IEE if (haoc_enabled) iee_setup_asid(); @@ -227,6 +224,9 @@ asmlinkage notrace void secondary_start_kernel(void) */ numa_setup_pgd(); +#ifdef CONFIG_ARM64_TLBI_IPI + cpumask_set_cpu(cpu, mm_cpumask(mm)); +#endif /* * TTBR0 is only used for the identity mapping at this stage. 
Make it * point to zero page to avoid speculatively fetching new entries. @@ -334,13 +334,6 @@ int __cpu_disable(void) */ irq_migrate_all_off_this_cpu(); -#ifdef CONFIG_ARM64_TLBI_IPI - /* - * Remove this CPU from the vm mask set of all processes. - */ - clear_tasks_mm_cpumask(cpu); -#endif - return 0; } diff --git a/arch/arm64/kernel/tlbflush.c b/arch/arm64/kernel/tlbflush.c deleted file mode 100644 index 3ac36c3117a4..000000000000 --- a/arch/arm64/kernel/tlbflush.c +++ /dev/null @@ -1,41 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// Copyright (C) 2025 FUJITSU LIMITED - -#include - -static int __init disable_tlbflush_is_setup(char *str) -{ - unsigned int flags = 0; - - while (isalpha(*str)) { - if (!strncmp(str, "page,", 5)) { - str += 5; - flags |= FLAG_TLBFLUSH_PAGE; - continue; - } - - if (!strncmp(str, "switch,", 7)) { - str += 7; - flags |= FLAG_TLBFLUSH_SWITCH; - continue; - } - - if (!strcmp(str, "mm")) { - str += 2; - flags |= FLAG_TLBFLUSH_MM; - break; - } - - pr_warn("disable_tlbflush_is: Error, unknown flag\n"); - return 0; - } - - disable_tlbflush_is = flags; - pr_info("DISABLE_TLBFLUSH_IS : [%s] [%s] [%s]\n", - test_tlbi_ipi_page() ? "PAGE" : "NA", - test_tlbi_ipi_switch() ? "SWITCH" : "NA", - test_tlbi_ipi_mm() ? 
"MM" : "NA"); - - return 0; -} -early_param("disable_tlbflush_is", disable_tlbflush_is_setup); diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index fe82d5d3005a..5abe1be1bcac 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -214,9 +214,6 @@ static u64 new_context(struct mm_struct *mm) set_asid: __set_bit(asid, asid_map); cur_idx = asid; -#ifdef CONFIG_ARM64_TLBI_IPI - cpumask_clear(mm_cpumask(mm)); -#endif return asid2ctxid(asid, generation); } @@ -269,9 +266,6 @@ void check_and_switch_context(struct mm_struct *mm) switch_mm_fastpath: arm64_apply_bp_hardening(); -#ifdef CONFIG_ARM64_TLBI_IPI - cpumask_set_cpu(cpu, mm_cpumask(mm)); -#endif /* * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when -- Gitee From f69080d24f34e605bb635e2b4c4baf7fb7504b6f Mon Sep 17 00:00:00 2001 From: Zenghui Yu Date: Thu, 20 Mar 2025 17:52:47 +0800 Subject: [PATCH 3/3] KVM: arm64: Allow vcpus running without HCR_EL2.FB virt inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IBU2Y1 CVE: NA -------------------------------- As per ARM DDI 0487G.a, setting the HCR_EL2.FB (Force broadcast) bit causes a given set of TLBI and IC instructions to be broadcast within the Inner Shareable domain when executed from EL1 (if HCR_EL2.TGE is 0). And people complain that this leads to bad performance when running guests on Kunpeng920 which has 128 physical CPUs in the IS domain, especially in the case where vcpus are pinned to physical CPUs, where we indeed don't need broadcast invalidations. Introduce a new cmdline parameter "kvm-arm.hcr_nofb" for users and setting it at boot time allows all vcpus running without HCR_EL2.FB. Note that we now have to nuke the whole vcpu context in the general case (when vcpu is loaded on to the new physical CPU). 
Link: https://gitee.com/openeuler/kernel/commit/823f53c21805 Co-developed-by: Nianyao Tang Signed-off-by: Nianyao Tang Signed-off-by: Zenghui Yu Reviewed-by: Cheng Jian Signed-off-by: Yang Yingliang Signed-off-by: zhaolichang --- arch/arm64/configs/openeuler_defconfig | 1 + arch/arm64/include/asm/kvm_emulate.h | 9 ++++++++ arch/arm64/kvm/Kconfig | 15 +++++++++++++ arch/arm64/kvm/arm.c | 29 ++++++++++++++++++++++++++ include/linux/kvm_host.h | 3 +++ virt/kvm/kvm_main.c | 3 +++ 6 files changed, 60 insertions(+) diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig index 39729694001b..8f97574813ca 100644 --- a/arch/arm64/configs/openeuler_defconfig +++ b/arch/arm64/configs/openeuler_defconfig @@ -776,6 +776,7 @@ CONFIG_KVM_ARM_MULTI_LPI_TRANSLATE_CACHE=y CONFIG_ARCH_VCPU_STAT=y CONFIG_VIRT_VTIMER_IRQ_BYPASS=y CONFIG_CPU_MITIGATIONS=y +# CONFIG_ARM64_KVM_HCR_NOFB is not set # # Hardware Assisted OS Compartmentalization(HAOC) diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index f0b10cb2c87d..9020678f01cb 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -66,6 +66,10 @@ static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu) } #endif +#ifdef CONFIG_ARM64_KVM_HCR_NOFB +extern bool kvm_hcr_nofb; +#endif + static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) { vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS; @@ -101,6 +105,11 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) if (kvm_has_mte(vcpu->kvm)) vcpu->arch.hcr_el2 |= HCR_ATA; + +#ifdef CONFIG_ARM64_KVM_HCR_NOFB + if (unlikely(kvm_hcr_nofb)) + vcpu->arch.hcr_el2 &= ~HCR_FB; +#endif } static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index 52edbd7f6340..1d1a2f72085a 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -99,4 +99,19 @@ config VIRT_VTIMER_IRQ_BYPASS depends on KVM && ARM64 && 
ARCH_HISI && HISILICON_IRQ_MBIGEN default n +config ARM64_KVM_HCR_NOFB + bool "Disable HCR_EL2.FB on kvm(EXPERIMENTAL)" + depends on ARM64 + default n + help + Add new boot parameter 'kvm-arm.hcr_nofb' to allow all vcpus running + without HCR_EL2.FB. + + NOTE(Important) + This feature is used for learning and debugging only. Please don't + enable it on commercial products. + If you know exactly what the impact of the feature is, you can + configure it as you do. + + If unsure, say N. endif # VIRTUALIZATION diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index f4194603eae5..a59671c0d3bc 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -67,6 +67,16 @@ bool kvm_ncsnp_support; /* Capability of DVMBM */ bool kvm_dvmbm_support; +#ifdef CONFIG_ARM64_KVM_HCR_NOFB +bool kvm_hcr_nofb; + +static int __init early_hcr_nofb_cfg(char *buf) +{ + return strtobool(buf, &kvm_hcr_nofb); +} +early_param("kvm-arm.hcr_nofb", early_hcr_nofb_cfg); +#endif + static DEFINE_PER_CPU(unsigned char, kvm_hyp_initialized); DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use); @@ -563,6 +573,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { struct kvm_s2_mmu *mmu; int *last_ran; +#ifdef CONFIG_ARM64_KVM_HCR_NOFB + bool flushed = false; +#endif mmu = vcpu->arch.hw_mmu; last_ran = this_cpu_ptr(mmu->last_vcpu_ran); @@ -579,8 +592,21 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) if (*last_ran != vcpu->vcpu_id) { kvm_call_hyp(__kvm_flush_cpu_context, mmu); *last_ran = vcpu->vcpu_id; +#ifdef CONFIG_ARM64_KVM_HCR_NOFB + flushed = true; +#endif } +#ifdef CONFIG_ARM64_KVM_HCR_NOFB + /* + * If FB (Force broadcast) is cleared, we have to nuke the + * vcpu context as well in case it is loaded on to the new + * physical CPU. 
+ */ + if (unlikely(kvm_hcr_nofb) && vcpu->pre_pcpu != cpu && !flushed) + kvm_call_hyp(__kvm_flush_cpu_context, mmu); +#endif + vcpu->cpu = cpu; #ifdef CONFIG_HISI_VIRTCCA_HOST @@ -646,6 +672,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) kvm_arm_vmid_clear_active(); vcpu_clear_on_unsupported_cpu(vcpu); +#ifdef CONFIG_ARM64_KVM_HCR_NOFB + vcpu->pre_pcpu = vcpu->cpu; +#endif vcpu->cpu = -1; kvm_tlbi_dvmbm_vcpu_put(vcpu); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 7db0aeba781e..98328fe7a68b 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -336,6 +336,9 @@ struct kvm_vcpu { u64 requests; unsigned long guest_debug; +#ifdef CONFIG_ARM64_KVM_HCR_NOFB + int pre_pcpu; +#endif struct mutex mutex; struct kvm_run *run; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 421f253903ca..abc89a2de3dd 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -505,6 +505,9 @@ static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) #endif kvm_async_pf_vcpu_init(vcpu); +#ifdef CONFIG_ARM64_KVM_HCR_NOFB + vcpu->pre_pcpu = -1; +#endif kvm_vcpu_set_in_spin_loop(vcpu, false); kvm_vcpu_set_dy_eligible(vcpu, false); vcpu->preempted = false; -- Gitee