From e1541f6ac0caf6dc45c702f6287ed3b4beb8f1db Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Fri, 7 Nov 2025 12:58:52 +0800 Subject: [PATCH 01/10] KVM: X86: prepend vmx/svm prefix to additional kvm_x86_ops functions ANBZ: #27210 commit b6a7cc35440e997a42fa23ad006d5d3ba768007c upstream. A subsequent patch introduces macros in preparation for simplifying the definition for vmx_x86_ops and svm_x86_ops. Making the naming more uniform expands the coverage of the macros. Add vmx/svm prefix to the following functions: update_exception_bitmap(), enable_nmi_window(), enable_irq_window(), update_cr8_intercept and enable_smi_window(). Hygon-SIG: commit KVM: X86: prepend vmx/svm prefix to additional kvm_x86_ops functions backport comment b6a7cc35440e for "Use static_call for kvm_x86_ops" Cc: Paolo Bonzini Cc: Sean Christopherson Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: Peter Zijlstra Cc: Andrea Arcangeli Signed-off-by: Jason Baron Message-Id: Signed-off-by: Paolo Bonzini Signed-off-by: Wei Wang [ Wei Wang: amend commit log and resolve the conflicts ] Cc: hygon-arch@list.openanolis.cn --- arch/x86/kvm/svm/svm.c | 20 ++++++++++---------- arch/x86/kvm/vmx/nested.c | 2 +- arch/x86/kvm/vmx/vmx.c | 32 ++++++++++++++++---------------- arch/x86/kvm/vmx/vmx.h | 2 +- 4 files changed, 28 insertions(+), 28 deletions(-) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 1cf860bd70dd..e226359bd9f6 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1879,7 +1879,7 @@ static void svm_set_segment(struct kvm_vcpu *vcpu, vmcb_mark_dirty(svm->vmcb, VMCB_SEG); } -static void update_exception_bitmap(struct kvm_vcpu *vcpu) +static void svm_update_exception_bitmap(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -3537,7 +3537,7 @@ static void svm_set_irq(struct kvm_vcpu *vcpu) SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR; } -static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) +static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) { struct vcpu_svm *svm = to_svm(vcpu); @@ -3653,7 +3653,7 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection) return !svm_interrupt_blocked(vcpu); } -static void enable_irq_window(struct kvm_vcpu *vcpu) +static void svm_enable_irq_window(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -3677,7 +3677,7 @@ static void enable_irq_window(struct kvm_vcpu *vcpu) } } -static void enable_nmi_window(struct kvm_vcpu *vcpu) +static void svm_enable_nmi_window(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -4488,7 +4488,7 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) return ret; } -static void enable_smi_window(struct kvm_vcpu *vcpu) +static void svm_enable_smi_window(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -4731,7 +4731,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .vcpu_blocking = svm_vcpu_blocking, .vcpu_unblocking = svm_vcpu_unblocking, - .update_exception_bitmap = update_exception_bitmap, + .update_exception_bitmap = svm_update_exception_bitmap, .get_msr_feature = svm_get_msr_feature, .get_msr = svm_get_msr, .set_msr = svm_set_msr, @@ -4774,9 +4774,9 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .nmi_allowed = svm_nmi_allowed, .get_nmi_mask = svm_get_nmi_mask, .set_nmi_mask = svm_set_nmi_mask, - .enable_nmi_window = enable_nmi_window, - .enable_irq_window = enable_irq_window, - .update_cr8_intercept = update_cr8_intercept, + .enable_nmi_window = 
svm_enable_nmi_window, + .enable_irq_window = svm_enable_irq_window, + .update_cr8_intercept = svm_update_cr8_intercept, .set_virtual_apic_mode = svm_set_virtual_apic_mode, .refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl, .check_apicv_inhibit_reasons = svm_check_apicv_inhibit_reasons, @@ -4819,7 +4819,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .smi_allowed = svm_smi_allowed, .pre_enter_smm = svm_pre_enter_smm, .pre_leave_smm = svm_pre_leave_smm, - .enable_smi_window = enable_smi_window, + .enable_smi_window = svm_enable_smi_window, .mem_enc_op = svm_mem_enc_op, .mem_enc_reg_region = svm_register_enc_region, diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index bb4597932f9d..2fa9cfe928ce 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -2550,7 +2550,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, * bitwise-or of what L1 wants to trap for L2, and what we want to * trap. Note that CR0.TS also needs updating - we do this later. */ - update_exception_bitmap(vcpu); + vmx_update_exception_bitmap(vcpu); vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask; vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 33761204f143..8956683e36e0 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -879,7 +879,7 @@ static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg) return *p; } -void update_exception_bitmap(struct kvm_vcpu *vcpu) +void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu) { u32 eb; @@ -2176,7 +2176,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) vmx_disable_intercept_for_msr(vcpu, MSR_IA32_XFD, MSR_TYPE_RW); vcpu->arch.xfd_no_write_intercept = true; - update_exception_bitmap(vcpu); + vmx_update_exception_bitmap(vcpu); } break; #endif @@ -3011,7 +3011,7 @@ static void enter_pmode(struct kvm_vcpu *vcpu) vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME)); - update_exception_bitmap(vcpu); + vmx_update_exception_bitmap(vcpu); fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); @@ -3091,7 +3091,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu) vmcs_writel(GUEST_RFLAGS, flags); vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME); - update_exception_bitmap(vcpu); + vmx_update_exception_bitmap(vcpu); fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); @@ -4818,7 +4818,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) vmx_set_cr4(vcpu, 0); vmx_set_efer(vcpu, 0); - update_exception_bitmap(vcpu); + vmx_update_exception_bitmap(vcpu); vpid_sync_context(vmx->vpid); if (init_event) @@ -4827,16 +4827,16 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) vmx_update_fb_clear_dis(vcpu, vmx); } -static void enable_irq_window(struct kvm_vcpu *vcpu) +static void vmx_enable_irq_window(struct kvm_vcpu *vcpu) { exec_controls_setbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING); } -static void enable_nmi_window(struct kvm_vcpu *vcpu) +static void vmx_enable_nmi_window(struct kvm_vcpu *vcpu) { if (!enable_vnmi || vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) { - enable_irq_window(vcpu); + vmx_enable_irq_window(vcpu); return; } @@ -6629,7 +6629,7 @@ static noinstr void vmx_l1d_flush(struct kvm_vcpu 
*vcpu) : "eax", "ebx", "ecx", "edx"); } -static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) +static void vmx_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); int tpr_threshold; @@ -8078,7 +8078,7 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) ~FEAT_CTL_SGX_LC_ENABLED; /* Refresh #PF interception to account for MAXPHYADDR changes. */ - update_exception_bitmap(vcpu); + vmx_update_exception_bitmap(vcpu); } static __init void vmx_set_cpu_caps(void) @@ -8388,7 +8388,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) return 0; } -static void enable_smi_window(struct kvm_vcpu *vcpu) +static void vmx_enable_smi_window(struct kvm_vcpu *vcpu) { /* RSM will cause a vmexit anyway. */ } @@ -8447,7 +8447,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { .vcpu_load = vmx_vcpu_load, .vcpu_put = vmx_vcpu_put, - .update_exception_bitmap = update_exception_bitmap, + .update_exception_bitmap = vmx_update_exception_bitmap, .get_msr_feature = vmx_get_msr_feature, .get_msr = vmx_get_msr, .set_msr = vmx_set_msr, @@ -8490,9 +8490,9 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { .nmi_allowed = vmx_nmi_allowed, .get_nmi_mask = vmx_get_nmi_mask, .set_nmi_mask = vmx_set_nmi_mask, - .enable_nmi_window = enable_nmi_window, - .enable_irq_window = enable_irq_window, - .update_cr8_intercept = update_cr8_intercept, + .enable_nmi_window = vmx_enable_nmi_window, + .enable_irq_window = vmx_enable_irq_window, + .update_cr8_intercept = vmx_update_cr8_intercept, .set_virtual_apic_mode = vmx_set_virtual_apic_mode, .set_apic_access_page_addr = vmx_set_apic_access_page_addr, .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl, @@ -8550,7 +8550,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { .smi_allowed = vmx_smi_allowed, .pre_enter_smm = vmx_pre_enter_smm, .pre_leave_smm = vmx_pre_leave_smm, - .enable_smi_window = enable_smi_window, + .enable_smi_window = vmx_enable_smi_window, .can_emulate_instruction = vmx_can_emulate_instruction, .apic_init_signal_blocked = vmx_apic_init_signal_blocked, diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index ad741acf1b7f..61d357c2015d 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -367,7 +367,7 @@ u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa, int root_level); bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu); -void update_exception_bitmap(struct kvm_vcpu *vcpu); +void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu); void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu); bool vmx_nmi_blocked(struct kvm_vcpu *vcpu); bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu); -- Gitee From 514aa054e0b27a5b07758928d00d3fed1f722195 Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Thu, 14 Jan 2021 22:27:55 -0500 Subject: [PATCH 02/10] KVM: x86: introduce definitions to support static calls for kvm_x86_ops ANBZ: #27210 commit 9af5471bdbb2a26a1a46cd834e9fda6db6a9670e Use static calls to improve kvm_x86_ops performance. Introduce the definitions that will be used by a subsequent patch to actualize the savings. Add a new kvm-x86-ops.h header that can be used for the definition of static calls. This header is also intended to be used to simplify the defition of svm_kvm_ops and vmx_x86_ops. Note that all functions in kvm_x86_ops are covered here except for 'pmu_ops' and 'nested ops'. 
From 514aa054e0b27a5b07758928d00d3fed1f722195 Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Thu, 14 Jan 2021 22:27:55 -0500 Subject: [PATCH 02/10] KVM: x86: introduce definitions to support static calls for kvm_x86_ops ANBZ: #27210 commit 9af5471bdbb2a26a1a46cd834e9fda6db6a9670e upstream. Use static calls to improve kvm_x86_ops performance. Introduce the definitions that will be used by a subsequent patch to actualize the savings. Add a new kvm-x86-ops.h header that can be used for the definition of static calls. This header is also intended to be used to simplify the definition of svm_x86_ops and vmx_x86_ops. Note that all functions in kvm_x86_ops are covered here except for 'pmu_ops' and 'nested ops'. I think they can be covered by static calls in a similar manner, but were omitted from this series to reduce scope and because I don't think they have as large a performance impact. Hygon-SIG: commit 9af5471bdbb2 KVM: x86: introduce definitions to support static calls for kvm_x86_ops backport comment for "Use static_call for kvm_x86_ops" Cc: Paolo Bonzini Cc: Sean Christopherson Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: Peter Zijlstra Cc: Andrea Arcangeli Signed-off-by: Jason Baron Message-Id: Signed-off-by: Paolo Bonzini Signed-off-by: Wei Wang [ Wei Wang: amend commit log and resolve the conflicts ] Cc: hygon-arch@list.openanolis.cn ---
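Note (after the --- cut line, so not part of the commit message): expanding this patch's two macros for a single member shows the whole mechanism. get_cpl is chosen purely as an example; every member works the same way.

	/* KVM_X86_OP(get_cpl) under the kvm_host.h definition: */
	DECLARE_STATIC_CALL(kvm_x86_get_cpl,
			    *(((struct kvm_x86_ops *)0)->get_cpl));

	/* KVM_X86_OP(get_cpl) under the x86.c definition: */
	DEFINE_STATIC_CALL_NULL(kvm_x86_get_cpl,
				*(((struct kvm_x86_ops *)0)->get_cpl));

	/* kvm_ops_static_call_update() then binds the vendor callback: */
	static_call_update(kvm_x86_get_cpl, kvm_x86_ops.get_cpl);

The odd-looking *(((struct kvm_x86_ops *)0)->func) expression is never evaluated; it exists only to hand the member's function type to the static-call machinery.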
 arch/x86/include/asm/kvm-x86-ops.h | 125 +++++++++++++++++++++++++++++ arch/x86/include/asm/kvm_host.h | 13 +++ arch/x86/kvm/x86.c | 9 +++ 3 files changed, 147 insertions(+) create mode 100644 arch/x86/include/asm/kvm-x86-ops.h diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h new file mode 100644 index 000000000000..549d7e54801d --- /dev/null +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -0,0 +1,125 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#if !defined(KVM_X86_OP) || !defined(KVM_X86_OP_NULL) +BUILD_BUG_ON(1) +#endif + +/* + * KVM_X86_OP() and KVM_X86_OP_NULL() are used to help generate + * "static_call()"s. They are also intended for use when defining + * the vmx/svm kvm_x86_ops. KVM_X86_OP() can be used for those + * functions that follow the [svm|vmx]_func_name convention. + * KVM_X86_OP_NULL() can leave a NULL definition for the + * case where there is no definition or a function name that + * doesn't match the typical naming convention is supplied. + */ +KVM_X86_OP_NULL(hardware_enable) +KVM_X86_OP_NULL(hardware_disable) +KVM_X86_OP_NULL(hardware_unsetup) +KVM_X86_OP_NULL(cpu_has_accelerated_tpr) +KVM_X86_OP(has_emulated_msr) +KVM_X86_OP(vcpu_after_set_cpuid) +KVM_X86_OP(vm_init) +KVM_X86_OP_NULL(vm_destroy) +KVM_X86_OP(vcpu_create) +KVM_X86_OP(vcpu_free) +KVM_X86_OP(vcpu_reset) +KVM_X86_OP(prepare_guest_switch) +KVM_X86_OP(vcpu_load) +KVM_X86_OP(vcpu_put) +KVM_X86_OP(update_exception_bitmap) +KVM_X86_OP(get_msr) +KVM_X86_OP(set_msr) +KVM_X86_OP(get_segment_base) +KVM_X86_OP(get_segment) +KVM_X86_OP(get_cpl) +KVM_X86_OP(set_segment) +KVM_X86_OP_NULL(get_cs_db_l_bits) +KVM_X86_OP(set_cr0) +KVM_X86_OP(set_cr4) +KVM_X86_OP(set_efer) +KVM_X86_OP(get_idt) +KVM_X86_OP(set_idt) +KVM_X86_OP(get_gdt) +KVM_X86_OP(set_gdt) +KVM_X86_OP(sync_dirty_debug_regs) +KVM_X86_OP(set_dr7) +KVM_X86_OP(cache_reg) +KVM_X86_OP(get_rflags) +KVM_X86_OP(set_rflags) +KVM_X86_OP(tlb_flush_all) +KVM_X86_OP(tlb_flush_current) +KVM_X86_OP_NULL(tlb_remote_flush) +KVM_X86_OP_NULL(tlb_remote_flush_with_range) +KVM_X86_OP(tlb_flush_gva) +KVM_X86_OP(tlb_flush_guest) +KVM_X86_OP(run) +KVM_X86_OP_NULL(handle_exit) +KVM_X86_OP_NULL(skip_emulated_instruction) +KVM_X86_OP_NULL(update_emulated_instruction) +KVM_X86_OP(set_interrupt_shadow) +KVM_X86_OP(get_interrupt_shadow) +KVM_X86_OP(patch_hypercall) +KVM_X86_OP(set_irq) +KVM_X86_OP(set_nmi) +KVM_X86_OP(queue_exception) +KVM_X86_OP(cancel_injection) +KVM_X86_OP(interrupt_allowed) +KVM_X86_OP(nmi_allowed) +KVM_X86_OP(get_nmi_mask) +KVM_X86_OP(set_nmi_mask) +KVM_X86_OP(enable_nmi_window) +KVM_X86_OP(enable_irq_window) +KVM_X86_OP(update_cr8_intercept) +KVM_X86_OP(check_apicv_inhibit_reasons) +KVM_X86_OP_NULL(pre_update_apicv_exec_ctrl) +KVM_X86_OP(refresh_apicv_exec_ctrl) +KVM_X86_OP(hwapic_irr_update) +KVM_X86_OP(hwapic_isr_update) +KVM_X86_OP_NULL(guest_apic_has_interrupt) +KVM_X86_OP(load_eoi_exitmap) +KVM_X86_OP(set_virtual_apic_mode) +KVM_X86_OP_NULL(set_apic_access_page_addr) +KVM_X86_OP(deliver_posted_interrupt) +KVM_X86_OP_NULL(sync_pir_to_irr) +KVM_X86_OP(set_tss_addr) +KVM_X86_OP(set_identity_map_addr) +KVM_X86_OP(get_mt_mask) +KVM_X86_OP(load_mmu_pgd) +KVM_X86_OP_NULL(has_wbinvd_exit) +KVM_X86_OP(write_l1_tsc_offset) +KVM_X86_OP(get_exit_info) +KVM_X86_OP(check_intercept) +KVM_X86_OP(handle_exit_irqoff) +KVM_X86_OP_NULL(request_immediate_exit) +KVM_X86_OP(sched_in) +KVM_X86_OP_NULL(slot_enable_log_dirty) +KVM_X86_OP_NULL(slot_disable_log_dirty) +KVM_X86_OP_NULL(flush_log_dirty) +KVM_X86_OP_NULL(enable_log_dirty_pt_masked) +KVM_X86_OP_NULL(pre_block) +KVM_X86_OP_NULL(post_block) +KVM_X86_OP_NULL(vcpu_blocking) +KVM_X86_OP_NULL(vcpu_unblocking) +KVM_X86_OP_NULL(update_pi_irte) +KVM_X86_OP_NULL(apicv_post_state_restore) +KVM_X86_OP_NULL(dy_apicv_has_pending_interrupt) +KVM_X86_OP_NULL(set_hv_timer) +KVM_X86_OP_NULL(cancel_hv_timer) +KVM_X86_OP(setup_mce) +KVM_X86_OP(smi_allowed) +KVM_X86_OP(pre_enter_smm) +KVM_X86_OP(pre_leave_smm) +KVM_X86_OP(enable_smi_window) +KVM_X86_OP_NULL(mem_enc_op) +KVM_X86_OP_NULL(mem_enc_reg_region) +KVM_X86_OP_NULL(mem_enc_unreg_region) +KVM_X86_OP(get_msr_feature) +KVM_X86_OP(can_emulate_instruction) +KVM_X86_OP(apic_init_signal_blocked) +KVM_X86_OP_NULL(enable_direct_tlbflush) +KVM_X86_OP_NULL(migrate_timers) +KVM_X86_OP(msr_filter_changed) +KVM_X86_OP_NULL(complete_emulated_msr) + +#undef KVM_X86_OP +#undef KVM_X86_OP_NULL diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 867c55197459..cae1fcd411f4 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1414,6 +1414,19 @@ extern u64 __read_mostly host_efer; extern bool __read_mostly allow_smaller_maxphyaddr; extern struct kvm_x86_ops kvm_x86_ops; +#define KVM_X86_OP(func) \ + DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func)); +#define KVM_X86_OP_NULL KVM_X86_OP +#include <asm/kvm-x86-ops.h> + +static inline void kvm_ops_static_call_update(void) +{ +#define KVM_X86_OP(func) \ + static_call_update(kvm_x86_##func, kvm_x86_ops.func); +#define KVM_X86_OP_NULL KVM_X86_OP +#include <asm/kvm-x86-ops.h> +} + #define __KVM_HAVE_ARCH_VM_ALLOC static inline struct kvm *kvm_arch_alloc_vm(void) { diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 383c051813ff..fb76b7686a4a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -127,6 +127,15 @@ static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu); struct kvm_x86_ops kvm_x86_ops __read_mostly; EXPORT_SYMBOL_GPL(kvm_x86_ops); +#define KVM_X86_OP(func) \ + DEFINE_STATIC_CALL_NULL(kvm_x86_##func, \ + *(((struct kvm_x86_ops *)0)->func)); +#define KVM_X86_OP_NULL KVM_X86_OP +#include <asm/kvm-x86-ops.h> +EXPORT_STATIC_CALL_GPL(kvm_x86_get_cs_db_l_bits); +EXPORT_STATIC_CALL_GPL(kvm_x86_cache_reg); +EXPORT_STATIC_CALL_GPL(kvm_x86_tlb_flush_current); + static bool __read_mostly ignore_msrs = 0; module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR); -- Gitee
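The conversion applied throughout the next patch is mechanical: every kvm_x86_ops.func(...) call site becomes static_call(kvm_x86_func)(...), and hooks that may legitimately be NULL use static_call_cond(). A minimal before/after sketch (some_op is a placeholder name, not a real hook):

	/* Before: an indirect call through the ops table; under the
	 * default speculative-execution mitigations this goes through
	 * a retpoline on every invocation. */
	r = kvm_x86_ops.some_op(vcpu);

	/* After: a direct call whose target was patched in by
	 * static_call_update() at vendor-module init. */
	r = static_call(kvm_x86_some_op)(vcpu);

	/* Optional (possibly-NULL) void hooks use the _cond variant,
	 * which patches the site to a NOP when the hook is NULL: */
	static_call_cond(kvm_x86_vcpu_blocking)(vcpu);

This is where the numbers in the next commit message come from: the win is largest with retpolines enabled (default mitigations) and smaller with mitigations=off.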
From c16611e50c9f0fc1b1d9855316ad616cb56a7b05 Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Fri, 7 Nov 2025 13:21:08 +0800 Subject: [PATCH 03/10] KVM: x86: use static calls to reduce kvm_x86_ops overhead ANBZ: #27210 commit b3646477d458fbe7694a15b9c78fbe2fa426b703 upstream. Convert kvm_x86_ops to use static calls. Note that all kvm_x86_ops are covered here except for 'pmu_ops' and 'nested ops'. Here are some numbers running cpuid in a loop of 1 million calls averaged over 5 runs, measured in the vm (lower is better).

Intel Xeon 3000MHz:

           |default    |mitigations=off
-------------------------------------
vanilla    |.671s      |.486s
static call|.573s(-15%)|.458s(-6%)

AMD EPYC 2500MHz:

           |default    |mitigations=off
-------------------------------------
vanilla    |.710s      |.609s
static call|.664s(-6%) |.609s(0%)

Hygon-SIG: commit b3646477d458 KVM: x86: use static calls to reduce kvm_x86_ops overhead backport comment for "Use static_call for kvm_x86_ops" Cc: Paolo Bonzini Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: Peter Zijlstra Cc: Andrea Arcangeli Cc: Sean Christopherson Signed-off-by: Jason Baron Message-Id: Signed-off-by: Paolo Bonzini Signed-off-by: Wei Wang [ Wei Wang: amend commit log and resolve the conflicts ] Cc: hygon-arch@list.openanolis.cn --- arch/x86/include/asm/kvm_host.h | 8 +- arch/x86/kvm/cpuid.c | 2 +- arch/x86/kvm/hyperv.c | 4 +- arch/x86/kvm/irq.c | 3 +- arch/x86/kvm/kvm_cache_regs.h | 10 +- arch/x86/kvm/lapic.c | 30 ++-- arch/x86/kvm/mmu.h | 6 +- arch/x86/kvm/mmu/mmu.c | 13 +- arch/x86/kvm/mmu/spte.c | 2 +- arch/x86/kvm/pmu.c | 2 +- arch/x86/kvm/trace.h | 4 +- arch/x86/kvm/x86.c | 295 ++++++++++++++++---------------- arch/x86/kvm/x86.h | 6 +- 13 files changed, 190 insertions(+), 195 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index cae1fcd411f4..6c0d300f1b9c 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1438,7 +1438,7 @@ void kvm_arch_free_vm(struct kvm *kvm); static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm) { if (kvm_x86_ops.tlb_remote_flush && - !kvm_x86_ops.tlb_remote_flush(kvm)) + !static_call(kvm_x86_tlb_remote_flush)(kvm)) return 0; else return -ENOTSUPP; @@ -1830,14 +1830,12 @@ static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq) static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) { - if (kvm_x86_ops.vcpu_blocking) - kvm_x86_ops.vcpu_blocking(vcpu); + static_call_cond(kvm_x86_vcpu_blocking)(vcpu); } static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) { - if (kvm_x86_ops.vcpu_unblocking) - kvm_x86_ops.vcpu_unblocking(vcpu); + static_call_cond(kvm_x86_vcpu_unblocking)(vcpu); } static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index cdb03a4f516d..212a4400914a 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -240,7 +240,7 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) vcpu->arch.cr3_lm_rsvd_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63); /* Invoke the vendor callback only after the above state is updated.
*/ - kvm_x86_ops.vcpu_after_set_cpuid(vcpu); + static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu); } static int is_efer_nx(void) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index f7737e1df660..1bae16b912a2 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1160,7 +1160,7 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data, addr = gfn_to_hva(kvm, gfn); if (kvm_is_error_hva(addr)) return 1; - kvm_x86_ops.patch_hypercall(vcpu, instructions); + static_call(kvm_x86_patch_hypercall)(vcpu, instructions); ((unsigned char *)instructions)[3] = 0xc3; /* ret */ if (__copy_to_user((void __user *)addr, instructions, 4)) return 1; @@ -1754,7 +1754,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) * hypercall generates UD from non zero cpl and real mode * per HYPER-V spec */ - if (kvm_x86_ops.get_cpl(vcpu) != 0 || !is_protmode(vcpu)) { + if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || !is_protmode(vcpu)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c index 814698e5b152..a035cca82f8f 100644 --- a/arch/x86/kvm/irq.c +++ b/arch/x86/kvm/irq.c @@ -143,8 +143,7 @@ void __kvm_migrate_timers(struct kvm_vcpu *vcpu) { __kvm_migrate_apic_timer(vcpu); __kvm_migrate_pit_timer(vcpu); - if (kvm_x86_ops.migrate_timers) - kvm_x86_ops.migrate_timers(vcpu); + static_call_cond(kvm_x86_migrate_timers)(vcpu); } bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args) diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index a889563ad02d..2e11da2f5621 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h @@ -68,7 +68,7 @@ static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg) return 0; if (!kvm_register_is_available(vcpu, reg)) - kvm_x86_ops.cache_reg(vcpu, reg); + static_call(kvm_x86_cache_reg)(vcpu, reg); return vcpu->arch.regs[reg]; } @@ -108,7 +108,7 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index) might_sleep(); /* on svm */ if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR)) - kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_PDPTR); + static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_PDPTR); return vcpu->arch.walk_mmu->pdptrs[index]; } @@ -118,7 +118,7 @@ static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask) ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS; if ((tmask & vcpu->arch.cr0_guest_owned_bits) && !kvm_register_is_available(vcpu, VCPU_EXREG_CR0)) - kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR0); + static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR0); return vcpu->arch.cr0 & mask; } @@ -132,14 +132,14 @@ static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask) ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS; if ((tmask & vcpu->arch.cr4_guest_owned_bits) && !kvm_register_is_available(vcpu, VCPU_EXREG_CR4)) - kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR4); + static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR4); return vcpu->arch.cr4 & mask; } static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu) { if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3)) - kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR3); + static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR3); return vcpu->arch.cr3; } diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index b49288020c07..1ff0789aac39 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -570,7 +570,7 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic) if (unlikely(vcpu->arch.apicv_active)) { /* need to update RVI */ 
kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR); - kvm_x86_ops.hwapic_irr_update(vcpu, + static_call(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic)); } else { apic->irr_pending = false; @@ -601,7 +601,7 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic) * just set SVI. */ if (unlikely(vcpu->arch.apicv_active)) - kvm_x86_ops.hwapic_isr_update(vcpu, vec); + static_call(kvm_x86_hwapic_isr_update)(vcpu, vec); else { ++apic->isr_count; BUG_ON(apic->isr_count > MAX_APIC_VECTOR); @@ -649,8 +649,8 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic) * and must be left alone. */ if (unlikely(vcpu->arch.apicv_active)) - kvm_x86_ops.hwapic_isr_update(vcpu, - apic_find_highest_isr(apic)); + static_call(kvm_x86_hwapic_isr_update)(vcpu, + apic_find_highest_isr(apic)); else { --apic->isr_count; BUG_ON(apic->isr_count < 0); @@ -787,7 +787,7 @@ static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr) { int highest_irr; if (apic->vcpu->arch.apicv_active) - highest_irr = kvm_x86_ops.sync_pir_to_irr(apic->vcpu); + highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu); else highest_irr = apic_find_highest_irr(apic); if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr) @@ -1180,7 +1180,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, apic->regs + APIC_TMR); } - if (kvm_x86_ops.deliver_posted_interrupt(vcpu, vector)) { + if (static_call(kvm_x86_deliver_posted_interrupt)(vcpu, vector)) { kvm_lapic_set_irr(vector, apic); kvm_make_request(KVM_REQ_EVENT, vcpu); kvm_vcpu_kick(vcpu); @@ -1916,7 +1916,7 @@ static void cancel_hv_timer(struct kvm_lapic *apic) { WARN_ON(preemptible()); WARN_ON(!apic->lapic_timer.hv_timer_in_use); - kvm_x86_ops.cancel_hv_timer(apic->vcpu); + static_call(kvm_x86_cancel_hv_timer)(apic->vcpu); apic->lapic_timer.hv_timer_in_use = false; } @@ -1933,7 +1933,7 @@ static bool start_hv_timer(struct kvm_lapic *apic) if (!ktimer->tscdeadline) return false; - if (kvm_x86_ops.set_hv_timer(vcpu, ktimer->tscdeadline, &expired)) + if (static_call(kvm_x86_set_hv_timer)(vcpu, ktimer->tscdeadline, &expired)) return false; ktimer->hv_timer_in_use = true; @@ -2367,7 +2367,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id); if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) - kvm_x86_ops.set_virtual_apic_mode(vcpu); + static_call(kvm_x86_set_virtual_apic_mode)(vcpu); apic->base_address = apic->vcpu->arch.apic_base & MSR_IA32_APICBASE_BASE; @@ -2444,9 +2444,9 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event) vcpu->arch.pv_eoi.msr_val = 0; apic_update_ppr(apic); if (vcpu->arch.apicv_active) { - kvm_x86_ops.apicv_post_state_restore(vcpu); - kvm_x86_ops.hwapic_irr_update(vcpu, -1); - kvm_x86_ops.hwapic_isr_update(vcpu, -1); + static_call(kvm_x86_apicv_post_state_restore)(vcpu); + static_call(kvm_x86_hwapic_irr_update)(vcpu, -1); + static_call(kvm_x86_hwapic_isr_update)(vcpu, -1); } vcpu->arch.apic_arb_prio = 0; @@ -2707,10 +2707,10 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) kvm_apic_update_apicv(vcpu); apic->highest_isr_cache = -1; if (vcpu->arch.apicv_active) { - kvm_x86_ops.apicv_post_state_restore(vcpu); - kvm_x86_ops.hwapic_irr_update(vcpu, + static_call(kvm_x86_apicv_post_state_restore)(vcpu); + static_call(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic)); - kvm_x86_ops.hwapic_isr_update(vcpu, + static_call(kvm_x86_hwapic_isr_update)(vcpu, apic_find_highest_isr(apic)); } 
kvm_make_request(KVM_REQ_EVENT, vcpu); diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 581925e476d6..650cb3c98b41 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -95,7 +95,7 @@ static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu) if (!VALID_PAGE(root_hpa)) return; - kvm_x86_ops.load_mmu_pgd(vcpu, root_hpa | kvm_get_active_pcid(vcpu), + static_call(kvm_x86_load_mmu_pgd)(vcpu, root_hpa | kvm_get_active_pcid(vcpu), vcpu->arch.mmu->shadow_root_level); } @@ -167,8 +167,8 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned pte_access, unsigned pte_pkey, unsigned pfec) { - int cpl = kvm_x86_ops.get_cpl(vcpu); - unsigned long rflags = kvm_x86_ops.get_rflags(vcpu); + int cpl = static_call(kvm_x86_get_cpl)(vcpu); + unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); /* * If CPL < 3, SMAP prevention are disabled if EFLAGS.AC = 1. diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 6096d0f1a62a..f00974f1152d 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -192,7 +192,7 @@ static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm, int ret = -ENOTSUPP; if (range && kvm_x86_ops.tlb_remote_flush_with_range) - ret = kvm_x86_ops.tlb_remote_flush_with_range(kvm, range); + ret = static_call(kvm_x86_tlb_remote_flush_with_range)(kvm, range); if (ret) kvm_flush_remote_tlbs(kvm); @@ -1279,8 +1279,9 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, gfn_t gfn_offset, unsigned long mask) { if (kvm_x86_ops.enable_log_dirty_pt_masked) - kvm_x86_ops.enable_log_dirty_pt_masked(kvm, slot, gfn_offset, - mask); + static_call(kvm_x86_enable_log_dirty_pt_masked)(kvm, slot, + gfn_offset, + mask); else kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask); } @@ -4853,7 +4854,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu) if (r) goto out; kvm_mmu_load_pgd(vcpu); - kvm_x86_ops.tlb_flush_current(vcpu); + static_call(kvm_x86_tlb_flush_current)(vcpu); out: return r; } @@ -5134,7 +5135,7 @@ void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, if (is_noncanonical_address(gva, vcpu)) return; - kvm_x86_ops.tlb_flush_gva(vcpu, gva); + static_call(kvm_x86_tlb_flush_gva)(vcpu, gva); } if (!mmu->invlpg) @@ -5193,7 +5194,7 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid) } if (tlb_flush) - kvm_x86_ops.tlb_flush_gva(vcpu, gva); + static_call(kvm_x86_tlb_flush_gva)(vcpu, gva); ++vcpu->stat.invlpg; diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c index c51ad544f25b..ef55f0bc4ccf 100644 --- a/arch/x86/kvm/mmu/spte.c +++ b/arch/x86/kvm/mmu/spte.c @@ -120,7 +120,7 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level, if (level > PG_LEVEL_4K) spte |= PT_PAGE_SIZE_MASK; if (tdp_enabled) - spte |= kvm_x86_ops.get_mt_mask(vcpu, gfn, + spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn, kvm_is_mmio_pfn(pfn)); if (host_writable) diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 3306f48e8dc3..f89988dd9278 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -443,7 +443,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data) return 1; if (!(kvm_read_cr4(vcpu) & X86_CR4_PCE) && - (kvm_x86_ops.get_cpl(vcpu) != 0) && + (static_call(kvm_x86_get_cpl)(vcpu) != 0) && (kvm_read_cr0(vcpu) & X86_CR0_PE)) return 1; diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index e06142d1208a..4b7325f3bb14 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -256,7 +256,7 @@ TRACE_EVENT(name, \ 
__entry->guest_rip = kvm_rip_read(vcpu); \ __entry->isa = isa; \ __entry->vcpu_id = vcpu->vcpu_id; \ - kvm_x86_ops.get_exit_info(vcpu, &__entry->info1, \ + static_call(kvm_x86_get_exit_info)(vcpu, &__entry->info1, \ &__entry->info2, \ &__entry->intr_info, \ &__entry->error_code); \ @@ -738,7 +738,7 @@ TRACE_EVENT(kvm_emulate_insn, ), TP_fast_assign( - __entry->csbase = kvm_x86_ops.get_segment_base(vcpu, VCPU_SREG_CS); + __entry->csbase = static_call(kvm_x86_get_segment_base)(vcpu, VCPU_SREG_CS); __entry->len = vcpu->arch.emulate_ctxt->fetch.ptr - vcpu->arch.emulate_ctxt->fetch.data; __entry->rip = vcpu->arch.emulate_ctxt->_eip - __entry->len; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index fb76b7686a4a..e057959a0a89 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -738,7 +738,7 @@ EXPORT_SYMBOL_GPL(kvm_requeue_exception_e); */ bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl) { - if (kvm_x86_ops.get_cpl(vcpu) <= required_cpl) + if (static_call(kvm_x86_get_cpl)(vcpu) <= required_cpl) return true; kvm_queue_exception_e(vcpu, GP_VECTOR, 0); return false; @@ -898,7 +898,7 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) if (!is_pae(vcpu)) return 1; - kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l); + static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l); if (cs_l) return 1; } @@ -911,7 +911,7 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) return 1; - kvm_x86_ops.set_cr0(vcpu, cr0); + static_call(kvm_x86_set_cr0)(vcpu, cr0); kvm_post_set_cr0(vcpu, old_cr0, cr0); @@ -1021,7 +1021,7 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) { - if (kvm_x86_ops.get_cpl(vcpu) != 0 || + if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || __kvm_set_xcr(vcpu, index, xcr)) { kvm_inject_gp(vcpu, 0); return 1; @@ -1082,7 +1082,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) return 1; } - if (kvm_x86_ops.set_cr4(vcpu, cr4)) + if (static_call(kvm_x86_set_cr4)(vcpu, cr4)) return 1; kvm_post_set_cr4(vcpu, old_cr4, cr4); @@ -1169,7 +1169,7 @@ void kvm_update_dr7(struct kvm_vcpu *vcpu) dr7 = vcpu->arch.guest_debug_dr7; else dr7 = vcpu->arch.dr7; - kvm_x86_ops.set_dr7(vcpu, dr7); + static_call(kvm_x86_set_dr7)(vcpu, dr7); vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED; if (dr7 & DR7_BP_EN_MASK) vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; @@ -1498,7 +1498,7 @@ static int kvm_get_msr_feature(struct kvm_msr_entry *msr) rdmsrl_safe(msr->index, &msr->data); break; default: - return kvm_x86_ops.get_msr_feature(msr); + return static_call(kvm_x86_get_msr_feature)(msr); } return 0; } @@ -1569,7 +1569,7 @@ static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info) efer &= ~EFER_LMA; efer |= vcpu->arch.efer & EFER_LMA; - r = kvm_x86_ops.set_efer(vcpu, efer); + r = static_call(kvm_x86_set_efer)(vcpu, efer); if (r) { WARN_ON(r > 0); return r; @@ -1675,7 +1675,7 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data, msr.index = index; msr.host_initiated = host_initiated; - return kvm_x86_ops.set_msr(vcpu, &msr); + return static_call(kvm_x86_set_msr)(vcpu, &msr); } static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu, @@ -1708,7 +1708,7 @@ int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, msr.index = index; msr.host_initiated = host_initiated; - ret = kvm_x86_ops.get_msr(vcpu, &msr); + ret = static_call(kvm_x86_get_msr)(vcpu, &msr); if (!ret) *data = 
msr.data; return ret; @@ -1749,12 +1749,12 @@ static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu) kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32); } - return kvm_x86_ops.complete_emulated_msr(vcpu, err); + return static_call(kvm_x86_complete_emulated_msr)(vcpu, err); } static int complete_emulated_wrmsr(struct kvm_vcpu *vcpu) { - return kvm_x86_ops.complete_emulated_msr(vcpu, vcpu->run->msr.error); + return static_call(kvm_x86_complete_emulated_msr)(vcpu, vcpu->run->msr.error); } static u64 kvm_msr_reason(int r) @@ -1826,7 +1826,7 @@ int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu) trace_kvm_msr_read_ex(ecx); } - return kvm_x86_ops.complete_emulated_msr(vcpu, r); + return static_call(kvm_x86_complete_emulated_msr)(vcpu, r); } EXPORT_SYMBOL_GPL(kvm_emulate_rdmsr); @@ -1852,7 +1852,7 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu) else trace_kvm_msr_write_ex(ecx, data); - return kvm_x86_ops.complete_emulated_msr(vcpu, r); + return static_call(kvm_x86_complete_emulated_msr)(vcpu, r); } EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr); @@ -2284,7 +2284,7 @@ EXPORT_SYMBOL_GPL(kvm_read_l1_tsc); static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) { vcpu->arch.l1_tsc_offset = offset; - vcpu->arch.tsc_offset = kvm_x86_ops.write_l1_tsc_offset(vcpu, offset); + vcpu->arch.tsc_offset = static_call(kvm_x86_write_l1_tsc_offset)(vcpu, offset); } static inline bool kvm_check_tsc_unstable(void) @@ -3041,13 +3041,13 @@ static void kvmclock_reset(struct kvm_vcpu *vcpu) static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu) { ++vcpu->stat.tlb_flush; - kvm_x86_ops.tlb_flush_all(vcpu); + static_call(kvm_x86_tlb_flush_all)(vcpu); } static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu) { ++vcpu->stat.tlb_flush; - kvm_x86_ops.tlb_flush_guest(vcpu); + static_call(kvm_x86_tlb_flush_guest)(vcpu); } static void record_steal_time(struct kvm_vcpu *vcpu) @@ -4007,10 +4007,10 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) * fringe case that is not enabled except via specific settings * of the module parameters. */ - r = kvm_x86_ops.has_emulated_msr(kvm, MSR_IA32_SMBASE); + r = static_call(kvm_x86_has_emulated_msr)(kvm, MSR_IA32_SMBASE); break; case KVM_CAP_VAPIC: - r = !kvm_x86_ops.cpu_has_accelerated_tpr(); + r = !static_call(kvm_x86_cpu_has_accelerated_tpr)(); break; case KVM_CAP_NR_VCPUS: r = KVM_SOFT_MAX_VCPUS; @@ -4271,14 +4271,14 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { /* Address WBINVD may be executed by guest */ if (need_emulate_wbinvd(vcpu)) { - if (kvm_x86_ops.has_wbinvd_exit()) + if (static_call(kvm_x86_has_wbinvd_exit)()) cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); else if (vcpu->cpu != -1 && vcpu->cpu != cpu) smp_call_function_single(vcpu->cpu, wbinvd_ipi, NULL, 1); } - kvm_x86_ops.vcpu_load(vcpu, cpu); + static_call(kvm_x86_vcpu_load)(vcpu, cpu); /* Save host pkru register if supported */ vcpu->arch.host_pkru = read_pkru(); @@ -4372,8 +4372,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) int idx; if (vcpu->preempted && !vcpu->arch.guest_state_protected) - vcpu->arch.preempted_in_kernel = !kvm_x86_ops.get_cpl(vcpu); - + vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu); /* * Disable page faults because we're in atomic context here. 
* kvm_write_guest_offset_cached() would call might_fault() @@ -4391,7 +4390,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) kvm_steal_time_set_preempted(vcpu); srcu_read_unlock(&vcpu->kvm->srcu, idx); pagefault_enable(); - kvm_x86_ops.vcpu_put(vcpu); + static_call(kvm_x86_vcpu_put)(vcpu); vcpu->arch.last_host_tsc = rdtsc(); /* * If userspace has set any breakpoints or watchpoints, dr6 is restored @@ -4405,7 +4404,7 @@ static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) { if (vcpu->arch.apicv_active) - kvm_x86_ops.sync_pir_to_irr(vcpu); + static_call(kvm_x86_sync_pir_to_irr)(vcpu); return kvm_apic_get_state(vcpu, s); } @@ -4524,7 +4523,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, for (bank = 0; bank < bank_num; bank++) vcpu->arch.mce_banks[bank*4] = ~(u64)0; - kvm_x86_ops.setup_mce(vcpu); + static_call(kvm_x86_setup_mce)(vcpu); out: return r; } @@ -4631,11 +4630,11 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft; events->interrupt.nr = vcpu->arch.interrupt.nr; events->interrupt.soft = 0; - events->interrupt.shadow = kvm_x86_ops.get_interrupt_shadow(vcpu); + events->interrupt.shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); events->nmi.injected = vcpu->arch.nmi_injected; events->nmi.pending = vcpu->arch.nmi_pending != 0; - events->nmi.masked = kvm_x86_ops.get_nmi_mask(vcpu); + events->nmi.masked = static_call(kvm_x86_get_nmi_mask)(vcpu); events->nmi.pad = 0; events->sipi_vector = 0; /* never valid when reporting to user space */ @@ -4707,13 +4706,13 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, vcpu->arch.interrupt.nr = events->interrupt.nr; vcpu->arch.interrupt.soft = events->interrupt.soft; if (events->flags & KVM_VCPUEVENT_VALID_SHADOW) - kvm_x86_ops.set_interrupt_shadow(vcpu, - events->interrupt.shadow); + static_call(kvm_x86_set_interrupt_shadow)(vcpu, + events->interrupt.shadow); vcpu->arch.nmi_injected = events->nmi.injected; if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) vcpu->arch.nmi_pending = events->nmi.pending; - kvm_x86_ops.set_nmi_mask(vcpu, events->nmi.masked); + static_call(kvm_x86_set_nmi_mask)(vcpu, events->nmi.masked); if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR && lapic_in_kernel(vcpu)) @@ -4914,7 +4913,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, if (!kvm_x86_ops.enable_direct_tlbflush) return -ENOTTY; - return kvm_x86_ops.enable_direct_tlbflush(vcpu); + return static_call(kvm_x86_enable_direct_tlbflush)(vcpu); case KVM_CAP_ENFORCE_PV_FEATURE_CPUID: vcpu->arch.pv_cpuid.enforce = cap->args[0]; @@ -5345,14 +5344,14 @@ static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr) if (addr > (unsigned int)(-3 * PAGE_SIZE)) return -EINVAL; - ret = kvm_x86_ops.set_tss_addr(kvm, addr); + ret = static_call(kvm_x86_set_tss_addr)(kvm, addr); return ret; } static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm, u64 ident_addr) { - return kvm_x86_ops.set_identity_map_addr(kvm, ident_addr); + return static_call(kvm_x86_set_identity_map_addr)(kvm, ident_addr); } static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, @@ -5509,8 +5508,7 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) /* * Flush potentially hardware-cached dirty pages to dirty_bitmap. 
*/ - if (kvm_x86_ops.flush_log_dirty) - kvm_x86_ops.flush_log_dirty(kvm); + static_call_cond(kvm_x86_flush_log_dirty)(kvm); } int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, @@ -6077,7 +6075,7 @@ long kvm_arch_vm_ioctl(struct file *filp, case KVM_MEMORY_ENCRYPT_OP: { r = -ENOTTY; if (kvm_x86_ops.mem_enc_op) - r = kvm_x86_ops.mem_enc_op(kvm, argp); + r = static_call(kvm_x86_mem_enc_op)(kvm, argp); break; } case KVM_MEMORY_ENCRYPT_REG_REGION: { @@ -6089,7 +6087,7 @@ long kvm_arch_vm_ioctl(struct file *filp, r = -ENOTTY; if (kvm_x86_ops.mem_enc_reg_region) - r = kvm_x86_ops.mem_enc_reg_region(kvm, ®ion); + r = static_call(kvm_x86_mem_enc_reg_region)(kvm, ®ion); break; } case KVM_MEMORY_ENCRYPT_UNREG_REGION: { @@ -6101,7 +6099,7 @@ long kvm_arch_vm_ioctl(struct file *filp, r = -ENOTTY; if (kvm_x86_ops.mem_enc_unreg_region) - r = kvm_x86_ops.mem_enc_unreg_region(kvm, ®ion); + r = static_call(kvm_x86_mem_enc_unreg_region)(kvm, ®ion); break; } case KVM_HYPERV_EVENTFD: { @@ -6223,7 +6221,7 @@ static void kvm_init_msr_list(void) } for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) { - if (!kvm_x86_ops.has_emulated_msr(NULL, emulated_msrs_all[i])) + if (!static_call(kvm_x86_has_emulated_msr)(NULL, emulated_msrs_all[i])) continue; emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i]; @@ -6286,13 +6284,13 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) static void kvm_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { - kvm_x86_ops.set_segment(vcpu, var, seg); + static_call(kvm_x86_set_segment)(vcpu, var, seg); } void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { - kvm_x86_ops.get_segment(vcpu, var, seg); + static_call(kvm_x86_get_segment)(vcpu, var, seg); } gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, @@ -6312,7 +6310,7 @@ gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { - u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; + u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read); @@ -6320,7 +6318,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read); gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { - u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; + u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; access |= PFERR_FETCH_MASK; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } @@ -6328,7 +6326,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read); gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { - u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; + u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; access |= PFERR_WRITE_MASK; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } @@ -6378,7 +6376,7 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); - u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; + u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? 
PFERR_USER_MASK : 0; unsigned offset; int ret; @@ -6403,7 +6401,7 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception) { - u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; + u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; /* * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED @@ -6424,7 +6422,7 @@ static int emulator_read_std(struct x86_emulate_ctxt *ctxt, struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); u32 access = 0; - if (!system && kvm_x86_ops.get_cpl(vcpu) == 3) + if (!system && static_call(kvm_x86_get_cpl)(vcpu) == 3) access |= PFERR_USER_MASK; return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); @@ -6477,7 +6475,7 @@ static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *v struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); u32 access = PFERR_WRITE_MASK; - if (!system && kvm_x86_ops.get_cpl(vcpu) == 3) + if (!system && static_call(kvm_x86_get_cpl)(vcpu) == 3) access |= PFERR_USER_MASK; return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, @@ -6498,8 +6496,8 @@ EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system); static int kvm_can_emulate_insn(struct kvm_vcpu *vcpu, int emul_type, void *insn, int insn_len) { - return kvm_x86_ops.can_emulate_instruction(vcpu, emul_type, - insn, insn_len); + return static_call(kvm_x86_can_emulate_instruction)(vcpu, emul_type, + insn, insn_len); } int handle_ud(struct kvm_vcpu *vcpu) @@ -6543,7 +6541,7 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, gpa_t *gpa, struct x86_exception *exception, bool write) { - u32 access = ((kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0) + u32 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0) | (write ? 
PFERR_WRITE_MASK : 0); /* @@ -6998,7 +6996,7 @@ static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt, static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) { - return kvm_x86_ops.get_segment_base(vcpu, seg); + return static_call(kvm_x86_get_segment_base)(vcpu, seg); } static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address) @@ -7011,7 +7009,7 @@ static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu) if (!need_emulate_wbinvd(vcpu)) return X86EMUL_CONTINUE; - if (kvm_x86_ops.has_wbinvd_exit()) { + if (static_call(kvm_x86_has_wbinvd_exit)()) { int cpu = get_cpu(); cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); @@ -7116,27 +7114,27 @@ static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val) static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt) { - return kvm_x86_ops.get_cpl(emul_to_vcpu(ctxt)); + return static_call(kvm_x86_get_cpl)(emul_to_vcpu(ctxt)); } static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) { - kvm_x86_ops.get_gdt(emul_to_vcpu(ctxt), dt); + static_call(kvm_x86_get_gdt)(emul_to_vcpu(ctxt), dt); } static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) { - kvm_x86_ops.get_idt(emul_to_vcpu(ctxt), dt); + static_call(kvm_x86_get_idt)(emul_to_vcpu(ctxt), dt); } static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) { - kvm_x86_ops.set_gdt(emul_to_vcpu(ctxt), dt); + static_call(kvm_x86_set_gdt)(emul_to_vcpu(ctxt), dt); } static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) { - kvm_x86_ops.set_idt(emul_to_vcpu(ctxt), dt); + static_call(kvm_x86_set_idt)(emul_to_vcpu(ctxt), dt); } static unsigned long emulator_get_cached_segment_base( @@ -7278,7 +7276,7 @@ static int emulator_intercept(struct x86_emulate_ctxt *ctxt, struct x86_instruction_info *info, enum x86_intercept_stage stage) { - return kvm_x86_ops.check_intercept(emul_to_vcpu(ctxt), info, stage, + return static_call(kvm_x86_check_intercept)(emul_to_vcpu(ctxt), info, stage, &ctxt->exception); } @@ -7321,7 +7319,7 @@ static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulon static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked) { - kvm_x86_ops.set_nmi_mask(emul_to_vcpu(ctxt), masked); + static_call(kvm_x86_set_nmi_mask)(emul_to_vcpu(ctxt), masked); } static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt) @@ -7340,7 +7338,7 @@ static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_fla static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt, const char *smstate) { - return kvm_x86_ops.pre_leave_smm(emul_to_vcpu(ctxt), smstate); + return static_call(kvm_x86_pre_leave_smm)(emul_to_vcpu(ctxt), smstate); } static void emulator_post_leave_smm(struct x86_emulate_ctxt *ctxt) @@ -7404,7 +7402,7 @@ static const struct x86_emulate_ops emulate_ops = { static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) { - u32 int_shadow = kvm_x86_ops.get_interrupt_shadow(vcpu); + u32 int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); /* * an sti; sti; sequence only disable interrupts for the first * instruction. 
So, if the last instruction, be it emulated or @@ -7415,7 +7413,7 @@ static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) if (int_shadow & mask) mask = 0; if (unlikely(int_shadow || mask)) { - kvm_x86_ops.set_interrupt_shadow(vcpu, mask); + static_call(kvm_x86_set_interrupt_shadow)(vcpu, mask); if (!mask) kvm_make_request(KVM_REQ_EVENT, vcpu); } @@ -7457,7 +7455,7 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu) struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; int cs_db, cs_l; - kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l); + static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l); ctxt->gpa_available = false; ctxt->eflags = kvm_get_rflags(vcpu); @@ -7523,7 +7521,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type) kvm_queue_exception(vcpu, UD_VECTOR); - if (!is_guest_mode(vcpu) && kvm_x86_ops.get_cpl(vcpu) == 0) { + if (!is_guest_mode(vcpu) && static_call(kvm_x86_get_cpl)(vcpu) == 0) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; vcpu->run->internal.ndata = 0; @@ -7704,10 +7702,10 @@ static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu) int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu) { - unsigned long rflags = kvm_x86_ops.get_rflags(vcpu); + unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); int r; - r = kvm_x86_ops.skip_emulated_instruction(vcpu); + r = static_call(kvm_x86_skip_emulated_instruction)(vcpu); if (unlikely(!r)) return 0; @@ -7967,7 +7965,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, r = 1; if (writeback) { - unsigned long rflags = kvm_x86_ops.get_rflags(vcpu); + unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); toggle_interruptibility(vcpu, ctxt->interruptibility); vcpu->arch.emulate_regs_need_sync_to_vcpu = false; if (!ctxt->have_exception || @@ -7976,7 +7974,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) r = kvm_vcpu_do_singlestep(vcpu); if (kvm_x86_ops.update_emulated_instruction) - kvm_x86_ops.update_emulated_instruction(vcpu); + static_call(kvm_x86_update_emulated_instruction)(vcpu); __kvm_set_rflags(vcpu, ctxt->eflags); } @@ -8303,7 +8301,7 @@ static int kvm_is_user_mode(void) int user_mode = 3; if (__this_cpu_read(current_vcpu)) - user_mode = kvm_x86_ops.get_cpl(__this_cpu_read(current_vcpu)); + user_mode = static_call(kvm_x86_get_cpl)(__this_cpu_read(current_vcpu)); return user_mode != 0; } @@ -8659,7 +8657,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) a3 &= 0xFFFFFFFF; } - if (kvm_x86_ops.get_cpl(vcpu) != 0 && + if (static_call(kvm_x86_get_cpl)(vcpu) != 0 && !(nr == KVM_HC_VM_ATTESTATION || nr == KVM_HC_PSP_OP_OBSOLETE || nr == KVM_HC_PSP_COPY_FORWARD_OP @@ -8754,7 +8752,7 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) char instruction[3]; unsigned long rip = kvm_rip_read(vcpu); - kvm_x86_ops.patch_hypercall(vcpu, instruction); + static_call(kvm_x86_patch_hypercall)(vcpu, instruction); return emulator_write_emulated(ctxt, rip, instruction, 3, &ctxt->exception, false); @@ -8804,14 +8802,14 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu) tpr = kvm_lapic_get_cr8(vcpu); - kvm_x86_ops.update_cr8_intercept(vcpu, tpr, max_irr); + static_call(kvm_x86_update_cr8_intercept)(vcpu, tpr, max_irr); } static void kvm_inject_exception(struct kvm_vcpu *vcpu) { if (vcpu->arch.exception.error_code && !is_protmode(vcpu)) vcpu->arch.exception.error_code = 
false; - kvm_x86_ops.queue_exception(vcpu); + static_call(kvm_x86_queue_exception)(vcpu); } static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit) @@ -8841,10 +8839,10 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit */ else if (!vcpu->arch.exception.pending) { if (vcpu->arch.nmi_injected) { - kvm_x86_ops.set_nmi(vcpu); + static_call(kvm_x86_set_nmi)(vcpu); can_inject = false; } else if (vcpu->arch.interrupt.injected) { - kvm_x86_ops.set_irq(vcpu); + static_call(kvm_x86_set_irq)(vcpu); can_inject = false; } } @@ -8901,7 +8899,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit * The kvm_x86_ops hooks communicate this by returning -EBUSY. */ if (vcpu->arch.smi_pending) { - r = can_inject ? kvm_x86_ops.smi_allowed(vcpu, true) : -EBUSY; + r = can_inject ? static_call(kvm_x86_smi_allowed)(vcpu, true) : -EBUSY; if (r < 0) goto busy; if (r) { @@ -8910,35 +8908,35 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit enter_smm(vcpu); can_inject = false; } else - kvm_x86_ops.enable_smi_window(vcpu); + static_call(kvm_x86_enable_smi_window)(vcpu); } if (vcpu->arch.nmi_pending) { - r = can_inject ? kvm_x86_ops.nmi_allowed(vcpu, true) : -EBUSY; + r = can_inject ? static_call(kvm_x86_nmi_allowed)(vcpu, true) : -EBUSY; if (r < 0) goto busy; if (r) { --vcpu->arch.nmi_pending; vcpu->arch.nmi_injected = true; - kvm_x86_ops.set_nmi(vcpu); + static_call(kvm_x86_set_nmi)(vcpu); can_inject = false; - WARN_ON(kvm_x86_ops.nmi_allowed(vcpu, true) < 0); + WARN_ON(static_call(kvm_x86_nmi_allowed)(vcpu, true) < 0); } if (vcpu->arch.nmi_pending) - kvm_x86_ops.enable_nmi_window(vcpu); + static_call(kvm_x86_enable_nmi_window)(vcpu); } if (kvm_cpu_has_injectable_intr(vcpu)) { - r = can_inject ? kvm_x86_ops.interrupt_allowed(vcpu, true) : -EBUSY; + r = can_inject ? static_call(kvm_x86_interrupt_allowed)(vcpu, true) : -EBUSY; if (r < 0) goto busy; if (r) { kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false); - kvm_x86_ops.set_irq(vcpu); - WARN_ON(kvm_x86_ops.interrupt_allowed(vcpu, true) < 0); + static_call(kvm_x86_set_irq)(vcpu); + WARN_ON(static_call(kvm_x86_interrupt_allowed)(vcpu, true) < 0); } if (kvm_cpu_has_injectable_intr(vcpu)) - kvm_x86_ops.enable_irq_window(vcpu); + static_call(kvm_x86_enable_irq_window)(vcpu); } if (is_guest_mode(vcpu) && @@ -8963,7 +8961,7 @@ static void process_nmi(struct kvm_vcpu *vcpu) * If an NMI is already in progress, limit further NMIs to just one. * Otherwise, allow two (and we'll inject the first one immediately). 
*/ - if (kvm_x86_ops.get_nmi_mask(vcpu) || vcpu->arch.nmi_injected) + if (static_call(kvm_x86_get_nmi_mask)(vcpu) || vcpu->arch.nmi_injected) limit = 1; vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); @@ -9053,11 +9051,11 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf) put_smstate(u32, buf, 0x7f7c, seg.limit); put_smstate(u32, buf, 0x7f78, enter_smm_get_segment_flags(&seg)); - kvm_x86_ops.get_gdt(vcpu, &dt); + static_call(kvm_x86_get_gdt)(vcpu, &dt); put_smstate(u32, buf, 0x7f74, dt.address); put_smstate(u32, buf, 0x7f70, dt.size); - kvm_x86_ops.get_idt(vcpu, &dt); + static_call(kvm_x86_get_idt)(vcpu, &dt); put_smstate(u32, buf, 0x7f58, dt.address); put_smstate(u32, buf, 0x7f54, dt.size); @@ -9107,7 +9105,7 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf) put_smstate(u32, buf, 0x7e94, seg.limit); put_smstate(u64, buf, 0x7e98, seg.base); - kvm_x86_ops.get_idt(vcpu, &dt); + static_call(kvm_x86_get_idt)(vcpu, &dt); put_smstate(u32, buf, 0x7e84, dt.size); put_smstate(u64, buf, 0x7e88, dt.address); @@ -9117,7 +9115,7 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf) put_smstate(u32, buf, 0x7e74, seg.limit); put_smstate(u64, buf, 0x7e78, seg.base); - kvm_x86_ops.get_gdt(vcpu, &dt); + static_call(kvm_x86_get_gdt)(vcpu, &dt); put_smstate(u32, buf, 0x7e64, dt.size); put_smstate(u64, buf, 0x7e68, dt.address); @@ -9147,28 +9145,28 @@ static void enter_smm(struct kvm_vcpu *vcpu) * vCPU state (e.g. leave guest mode) after we've saved the state into * the SMM state-save area. */ - kvm_x86_ops.pre_enter_smm(vcpu, buf); + static_call(kvm_x86_pre_enter_smm)(vcpu, buf); vcpu->arch.hflags |= HF_SMM_MASK; kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf)); - if (kvm_x86_ops.get_nmi_mask(vcpu)) + if (static_call(kvm_x86_get_nmi_mask)(vcpu)) vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; else - kvm_x86_ops.set_nmi_mask(vcpu, true); + static_call(kvm_x86_set_nmi_mask)(vcpu, true); kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); kvm_rip_write(vcpu, 0x8000); cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG); - kvm_x86_ops.set_cr0(vcpu, cr0); + static_call(kvm_x86_set_cr0)(vcpu, cr0); vcpu->arch.cr0 = cr0; - kvm_x86_ops.set_cr4(vcpu, 0); + static_call(kvm_x86_set_cr4)(vcpu, 0); /* Undocumented: IDT limit is set to zero on entry to SMM. 
*/ dt.address = dt.size = 0; - kvm_x86_ops.set_idt(vcpu, &dt); + static_call(kvm_x86_set_idt)(vcpu, &dt); __kvm_set_dr(vcpu, 7, DR7_FIXED_1); @@ -9199,7 +9197,7 @@ static void enter_smm(struct kvm_vcpu *vcpu) #ifdef CONFIG_X86_64 if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) - kvm_x86_ops.set_efer(vcpu, 0); + static_call(kvm_x86_set_efer)(vcpu, 0); #endif kvm_update_cpuid_runtime(vcpu); @@ -9237,7 +9235,7 @@ void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu) vcpu->arch.apicv_active = kvm_apicv_activated(vcpu->kvm); kvm_apic_update_apicv(vcpu); - kvm_x86_ops.refresh_apicv_exec_ctrl(vcpu); + static_call(kvm_x86_refresh_apicv_exec_ctrl)(vcpu); } EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv); @@ -9254,7 +9252,7 @@ void kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit) unsigned long old, new, expected; if (!kvm_x86_ops.check_apicv_inhibit_reasons || - !kvm_x86_ops.check_apicv_inhibit_reasons(bit)) + !static_call(kvm_x86_check_apicv_inhibit_reasons)(bit)) return; old = READ_ONCE(kvm->arch.apicv_inhibit_reasons); @@ -9274,7 +9272,7 @@ void kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit) trace_kvm_apicv_update_request(activate, bit); if (kvm_x86_ops.pre_update_apicv_exec_ctrl) - kvm_x86_ops.pre_update_apicv_exec_ctrl(kvm, activate); + static_call(kvm_x86_pre_update_apicv_exec_ctrl)(kvm, activate); /* * Sending request to update APICV for all other vcpus, @@ -9300,7 +9298,7 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors); else { if (vcpu->arch.apicv_active) - kvm_x86_ops.sync_pir_to_irr(vcpu); + static_call(kvm_x86_sync_pir_to_irr)(vcpu); if (ioapic_in_kernel(vcpu->kvm)) kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); } @@ -9320,7 +9318,7 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu) bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors, vcpu_to_synic(vcpu)->vec_bitmap, 256); - kvm_x86_ops.load_eoi_exitmap(vcpu, eoi_exit_bitmap); + static_call(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap); } void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, @@ -9351,7 +9349,7 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) if (!kvm_x86_ops.set_apic_access_page_addr) return; - kvm_x86_ops.set_apic_access_page_addr(vcpu); + static_call(kvm_x86_set_apic_access_page_addr)(vcpu); } void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu) @@ -9485,7 +9483,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) if (kvm_check_request(KVM_REQ_APF_READY, vcpu)) kvm_check_async_pf_completion(vcpu); if (kvm_check_request(KVM_REQ_MSR_FILTER_CHANGED, vcpu)) - kvm_x86_ops.msr_filter_changed(vcpu); + static_call(kvm_x86_msr_filter_changed)(vcpu); } if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) { @@ -9498,7 +9496,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) inject_pending_event(vcpu, &req_immediate_exit); if (req_int_win) - kvm_x86_ops.enable_irq_window(vcpu); + static_call(kvm_x86_enable_irq_window)(vcpu); if (kvm_lapic_enabled(vcpu)) { update_cr8_intercept(vcpu); @@ -9513,7 +9511,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) preempt_disable(); - kvm_x86_ops.prepare_guest_switch(vcpu); + static_call(kvm_x86_prepare_guest_switch)(vcpu); /* * Disable IRQs before setting IN_GUEST_MODE. Posted interrupt @@ -9544,7 +9542,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) * notified with kvm_vcpu_kick. 
*/ if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active) - kvm_x86_ops.sync_pir_to_irr(vcpu); + static_call(kvm_x86_sync_pir_to_irr)(vcpu); if (kvm_vcpu_exit_request(vcpu)) { vcpu->mode = OUTSIDE_GUEST_MODE; @@ -9558,7 +9556,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) if (req_immediate_exit) { kvm_make_request(KVM_REQ_EVENT, vcpu); - kvm_x86_ops.request_immediate_exit(vcpu); + static_call(kvm_x86_request_immediate_exit)(vcpu); } trace_kvm_entry(vcpu); @@ -9582,7 +9580,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) set_debugreg(0, 7); } - exit_fastpath = kvm_x86_ops.run(vcpu); + exit_fastpath = static_call(kvm_x86_run)(vcpu); /* * Do this here before restoring debug registers on the host. And @@ -9592,7 +9590,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) */ if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); - kvm_x86_ops.sync_dirty_debug_regs(vcpu); + static_call(kvm_x86_sync_dirty_debug_regs)(vcpu); kvm_update_dr0123(vcpu); kvm_update_dr7(vcpu); vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; @@ -9622,7 +9620,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) if (vcpu->arch.xfd_no_write_intercept) fpu_sync_guest_vmexit_xfd_state(); - kvm_x86_ops.handle_exit_irqoff(vcpu); + static_call(kvm_x86_handle_exit_irqoff)(vcpu); if (vcpu->arch.guest_fpu.xfd_err) wrmsrl(MSR_IA32_XFD_ERR, 0); @@ -9676,13 +9674,13 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) if (vcpu->arch.apic_attention) kvm_lapic_sync_from_vapic(vcpu); - r = kvm_x86_ops.handle_exit(vcpu, exit_fastpath); + r = static_call(kvm_x86_handle_exit)(vcpu, exit_fastpath); return r; cancel_injection: if (req_immediate_exit) kvm_make_request(KVM_REQ_EVENT, vcpu); - kvm_x86_ops.cancel_injection(vcpu); + static_call(kvm_x86_cancel_injection)(vcpu); if (unlikely(vcpu->arch.apic_attention)) kvm_lapic_sync_from_vapic(vcpu); out: @@ -9692,13 +9690,13 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) { if (!kvm_arch_vcpu_runnable(vcpu) && - (!kvm_x86_ops.pre_block || kvm_x86_ops.pre_block(vcpu) == 0)) { + (!kvm_x86_ops.pre_block || static_call(kvm_x86_pre_block)(vcpu) == 0)) { srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); kvm_vcpu_block(vcpu); vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); if (kvm_x86_ops.post_block) - kvm_x86_ops.post_block(vcpu); + static_call(kvm_x86_post_block)(vcpu); if (!kvm_check_request(KVM_REQ_UNHALT, vcpu)) return 1; @@ -10077,10 +10075,10 @@ static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); - kvm_x86_ops.get_idt(vcpu, &dt); + static_call(kvm_x86_get_idt)(vcpu, &dt); sregs->idt.limit = dt.size; sregs->idt.base = dt.address; - kvm_x86_ops.get_gdt(vcpu, &dt); + static_call(kvm_x86_get_gdt)(vcpu, &dt); sregs->gdt.limit = dt.size; sregs->gdt.base = dt.address; @@ -10237,10 +10235,10 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) if (!vcpu->arch.guest_state_protected) { dt.size = sregs->idt.limit; dt.address = sregs->idt.base; - kvm_x86_ops.set_idt(vcpu, &dt); + static_call(kvm_x86_set_idt)(vcpu, &dt); dt.size = sregs->gdt.limit; dt.address = sregs->gdt.base; - kvm_x86_ops.set_gdt(vcpu, &dt); + static_call(kvm_x86_set_gdt)(vcpu, &dt); vcpu->arch.cr2 = sregs->cr2; mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; @@ -10251,16 +10249,16 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct 
kvm_sregs *sregs) kvm_set_cr8(vcpu, sregs->cr8); mmu_reset_needed |= vcpu->arch.efer != sregs->efer; - kvm_x86_ops.set_efer(vcpu, sregs->efer); + static_call(kvm_x86_set_efer)(vcpu, sregs->efer); mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; - kvm_x86_ops.set_cr0(vcpu, sregs->cr0); + static_call(kvm_x86_set_cr0)(vcpu, sregs->cr0); vcpu->arch.cr0 = sregs->cr0; mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE)); - kvm_x86_ops.set_cr4(vcpu, sregs->cr4); + static_call(kvm_x86_set_cr4)(vcpu, sregs->cr4); if (cpuid_update_needed) kvm_update_cpuid_runtime(vcpu); @@ -10374,7 +10372,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, */ kvm_set_rflags(vcpu, rflags); - kvm_x86_ops.update_exception_bitmap(vcpu); + static_call(kvm_x86_update_exception_bitmap)(vcpu); r = 0; @@ -10583,7 +10581,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) kvm_hv_vcpu_init(vcpu); - r = kvm_x86_ops.vcpu_create(vcpu); + r = static_call(kvm_x86_vcpu_create)(vcpu); if (r) goto free_guest_fpu; @@ -10641,7 +10639,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) kvmclock_reset(vcpu); - kvm_x86_ops.vcpu_free(vcpu); + static_call(kvm_x86_vcpu_free)(vcpu); kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); @@ -10729,7 +10727,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) vcpu->arch.ia32_xss = 0; - kvm_x86_ops.vcpu_reset(vcpu, init_event); + static_call(kvm_x86_vcpu_reset)(vcpu, init_event); /* * Reset the MMU context if paging was enabled prior to INIT (which is @@ -10766,7 +10764,7 @@ int kvm_arch_hardware_enable(void) bool stable, backwards_tsc = false; kvm_user_return_msr_cpu_online(); - ret = kvm_x86_ops.hardware_enable(); + ret = static_call(kvm_x86_hardware_enable)(); if (ret != 0) return ret; @@ -10848,7 +10846,7 @@ int kvm_arch_hardware_enable(void) void kvm_arch_hardware_disable(void) { - kvm_x86_ops.hardware_disable(); + static_call(kvm_x86_hardware_disable)(); drop_user_return_notifiers(); } @@ -10869,6 +10867,7 @@ int kvm_arch_hardware_setup(void *opaque) return r; memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops)); + kvm_ops_static_call_update(); if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES)) supported_xss = 0; @@ -10902,7 +10901,7 @@ int kvm_arch_hardware_setup(void *opaque) void kvm_arch_hardware_unsetup(void) { - kvm_x86_ops.hardware_unsetup(); + static_call(kvm_x86_hardware_unsetup)(); } int kvm_arch_check_processor_compat(void *opaque) @@ -10942,7 +10941,7 @@ void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) pmu->need_cleanup = true; kvm_make_request(KVM_REQ_PMU, vcpu); } - kvm_x86_ops.sched_in(vcpu, cpu); + static_call(kvm_x86_sched_in)(vcpu, cpu); } void kvm_arch_free_vm(struct kvm *kvm) @@ -10991,7 +10990,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm_hv_init_vm(kvm); kvm_mmu_init_vm(kvm); - return kvm_x86_ops.vm_init(kvm); + return static_call(kvm_x86_vm_init)(kvm); } int kvm_arch_post_init_vm(struct kvm *kvm) @@ -11098,8 +11097,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) mutex_unlock(&kvm->slots_lock); } kvm_unload_vcpu_mmus(kvm); - if (kvm_x86_ops.vm_destroy) - kvm_x86_ops.vm_destroy(kvm); + static_call_cond(kvm_x86_vm_destroy)(kvm); kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1)); kvm_pic_destroy(kvm); kvm_ioapic_destroy(kvm); @@ -11292,7 +11290,7 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm, */ if (new->flags & 
KVM_MEM_LOG_DIRTY_PAGES) { if (kvm_x86_ops.slot_enable_log_dirty) { - kvm_x86_ops.slot_enable_log_dirty(kvm, new); + static_call(kvm_x86_slot_enable_log_dirty)(kvm, new); } else { int level = kvm_dirty_log_manual_protect_and_init_set(kvm) ? @@ -11309,8 +11307,7 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm, kvm_mmu_slot_remove_write_access(kvm, new, level); } } else { - if (kvm_x86_ops.slot_disable_log_dirty) - kvm_x86_ops.slot_disable_log_dirty(kvm, new); + static_call_cond(kvm_x86_slot_disable_log_dirty)(kvm, new); } } @@ -11349,7 +11346,7 @@ static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) { return (is_guest_mode(vcpu) && kvm_x86_ops.guest_apic_has_interrupt && - kvm_x86_ops.guest_apic_has_interrupt(vcpu)); + static_call(kvm_x86_guest_apic_has_interrupt)(vcpu)); } static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) @@ -11368,12 +11365,12 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) if (kvm_test_request(KVM_REQ_NMI, vcpu) || (vcpu->arch.nmi_pending && - kvm_x86_ops.nmi_allowed(vcpu, false))) + static_call(kvm_x86_nmi_allowed)(vcpu, false))) return true; if (kvm_test_request(KVM_REQ_SMI, vcpu) || (vcpu->arch.smi_pending && - kvm_x86_ops.smi_allowed(vcpu, false))) + static_call(kvm_x86_smi_allowed)(vcpu, false))) return true; if (kvm_arch_interrupt_allowed(vcpu) && @@ -11407,7 +11404,7 @@ bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) kvm_test_request(KVM_REQ_EVENT, vcpu)) return true; - if (vcpu->arch.apicv_active && kvm_x86_ops.dy_apicv_has_pending_interrupt(vcpu)) + if (vcpu->arch.apicv_active && static_call(kvm_x86_dy_apicv_has_pending_interrupt)(vcpu)) return true; return false; @@ -11428,7 +11425,7 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) { - return kvm_x86_ops.interrupt_allowed(vcpu, false); + return static_call(kvm_x86_interrupt_allowed)(vcpu, false); } unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu) @@ -11454,7 +11451,7 @@ unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu) { unsigned long rflags; - rflags = kvm_x86_ops.get_rflags(vcpu); + rflags = static_call(kvm_x86_get_rflags)(vcpu); if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) rflags &= ~X86_EFLAGS_TF; return rflags; @@ -11466,7 +11463,7 @@ static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) rflags |= X86_EFLAGS_TF; - kvm_x86_ops.set_rflags(vcpu, rflags); + static_call(kvm_x86_set_rflags)(vcpu, rflags); } void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) @@ -11596,7 +11593,7 @@ static bool kvm_can_deliver_async_pf(struct kvm_vcpu *vcpu) return false; if (!kvm_pv_async_pf_enabled(vcpu) || - (vcpu->arch.apf.send_user_only && kvm_x86_ops.get_cpl(vcpu) == 0)) + (vcpu->arch.apf.send_user_only && static_call(kvm_x86_get_cpl)(vcpu) == 0)) return false; return true; @@ -11741,7 +11738,7 @@ int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons, irqfd->producer = prod; kvm_arch_start_assignment(irqfd->kvm); - ret = kvm_x86_ops.update_pi_irte(irqfd->kvm, + ret = static_call(kvm_x86_update_pi_irte)(irqfd->kvm, prod->irq, irqfd->gsi, 1); if (ret) @@ -11766,7 +11763,7 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, * when the irq is masked/disabled or the consumer side (KVM * int this case doesn't want to receive the interrupts. 
*/ - ret = kvm_x86_ops.update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0); + ret = static_call(kvm_x86_update_pi_irte)(irqfd->kvm, prod->irq, irqfd->gsi, 0); if (ret) printk(KERN_INFO "irq bypass consumer (token %p) unregistration" " fails: %d\n", irqfd->consumer.token, ret); @@ -11777,7 +11774,7 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq, bool set) { - return kvm_x86_ops.update_pi_irte(kvm, host_irq, guest_irq, set); + return static_call(kvm_x86_update_pi_irte)(kvm, host_irq, guest_irq, set); } bool kvm_vector_hashing_enabled(void) diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 1ecbfe7bc66e..a371d35050d8 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -99,7 +99,7 @@ static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu) if (!is_long_mode(vcpu)) return false; - kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l); + static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l); return cs_l; } @@ -140,7 +140,7 @@ static inline bool mmu_is_nested(struct kvm_vcpu *vcpu) static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu) { ++vcpu->stat.tlb_flush; - kvm_x86_ops.tlb_flush_current(vcpu); + static_call(kvm_x86_tlb_flush_current)(vcpu); } static inline int is_pae(struct kvm_vcpu *vcpu) @@ -255,7 +255,7 @@ static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk) static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu) { - return is_smm(vcpu) || kvm_x86_ops.apic_init_signal_blocked(vcpu); + return is_smm(vcpu) || static_call(kvm_x86_apic_init_signal_blocked)(vcpu); } void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip); -- Gitee From 5e1ba2e86ab0d315f17fb7a22045d75c9fd3e4c6 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Sat, 8 Nov 2025 02:55:55 +0800 Subject: [PATCH 04/10] KVM: x86: return 1 unconditionally for availability of KVM_CAP_VAPIC ANBZ: #27210 commit 8a2897853c53fd3d0e381a46b194889cf6da3391 upstream. The two ioctls used to implement userspace-accelerated TPR, KVM_TPR_ACCESS_REPORTING and KVM_SET_VAPIC_ADDR, are available even if hardware-accelerated TPR can be used. So there is no reason not to report KVM_CAP_VAPIC. 
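To make the userspace-visible effect concrete, a minimal probe is sketched below (illustrative only, not part of this patch; error handling is elided). After this change the system-scoped KVM_CHECK_EXTENSION reports the capability unconditionally instead of reflecting cpu_has_accelerated_tpr():

  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  int main(void)
  {
  	int kvm_fd = open("/dev/kvm", O_RDWR);

  	if (kvm_fd < 0)
  		return 1;

  	/* With this patch applied, the ioctl always returns 1. */
  	printf("KVM_CAP_VAPIC: %d\n",
  	       ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_VAPIC));
  	return 0;
  }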
Hygon-SIG: commit 8a2897853c53 KVM: x86: return 1 unconditionally for availability of KVM_CAP_VAPIC backport comment for "kvm: x86: better handling of optional kvm_x86_ops" Reviewed-by: Sean Christopherson Signed-off-by: Paolo Bonzini Signed-off-by: Wei Wang [ Wei Wang: amend commit log and resolve the conflicts ] Cc: hygon-arch@list.openanolis.cn --- arch/x86/include/asm/kvm-x86-ops.h | 1 - arch/x86/include/asm/kvm_host.h | 1 - arch/x86/kvm/svm/svm.c | 6 ------ arch/x86/kvm/vmx/vmx.c | 6 ------ arch/x86/kvm/x86.c | 4 +--- 5 files changed, 1 insertion(+), 17 deletions(-) diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index 549d7e54801d..8774c6c563a6 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -15,7 +15,6 @@ BUILD_BUG_ON(1) KVM_X86_OP_NULL(hardware_enable) KVM_X86_OP_NULL(hardware_disable) KVM_X86_OP_NULL(hardware_unsetup) -KVM_X86_OP_NULL(cpu_has_accelerated_tpr) KVM_X86_OP(has_emulated_msr) KVM_X86_OP(vcpu_after_set_cpuid) KVM_X86_OP(vm_init) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 6c0d300f1b9c..4825283af8b6 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1161,7 +1161,6 @@ struct kvm_x86_ops { int (*hardware_enable)(void); void (*hardware_disable)(void); void (*hardware_unsetup)(void); - bool (*cpu_has_accelerated_tpr)(void); bool (*has_emulated_msr)(struct kvm *kvm, u32 index); void (*vcpu_after_set_cpuid)(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index e226359bd9f6..872728e465b0 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4118,11 +4118,6 @@ static int __init svm_check_processor_compat(void) return 0; } -static bool svm_cpu_has_accelerated_tpr(void) -{ - return false; -} - /* * The kvm parameter can be NULL (module initialization, or invocation before * VM creation). Be sure to check the kvm parameter before using it. 
@@ -4714,7 +4709,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .hardware_unsetup = svm_hardware_teardown, .hardware_enable = svm_hardware_enable, .hardware_disable = svm_hardware_disable, - .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr, .has_emulated_msr = svm_has_emulated_msr, .vcpu_create = svm_create_vcpu, diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 8956683e36e0..da6b0f6dc403 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -701,11 +701,6 @@ static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu) return flexpriority_enabled && lapic_in_kernel(vcpu); } -static inline bool report_flexpriority(void) -{ - return flexpriority_enabled; -} - static int possible_passthrough_msr_slot(u32 msr) { u32 i; @@ -8431,7 +8426,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { .hardware_enable = hardware_enable, .hardware_disable = hardware_disable, - .cpu_has_accelerated_tpr = report_flexpriority, .has_emulated_msr = vmx_has_emulated_msr, .vm_size = sizeof(struct kvm_vmx), diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e057959a0a89..326ab6350988 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3981,6 +3981,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_X86_MSR_FILTER: case KVM_CAP_ENFORCE_PV_FEATURE_CPUID: case KVM_CAP_SYS_ATTRIBUTES: + case KVM_CAP_VAPIC: #ifdef CONFIG_X86_SGX_KVM case KVM_CAP_SGX_ATTRIBUTE: #endif @@ -4009,9 +4010,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) */ r = static_call(kvm_x86_has_emulated_msr)(kvm, MSR_IA32_SMBASE); break; - case KVM_CAP_VAPIC: - r = !static_call(kvm_x86_cpu_has_accelerated_tpr)(); - break; case KVM_CAP_NR_VCPUS: r = KVM_SOFT_MAX_VCPUS; break; -- Gitee From 20409009d0c474dd9e6e2847550ccc3fbd2301d3 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 1 Feb 2022 14:18:07 -0500 Subject: [PATCH 05/10] KVM: x86: use static_call_cond for optional callbacks ANBZ: #27210 commit 2a89061451c799bd36dbe1b90613c35212fc1f64 upstream. SVM implements neither update_emulated_instruction nor set_apic_access_page_addr. Remove an "if" by calling them with static_call_cond(). 
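As a sketch of the transformation (using a made-up hook name "frob" standing in for the two real ones), static_call_cond() folds the NULL test into the call site: for a void-returning op defined with DEFINE_STATIC_CALL_NULL, a NULL target is patched into a NOP, so the open-coded conditional can go away:

  /* Before: every caller open-codes the NULL check. */
  if (kvm_x86_ops.frob)
  	static_call(kvm_x86_frob)(vcpu);

  /* After: a NULL kvm_x86_ops.frob makes this call a NOP. */
  static_call_cond(kvm_x86_frob)(vcpu);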
Hygon-SIG: commit 2a89061451c7 KVM: x86: use static_call_cond for optional callbacks backport comment for "kvm: x86: better handling of optional kvm_x86_ops" Reviewed-by: Sean Christopherson Signed-off-by: Paolo Bonzini Signed-off-by: Wei Wang [ Wei Wang: amend commit log ] Cc: hygon-arch@list.openanolis.cn --- arch/x86/kvm/x86.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 326ab6350988..ad2871eb3551 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7971,8 +7971,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, kvm_rip_write(vcpu, ctxt->eip); if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) r = kvm_vcpu_do_singlestep(vcpu); - if (kvm_x86_ops.update_emulated_instruction) - static_call(kvm_x86_update_emulated_instruction)(vcpu); + static_call_cond(kvm_x86_update_emulated_instruction)(vcpu); __kvm_set_rflags(vcpu, ctxt->eflags); } @@ -9344,10 +9343,7 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) if (!lapic_in_kernel(vcpu)) return; - if (!kvm_x86_ops.set_apic_access_page_addr) - return; - - static_call(kvm_x86_set_apic_access_page_addr)(vcpu); + static_call_cond(kvm_x86_set_apic_access_page_addr)(vcpu); } void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu) -- Gitee From a7ca2239986f1baf86c11acee2d1661e409b40ee Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Sat, 8 Nov 2025 03:15:54 +0800 Subject: [PATCH 06/10] KVM: x86: remove KVM_X86_OP_NULL and mark optional kvm_x86_ops ANBZ: #27210 commit e4fc23bad813591417f466beb7e833cdd2089cf6 upstream. The original use of KVM_X86_OP_NULL, which was to mark calls that do not follow a specific naming convention, is not in use anymore. Instead, let's mark calls that are optional because they are always invoked within conditionals or with static_call_cond. Those that are _not_, i.e. those that are defined with KVM_X86_OP, must be defined by both vendor modules or some kind of NULL pointer dereference is bound to happen at runtime. Hygon-SIG: commit e4fc23bad813 KVM: x86: remove KVM_X86_OP_NULL and mark optional kvm_x86_ops backport comment for "kvm: x86: better handling of optional kvm_x86_ops" Reviewed-by: Sean Christopherson Signed-off-by: Paolo Bonzini Signed-off-by: Wei Wang [ Wei Wang: amend commit log and resolve the conflicts ] Cc: hygon-arch@list.openanolis.cn --- arch/x86/include/asm/kvm-x86-ops.h | 90 +++++++++++++++--------------- arch/x86/include/asm/kvm_host.h | 4 +- arch/x86/kvm/x86.c | 2 +- 3 files changed, 48 insertions(+), 48 deletions(-) diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index 8774c6c563a6..12bf75b5871c 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -1,24 +1,24 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#if !defined(KVM_X86_OP) || !defined(KVM_X86_OP_NULL) +#if !defined(KVM_X86_OP) || !defined(KVM_X86_OP_OPTIONAL) BUILD_BUG_ON(1) #endif /* - * KVM_X86_OP() and KVM_X86_OP_NULL() are used to help generate - * "static_call()"s. They are also intended for use when defining - * the vmx/svm kvm_x86_ops. KVM_X86_OP() can be used for those - * functions that follow the [svm|vmx]_func_name convention. - * KVM_X86_OP_NULL() can leave a NULL definition for the - * case where there is no definition or a function name that - * doesn't match the typical naming convention is supplied. 
+ * KVM_X86_OP() and KVM_X86_OP_OPTIONAL() are used to help generate + * both DECLARE/DEFINE_STATIC_CALL() invocations and + * "static_call_update()" calls. + * + * KVM_X86_OP_OPTIONAL() can be used for those functions that can have + * a NULL definition, for example if "static_call_cond()" will be used + * at the call sites. */ -KVM_X86_OP_NULL(hardware_enable) -KVM_X86_OP_NULL(hardware_disable) -KVM_X86_OP_NULL(hardware_unsetup) +KVM_X86_OP(hardware_enable) +KVM_X86_OP(hardware_disable) +KVM_X86_OP(hardware_unsetup) KVM_X86_OP(has_emulated_msr) KVM_X86_OP(vcpu_after_set_cpuid) KVM_X86_OP(vm_init) -KVM_X86_OP_NULL(vm_destroy) +KVM_X86_OP_OPTIONAL(vm_destroy) KVM_X86_OP(vcpu_create) KVM_X86_OP(vcpu_free) KVM_X86_OP(vcpu_reset) @@ -32,7 +32,7 @@ KVM_X86_OP(get_segment_base) KVM_X86_OP(get_segment) KVM_X86_OP(get_cpl) KVM_X86_OP(set_segment) -KVM_X86_OP_NULL(get_cs_db_l_bits) +KVM_X86_OP(get_cs_db_l_bits) KVM_X86_OP(set_cr0) KVM_X86_OP(set_cr4) KVM_X86_OP(set_efer) @@ -47,14 +47,14 @@ KVM_X86_OP(get_rflags) KVM_X86_OP(set_rflags) KVM_X86_OP(tlb_flush_all) KVM_X86_OP(tlb_flush_current) -KVM_X86_OP_NULL(tlb_remote_flush) -KVM_X86_OP_NULL(tlb_remote_flush_with_range) +KVM_X86_OP_OPTIONAL(tlb_remote_flush) +KVM_X86_OP_OPTIONAL(tlb_remote_flush_with_range) KVM_X86_OP(tlb_flush_gva) KVM_X86_OP(tlb_flush_guest) KVM_X86_OP(run) -KVM_X86_OP_NULL(handle_exit) -KVM_X86_OP_NULL(skip_emulated_instruction) -KVM_X86_OP_NULL(update_emulated_instruction) +KVM_X86_OP(handle_exit) +KVM_X86_OP(skip_emulated_instruction) +KVM_X86_OP_OPTIONAL(update_emulated_instruction) KVM_X86_OP(set_interrupt_shadow) KVM_X86_OP(get_interrupt_shadow) KVM_X86_OP(patch_hypercall) @@ -68,57 +68,57 @@ KVM_X86_OP(get_nmi_mask) KVM_X86_OP(set_nmi_mask) KVM_X86_OP(enable_nmi_window) KVM_X86_OP(enable_irq_window) -KVM_X86_OP(update_cr8_intercept) +KVM_X86_OP_OPTIONAL(update_cr8_intercept) KVM_X86_OP(check_apicv_inhibit_reasons) -KVM_X86_OP_NULL(pre_update_apicv_exec_ctrl) +KVM_X86_OP_OPTIONAL(pre_update_apicv_exec_ctrl) KVM_X86_OP(refresh_apicv_exec_ctrl) KVM_X86_OP(hwapic_irr_update) KVM_X86_OP(hwapic_isr_update) -KVM_X86_OP_NULL(guest_apic_has_interrupt) +KVM_X86_OP_OPTIONAL(guest_apic_has_interrupt) KVM_X86_OP(load_eoi_exitmap) KVM_X86_OP(set_virtual_apic_mode) -KVM_X86_OP_NULL(set_apic_access_page_addr) +KVM_X86_OP_OPTIONAL(set_apic_access_page_addr) KVM_X86_OP(deliver_posted_interrupt) -KVM_X86_OP_NULL(sync_pir_to_irr) +KVM_X86_OP_OPTIONAL(sync_pir_to_irr) KVM_X86_OP(set_tss_addr) KVM_X86_OP(set_identity_map_addr) KVM_X86_OP(get_mt_mask) KVM_X86_OP(load_mmu_pgd) -KVM_X86_OP_NULL(has_wbinvd_exit) +KVM_X86_OP(has_wbinvd_exit) KVM_X86_OP(write_l1_tsc_offset) KVM_X86_OP(get_exit_info) KVM_X86_OP(check_intercept) KVM_X86_OP(handle_exit_irqoff) -KVM_X86_OP_NULL(request_immediate_exit) +KVM_X86_OP(request_immediate_exit) KVM_X86_OP(sched_in) -KVM_X86_OP_NULL(slot_enable_log_dirty) -KVM_X86_OP_NULL(slot_disable_log_dirty) -KVM_X86_OP_NULL(flush_log_dirty) -KVM_X86_OP_NULL(enable_log_dirty_pt_masked) -KVM_X86_OP_NULL(pre_block) -KVM_X86_OP_NULL(post_block) -KVM_X86_OP_NULL(vcpu_blocking) -KVM_X86_OP_NULL(vcpu_unblocking) -KVM_X86_OP_NULL(update_pi_irte) -KVM_X86_OP_NULL(apicv_post_state_restore) -KVM_X86_OP_NULL(dy_apicv_has_pending_interrupt) -KVM_X86_OP_NULL(set_hv_timer) -KVM_X86_OP_NULL(cancel_hv_timer) +KVM_X86_OP_OPTIONAL(slot_enable_log_dirty) +KVM_X86_OP_OPTIONAL(slot_disable_log_dirty) +KVM_X86_OP_OPTIONAL(flush_log_dirty) +KVM_X86_OP_OPTIONAL(enable_log_dirty_pt_masked) +KVM_X86_OP_OPTIONAL(pre_block) 
+KVM_X86_OP_OPTIONAL(post_block) +KVM_X86_OP_OPTIONAL(vcpu_blocking) +KVM_X86_OP_OPTIONAL(vcpu_unblocking) +KVM_X86_OP(update_pi_irte) +KVM_X86_OP(apicv_post_state_restore) +KVM_X86_OP_OPTIONAL(dy_apicv_has_pending_interrupt) +KVM_X86_OP_OPTIONAL(set_hv_timer) +KVM_X86_OP_OPTIONAL(cancel_hv_timer) KVM_X86_OP(setup_mce) KVM_X86_OP(smi_allowed) KVM_X86_OP(pre_enter_smm) KVM_X86_OP(pre_leave_smm) KVM_X86_OP(enable_smi_window) -KVM_X86_OP_NULL(mem_enc_op) -KVM_X86_OP_NULL(mem_enc_reg_region) -KVM_X86_OP_NULL(mem_enc_unreg_region) +KVM_X86_OP_OPTIONAL(mem_enc_op) +KVM_X86_OP_OPTIONAL(mem_enc_reg_region) +KVM_X86_OP_OPTIONAL(mem_enc_unreg_region) KVM_X86_OP(get_msr_feature) KVM_X86_OP(can_emulate_instruction) KVM_X86_OP(apic_init_signal_blocked) -KVM_X86_OP_NULL(enable_direct_tlbflush) -KVM_X86_OP_NULL(migrate_timers) +KVM_X86_OP_OPTIONAL(enable_direct_tlbflush) +KVM_X86_OP_OPTIONAL(migrate_timers) KVM_X86_OP(msr_filter_changed) -KVM_X86_OP_NULL(complete_emulated_msr) +KVM_X86_OP(complete_emulated_msr) #undef KVM_X86_OP -#undef KVM_X86_OP_NULL +#undef KVM_X86_OP_OPTIONAL diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 4825283af8b6..85dabefd42b5 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1415,14 +1415,14 @@ extern struct kvm_x86_ops kvm_x86_ops; #define KVM_X86_OP(func) \ DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func)); -#define KVM_X86_OP_NULL KVM_X86_OP +#define KVM_X86_OP_OPTIONAL KVM_X86_OP #include static inline void kvm_ops_static_call_update(void) { #define KVM_X86_OP(func) \ static_call_update(kvm_x86_##func, kvm_x86_ops.func); -#define KVM_X86_OP_NULL KVM_X86_OP +#define KVM_X86_OP_OPTIONAL KVM_X86_OP #include } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ad2871eb3551..1bf4534ff321 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -130,7 +130,7 @@ EXPORT_SYMBOL_GPL(kvm_x86_ops); #define KVM_X86_OP(func) \ DEFINE_STATIC_CALL_NULL(kvm_x86_##func, \ *(((struct kvm_x86_ops *)0)->func)); -#define KVM_X86_OP_NULL KVM_X86_OP +#define KVM_X86_OP_OPTIONAL KVM_X86_OP #include EXPORT_STATIC_CALL_GPL(kvm_x86_get_cs_db_l_bits); EXPORT_STATIC_CALL_GPL(kvm_x86_cache_reg); -- Gitee From 2a0348f0b1e966573bf5f1db1bb0caa021e949ce Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Thu, 9 Dec 2021 08:12:48 -0500 Subject: [PATCH 07/10] KVM: x86: warn on incorrectly NULL members of kvm_x86_ops ANBZ: #27210 commit dd2319c61888018a5295264c9b631e151dad364d upstream. Use the newly corrected KVM_X86_OP annotations to warn about possible NULL pointer dereferences as soon as the vendor module is loaded. 
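Hand-expanded for illustration (taking one mandatory and one optional member from the list in the previous patch; not literal preprocessor output), kvm_ops_static_call_update() now behaves like:

  /* KVM_X86_OP(vcpu_create) is mandatory: warn at vendor module
   * load if it is NULL, since invoking it later would be a NULL
   * pointer dereference.
   */
  WARN_ON(!kvm_x86_ops.vcpu_create);
  static_call_update(kvm_x86_vcpu_create, kvm_x86_ops.vcpu_create);

  /* KVM_X86_OP_OPTIONAL(vm_destroy): NULL is a legal value. */
  static_call_update(kvm_x86_vm_destroy, kvm_x86_ops.vm_destroy);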
Hygon-SIG: commit dd2319c61888 KVM: x86: warn on incorrectly NULL members of kvm_x86_ops backport comment for "kvm: x86: better handling of optional kvm_x86_ops" Reviewed-by: Sean Christopherson Signed-off-by: Paolo Bonzini Signed-off-by: Wei Wang [ Wei Wang: amend commit log ] Cc: hygon-arch@list.openanolis.cn --- arch/x86/include/asm/kvm_host.h | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 85dabefd42b5..3fed9f835fae 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1420,10 +1420,13 @@ extern struct kvm_x86_ops kvm_x86_ops; static inline void kvm_ops_static_call_update(void) { -#define KVM_X86_OP(func) \ +#define __KVM_X86_OP(func) \ static_call_update(kvm_x86_##func, kvm_x86_ops.func); -#define KVM_X86_OP_OPTIONAL KVM_X86_OP +#define KVM_X86_OP(func) \ + WARN_ON(!kvm_x86_ops.func); __KVM_X86_OP(func) +#define KVM_X86_OP_OPTIONAL __KVM_X86_OP #include +#undef __KVM_X86_OP } #define __KVM_HAVE_ARCH_VM_ALLOC -- Gitee From f009c758bdd364c0095acac97ba44e886db04030 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Sat, 8 Nov 2025 03:26:28 +0800 Subject: [PATCH 08/10] KVM: x86: make several APIC virtualization callbacks optional ANBZ: #27210 commit abb6d479e22642c82d552970d85edd9b5fe8beb6 upstream. All their invocations are conditional on vcpu->arch.apicv_active, meaning that they need not be implemented by vendor code: even though at the moment both vendors implement APIC virtualization, all of them can be optional. In fact SVM does not need many of them, and their implementation can be deleted now. Hygon-SIG: commit abb6d479e226 KVM: x86: make several APIC virtualization callbacks optional backport comment for "kvm: x86: better handling of optional kvm_x86_ops" Reviewed-by: Sean Christopherson Signed-off-by: Paolo Bonzini Signed-off-by: Wei Wang [ Wei Wang: amend commit log and resolve the conflicts ] Cc: hygon-arch@list.openanolis.cn --- arch/x86/include/asm/kvm-x86-ops.h | 10 +++++----- arch/x86/kvm/lapic.c | 24 ++++++++++-------------- arch/x86/kvm/svm/avic.c | 18 ------------------ arch/x86/kvm/svm/svm.c | 4 ---- arch/x86/kvm/svm/svm.h | 1 - arch/x86/kvm/x86.c | 2 +- 6 files changed, 16 insertions(+), 43 deletions(-) diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index 12bf75b5871c..ae30c999874b 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -72,11 +72,11 @@ KVM_X86_OP_OPTIONAL(update_cr8_intercept) KVM_X86_OP(check_apicv_inhibit_reasons) KVM_X86_OP_OPTIONAL(pre_update_apicv_exec_ctrl) KVM_X86_OP(refresh_apicv_exec_ctrl) -KVM_X86_OP(hwapic_irr_update) -KVM_X86_OP(hwapic_isr_update) +KVM_X86_OP_OPTIONAL(hwapic_irr_update) +KVM_X86_OP_OPTIONAL(hwapic_isr_update) KVM_X86_OP_OPTIONAL(guest_apic_has_interrupt) -KVM_X86_OP(load_eoi_exitmap) -KVM_X86_OP(set_virtual_apic_mode) +KVM_X86_OP_OPTIONAL(load_eoi_exitmap) +KVM_X86_OP_OPTIONAL(set_virtual_apic_mode) KVM_X86_OP_OPTIONAL(set_apic_access_page_addr) KVM_X86_OP(deliver_posted_interrupt) KVM_X86_OP_OPTIONAL(sync_pir_to_irr) @@ -100,7 +100,7 @@ KVM_X86_OP_OPTIONAL(post_block) KVM_X86_OP_OPTIONAL(vcpu_blocking) KVM_X86_OP_OPTIONAL(vcpu_unblocking) KVM_X86_OP(update_pi_irte) -KVM_X86_OP(apicv_post_state_restore) +KVM_X86_OP_OPTIONAL(apicv_post_state_restore) KVM_X86_OP_OPTIONAL(dy_apicv_has_pending_interrupt) KVM_X86_OP_OPTIONAL(set_hv_timer) KVM_X86_OP_OPTIONAL(cancel_hv_timer) diff --git a/arch/x86/kvm/lapic.c 
b/arch/x86/kvm/lapic.c index 1ff0789aac39..b1133d4766cf 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -570,8 +570,7 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic) if (unlikely(vcpu->arch.apicv_active)) { /* need to update RVI */ kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR); - static_call(kvm_x86_hwapic_irr_update)(vcpu, - apic_find_highest_irr(apic)); + static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic)); } else { apic->irr_pending = false; kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR); @@ -601,7 +600,7 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic) * just set SVI. */ if (unlikely(vcpu->arch.apicv_active)) - static_call(kvm_x86_hwapic_isr_update)(vcpu, vec); + static_call_cond(kvm_x86_hwapic_isr_update)(vcpu, vec); else { ++apic->isr_count; BUG_ON(apic->isr_count > MAX_APIC_VECTOR); @@ -649,8 +648,7 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic) * and must be left alone. */ if (unlikely(vcpu->arch.apicv_active)) - static_call(kvm_x86_hwapic_isr_update)(vcpu, - apic_find_highest_isr(apic)); + static_call_cond(kvm_x86_hwapic_isr_update)(vcpu, apic_find_highest_isr(apic)); else { --apic->isr_count; BUG_ON(apic->isr_count < 0); @@ -2367,7 +2365,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id); if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) - static_call(kvm_x86_set_virtual_apic_mode)(vcpu); + static_call_cond(kvm_x86_set_virtual_apic_mode)(vcpu); apic->base_address = apic->vcpu->arch.apic_base & MSR_IA32_APICBASE_BASE; @@ -2444,9 +2442,9 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event) vcpu->arch.pv_eoi.msr_val = 0; apic_update_ppr(apic); if (vcpu->arch.apicv_active) { - static_call(kvm_x86_apicv_post_state_restore)(vcpu); - static_call(kvm_x86_hwapic_irr_update)(vcpu, -1); - static_call(kvm_x86_hwapic_isr_update)(vcpu, -1); + static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu); + static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, -1); + static_call_cond(kvm_x86_hwapic_isr_update)(vcpu, -1); } vcpu->arch.apic_arb_prio = 0; @@ -2707,11 +2705,9 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) kvm_apic_update_apicv(vcpu); apic->highest_isr_cache = -1; if (vcpu->arch.apicv_active) { - static_call(kvm_x86_apicv_post_state_restore)(vcpu); - static_call(kvm_x86_hwapic_irr_update)(vcpu, - apic_find_highest_irr(apic)); - static_call(kvm_x86_hwapic_isr_update)(vcpu, - apic_find_highest_isr(apic)); + static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu); + static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic)); + static_call_cond(kvm_x86_hwapic_isr_update)(vcpu, apic_find_highest_isr(apic)); } kvm_make_request(KVM_REQ_EVENT, vcpu); if (ioapic_in_kernel(vcpu->kvm)) diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index f6600ca063dd..39d68c65b9fe 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -594,19 +594,6 @@ void svm_toggle_avic_for_irq_window(struct kvm_vcpu *vcpu, bool activate) vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); } -void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu) -{ - return; -} - -void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr) -{ -} - -void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr) -{ -} - static int svm_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate) { int ret = 0; @@ -666,11 +653,6 @@ void svm_refresh_apicv_exec_ctrl(struct 
kvm_vcpu *vcpu) svm_set_pi_irte_mode(vcpu, activated); } -void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) -{ - return; -} - int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec) { if (!vcpu->arch.apicv_active) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 872728e465b0..16dadc1f0551 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4771,13 +4771,9 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .enable_nmi_window = svm_enable_nmi_window, .enable_irq_window = svm_enable_irq_window, .update_cr8_intercept = svm_update_cr8_intercept, - .set_virtual_apic_mode = svm_set_virtual_apic_mode, .refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl, .check_apicv_inhibit_reasons = svm_check_apicv_inhibit_reasons, .pre_update_apicv_exec_ctrl = svm_pre_update_apicv_exec_ctrl, - .load_eoi_exitmap = svm_load_eoi_exitmap, - .hwapic_irr_update = svm_hwapic_irr_update, - .hwapic_isr_update = svm_hwapic_isr_update, .sync_pir_to_irr = kvm_lapic_find_highest_irr, .apicv_post_state_restore = avic_post_state_restore, diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 067b2ec6cb2b..055138bcab58 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -545,7 +545,6 @@ void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu); void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu); bool svm_check_apicv_inhibit_reasons(ulong bit); void svm_pre_update_apicv_exec_ctrl(struct kvm *kvm, bool activate); -void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap); void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr); void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr); int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 1bf4534ff321..c6b3b160ab68 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9315,7 +9315,7 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu) bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors, vcpu_to_synic(vcpu)->vec_bitmap, 256); - static_call(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap); + static_call_cond(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap); } void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, -- Gitee From f26703001c60f25bdf8e462cf8f1f981f3e6aec9 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Sat, 8 Nov 2025 03:47:34 +0800 Subject: [PATCH 09/10] KVM: x86: allow defining return-0 static calls ANBZ: #27210 commit 5be2226f417d5b06d17e6c52d6e341cf43c29e48 upstream. A few vendor callbacks are only used by VMX, but they return an integer or bool value. Introduce KVM_X86_OP_OPTIONAL_RET0 for them: if a func is NULL in struct kvm_x86_ops, it will be changed to __static_call_return0 when updating static calls. 
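Sketched for one converted hook (an illustrative hand-expansion of the macro added below, not new code): SVM's svm_get_mt_mask() stub can be dropped because a NULL op is rebound to __static_call_return0, so callers keep a bare static_call() and simply observe 0:

  /* Expansion of KVM_X86_OP_OPTIONAL_RET0(get_mt_mask): */
  static_call_update(kvm_x86_get_mt_mask,
  		   (void *)kvm_x86_ops.get_mt_mask ? :
  		   (void *)__static_call_return0);

  /* Callers need no NULL check; on SVM this now yields 0. */
  u64 mt_mask = static_call(kvm_x86_get_mt_mask)(vcpu, gfn, is_mmio);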
Hygon-SIG: commit 5be2226f417d upstream KVM: x86: allow defining return-0 static calls backport comment for "kvm: x86: better handling of optional kvm_x86_ops" Reviewed-by: Sean Christopherson Signed-off-by: Paolo Bonzini Signed-off-by: Wei Wang [ Wei Wang: amend commit log and resolve the conflicts ] Cc: hygon-arch@list.openanolis.cn --- arch/x86/include/asm/kvm-x86-ops.h | 15 +++++++++------ arch/x86/include/asm/kvm_host.h | 4 ++++ arch/x86/kvm/svm/avic.c | 5 ----- arch/x86/kvm/svm/svm.c | 20 -------------------- arch/x86/kvm/x86.c | 4 ++-- kernel/static_call.c | 1 + 6 files changed, 16 insertions(+), 33 deletions(-) diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index ae30c999874b..dab34c29a218 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -10,7 +10,9 @@ BUILD_BUG_ON(1) * * KVM_X86_OP_OPTIONAL() can be used for those functions that can have * a NULL definition, for example if "static_call_cond()" will be used - * at the call sites. + * at the call sites. KVM_X86_OP_OPTIONAL_RET0() can be used likewise + * to make a definition optional, but in this case the default will + * be __static_call_return0. */ KVM_X86_OP(hardware_enable) KVM_X86_OP(hardware_disable) @@ -74,15 +76,15 @@ KVM_X86_OP_OPTIONAL(pre_update_apicv_exec_ctrl) KVM_X86_OP(refresh_apicv_exec_ctrl) KVM_X86_OP_OPTIONAL(hwapic_irr_update) KVM_X86_OP_OPTIONAL(hwapic_isr_update) -KVM_X86_OP_OPTIONAL(guest_apic_has_interrupt) +KVM_X86_OP_OPTIONAL_RET0(guest_apic_has_interrupt) KVM_X86_OP_OPTIONAL(load_eoi_exitmap) KVM_X86_OP_OPTIONAL(set_virtual_apic_mode) KVM_X86_OP_OPTIONAL(set_apic_access_page_addr) KVM_X86_OP(deliver_posted_interrupt) KVM_X86_OP_OPTIONAL(sync_pir_to_irr) -KVM_X86_OP(set_tss_addr) -KVM_X86_OP(set_identity_map_addr) -KVM_X86_OP(get_mt_mask) +KVM_X86_OP_OPTIONAL_RET0(set_tss_addr) +KVM_X86_OP_OPTIONAL_RET0(set_identity_map_addr) +KVM_X86_OP_OPTIONAL_RET0(get_mt_mask) KVM_X86_OP(load_mmu_pgd) KVM_X86_OP(has_wbinvd_exit) KVM_X86_OP(write_l1_tsc_offset) @@ -101,7 +103,7 @@ KVM_X86_OP_OPTIONAL(vcpu_blocking) KVM_X86_OP_OPTIONAL(vcpu_unblocking) KVM_X86_OP(update_pi_irte) KVM_X86_OP_OPTIONAL(apicv_post_state_restore) -KVM_X86_OP_OPTIONAL(dy_apicv_has_pending_interrupt) +KVM_X86_OP_OPTIONAL_RET0(dy_apicv_has_pending_interrupt) KVM_X86_OP_OPTIONAL(set_hv_timer) KVM_X86_OP_OPTIONAL(cancel_hv_timer) KVM_X86_OP(setup_mce) @@ -122,3 +124,4 @@ KVM_X86_OP(complete_emulated_msr) #undef KVM_X86_OP #undef KVM_X86_OP_OPTIONAL +#undef KVM_X86_OP_OPTIONAL_RET0 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 3fed9f835fae..d96eea53f89a 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1416,6 +1416,7 @@ extern struct kvm_x86_ops kvm_x86_ops; #define KVM_X86_OP(func) \ DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func)); #define KVM_X86_OP_OPTIONAL KVM_X86_OP +#define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP #include static inline void kvm_ops_static_call_update(void) @@ -1425,6 +1426,9 @@ static inline void kvm_ops_static_call_update(void) #define KVM_X86_OP_OPTIONAL __KVM_X86_OP +#define KVM_X86_OP_OPTIONAL_RET0(func) \ + static_call_update(kvm_x86_##func, (void *)kvm_x86_ops.func ? 
: \ + (void *) __static_call_return0); #include #undef __KVM_X86_OP } diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index 39d68c65b9fe..4829af335658 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -673,11 +673,6 @@ int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec) return 0; } -bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu) -{ - return false; -} - static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi) { unsigned long flags; diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 16dadc1f0551..09f3f2dfd8ad 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -3699,16 +3699,6 @@ static void svm_enable_nmi_window(struct kvm_vcpu *vcpu) svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); } -static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr) -{ - return 0; -} - -static int svm_set_identity_map_addr(struct kvm *kvm, u64 ident_addr) -{ - return 0; -} - void svm_flush_tlb(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -4149,11 +4139,6 @@ static bool svm_has_emulated_msr(struct kvm *kvm, u32 index) return true; } -static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) -{ - return 0; -} - static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -4777,10 +4762,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .sync_pir_to_irr = kvm_lapic_find_highest_irr, .apicv_post_state_restore = avic_post_state_restore, - .set_tss_addr = svm_set_tss_addr, - .set_identity_map_addr = svm_set_identity_map_addr, - .get_mt_mask = svm_get_mt_mask, - .get_exit_info = svm_get_exit_info, .vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid, @@ -4802,7 +4783,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .nested_ops = &svm_nested_ops, .deliver_posted_interrupt = svm_deliver_avic_intr, - .dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt, .update_pi_irte = svm_update_pi_irte, .setup_mce = svm_setup_mce, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c6b3b160ab68..c98810b8bee0 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -131,6 +131,7 @@ EXPORT_SYMBOL_GPL(kvm_x86_ops); DEFINE_STATIC_CALL_NULL(kvm_x86_##func, \ *(((struct kvm_x86_ops *)0)->func)); #define KVM_X86_OP_OPTIONAL KVM_X86_OP +#define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP #include EXPORT_STATIC_CALL_GPL(kvm_x86_get_cs_db_l_bits); EXPORT_STATIC_CALL_GPL(kvm_x86_cache_reg); @@ -11339,8 +11340,7 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm, static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) { return (is_guest_mode(vcpu) && - kvm_x86_ops.guest_apic_has_interrupt && - static_call(kvm_x86_guest_apic_has_interrupt)(vcpu)); + static_call(kvm_x86_guest_apic_has_interrupt)(vcpu)); } static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) diff --git a/kernel/static_call.c b/kernel/static_call.c index b7ec8a0a3888..3917fde8d82f 100644 --- a/kernel/static_call.c +++ b/kernel/static_call.c @@ -514,6 +514,7 @@ long __static_call_return0(void) { return 0; } +EXPORT_SYMBOL_GPL(__static_call_return0); #ifdef CONFIG_STATIC_CALL_SELFTEST -- Gitee From a95f5139aa6d3663b115ea2a19e210d31b954145 Mon Sep 17 00:00:00 2001 From: Wei Wang Date: Sat, 8 Nov 2025 06:47:02 +0800 Subject: [PATCH 10/10] anolis: KVM/x86: make remaining kvm_x86_ops callbacks use static_call ANBZ: #27210 Several kvm_x86_ops callbacks were backported to the 5.10 kernel but were not updated to use the static_call 
mechanism. Align this code with the implementation in later upstream versions by applying static_call to these callbacks. Using static_call eliminates the overhead of the indirect call, providing a performance benefit. Hygon-SIG: commit none hygon KVM/x86: make remaining kvm_x86_ops callbacks use static_call Signed-off-by: Wei Wang Cc: hygon-arch@list.openanolis.cn --- arch/x86/include/asm/kvm-x86-ops.h | 6 ++++++ arch/x86/kvm/x86.c | 25 ++++++++++--------------- 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index dab34c29a218..b7716c8b2b07 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -121,6 +121,12 @@ KVM_X86_OP_OPTIONAL(enable_direct_tlbflush) KVM_X86_OP_OPTIONAL(migrate_timers) KVM_X86_OP(msr_filter_changed) KVM_X86_OP(complete_emulated_msr) +KVM_X86_OP_OPTIONAL(guest_memory_reclaimed) +KVM_X86_OP_OPTIONAL(vm_attestation) +KVM_X86_OP_OPTIONAL(get_hygon_coco_extension) +KVM_X86_OP_OPTIONAL(enable_hygon_coco_extension) +KVM_X86_OP_OPTIONAL(control_pre_system_reset) +KVM_X86_OP_OPTIONAL(control_post_system_reset) #undef KVM_X86_OP #undef KVM_X86_OP_OPTIONAL diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c98810b8bee0..b3f2af065c7d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4079,9 +4079,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) * but only CSV2 guest support export to emulate * MSR_AMD64_SEV_ES_GHCB. */ - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) - r = kvm_x86_ops.has_emulated_msr(kvm, - MSR_AMD64_SEV_ES_GHCB); + if (is_x86_vendor_hygon()) + r = static_call(kvm_x86_has_emulated_msr)(kvm, MSR_AMD64_SEV_ES_GHCB); break; case KVM_CAP_HYGON_COCO_EXT: r = 0; @@ -4093,7 +4092,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) * suggested that the userspace to utilise extensions. */ if (is_x86_vendor_hygon() && kvm_x86_ops.get_hygon_coco_extension) - r = kvm_x86_ops.get_hygon_coco_extension(kvm); + r = static_call(kvm_x86_get_hygon_coco_extension)(kvm); break; default: break; @@ -5678,8 +5677,7 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, * Hygon CSV technology. 
*/ if (is_x86_vendor_hygon() && kvm_x86_ops.enable_hygon_coco_extension) - r = kvm_x86_ops.enable_hygon_coco_extension(kvm, - (u32)cap->args[0]); + r = static_call(kvm_x86_enable_hygon_coco_extension)(kvm, (u32)cap->args[0]); break; default: r = -EINVAL; @@ -6117,16 +6115,14 @@ long kvm_arch_vm_ioctl(struct file *filp, r = kvm_vm_ioctl_set_msr_filter(kvm, argp); break; case KVM_CONTROL_PRE_SYSTEM_RESET: - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && - kvm_x86_ops.control_pre_system_reset) - r = kvm_x86_ops.control_pre_system_reset(kvm); + if (is_x86_vendor_hygon() && kvm_x86_ops.control_pre_system_reset) + r = static_call(kvm_x86_control_pre_system_reset)(kvm); else r = -ENOTTY; break; case KVM_CONTROL_POST_SYSTEM_RESET: - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && - kvm_x86_ops.control_post_system_reset) - r = kvm_x86_ops.control_post_system_reset(kvm); + if (is_x86_vendor_hygon() && kvm_x86_ops.control_post_system_reset) + r = static_call(kvm_x86_control_post_system_reset)(kvm); else r = -ENOTTY; break; @@ -8721,7 +8717,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) case KVM_HC_VM_ATTESTATION: ret = -KVM_ENOSYS; if (kvm_x86_ops.vm_attestation) - ret = kvm_x86_ops.vm_attestation(vcpu->kvm, a0, a1); + ret = static_call(kvm_x86_vm_attestation)(vcpu->kvm, a0, a1); break; case KVM_HC_PSP_OP_OBSOLETE: case KVM_HC_PSP_COPY_FORWARD_OP: @@ -9335,8 +9331,7 @@ void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, void kvm_arch_guest_memory_reclaimed(struct kvm *kvm) { - if (kvm_x86_ops.guest_memory_reclaimed) - kvm_x86_ops.guest_memory_reclaimed(kvm); + static_call_cond(kvm_x86_guest_memory_reclaimed)(kvm); } void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) -- Gitee
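The conversion rule applied in this last patch, restated as a sketch with the vm_attestation hunk above (hedged: these Hygon-specific ops have no __static_call_return0 default wired up in this tree): optional ops whose return value is consumed keep their explicit NULL test, because static_call_cond() is only usable for void-returning calls; only the invocation itself is devirtualized:

  ret = -KVM_ENOSYS;
  /* The NULL test stays: static_call_cond() cannot be used when
   * the return value is consumed.
   */
  if (kvm_x86_ops.vm_attestation)
  	ret = static_call(kvm_x86_vm_attestation)(vcpu->kvm, a0, a1);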