diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c
index 53bb6fa7636689d24e9657b54653c769796c5071..7fb6833e1258d4dfa2a158a3064f990c9e8aa4bc 100644
--- a/arch/arm64/kernel/paravirt.c
+++ b/arch/arm64/kernel/paravirt.c
@@ -259,14 +259,19 @@ static bool has_kvm_pvsched(void)
 	return (res.a0 == SMCCC_RET_SUCCESS);
 }
 
+DECLARE_STATIC_KEY_TRUE(vcpu_has_preemption);
+
 int __init pv_sched_init(void)
 {
 	int ret;
 
-	if (is_hyp_mode_available())
+	if (is_hyp_mode_available()) {
+		static_branch_disable(&vcpu_has_preemption);
 		return 0;
+	}
 
 	if (!has_kvm_pvsched()) {
+		static_branch_disable(&vcpu_has_preemption);
 		pr_warn("PV sched is not available\n");
 		return 0;
 	}
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 9e1ea99ad9df4beca4771705b32b46444e7c925e..a2eb375e21e37db55289a50462d554ec5b2ff30b 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -33,6 +33,8 @@ bool pv_is_native_vcpu_is_preempted(void)
 		__raw_callee_save___native_vcpu_is_preempted;
 }
 
+DECLARE_STATIC_KEY_TRUE(vcpu_has_preemption);
+
 void __init paravirt_set_cap(void)
 {
 	if (!pv_is_native_spin_unlock())
@@ -40,4 +42,6 @@ void __init paravirt_set_cap(void)
 
 	if (!pv_is_native_vcpu_is_preempted())
 		setup_force_cpu_cap(X86_FEATURE_VCPUPREEMPT);
+	else
+		static_branch_disable(&vcpu_has_preemption);
 }
diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index 17618d62343f47a58c294f99d3fc146a4f58ddd7..7513d6f3cf48055c4d2a571c54dc58e48904f8bd 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -27,6 +27,23 @@ static inline int node_cpu(struct optimistic_spin_node *node)
 	return node->cpu - 1;
 }
 
+#ifdef vcpu_is_preempted
+DEFINE_STATIC_KEY_TRUE(vcpu_has_preemption);
+
+static inline bool vcpu_is_preempted_node(struct optimistic_spin_node *node)
+{
+	if (static_branch_likely(&vcpu_has_preemption))
+		return vcpu_is_preempted(node_cpu(node->prev));
+
+	return false;
+}
+#else
+static inline bool vcpu_is_preempted_node(struct optimistic_spin_node *node)
+{
+	return false;
+}
+#endif
+
 static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
 {
 	int cpu_nr = encoded_cpu_val - 1;
@@ -147,7 +164,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 	 * polling, be careful.
 	 */
 	if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||
-				  vcpu_is_preempted(node_cpu(node->prev))))
+				  vcpu_is_preempted_node(node)))
 		return true;
 
 	/* unqueue */
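
For readers less familiar with the jump-label machinery the patch relies on: DEFINE_STATIC_KEY_TRUE() creates a key whose guarded branch is live by default, static_branch_disable() patches it out once at boot, and static_branch_likely() compiles the check down to a single patched jump on the fast path. The sketch below shows that shape in isolation; the <linux/jump_label.h> API names are the real kernel ones, but every identifier prefixed example_ is hypothetical and is not part of the patch above.

/*
 * Minimal, self-contained sketch of the default-true static key pattern
 * used by the patch. DEFINE_STATIC_KEY_TRUE, static_branch_likely and
 * static_branch_disable are the real <linux/jump_label.h> API; the
 * example_* names are invented for illustration only.
 */
#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/sched.h>	/* provides a vcpu_is_preempted() fallback */

static DEFINE_STATIC_KEY_TRUE(example_pv_hint);

/*
 * Hypothetical probe standing in for has_kvm_pvsched() or
 * pv_is_native_vcpu_is_preempted(); returns false when no usable
 * paravirt preemption hint exists on this system.
 */
static bool __init example_pv_hint_available(void)
{
	return false;
}

static int __init example_pv_hint_init(void)
{
	/*
	 * Flip the key off exactly once, early in boot; afterwards the
	 * guarded branch below is patched to fall straight through.
	 */
	if (!example_pv_hint_available())
		static_branch_disable(&example_pv_hint);
	return 0;
}
early_initcall(example_pv_hint_init);

static __always_inline bool example_vcpu_is_preempted(int cpu)
{
	/*
	 * Fast path: a single patched jump while the key is enabled,
	 * a constant false once it has been disabled at boot.
	 */
	if (static_branch_likely(&example_pv_hint))
		return vcpu_is_preempted(cpu);
	return false;
}

The key is declared default-true and only ever disabled, so a guest with a working paravirt interface pays no branch-patching cost at all, while bare metal (or a guest without PV sched / PV spinlock support) takes a single one-time patch and the vcpu_is_preempted() call effectively vanishes from the OSQ spin loop.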