diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index 9cb98f7b07c9afb1a8118cbf84be125567481ea0..a29fe9b89a3d58d8e7280427a3518517b8381b71 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -38,6 +38,8 @@ unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
 extern bool pv_is_native_spin_unlock(void);
 extern bool pv_is_native_vcpu_is_preempted(void);
 
+DECLARE_STATIC_KEY_TRUE(vcpu_has_preemption);
+
 unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 		      unsigned long addr, unsigned len)
 {
@@ -73,6 +75,7 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 		if (pv_is_native_vcpu_is_preempted()) {
 			start = start_pv_lock_ops_vcpu_is_preempted;
 			end   = end_pv_lock_ops_vcpu_is_preempted;
+			static_branch_disable(&vcpu_has_preemption);
 			goto patch_site;
 		}
 		goto patch_default;
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 3f8a35104285ab29fe2e9992430e75b6aefbff89..7588ab78870721942eae382add737ff1ba987a1a 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -513,6 +513,23 @@ bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
 	return true;
 }
 
+#ifdef vcpu_is_preempted
+DECLARE_STATIC_KEY_TRUE(vcpu_has_preemption);
+
+static inline bool vcpu_is_preempted_node(struct task_struct *owner)
+{
+	if (static_branch_likely(&vcpu_has_preemption))
+		return vcpu_is_preempted(task_cpu(owner));
+
+	return false;
+}
+#else
+static inline bool vcpu_is_preempted_node(struct task_struct *owner)
+{
+	return false;
+}
+#endif
+
 /*
  * Look out! "owner" is an entirely speculative pointer access and not
  * reliable.
@@ -539,7 +556,7 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
 		 * Use vcpu_is_preempted to detect lock holder preemption issue.
 		 */
		if (!owner->on_cpu || need_resched() ||
-		    vcpu_is_preempted(task_cpu(owner))) {
+		    vcpu_is_preempted_node(owner)) {
 			ret = false;
 			break;
 		}
diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index 6ef600aa0f47e7dd2cbc8901ccb6397e098ab759..5ecf7f00236c1271a94b0ee8d355904e893ad308 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -27,6 +27,23 @@ static inline int node_cpu(struct optimistic_spin_node *node)
 	return node->cpu - 1;
 }
 
+#ifdef vcpu_is_preempted
+DEFINE_STATIC_KEY_TRUE(vcpu_has_preemption);
+
+static inline bool vcpu_is_preempted_node(struct optimistic_spin_node *node)
+{
+	if (static_branch_likely(&vcpu_has_preemption))
+		return vcpu_is_preempted(node_cpu(node->prev));
+
+	return false;
+}
+#else
+static inline bool vcpu_is_preempted_node(struct optimistic_spin_node *node)
+{
+	return false;
+}
+#endif
+
 static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
 {
 	int cpu_nr = encoded_cpu_val - 1;
@@ -140,7 +157,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 		 * Use vcpu_is_preempted() to avoid waiting for a preempted
 		 * lock holder:
 		 */
-		if (need_resched() || vcpu_is_preempted(node_cpu(node->prev)))
+		if (need_resched() || vcpu_is_preempted_node(node))
 			goto unqueue;
 
 		cpu_relax();
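
For readers unfamiliar with static keys: the patch defines vcpu_has_preemption as a default-true key in osq_lock.c, declares it at the other two sites, and lets native_patch() disable it once when it finds the native (bare-metal) vcpu_is_preempted implementation, so the spin loops stop paying for a preemption check that can never return true. The sketch below shows the same define/check/disable pattern in isolation; it is illustrative only, and all demo_* identifiers, including the demo_no_vcpu_preempt knob that stands in for pv_is_native_vcpu_is_preempted(), are invented for the example rather than taken from the patch.

/*
 * Illustrative sketch of the static-key pattern used above; not code from
 * the patch. All demo_* identifiers are invented for this example.
 */
#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/module.h>
#include <linux/sched.h>

/* Default-true key, analogous to vcpu_has_preemption. */
DEFINE_STATIC_KEY_TRUE(demo_key);

/* Hypothetical knob standing in for pv_is_native_vcpu_is_preempted(). */
static bool demo_no_vcpu_preempt;
module_param(demo_no_vcpu_preempt, bool, 0444);

/*
 * Hot-path check: once the key is disabled, the branch is patched so that
 * vcpu_is_preempted() is never called and the function just returns false.
 */
static inline bool demo_owner_preempted(struct task_struct *owner)
{
	if (static_branch_likely(&demo_key))
		return vcpu_is_preempted(task_cpu(owner));

	return false;
}

static int __init demo_init(void)
{
	/* Flip the key once; every call site's branch is patched in place. */
	if (demo_no_vcpu_preempt)
		static_branch_disable(&demo_key);
	return 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");

In the patch itself the same roles are simply split across files: DEFINE_STATIC_KEY_TRUE() lives in osq_lock.c, the other two files only declare the key, and the one-time disable happens in native_patch() rather than in an initcall.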