diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index a82d1176e7c6a9d7b0edb5434b986ac568dff623..9d24d2263a8636c52440f56e247d03bc2232f9eb 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -49,15 +49,31 @@
  * set this bit before looking at the lock.
  */
 
-static void
-rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
+static struct task_struct *
+rt_mutex_owner_encode(struct rt_mutex *lock, struct task_struct *owner)
 {
 	unsigned long val = (unsigned long)owner;
 
 	if (rt_mutex_has_waiters(lock))
 		val |= RT_MUTEX_HAS_WAITERS;
 
-	WRITE_ONCE(lock->owner, (struct task_struct *)val);
+	return (struct task_struct *)val;
+}
+
+static void
+rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
+{
+	/*
+	 * lock->wait_lock is held but explicit acquire semantics are needed
+	 * for a new lock owner so WRITE_ONCE is insufficient.
+	 */
+	xchg_acquire(&lock->owner, rt_mutex_owner_encode(lock, owner));
+}
+
+static void rt_mutex_clear_owner(struct rt_mutex *lock)
+{
+	/* lock->wait_lock is held so the unlock provides release semantics. */
+	WRITE_ONCE(lock->owner, rt_mutex_owner_encode(lock, NULL));
 }
 
 static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
@@ -66,7 +82,7 @@ static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
 			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
 }
 
-static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
+static void fixup_rt_mutex_waiters(struct rt_mutex *lock, bool acquire_lock)
 {
 	unsigned long owner, *p = (unsigned long *) &lock->owner;
 
@@ -132,8 +148,21 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
 	 * still set.
 	 */
 	owner = READ_ONCE(*p);
-	if (owner & RT_MUTEX_HAS_WAITERS)
-		WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
+	if (owner & RT_MUTEX_HAS_WAITERS) {
+		/*
+		 * See rt_mutex_set_owner() and rt_mutex_clear_owner() on
+		 * why xchg_acquire() is used for updating owner for
+		 * locking and WRITE_ONCE() for unlocking.
+		 *
+		 * WRITE_ONCE() would work for the acquire case too, but
+		 * in case that the lock acquisition failed it might
+		 * force other lockers into the slow path unnecessarily.
+		 */
+		if (acquire_lock)
+			xchg_acquire(p, owner & ~RT_MUTEX_HAS_WAITERS);
+		else
+			WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
+	}
 }
 
 /*
@@ -157,6 +186,13 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
 		owner = *p;
 	} while (cmpxchg_relaxed(p, owner,
 				 owner | RT_MUTEX_HAS_WAITERS) != owner);
+
+	/*
+	 * The cmpxchg loop above is relaxed to avoid back-to-back ACQUIRE
+	 * operations in the event of contention. Ensure the successful
+	 * cmpxchg is visible.
+	 */
+	smp_mb__after_atomic();
 }
 
 /*
@@ -1255,7 +1291,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 	 * try_to_take_rt_mutex() sets the waiter bit
 	 * unconditionally. We might have to fix that up.
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, true);
 
 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
@@ -1276,7 +1312,7 @@ static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock)
 	 * try_to_take_rt_mutex() sets the lock waiters bit
 	 * unconditionally. Clean this up.
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, true);
 
 	return ret;
 }
@@ -1701,7 +1737,7 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
 void rt_mutex_proxy_unlock(struct rt_mutex *lock)
 {
 	debug_rt_mutex_proxy_unlock(lock);
-	rt_mutex_set_owner(lock, NULL);
+	rt_mutex_clear_owner(lock);
 }
 
 /**
@@ -1838,7 +1874,7 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
 	 * have to fix that up.
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, true);
 	raw_spin_unlock_irq(&lock->wait_lock);
 
 	return ret;
@@ -1894,7 +1930,7 @@ bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
 	 * have to fix that up.
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, false);
 
 	raw_spin_unlock_irq(&lock->wait_lock);
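
For reference, a minimal userspace sketch (C11 atomics, not kernel code; the names owner, protected_data, prev_owner and next_owner are hypothetical) of the acquire/release pairing the patch depends on: the previous owner may drop the lock with only a release store (the rtmutex fast path never touches wait_lock), so the next owner must install itself with at least acquire ordering to be guaranteed to see the data written in the previous critical section.

/*
 * Illustration only: an exchange-based lock where the unlocker uses a
 * release store and the acquirer uses an acquire exchange, analogous to
 * WRITE_ONCE() on the unlock path vs. xchg_acquire() on the lock path.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int owner = 1;	/* 1: held by the "previous owner", 0: free */
static int protected_data;	/* only written while the lock is held */

static void *prev_owner(void *arg)
{
	protected_data = 42;					/* critical section */
	atomic_store_explicit(&owner, 0, memory_order_release);/* fast-path style unlock */
	return NULL;
}

static void *next_owner(void *arg)
{
	/* Slow-path style acquisition: swap ourselves in with acquire ordering. */
	while (atomic_exchange_explicit(&owner, 1, memory_order_acquire) != 0)
		;						/* spin until free */
	printf("%d\n", protected_data);				/* guaranteed to be 42 */
	atomic_store_explicit(&owner, 0, memory_order_release);/* unlock */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, prev_owner, NULL);
	pthread_create(&b, NULL, next_owner, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Built with e.g. "cc -pthread" this always prints 42; weakening the exchange to memory_order_relaxed would correspond to the pre-patch behaviour, where the new owner is not ordered against the previous owner's release and the read of protected_data is no longer guaranteed.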