
Merge branch 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Ingo Molnar:
 "An rtmutex (PI-futex) deadlock scenario fix, plus a locking
  documentation fix"

* 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  futex: Handle early deadlock return correctly
  futex: Fix barrier comment
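The deadlock in question is the PI-futex kind: a task asks the kernel to acquire a PI futex it already owns (directly, or through a lock chain), and the rtmutex code reports -EDEADLK before the task ever blocks. A minimal userspace sketch of that trigger, for orientation only (standard FUTEX_LOCK_PI semantics; not part of this commit):

#define _GNU_SOURCE
#include <errno.h>
#include <linux/futex.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* A PI futex word stores the owner's TID; pretend we already hold it. */
	uint32_t futex_word = (uint32_t)syscall(SYS_gettid);

	/* Locking our "own" futex trips the kernel's deadlock detection. */
	if (syscall(SYS_futex, &futex_word, FUTEX_LOCK_PI, 0, NULL, NULL, 0) == -1)
		printf("FUTEX_LOCK_PI: %s\n", strerror(errno));	/* EDEADLK expected */
	return 0;
}

The fix below concerns what the kernel does internally with that early -EDEADLK return, not the userspace-visible error.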
torvalds committed Feb 10, 2019
2 parents df3865f + 1a1fb98 commit d2a6aae
Showing 2 changed files with 52 additions and 17 deletions.
32 changes: 20 additions & 12 deletions kernel/futex.c
@@ -2221,11 +2221,11 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
 	 * decrement the counter at queue_unlock() when some error has
 	 * occurred and we don't end up adding the task to the list.
 	 */
-	hb_waiters_inc(hb);
+	hb_waiters_inc(hb); /* implies smp_mb(); (A) */
 
 	q->lock_ptr = &hb->lock;
 
-	spin_lock(&hb->lock); /* implies smp_mb(); (A) */
+	spin_lock(&hb->lock);
 	return hb;
 }
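The moved comment matters because spin_lock() is only an ACQUIRE operation and therefore cannot stand in for the documented full barrier; the smp_mb() actually comes from the atomic increment inside hb_waiters_inc(). Roughly how that helper is defined elsewhere in kernel/futex.c (paraphrased sketch, not part of this diff):

static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_inc(&hb->waiters);
	/*
	 * Full barrier (A), paired with the full barrier (B) on the
	 * waker side, so a waker that updates the futex word cannot
	 * miss a waiter that is about to sleep on hb->lock.
	 */
	smp_mb__after_atomic();
#endif
}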

@@ -2861,35 +2861,39 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
 	 * and BUG when futex_unlock_pi() interleaves with this.
 	 *
 	 * Therefore acquire wait_lock while holding hb->lock, but drop the
-	 * latter before calling rt_mutex_start_proxy_lock(). This still fully
-	 * serializes against futex_unlock_pi() as that does the exact same
-	 * lock handoff sequence.
+	 * latter before calling __rt_mutex_start_proxy_lock(). This
+	 * interleaves with futex_unlock_pi() -- which does a similar lock
+	 * handoff -- such that the latter can observe the futex_q::pi_state
+	 * before __rt_mutex_start_proxy_lock() is done.
 	 */
 	raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
 	spin_unlock(q.lock_ptr);
+	/*
+	 * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
+	 * such that futex_unlock_pi() is guaranteed to observe the waiter when
+	 * it sees the futex_q::pi_state.
+	 */
 	ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
 	raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
 
 	if (ret) {
 		if (ret == 1)
 			ret = 0;
-
-		spin_lock(q.lock_ptr);
-		goto no_block;
+		goto cleanup;
 	}
 
-
 	if (unlikely(to))
 		hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
 
 	ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
 
+cleanup:
 	spin_lock(q.lock_ptr);
 	/*
-	 * If we failed to acquire the lock (signal/timeout), we must
+	 * If we failed to acquire the lock (deadlock/signal/timeout), we must
 	 * first acquire the hb->lock before removing the lock from the
-	 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex
-	 * wait lists consistent.
+	 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait
+	 * lists consistent.
 	 *
 	 * In particular; it is important that futex_unlock_pi() can not
 	 * observe this inconsistency.
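Both failure modes, the early -EDEADLK from __rt_mutex_start_proxy_lock() and a signal/timeout from rt_mutex_wait_proxy_lock(), now fall through the same cleanup: label. The lines immediately below this context (condensed sketch for orientation; see the full file) only detach the waiter once hb->lock is held, and re-check for a concurrent lock grant:

	if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
		ret = 0;	/* we were granted the lock after all */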
@@ -3013,6 +3017,10 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
 	 * there is no point where we hold neither; and therefore
 	 * wake_futex_pi() must observe a state consistent with what we
 	 * observed.
+	 *
+	 * In particular; this forces __rt_mutex_start_proxy_lock() to
+	 * complete such that we're guaranteed to observe the
+	 * rt_waiter. Also see the WARN in wake_futex_pi().
 	 */
 	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
 	spin_unlock(&hb->lock);
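The WARN referenced above lives in wake_futex_pi(). Paraphrased sketch of the relevant check (from the surrounding kernel/futex.c, not part of this diff): since __rt_mutex_start_proxy_lock() now enqueues the waiter unconditionally before wait_lock is dropped, a NULL top waiter here can only be a transient race, so the unlocker backs off and retries:

	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
	if (WARN_ON_ONCE(!new_owner)) {
		/*
		 * Per the ordering in futex_unlock_pi() this should not
		 * happen; give up the locks and retry, allowing the
		 * futex_lock_pi() side to finish enqueueing or clean up.
		 */
		ret = -EAGAIN;
		goto out_unlock;
	}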
37 changes: 32 additions & 5 deletions kernel/locking/rtmutex.c
@@ -1726,12 +1726,33 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock,
 	rt_mutex_set_owner(lock, NULL);
 }
 
+/**
+ * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
+ * @lock:	the rt_mutex to take
+ * @waiter:	the pre-initialized rt_mutex_waiter
+ * @task:	the task to prepare
+ *
+ * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
+ * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
+ *
+ * NOTE: does _NOT_ remove the @waiter on failure; must either call
+ * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
+ *
+ * Returns:
+ *  0 - task blocked on lock
+ *  1 - acquired the lock for task, caller should wake it up
+ * <0 - error
+ *
+ * Special API call for PI-futex support.
+ */
 int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 				struct rt_mutex_waiter *waiter,
 				struct task_struct *task)
 {
 	int ret;
 
 	lockdep_assert_held(&lock->wait_lock);
 
 	if (try_to_take_rt_mutex(lock, task, NULL))
 		return 1;
 
@@ -1749,9 +1770,6 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 		ret = 0;
 	}
 
-	if (unlikely(ret))
-		remove_waiter(lock, waiter);
-
 	debug_rt_mutex_print_deadlock(waiter);
 
 	return ret;
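Dropping the removal here is the heart of the fix. Previously a failing __rt_mutex_start_proxy_lock() dequeued the waiter at a point where futex_lock_pi() had already published futex_q::pi_state and dropped hb->lock, so an unlocker could find a pi_state with no enqueued waiter. Illustrative timeline of that race, reconstructed under the pre-fix code:

	futex_lock_pi()                            futex_unlock_pi()
	  queue futex_q, publish pi_state
	  raw_spin_lock_irq(&pi_mutex.wait_lock)
	  spin_unlock(q.lock_ptr)                    spin_lock(&hb->lock)
	  __rt_mutex_start_proxy_lock()
	    -> -EDEADLK, waiter removed
	  raw_spin_unlock_irq(&pi_mutex.wait_lock)
	                                             raw_spin_lock_irq(&pi_mutex.wait_lock)
	                                             rt_mutex_next_owner() == NULL
	                                             -> WARN in wake_futex_pi()

With the removal gone, the waiter stays enqueued until the caller cleans it up under the proper lock order.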
@@ -1763,12 +1781,18 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
  * @waiter:	the pre-initialized rt_mutex_waiter
  * @task:	the task to prepare
  *
+ * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
+ * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
+ *
+ * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter
+ * on failure.
+ *
  * Returns:
  *  0 - task blocked on lock
  *  1 - acquired the lock for task, caller should wake it up
  * <0 - error
  *
- * Special API call for FUTEX_REQUEUE_PI support.
+ * Special API call for PI-futex support.
  */
 int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 			      struct rt_mutex_waiter *waiter,
@@ -1778,6 +1802,8 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 
 	raw_spin_lock_irq(&lock->wait_lock);
 	ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
+	if (unlikely(ret))
+		remove_waiter(lock, waiter);
 	raw_spin_unlock_irq(&lock->wait_lock);
 
 	return ret;
@@ -1845,7 +1871,8 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
  * @lock: the rt_mutex we were woken on
  * @waiter: the pre-initialized rt_mutex_waiter
  *
- * Attempt to clean up after a failed rt_mutex_wait_proxy_lock().
+ * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
+ * rt_mutex_wait_proxy_lock().
  *
  * Unless we acquired the lock; we're still enqueued on the wait-list and can
  * in fact still be granted ownership until we're removed. Therefore we can
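For reference, the shape of rt_mutex_cleanup_proxy_lock() itself (condensed, paraphrased sketch of the function this comment documents): it returns true when it had to dequeue the waiter, false when the lock was acquired concurrently after all.

bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
				 struct rt_mutex_waiter *waiter)
{
	bool cleanup = false;

	raw_spin_lock_irq(&lock->wait_lock);
	/* One last unconditional try-lock; we may have been granted the lock. */
	try_to_take_rt_mutex(lock, current, waiter);
	/* Not the owner? Then we are still enqueued and must remove ourselves. */
	if (rt_mutex_owner(lock) != current) {
		remove_waiter(lock, waiter);
		cleanup = true;
	}
	/* Clear a possibly stale waiters bit left by the try-lock above. */
	fixup_rt_mutex_waiters(lock);
	raw_spin_unlock_irq(&lock->wait_lock);

	return cleanup;
}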
