locking/spinlock, arch: Update and fix spin_unlock_wait() implementations

This patch updates/fixes all spin_unlock_wait() implementations.

The update is in the semantics: where spin_unlock_wait() previously
provided only a control dependency, it is now upgraded to a full
load-acquire to match the store-release from the spin_unlock() we
waited on. This ensures that when spin_unlock_wait() returns, we are
guaranteed to observe the full critical section we waited on.

This fixes a number of spin_unlock_wait() users that (not
unreasonably) rely on this.
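
As a rough sketch of what the upgrade looks like for a simple
test-and-set style lock (illustrative only: the function name, the
->lock field and the "0 means unlocked" convention are stand-ins, not
taken from any single architecture below; <asm/barrier.h> and
<asm/processor.h> are assumed to be included):

/* Illustrative sketch, not part of the patch itself. */
static inline void example_spin_unlock_wait(arch_spinlock_t *lock)
{
	/*
	 * The old implementations stopped here: a bare spin on the lock
	 * word, giving only a control dependency against later accesses.
	 */
	while (READ_ONCE(lock->lock))
		cpu_relax();

	/*
	 * Upgrade the final load to an ACQUIRE so it pairs with the
	 * store-RELEASE in spin_unlock().  Where no arch-specific wait
	 * instruction is needed, the loop and barrier collapse into
	 * smp_cond_load_acquire(&lock->lock, !VAL).
	 */
	smp_acquire__after_ctrl_dep();
}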

I also fixed a number of ticket lock versions to wait only on the
current lock holder, instead of waiting for a full unlock, as this is
sufficient.

Furthermore, again for ticket locks, I added an smp_rmb() between the
initial ticket load and the spin loop testing the current value,
because I could not convince myself the address dependency is
sufficient, especially if the loads are of different sizes.

I'm more than happy to remove this smp_rmb() again if people are
certain the address dependency does indeed work as expected.
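
Put together, the ticket-lock flavour of the wait loop looks roughly
like this (again a sketch only: the tickets.owner/tickets.next field
names mirror the ARM hunk below, and cpu_relax() stands in for
arch-specific wait instructions such as wfe):

/* Illustrative sketch, not part of the patch itself. */
static inline void example_ticket_unlock_wait(arch_spinlock_t *lock)
{
	/* Snapshot whoever holds the lock right now. */
	u16 owner = READ_ONCE(lock->tickets.owner);

	/* Order the snapshot against the loads in the loop below. */
	smp_rmb();

	for (;;) {
		arch_spinlock_t tmp = READ_ONCE(*lock);

		/*
		 * Stop once the lock is free, or once ownership has moved
		 * past the holder we snapshotted, i.e. the critical
		 * section we were waiting on has completed.
		 */
		if (tmp.tickets.owner == tmp.tickets.next ||
		    tmp.tickets.owner != owner)
			break;

		cpu_relax();
	}

	/* Pair with the store-release of the unlock we just observed. */
	smp_acquire__after_ctrl_dep();
}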

Note: PPC32 will be fixed independently

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Ingo Molnar <[email protected]>
Peter Zijlstra authored and Ingo Molnar committed Jun 14, 2016
1 parent b464d12 commit 726328d
Showing 20 changed files with 145 additions and 32 deletions.
9 changes: 7 additions & 2 deletions arch/alpha/include/asm/spinlock.h
@@ -3,6 +3,8 @@

#include <linux/kernel.h>
#include <asm/current.h>
#include <asm/barrier.h>
#include <asm/processor.h>

/*
* Simple spin lock operations. There are two variants, one clears IRQ's
@@ -13,8 +15,11 @@

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_is_locked(x) ((x)->lock != 0)
#define arch_spin_unlock_wait(x) \
do { cpu_relax(); } while ((x)->lock)

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	smp_cond_load_acquire(&lock->lock, !VAL);
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
7 changes: 5 additions & 2 deletions arch/arc/include/asm/spinlock.h
@@ -15,8 +15,11 @@

#define arch_spin_is_locked(x) ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	smp_cond_load_acquire(&lock->slock, !VAL);
}

#ifdef CONFIG_ARC_HAS_LLSC

19 changes: 17 additions & 2 deletions arch/arm/include/asm/spinlock.h
@@ -6,6 +6,8 @@
#endif

#include <linux/prefetch.h>
#include <asm/barrier.h>
#include <asm/processor.h>

/*
* sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
@@ -50,8 +52,21 @@ static inline void dsb_sev(void)
* memory.
*/

#define arch_spin_unlock_wait(lock) \
do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	u16 owner = READ_ONCE(lock->tickets.owner);

	for (;;) {
		arch_spinlock_t tmp = READ_ONCE(*lock);

		if (tmp.tickets.owner == tmp.tickets.next ||
		    tmp.tickets.owner != owner)
			break;

		wfe();
	}
	smp_acquire__after_ctrl_dep();
}

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

5 changes: 3 additions & 2 deletions arch/blackfin/include/asm/spinlock.h
@@ -12,6 +12,8 @@
#else

#include <linux/atomic.h>
#include <asm/processor.h>
#include <asm/barrier.h>

asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr);
asmlinkage void __raw_spin_lock_asm(volatile int *ptr);
@@ -48,8 +50,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		cpu_relax();
	smp_cond_load_acquire(&lock->lock, !VAL);
}

static inline int arch_read_can_lock(arch_rwlock_t *rw)
10 changes: 8 additions & 2 deletions arch/hexagon/include/asm/spinlock.h
@@ -23,6 +23,8 @@
#define _ASM_SPINLOCK_H

#include <asm/irqflags.h>
#include <asm/barrier.h>
#include <asm/processor.h>

/*
* This file is pulled in for SMP builds.
@@ -176,8 +178,12 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
* SMP spinlocks are intended to allow only a single CPU at the lock
*/
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_unlock_wait(lock) \
do {while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	smp_cond_load_acquire(&lock->lock, !VAL);
}

#define arch_spin_is_locked(x) ((x)->lock != 0)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
4 changes: 4 additions & 0 deletions arch/ia64/include/asm/spinlock.h
@@ -15,6 +15,8 @@

#include <linux/atomic.h>
#include <asm/intrinsics.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#define arch_spin_lock_init(x) ((x)->lock = 0)

@@ -86,6 +88,8 @@ static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
			return;
		cpu_relax();
	}

	smp_acquire__after_ctrl_dep();
}

static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
9 changes: 7 additions & 2 deletions arch/m32r/include/asm/spinlock.h
@@ -13,6 +13,8 @@
#include <linux/atomic.h>
#include <asm/dcache_clear.h>
#include <asm/page.h>
#include <asm/barrier.h>
#include <asm/processor.h>

/*
* Your basic SMP spinlocks, allowing only a single CPU anywhere
@@ -27,8 +29,11 @@

#define arch_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
do { cpu_relax(); } while (arch_spin_is_locked(x))

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	smp_cond_load_acquire(&lock->slock, VAL > 0);
}

/**
* arch_spin_trylock - Try spin lock and return a result
14 changes: 12 additions & 2 deletions arch/metag/include/asm/spinlock.h
@@ -1,14 +1,24 @@
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/processor.h>

#ifdef CONFIG_METAG_ATOMICITY_LOCK1
#include <asm/spinlock_lock1.h>
#else
#include <asm/spinlock_lnkget.h>
#endif

#define arch_spin_unlock_wait(lock) \
do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
/*
* both lock1 and lnkget are test-and-set spinlocks with 0 unlocked and 1
* locked.
*/

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	smp_cond_load_acquire(&lock->lock, !VAL);
}

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

19 changes: 17 additions & 2 deletions arch/mips/include/asm/spinlock.h
@@ -12,6 +12,7 @@
#include <linux/compiler.h>

#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/compiler.h>
#include <asm/war.h>

@@ -48,8 +49,22 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
}

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
while (arch_spin_is_locked(x)) { cpu_relax(); }

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	u16 owner = READ_ONCE(lock->h.serving_now);
	smp_rmb();
	for (;;) {
		arch_spinlock_t tmp = READ_ONCE(*lock);

		if (tmp.h.serving_now == tmp.h.ticket ||
		    tmp.h.serving_now != owner)
			break;

		cpu_relax();
	}
	smp_acquire__after_ctrl_dep();
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
8 changes: 7 additions & 1 deletion arch/mn10300/include/asm/spinlock.h
@@ -12,6 +12,8 @@
#define _ASM_SPINLOCK_H

#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/rwlock.h>
#include <asm/page.h>

@@ -23,7 +25,11 @@
*/

#define arch_spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) != 0)
#define arch_spin_unlock_wait(x) do { barrier(); } while (arch_spin_is_locked(x))

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	smp_cond_load_acquire(&lock->slock, !VAL);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
9 changes: 7 additions & 2 deletions arch/parisc/include/asm/spinlock.h
@@ -13,8 +13,13 @@ static inline int arch_spin_is_locked(arch_spinlock_t *x)
}

#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
#define arch_spin_unlock_wait(x) \
do { cpu_relax(); } while (arch_spin_is_locked(x))

static inline void arch_spin_unlock_wait(arch_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);

	smp_cond_load_acquire(a, VAL);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *x,
unsigned long flags)
3 changes: 3 additions & 0 deletions arch/s390/include/asm/spinlock.h
@@ -10,6 +10,8 @@
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

@@ -97,6 +99,7 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		arch_spin_relax(lock);
	smp_acquire__after_ctrl_dep();
}

/*
10 changes: 8 additions & 2 deletions arch/sh/include/asm/spinlock.h
@@ -19,14 +19,20 @@
#error "Need movli.l/movco.l for spinlocks"
#endif

#include <asm/barrier.h>
#include <asm/processor.h>

/*
* Your basic SMP spinlocks, allowing only a single CPU anywhere
*/

#define arch_spin_is_locked(x) ((x)->lock <= 0)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	smp_cond_load_acquire(&lock->lock, VAL > 0);
}

/*
* Simple spin lock operations. There are two variants, one clears IRQ's
7 changes: 5 additions & 2 deletions arch/sparc/include/asm/spinlock_32.h
@@ -9,12 +9,15 @@
#ifndef __ASSEMBLY__

#include <asm/psr.h>
#include <asm/barrier.h>
#include <asm/processor.h> /* for cpu_relax */

#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)

#define arch_spin_unlock_wait(lock) \
do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	smp_cond_load_acquire(&lock->lock, !VAL);
}

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
10 changes: 7 additions & 3 deletions arch/sparc/include/asm/spinlock_64.h
@@ -8,6 +8,9 @@

#ifndef __ASSEMBLY__

#include <asm/processor.h>
#include <asm/barrier.h>

/* To get debugging spinlocks which detect and catch
* deadlock situations, set CONFIG_DEBUG_SPINLOCK
* and rebuild your kernel.
@@ -23,9 +26,10 @@

#define arch_spin_is_locked(lp) ((lp)->lock != 0)

#define arch_spin_unlock_wait(lp) \
do { rmb(); \
} while((lp)->lock)
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	smp_cond_load_acquire(&lock->lock, !VAL);
}

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
6 changes: 6 additions & 0 deletions arch/tile/lib/spinlock_32.c
@@ -76,6 +76,12 @@ void arch_spin_unlock_wait(arch_spinlock_t *lock)
	do {
		delay_backoff(iterations++);
	} while (READ_ONCE(lock->current_ticket) == curr);

	/*
	 * The TILE architecture doesn't do read speculation; therefore
	 * a control dependency guarantees a LOAD->{LOAD,STORE} order.
	 */
	barrier();
}
EXPORT_SYMBOL(arch_spin_unlock_wait);

6 changes: 6 additions & 0 deletions arch/tile/lib/spinlock_64.c
@@ -76,6 +76,12 @@ void arch_spin_unlock_wait(arch_spinlock_t *lock)
	do {
		delay_backoff(iterations++);
	} while (arch_spin_current(READ_ONCE(lock->lock)) == curr);

	/*
	 * The TILE architecture doesn't do read speculation; therefore
	 * a control dependency guarantees a LOAD->{LOAD,STORE} order.
	 */
	barrier();
}
EXPORT_SYMBOL(arch_spin_unlock_wait);

10 changes: 8 additions & 2 deletions arch/xtensa/include/asm/spinlock.h
@@ -11,6 +11,9 @@
#ifndef _XTENSA_SPINLOCK_H
#define _XTENSA_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/processor.h>

/*
* spinlock
*
@@ -29,8 +32,11 @@
*/

#define arch_spin_is_locked(x) ((x)->slock != 0)
#define arch_spin_unlock_wait(lock) \
do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	smp_cond_load_acquire(&lock->slock, !VAL);
}

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

2 changes: 1 addition & 1 deletion include/asm-generic/barrier.h
@@ -194,7 +194,7 @@ do { \
})
#endif

#endif
#endif /* CONFIG_SMP */

/* Barriers for virtual machine guests when talking to an SMP host */
#define virt_mb() __smp_mb()