
Commit

Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:
 "The main changes are:

   - 'qspinlock' support, enabled on x86: queued spinlocks - these are
     now the spinlock variant used by x86 as they outperform ticket
     spinlocks in every category.  (Waiman Long)

   - 'pvqspinlock' support on x86: paravirtualized variant of queued
     spinlocks.  (Waiman Long, Peter Zijlstra)

   - 'qrwlock' support, enabled on x86: queued rwlocks.  Similar to
     queued spinlocks, they are now the variant used by x86:

       CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y
       CONFIG_QUEUED_SPINLOCKS=y
       CONFIG_ARCH_USE_QUEUED_RWLOCKS=y
       CONFIG_QUEUED_RWLOCKS=y

   - various lockdep fixlets

   - various locking primitives cleanups, further WRITE_ONCE()
     propagation"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
  locking/lockdep: Remove hard coded array size dependency
  locking/qrwlock: Don't contend with readers when setting _QW_WAITING
  lockdep: Do not break user-visible string
  locking/arch: Rename set_mb() to smp_store_mb()
  locking/arch: Add WRITE_ONCE() to set_mb()
  rtmutex: Warn if trylock is called from hard/softirq context
  arch: Remove __ARCH_HAVE_CMPXCHG
  locking/rtmutex: Drop usage of __HAVE_ARCH_CMPXCHG
  locking/qrwlock: Rename QUEUE_RWLOCK to QUEUED_RWLOCKS
  locking/pvqspinlock: Rename QUEUED_SPINLOCK to QUEUED_SPINLOCKS
  locking/pvqspinlock: Replace xchg() by the more descriptive set_mb()
  locking/pvqspinlock, x86: Enable PV qspinlock for Xen
  locking/pvqspinlock, x86: Enable PV qspinlock for KVM
  locking/pvqspinlock, x86: Implement the paravirt qspinlock call patching
  locking/pvqspinlock: Implement simple paravirt support for the qspinlock
  locking/qspinlock: Revert to test-and-set on hypervisors
  locking/qspinlock: Use a simple write to grab the lock
  locking/qspinlock: Optimize for smaller NR_CPUS
  locking/qspinlock: Extract out code snippets for the next patch
  locking/qspinlock: Add pending bit
  ...
torvalds committed Jun 22, 2015
2 parents fc934d4 + 6872210 commit 1bf7067
Showing 61 changed files with 1,423 additions and 102 deletions.
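Background for the qspinlock commits listed above: queued spinlocks are derived from MCS locks, where each waiter spins on its own queue node instead of on the shared lock word, so a lock handoff touches only the nodes involved. Below is a minimal user-space MCS sketch of that queueing idea, using C11 atomics for illustration only; it is not the kernel's qspinlock code, which additionally packs the locked byte, a pending bit, and an encoded tail index into a single 32-bit word.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    /* Each waiter spins on its own node rather than on the lock word. */
    struct mcs_node {
            _Atomic(struct mcs_node *) next;
            atomic_bool locked;                /* true while this waiter must spin */
    };

    struct mcs_lock {
            _Atomic(struct mcs_node *) tail;   /* last waiter in the queue, or NULL */
    };

    static void mcs_lock_acquire(struct mcs_lock *lock, struct mcs_node *me)
    {
            struct mcs_node *prev;

            atomic_store(&me->next, NULL);
            atomic_store(&me->locked, true);

            prev = atomic_exchange(&lock->tail, me);   /* join the queue */
            if (prev) {
                    atomic_store(&prev->next, me);
                    while (atomic_load(&me->locked))
                            ;                          /* spin only on our own node */
            }
    }

    static void mcs_lock_release(struct mcs_lock *lock, struct mcs_node *me)
    {
            struct mcs_node *succ = atomic_load(&me->next);

            if (!succ) {
                    struct mcs_node *expected = me;
                    /* No successor visible: try to mark the queue empty. */
                    if (atomic_compare_exchange_strong(&lock->tail, &expected, NULL))
                            return;
                    while (!(succ = atomic_load(&me->next)))
                            ;                          /* successor is still linking in */
            }
            atomic_store(&succ->locked, false);        /* hand the lock over */
    }

The kernel's qspinlock keeps this handoff discipline but compresses the whole lock into one 32-bit word, which is what lets it replace ticket spinlocks without changing the lock's size.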
6 changes: 3 additions & 3 deletions Documentation/memory-barriers.txt
@@ -1673,7 +1673,7 @@ CPU from reordering them.

There are some more advanced barrier functions:

- (*) set_mb(var, value)
+ (*) smp_store_mb(var, value)

     This assigns the value to the variable and then inserts a full memory
     barrier after it, depending on the function. It isn't guaranteed to

@@ -1985,7 +1985,7 @@ after it has altered the task state:

    CPU 1
    ===============================
    set_current_state();
-     set_mb();
+     smp_store_mb();
        STORE current->state
        <general barrier>
    LOAD event_indicated

@@ -2026,7 +2026,7 @@ between the STORE to indicate the event and the STORE to set TASK_RUNNING:

    CPU 1                            CPU 2
    ===============================  ===============================
    set_current_state();             STORE event_indicated
-     set_mb();                      wake_up();
+     smp_store_mb();                wake_up();
        STORE current->state         <write barrier>
        <general barrier>            STORE current->state
    LOAD event_indicated
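As a user-space analogue of the sleep/wake-up pairing documented above (a sketch using C11 atomics; the helper name store_mb() is made up here to mirror smp_store_mb()'s store-then-full-barrier semantics and is not kernel API):

    #include <stdatomic.h>
    #include <stdbool.h>

    enum { TASK_RUNNING, TASK_INTERRUPTIBLE };

    static atomic_int  task_state;         /* stands in for current->state */
    static atomic_bool event_indicated;

    /* Analogue of smp_store_mb(var, value): store, then a full memory barrier. */
    #define store_mb(var, value)                                        \
            do {                                                        \
                    atomic_store_explicit(&(var), (value),              \
                                          memory_order_relaxed);        \
                    atomic_thread_fence(memory_order_seq_cst);          \
            } while (0)

    static void waiter(void)               /* CPU 1 in the example above */
    {
            store_mb(task_state, TASK_INTERRUPTIBLE);
            if (!atomic_load(&event_indicated)) {
                    /* would sleep here; the full barrier keeps the state
                     * store ordered before the event_indicated load */
            }
    }

    static void waker(void)                /* CPU 2 in the example above */
    {
            atomic_store(&event_indicated, true);
            store_mb(task_state, TASK_RUNNING);  /* in the kernel, wake_up()
                                                  * provides this ordering */
    }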
2 changes: 0 additions & 2 deletions arch/alpha/include/asm/cmpxchg.h
@@ -66,6 +66,4 @@
#undef __ASM__MB
#undef ____cmpxchg

-#define __HAVE_ARCH_CMPXCHG 1
-
#endif /* _ALPHA_CMPXCHG_H */
2 changes: 1 addition & 1 deletion arch/arm/include/asm/barrier.h
@@ -81,7 +81,7 @@ do { \
#define read_barrier_depends() do { } while(0)
#define smp_read_barrier_depends() do { } while(0)

-#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)

#define smp_mb__before_atomic() smp_mb()
#define smp_mb__after_atomic() smp_mb()
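Beyond the rename, every converted definition in this series routes the assignment through WRITE_ONCE(). The point of WRITE_ONCE() is to force the compiler to emit exactly one full-width store (no tearing, re-issuing, or elimination of the write). A simplified sketch of the idea follows; it is not the actual definition in include/linux/compiler.h:

    /* Simplified illustration: a volatile access forces one full-width store. */
    #define WRITE_ONCE_SKETCH(x, val) \
            (*(volatile __typeof__(x) *)&(x) = (val))

    static int flag;

    void publish(void)
    {
            /* A plain "flag = 1" could legally be split, deferred, or merged
             * by the compiler; the volatile store cannot. */
            WRITE_ONCE_SKETCH(flag, 1);
    }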
2 changes: 1 addition & 1 deletion arch/arm64/include/asm/barrier.h
@@ -114,7 +114,7 @@ do { \
#define read_barrier_depends() do { } while(0)
#define smp_read_barrier_depends() do { } while(0)

-#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
#define nop() asm volatile("nop");

#define smp_mb__before_atomic() smp_mb()
2 changes: 0 additions & 2 deletions arch/avr32/include/asm/cmpxchg.h
@@ -70,8 +70,6 @@ extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
   if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

-#define __HAVE_ARCH_CMPXCHG 1
-
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long new, int size)
{
1 change: 0 additions & 1 deletion arch/hexagon/include/asm/cmpxchg.h
@@ -64,7 +64,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
 * looks just like atomic_cmpxchg on our arch currently with a bunch of
 * variable casting.
 */
-#define __HAVE_ARCH_CMPXCHG 1

#define cmpxchg(ptr, old, new) \
({ \
7 changes: 1 addition & 6 deletions arch/ia64/include/asm/barrier.h
@@ -77,12 +77,7 @@ do { \
___p1; \
})

-/*
- * XXX check on this ---I suspect what Linus really wants here is
- * acquire vs release semantics but we can't discuss this stuff with
- * Linus just yet. Grrr...
- */
-#define set_mb(var, value) do { (var) = (value); mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)

/*
 * The group barrier in front of the rsm & ssm are necessary to ensure
2 changes: 0 additions & 2 deletions arch/ia64/include/uapi/asm/cmpxchg.h
@@ -61,8 +61,6 @@ extern void ia64_xchg_called_with_bad_pointer(void);
 * indicated by comparing RETURN with OLD.
 */

-#define __HAVE_ARCH_CMPXCHG 1
-
/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
2 changes: 0 additions & 2 deletions arch/m32r/include/asm/cmpxchg.h
@@ -107,8 +107,6 @@ __xchg_local(unsigned long x, volatile void *ptr, int size)
((__typeof__(*(ptr)))__xchg_local((unsigned long)(x), (ptr), \
sizeof(*(ptr))))

-#define __HAVE_ARCH_CMPXCHG 1
-
static inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
{
1 change: 0 additions & 1 deletion arch/m68k/include/asm/cmpxchg.h
@@ -90,7 +90,6 @@ extern unsigned long __invalid_cmpxchg_size(volatile void *,
 * indicated by comparing RETURN with OLD.
 */
#ifdef CONFIG_RMW_INSNS
-#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
unsigned long new, int size)
2 changes: 1 addition & 1 deletion arch/metag/include/asm/barrier.h
@@ -84,7 +84,7 @@ static inline void fence(void)
#define read_barrier_depends() do { } while (0)
#define smp_read_barrier_depends() do { } while (0)

-#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)

#define smp_store_release(p, v) \
do { \
2 changes: 0 additions & 2 deletions arch/metag/include/asm/cmpxchg.h
@@ -51,8 +51,6 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
return old;
}

-#define __HAVE_ARCH_CMPXCHG 1
-
#define cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
4 changes: 2 additions & 2 deletions arch/mips/include/asm/barrier.h
@@ -112,8 +112,8 @@
#define __WEAK_LLSC_MB " \n"
#endif

-#define set_mb(var, value) \
-do { var = value; smp_mb(); } while (0)
+#define smp_store_mb(var, value) \
+do { WRITE_ONCE(var, value); smp_mb(); } while (0)

#define smp_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
2 changes: 0 additions & 2 deletions arch/mips/include/asm/cmpxchg.h
@@ -138,8 +138,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
__xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
})

-#define __HAVE_ARCH_CMPXCHG 1
-
#define __cmpxchg_asm(ld, st, m, old, new) \
({ \
__typeof(*(m)) __ret; \
2 changes: 0 additions & 2 deletions arch/parisc/include/asm/cmpxchg.h
@@ -46,8 +46,6 @@ __xchg(unsigned long x, __volatile__ void *ptr, int size)
#define xchg(ptr, x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))

-#define __HAVE_ARCH_CMPXCHG 1
-
/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);
2 changes: 1 addition & 1 deletion arch/powerpc/include/asm/barrier.h
@@ -34,7 +34,7 @@
#define rmb() __asm__ __volatile__ ("sync" : : : "memory")
#define wmb() __asm__ __volatile__ ("sync" : : : "memory")

-#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)

#ifdef __SUBARCH_HAS_LWSYNC
# define SMPWMB LWSYNC
1 change: 0 additions & 1 deletion arch/powerpc/include/asm/cmpxchg.h
@@ -144,7 +144,6 @@ __xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */
-#define __HAVE_ARCH_CMPXCHG 1

static __always_inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
2 changes: 1 addition & 1 deletion arch/s390/include/asm/barrier.h
@@ -36,7 +36,7 @@
#define smp_mb__before_atomic() smp_mb()
#define smp_mb__after_atomic() smp_mb()

-#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)

#define smp_store_release(p, v) \
do { \
2 changes: 0 additions & 2 deletions arch/s390/include/asm/cmpxchg.h
@@ -32,8 +32,6 @@
__old; \
})

-#define __HAVE_ARCH_CMPXCHG
-
#define __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, insn) \
({ \
register __typeof__(*(p1)) __old1 asm("2") = (o1); \
2 changes: 0 additions & 2 deletions arch/score/include/asm/cmpxchg.h
@@ -42,8 +42,6 @@ static inline unsigned long __cmpxchg(volatile unsigned long *m,
(unsigned long)(o), \
(unsigned long)(n)))

-#define __HAVE_ARCH_CMPXCHG 1
-
#include <asm-generic/cmpxchg-local.h>

#endif /* _ASM_SCORE_CMPXCHG_H */
2 changes: 1 addition & 1 deletion arch/sh/include/asm/barrier.h
@@ -32,7 +32,7 @@
#define ctrl_barrier() __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
#endif

-#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)

#include <asm-generic/barrier.h>
2 changes: 0 additions & 2 deletions arch/sh/include/asm/cmpxchg.h
@@ -46,8 +46,6 @@ extern void __xchg_called_with_bad_pointer(void);
 * if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

-#define __HAVE_ARCH_CMPXCHG 1
-
static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
unsigned long new, int size)
{
4 changes: 2 additions & 2 deletions arch/sparc/include/asm/barrier_64.h
@@ -40,8 +40,8 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
#define dma_rmb() rmb()
#define dma_wmb() wmb()

-#define set_mb(__var, __value) \
-do { __var = __value; membar_safe("#StoreLoad"); } while(0)
+#define smp_store_mb(__var, __value) \
+do { WRITE_ONCE(__var, __value); membar_safe("#StoreLoad"); } while(0)

#ifdef CONFIG_SMP
#define smp_mb() mb()
1 change: 0 additions & 1 deletion arch/sparc/include/asm/cmpxchg_32.h
@@ -34,7 +34,6 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int
 *
 * Cribbed from <asm-parisc/atomic.h>
 */
-#define __HAVE_ARCH_CMPXCHG 1

/* bug catcher for when unsupported size is used - won't link */
void __cmpxchg_called_with_bad_pointer(void);
2 changes: 0 additions & 2 deletions arch/sparc/include/asm/cmpxchg_64.h
@@ -65,8 +65,6 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,

#include <asm-generic/cmpxchg-local.h>

-#define __HAVE_ARCH_CMPXCHG 1
-
static inline unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
3 changes: 0 additions & 3 deletions arch/tile/include/asm/atomic_64.h
@@ -105,9 +105,6 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

-/* Define this to indicate that cmpxchg is an efficient operation. */
-#define __HAVE_ARCH_CMPXCHG
-
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_64_H */
5 changes: 3 additions & 2 deletions arch/x86/Kconfig
@@ -127,7 +127,8 @@ config X86
select MODULES_USE_ELF_RELA if X86_64
select CLONE_BACKWARDS if X86_32
select ARCH_USE_BUILTIN_BSWAP
-select ARCH_USE_QUEUE_RWLOCK
+select ARCH_USE_QUEUED_SPINLOCKS
+select ARCH_USE_QUEUED_RWLOCKS
select OLD_SIGSUSPEND3 if X86_32 || IA32_EMULATION
select OLD_SIGACTION if X86_32
select COMPAT_OLD_SIGACTION if IA32_EMULATION

@@ -666,7 +667,7 @@ config PARAVIRT_DEBUG
config PARAVIRT_SPINLOCKS
bool "Paravirtualization layer for spinlocks"
depends on PARAVIRT && SMP
-select UNINLINE_SPIN_UNLOCK
+select UNINLINE_SPIN_UNLOCK if !QUEUED_SPINLOCKS
---help---
Paravirtualized spinlocks allow a pvops backend to replace the
spinlock implementation with something virtualization-friendly
4 changes: 2 additions & 2 deletions arch/x86/include/asm/barrier.h
@@ -35,12 +35,12 @@
#define smp_mb() mb()
#define smp_rmb() dma_rmb()
#define smp_wmb() barrier()
-#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else /* !SMP */
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif /* SMP */

#define read_barrier_depends() do { } while (0)
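Note the asymmetry this hunk preserves: on SMP x86 the macro still uses (void)xchg(&var, value), because a LOCK-prefixed exchange already acts as the store plus a full memory barrier, while the UP variant only needs WRITE_ONCE() plus a compiler barrier. A user-space analogue of the SMP form (a sketch, not the kernel macro):

    #include <stdatomic.h>

    /* Store plus full barrier in one operation: a seq_cst exchange needs no
     * separate fence, mirroring why x86 keeps the xchg()-based definition. */
    static inline void store_mb_xchg(atomic_int *p, int value)
    {
            (void)atomic_exchange_explicit(p, value, memory_order_seq_cst);
    }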
2 changes: 0 additions & 2 deletions arch/x86/include/asm/cmpxchg.h
@@ -4,8 +4,6 @@
#include <linux/compiler.h>
#include <asm/alternative.h> /* Provides LOCK_PREFIX */

-#define __HAVE_ARCH_CMPXCHG 1
-
/*
 * Non-existant functions to indicate usage errors at link time
 * (or compile-time if the compiler implements __compiletime_error().
29 changes: 28 additions & 1 deletion arch/x86/include/asm/paravirt.h
@@ -712,6 +712,31 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

+#ifdef CONFIG_QUEUED_SPINLOCKS
+
+static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
+u32 val)
+{
+PVOP_VCALL2(pv_lock_ops.queued_spin_lock_slowpath, lock, val);
+}
+
+static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
+{
+PVOP_VCALLEE1(pv_lock_ops.queued_spin_unlock, lock);
+}
+
+static __always_inline void pv_wait(u8 *ptr, u8 val)
+{
+PVOP_VCALL2(pv_lock_ops.wait, ptr, val);
+}
+
+static __always_inline void pv_kick(int cpu)
+{
+PVOP_VCALL1(pv_lock_ops.kick, cpu);
+}
+
+#else /* !CONFIG_QUEUED_SPINLOCKS */
+
static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
__ticket_t ticket)
{

@@ -724,7 +749,9 @@ static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
}

-#endif
+#endif /* CONFIG_QUEUED_SPINLOCKS */
+
+#endif /* SMP && PARAVIRT_SPINLOCKS */

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
10 changes: 10 additions & 0 deletions arch/x86/include/asm/paravirt_types.h
@@ -333,9 +333,19 @@ struct arch_spinlock;
typedef u16 __ticket_t;
#endif

+struct qspinlock;
+
struct pv_lock_ops {
+#ifdef CONFIG_QUEUED_SPINLOCKS
+void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
+struct paravirt_callee_save queued_spin_unlock;
+
+void (*wait)(u8 *ptr, u8 val);
+void (*kick)(int cpu);
+#else /* !CONFIG_QUEUED_SPINLOCKS */
struct paravirt_callee_save lock_spinning;
void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
+#endif /* !CONFIG_QUEUED_SPINLOCKS */
};

/* This contains all the paravirt structures: we get a convenient
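Taken together with the paravirt.h hunk, this is the usual pv-ops shape: a table of function pointers that bare metal fills with native implementations and that hypervisor backends (KVM and Xen, per the commit list above) override so a contended vCPU can sleep in wait() and be woken by kick(). A plain-C sketch of that indirection with simplified, made-up names (the real table is struct pv_lock_ops and the call sites are patched via the PVOP_* macros):

    #include <stdint.h>

    struct qspinlock_sketch { uint32_t val; };

    /* Simplified stand-in for pv_lock_ops under CONFIG_QUEUED_SPINLOCKS. */
    struct pv_lock_ops_sketch {
            void (*queued_spin_lock_slowpath)(struct qspinlock_sketch *lock, uint32_t val);
            void (*queued_spin_unlock)(struct qspinlock_sketch *lock);
            void (*wait)(uint8_t *ptr, uint8_t val);  /* park the vCPU in the hypervisor */
            void (*kick)(int cpu);                    /* wake a vCPU parked in wait() */
    };

    static void native_wait(uint8_t *ptr, uint8_t val) { /* nothing to park on bare metal */ }
    static void native_kick(int cpu)                    { /* nothing to wake on bare metal */ }

    static void native_slowpath(struct qspinlock_sketch *lock, uint32_t val)
    {
            /* bare metal: keep spinning in the MCS queue */
    }

    static void native_unlock(struct qspinlock_sketch *lock)
    {
            lock->val = 0;    /* the kernel releases with a single byte store */
    }

    static struct pv_lock_ops_sketch lock_ops_sketch = {
            .queued_spin_lock_slowpath = native_slowpath,
            .queued_spin_unlock        = native_unlock,
            .wait                      = native_wait,
            .kick                      = native_kick,
    };

    /* A KVM or Xen backend would overwrite these pointers at boot with
     * functions that halt the vCPU in wait() and send a wakeup in kick(). */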
