Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull core locking changes from Ingo Molnar:
 - futex performance increases: larger hashes, smarter wakeups
 - mutex debugging improvements
 - lots of SMP ordering documentation updates
 - introduce the smp_load_acquire(), smp_store_release() primitives.
   (There are WIP patches that make use of them - not yet merged.)
   A usage sketch follows this list.
 - lockdep micro-optimizations
 - lockdep improvement: better coverage of IRQ contexts
 - liblockdep at last. We'll continue to monitor how useful this is
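
For context, here is a minimal sketch of how the new primitives pair up for
message passing between CPUs. This is an editor's illustration under assumed
variable and function names (msg, ready, producer, consumer), not code from
this merge; it assumes kernel context where asm/barrier.h is available:

	/* Producer (e.g. CPU 0): publish the payload, then set the flag.
	 * smp_store_release() orders the store to 'msg' before the store
	 * to 'ready'. */
	static int msg;
	static int ready;

	void producer(void)
	{
		msg = 42;
		smp_store_release(&ready, 1);
	}

	/* Consumer (e.g. CPU 1): smp_load_acquire() keeps the load of 'msg'
	 * from being reordered before the load of 'ready', so a reader that
	 * sees ready == 1 also sees msg == 42. */
	int consumer(void)
	{
		if (smp_load_acquire(&ready))
			return msg;
		return -1;	/* not published yet */
	}

On arm the fallback shown below pairs smp_mb() with ACCESS_ONCE(); on arm64
the same macros map directly to the stlr/ldar instructions, as the
arch/arm64 hunk further down shows.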

* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (34 commits)
  futexes: Fix futex_hashsize initialization
  arch: Re-sort some Kbuild files to hopefully help avoid some conflicts
  futexes: Avoid taking the hb->lock if there's nothing to wake up
  futexes: Document multiprocessor ordering guarantees
  futexes: Increase hash table size for better performance
  futexes: Clean up various details
  arch: Introduce smp_load_acquire(), smp_store_release()
  arch: Clean up asm/barrier.h implementations using asm-generic/barrier.h
  arch: Move smp_mb__{before,after}_atomic_{inc,dec}.h into asm/atomic.h
  locking/doc: Rename LOCK/UNLOCK to ACQUIRE/RELEASE
  mutexes: Give more informative mutex warning in the !lock->owner case
  powerpc: Full barrier for smp_mb__after_unlock_lock()
  rcu: Apply smp_mb__after_unlock_lock() to preserve grace periods
  Documentation/memory-barriers.txt: Downgrade UNLOCK+BLOCK
  locking: Add an smp_mb__after_unlock_lock() for UNLOCK+BLOCK barrier
  Documentation/memory-barriers.txt: Document ACCESS_ONCE()
  Documentation/memory-barriers.txt: Prohibit speculative writes
  Documentation/memory-barriers.txt: Add long atomic examples to memory-barriers.txt
  Documentation/memory-barriers.txt: Add needed ACCESS_ONCE() calls to memory-barriers.txt
  Revert "smp/cpumask: Make CONFIG_CPUMASK_OFFSTACK=y usable without debug dependency"
  ...
torvalds committed Jan 20, 2014
2 parents 897aea3 + 63b1a81 commit 6ffbe7d
Showing 105 changed files with 2,825 additions and 683 deletions.
922 changes: 716 additions & 206 deletions Documentation/memory-barriers.txt

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions Documentation/robust-futex-ABI.txt
@@ -146,8 +146,8 @@ On removal:
  1) set the 'list_op_pending' word to the address of the 'lock entry'
     to be removed,
  2) remove the lock entry for this lock from the 'head' list,
- 2) release the futex lock, and
- 2) clear the 'lock_op_pending' word.
+ 3) release the futex lock, and
+ 4) clear the 'lock_op_pending' word.
 
 On exit, the kernel will consider the address stored in
 'list_op_pending' and the address of each 'lock word' found by walking
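
As a reading aid for the corrected numbering above, here is a minimal
userspace-side sketch of that removal ordering. It is an editor's
illustration; the structure and function names are hypothetical, not the
glibc or kernel implementation:

	#include <stddef.h>
	#include <stdint.h>

	/* Hypothetical per-thread robust-list bookkeeping. */
	struct lock_entry {
		struct lock_entry *next;
		uint32_t lock_word;		/* futex word, holder TID when held */
	};

	struct robust_head {
		struct lock_entry *list;	/* the 'head' list */
		struct lock_entry *list_op_pending;
	};

	/* Simplified: a real implementation must also make sure these stores
	 * reach memory in this order, so the kernel's exit-time walk sees a
	 * consistent state if the thread dies part-way through. */
	static void robust_remove(struct robust_head *head,
				  struct lock_entry *prev, struct lock_entry *entry)
	{
		head->list_op_pending = entry;		/* 1) mark the pending entry  */
		prev->next = entry->next;		/* 2) unlink from 'head' list */
		__atomic_store_n(&entry->lock_word, 0,	/* 3) release the futex lock  */
				 __ATOMIC_RELEASE);
		head->list_op_pending = NULL;		/* 4) clear the pending word  */
	}
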
5 changes: 5 additions & 0 deletions MAINTAINERS
@@ -5141,6 +5141,11 @@ F: drivers/lguest/
 F: include/linux/lguest*.h
 F: tools/lguest/
 
+LIBLOCKDEP
+M: Sasha Levin <[email protected]>
+S: Maintained
+F: tools/lib/lockdep/
+
 LINUX FOR IBM pSERIES (RS/6000)
 M: Paul Mackerras <[email protected]>
 W: http://www.ibm.com/linux/ltc/projects/ppc
25 changes: 5 additions & 20 deletions arch/alpha/include/asm/barrier.h
@@ -3,33 +3,18 @@
 
 #include <asm/compiler.h>
 
-#define mb() \
-__asm__ __volatile__("mb": : :"memory")
+#define mb() __asm__ __volatile__("mb": : :"memory")
+#define rmb() __asm__ __volatile__("mb": : :"memory")
+#define wmb() __asm__ __volatile__("wmb": : :"memory")
 
-#define rmb() \
-__asm__ __volatile__("mb": : :"memory")
-
-#define wmb() \
-__asm__ __volatile__("wmb": : :"memory")
-
-#define read_barrier_depends() \
-__asm__ __volatile__("mb": : :"memory")
+#define read_barrier_depends() __asm__ __volatile__("mb": : :"memory")
 
 #ifdef CONFIG_SMP
 #define __ASM_SMP_MB "\tmb\n"
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define smp_read_barrier_depends() read_barrier_depends()
 #else
 #define __ASM_SMP_MB
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while (0)
 #endif
 
-#define set_mb(var, value) \
-do { var = value; mb(); } while (0)
+#include <asm-generic/barrier.h>
 
 #endif /* __BARRIER_H */
1 change: 1 addition & 0 deletions arch/arc/include/asm/Kbuild
@@ -1,4 +1,5 @@
 generic-y += auxvec.h
+generic-y += barrier.h
 generic-y += bugs.h
 generic-y += bitsperlong.h
 generic-y += clkdev.h
5 changes: 5 additions & 0 deletions arch/arc/include/asm/atomic.h
@@ -190,6 +190,11 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 
 #endif /* !CONFIG_ARC_HAS_LLSC */
 
+#define smp_mb__before_atomic_dec() barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc() barrier()
+#define smp_mb__after_atomic_inc() barrier()
+
 /**
  * __atomic_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
5 changes: 0 additions & 5 deletions arch/arc/include/asm/barrier.h
@@ -30,11 +30,6 @@
 #define smp_wmb() barrier()
 #endif
 
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
 #define smp_read_barrier_depends() do { } while (0)
 
 #endif
15 changes: 15 additions & 0 deletions arch/arm/include/asm/barrier.h
@@ -59,6 +59,21 @@
 #define smp_wmb() dmb(ishst)
 #endif
 
+#define smp_store_release(p, v) \
+do { \
+	compiletime_assert_atomic_type(*p); \
+	smp_mb(); \
+	ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+	typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+	compiletime_assert_atomic_type(*p); \
+	smp_mb(); \
+	___p1; \
+})
+
 #define read_barrier_depends() do { } while(0)
 #define smp_read_barrier_depends() do { } while(0)
 
50 changes: 50 additions & 0 deletions arch/arm64/include/asm/barrier.h
@@ -35,10 +35,60 @@
 #define smp_mb() barrier()
 #define smp_rmb() barrier()
 #define smp_wmb() barrier()
+
+#define smp_store_release(p, v) \
+do { \
+	compiletime_assert_atomic_type(*p); \
+	smp_mb(); \
+	ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+	typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+	compiletime_assert_atomic_type(*p); \
+	smp_mb(); \
+	___p1; \
+})
+
 #else
 
 #define smp_mb() asm volatile("dmb ish" : : : "memory")
 #define smp_rmb() asm volatile("dmb ishld" : : : "memory")
 #define smp_wmb() asm volatile("dmb ishst" : : : "memory")
+
+#define smp_store_release(p, v) \
+do { \
+	compiletime_assert_atomic_type(*p); \
+	switch (sizeof(*p)) { \
+	case 4: \
+		asm volatile ("stlr %w1, %0" \
+				: "=Q" (*p) : "r" (v) : "memory"); \
+		break; \
+	case 8: \
+		asm volatile ("stlr %1, %0" \
+				: "=Q" (*p) : "r" (v) : "memory"); \
+		break; \
+	} \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+	typeof(*p) ___p1; \
+	compiletime_assert_atomic_type(*p); \
+	switch (sizeof(*p)) { \
+	case 4: \
+		asm volatile ("ldar %w0, %1" \
+			: "=r" (___p1) : "Q" (*p) : "memory"); \
+		break; \
+	case 8: \
+		asm volatile ("ldar %0, %1" \
+			: "=r" (___p1) : "Q" (*p) : "memory"); \
+		break; \
+	} \
+	___p1; \
+})
+
 #endif
 
 #define read_barrier_depends() do { } while(0)
17 changes: 5 additions & 12 deletions arch/avr32/include/asm/barrier.h
@@ -8,22 +8,15 @@
 #ifndef __ASM_AVR32_BARRIER_H
 #define __ASM_AVR32_BARRIER_H
 
-#define nop() asm volatile("nop")
-
-#define mb() asm volatile("" : : : "memory")
-#define rmb() mb()
-#define wmb() asm volatile("sync 0" : : : "memory")
-#define read_barrier_depends() do { } while(0)
-#define set_mb(var, value) do { var = value; mb(); } while(0)
+/*
+ * Weirdest thing ever.. no full barrier, but it has a write barrier!
+ */
+#define wmb() asm volatile("sync 0" : : : "memory")
 
 #ifdef CONFIG_SMP
 # error "The AVR32 port does not support SMP"
-#else
-# define smp_mb() barrier()
-# define smp_rmb() barrier()
-# define smp_wmb() barrier()
-# define smp_read_barrier_depends() do { } while(0)
 #endif
 
+#include <asm-generic/barrier.h>
 
 #endif /* __ASM_AVR32_BARRIER_H */
18 changes: 1 addition & 17 deletions arch/blackfin/include/asm/barrier.h
@@ -23,26 +23,10 @@
 # define rmb() do { barrier(); smp_check_barrier(); } while (0)
 # define wmb() do { barrier(); smp_mark_barrier(); } while (0)
 # define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0)
-#else
-# define mb() barrier()
-# define rmb() barrier()
-# define wmb() barrier()
-# define read_barrier_depends() do { } while (0)
 #endif
 
-#else /* !CONFIG_SMP */
-
-#define mb() barrier()
-#define rmb() barrier()
-#define wmb() barrier()
-#define read_barrier_depends() do { } while (0)
-
 #endif /* !CONFIG_SMP */
 
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define set_mb(var, value) do { var = value; mb(); } while (0)
-#define smp_read_barrier_depends() read_barrier_depends()
+#include <asm-generic/barrier.h>
 
 #endif /* _BLACKFIN_BARRIER_H */
1 change: 1 addition & 0 deletions arch/cris/include/asm/Kbuild
@@ -3,6 +3,7 @@ header-y += arch-v10/
 header-y += arch-v32/
 
 
+generic-y += barrier.h
 generic-y += clkdev.h
 generic-y += exec.h
 generic-y += kvm_para.h
25 changes: 0 additions & 25 deletions arch/cris/include/asm/barrier.h

This file was deleted.

8 changes: 1 addition & 7 deletions arch/frv/include/asm/barrier.h
@@ -17,13 +17,7 @@
 #define mb() asm volatile ("membar" : : :"memory")
 #define rmb() asm volatile ("membar" : : :"memory")
 #define wmb() asm volatile ("membar" : : :"memory")
-#define read_barrier_depends() do { } while (0)
 
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do {} while(0)
-#define set_mb(var, value) \
-do { var = (value); barrier(); } while (0)
+#include <asm-generic/barrier.h>
 
 #endif /* _ASM_BARRIER_H */
1 change: 1 addition & 0 deletions arch/hexagon/include/asm/Kbuild
@@ -2,6 +2,7 @@
 header-y += ucontext.h
 
 generic-y += auxvec.h
+generic-y += barrier.h
 generic-y += bug.h
 generic-y += bugs.h
 generic-y += clkdev.h
6 changes: 5 additions & 1 deletion arch/hexagon/include/asm/atomic.h
@@ -160,8 +160,12 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, (v)) == 0)
 #define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)
 
-
 #define atomic_inc_return(v) (atomic_add_return(1, v))
 #define atomic_dec_return(v) (atomic_sub_return(1, v))
 
+#define smp_mb__before_atomic_dec() barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc() barrier()
+#define smp_mb__after_atomic_inc() barrier()
+
 #endif
4 changes: 0 additions & 4 deletions arch/hexagon/include/asm/barrier.h
@@ -29,10 +29,6 @@
 #define smp_read_barrier_depends() barrier()
 #define smp_wmb() barrier()
 #define smp_mb() barrier()
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
 
 /* Set a value and use a memory barrier. Used by the scheduler somewhere. */
 #define set_mb(var, value) \
23 changes: 23 additions & 0 deletions arch/ia64/include/asm/barrier.h
@@ -45,13 +45,36 @@
 # define smp_rmb() rmb()
 # define smp_wmb() wmb()
 # define smp_read_barrier_depends() read_barrier_depends()
+
 #else
+
 # define smp_mb() barrier()
 # define smp_rmb() barrier()
 # define smp_wmb() barrier()
 # define smp_read_barrier_depends() do { } while(0)
+
 #endif
+
+/*
+ * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq no
+ * need for asm trickery!
+ */
+
+#define smp_store_release(p, v) \
+do { \
+	compiletime_assert_atomic_type(*p); \
+	barrier(); \
+	ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+	typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+	compiletime_assert_atomic_type(*p); \
+	barrier(); \
+	___p1; \
+})
+
 /*
  * XXX check on this ---I suspect what Linus really wants here is
  * acquire vs release semantics but we can't discuss this stuff with
(diffs for the remaining changed files are not shown here)
