Merge tag 'locking-core-2021-06-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:

 - Core locking & atomics:

     - Convert all architectures to ARCH_ATOMIC: move every architecture
       to ARCH_ATOMIC, then get rid of ARCH_ATOMIC and all the
       transitory facilities and #ifdefs (see the layering sketch after
       this list).

       A large net reduction in complexity from that series:

           63 files changed, 756 insertions(+), 4094 deletions(-)

     - Self-test enhancements

 - Futexes:

     - Add the new FUTEX_LOCK_PI2 ABI, which is a variant that doesn't
       set FLAGS_CLOCKRT (i.e. it uses CLOCK_MONOTONIC); a usage sketch
       follows this list.

       [ The temptation to repurpose FUTEX_LOCK_PI's implicit setting of
         FLAGS_CLOCKRT & invert the flag's meaning to avoid having to
         introduce a new variant was resisted successfully. ]

     - Enhance futex self-tests

 - Lockdep:

     - Fix dependency path printouts

     - Optimize trace saving

     - Broaden & fix wait-context checks

 - Misc cleanups and fixes.
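
  [ Layering sketch, added for illustration and not part of the original
    changelog: after the conversion, each architecture implements only
    arch_atomic_*() and the generated wrappers in
    include/asm-generic/atomic-instrumented.h add the sanitizer hooks on
    top. The generated atomic_read() wrapper has roughly this shape:

        static __always_inline int
        atomic_read(const atomic_t *v)
        {
                instrument_atomic_read(v, sizeof(*v));
                return arch_atomic_read(v);
        }

    Callers keep using the plain atomic_*() names; only architecture
    code and the wrapper generator deal in the arch_ prefix. ]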

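  [ Userspace usage sketch, not from the original changelog; the
    futex_lock_pi2() helper and the fallback #define are illustrative:

        #include <linux/futex.h>
        #include <stdint.h>
        #include <sys/syscall.h>
        #include <time.h>
        #include <unistd.h>

        #ifndef FUTEX_LOCK_PI2
        #define FUTEX_LOCK_PI2 13   /* introduced by this merge */
        #endif

        /* Acquire a PI futex with an absolute CLOCK_MONOTONIC deadline;
         * FUTEX_LOCK_PI would measure the same timespec against
         * CLOCK_REALTIME. Returns 0 on acquisition, -1 on error
         * (e.g. errno == ETIMEDOUT). */
        static long futex_lock_pi2(uint32_t *uaddr,
                                   const struct timespec *deadline)
        {
                return syscall(SYS_futex, uaddr, FUTEX_LOCK_PI2, 0,
                               deadline, NULL, 0);
        }

        int try_lock_for_one_second(uint32_t *futex_word)
        {
                struct timespec deadline;

                clock_gettime(CLOCK_MONOTONIC, &deadline);
                deadline.tv_sec += 1;
                return futex_lock_pi2(futex_word, &deadline);
        }
  ]
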
* tag 'locking-core-2021-06-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (52 commits)
  locking/lockdep: Correct the description error for check_redundant()
  futex: Provide FUTEX_LOCK_PI2 to support clock selection
  futex: Prepare futex_lock_pi() for runtime clock selection
  lockdep/selftest: Remove wait-type RCU_CALLBACK tests
  lockdep/selftests: Fix selftests vs PROVE_RAW_LOCK_NESTING
  lockdep: Fix wait-type for empty stack
  locking/selftests: Add a selftest for check_irq_usage()
  lockding/lockdep: Avoid to find wrong lock dep path in check_irq_usage()
  locking/lockdep: Remove the unnecessary trace saving
  locking/lockdep: Fix the dep path printing for backwards BFS
  selftests: futex: Add futex compare requeue test
  selftests: futex: Add futex wait test
  seqlock: Remove trailing semicolon in macros
  locking/lockdep: Reduce LOCKDEP dependency list
  locking/lockdep,doc: Improve readability of the block matrix
  locking/atomics: atomic-instrumented: simplify ifdeffery
  locking/atomic: delete !ARCH_ATOMIC remnants
  locking/atomic: xtensa: move to ARCH_ATOMIC
  locking/atomic: sparc: move to ARCH_ATOMIC
  locking/atomic: sh: move to ARCH_ATOMIC
  ...
torvalds committed Jun 28, 2021
2 parents b89c07d + 0e8a89d commit a15286c
Showing 77 changed files with 1,395 additions and 4,157 deletions.
4 changes: 2 additions & 2 deletions Documentation/locking/lockdep-design.rst
@@ -453,9 +453,9 @@ There are simply four block conditions:
Block condition matrix, Y means the row blocks the column, and N means otherwise.

+---+---+---+---+
-|   | E | r | R |
+|   | W | r | R |
+---+---+---+---+
-| E | Y | Y | Y |
+| W | Y | Y | Y |
+---+---+---+---+
| r | Y | Y | N |
+---+---+---+---+
88 changes: 47 additions & 41 deletions arch/alpha/include/asm/atomic.h
@@ -26,11 +26,11 @@

#define ATOMIC64_INIT(i) { (i) }

-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic64_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic64_read(v) READ_ONCE((v)->counter)

-#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
-#define atomic64_set(v,i) WRITE_ONCE((v)->counter, (i))
+#define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
+#define arch_atomic64_set(v,i) WRITE_ONCE((v)->counter, (i))

/*
* To get proper branch prediction for the main line, we must branch
@@ -39,7 +39,7 @@
*/

#define ATOMIC_OP(op, asm_op) \
-static __inline__ void atomic_##op(int i, atomic_t * v) \
+static __inline__ void arch_atomic_##op(int i, atomic_t * v) \
{ \
unsigned long temp; \
__asm__ __volatile__( \
@@ -55,7 +55,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
} \

#define ATOMIC_OP_RETURN(op, asm_op) \
-static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
long temp, result; \
__asm__ __volatile__( \
@@ -74,7 +74,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
}

#define ATOMIC_FETCH_OP(op, asm_op) \
-static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
long temp, result; \
__asm__ __volatile__( \
@@ -92,7 +92,7 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
}

#define ATOMIC64_OP(op, asm_op) \
-static __inline__ void atomic64_##op(s64 i, atomic64_t * v) \
+static __inline__ void arch_atomic64_##op(s64 i, atomic64_t * v) \
{ \
s64 temp; \
__asm__ __volatile__( \
@@ -108,7 +108,8 @@ static __inline__ void atomic64_##op(s64 i, atomic64_t * v) \
} \

#define ATOMIC64_OP_RETURN(op, asm_op) \
-static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
+static __inline__ s64 \
+arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
{ \
s64 temp, result; \
__asm__ __volatile__( \
@@ -127,7 +128,8 @@ static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
}

#define ATOMIC64_FETCH_OP(op, asm_op) \
-static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
+static __inline__ s64 \
+arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
{ \
s64 temp, result; \
__asm__ __volatile__( \
@@ -155,18 +157,18 @@ static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
ATOMIC_OPS(add)
ATOMIC_OPS(sub)

-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed

-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed

-#define atomic_andnot atomic_andnot
-#define atomic64_andnot atomic64_andnot
+#define arch_atomic_andnot arch_atomic_andnot
+#define arch_atomic64_andnot arch_atomic64_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm) \
@@ -180,15 +182,15 @@ ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor)

-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed

-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP
@@ -198,22 +200,26 @@ ATOMIC_OPS(xor, xor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

-#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic64_cmpxchg(v, old, new) \
+	(arch_cmpxchg(&((v)->counter), old, new))
+#define arch_atomic64_xchg(v, new) \
+	(arch_xchg(&((v)->counter), new))

-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, old, new) \
+	(arch_cmpxchg(&((v)->counter), old, new))
+#define arch_atomic_xchg(v, new) \
+	(arch_xchg(&((v)->counter), new))

/**
- * atomic_fetch_add_unless - add unless the number is a given value
+ * arch_atomic_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int c, new, old;
smp_mb();
@@ -234,18 +240,18 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
smp_mb();
return old;
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless

/**
- * atomic64_fetch_add_unless - add unless the number is a given value
+ * arch_atomic64_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 c, new, old;
smp_mb();
@@ -266,16 +272,16 @@ static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
smp_mb();
return old;
}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

/*
- * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
* @v: pointer of type atomic_t
*
* The function returns the old value of *v minus 1, even if
* the atomic variable, v, was not decremented.
*/
-static inline s64 atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
s64 old, tmp;
smp_mb();
@@ -295,6 +301,6 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
smp_mb();
return old - 1;
}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

#endif /* _ALPHA_ATOMIC_H */
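
[ Usage sketch, not part of the diff: the fetch_add_unless operation
  documented above is what backs "increment unless already zero"
  reference-count patterns at the generic API level, e.g.:

      #include <linux/atomic.h>
      #include <linux/types.h>

      /* Take a reference unless the object is already dead.
       * atomic_fetch_add_unless() returns the old value, so a zero
       * return means no reference was taken. */
      static bool obj_tryget(atomic_t *refcount)
      {
              return atomic_fetch_add_unless(refcount, 1, 0) != 0;
      }
]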
12 changes: 6 additions & 6 deletions arch/alpha/include/asm/cmpxchg.h
@@ -17,7 +17,7 @@
sizeof(*(ptr))); \
})

-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -26,7 +26,7 @@
sizeof(*(ptr))); \
})

-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \
@@ -42,7 +42,7 @@
* The leading and the trailing memory barriers guarantee that these
* operations are fully ordered.
*/
-#define xchg(ptr, x) \
+#define arch_xchg(ptr, x) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) _x_ = (x); \
@@ -53,7 +53,7 @@
__ret; \
})

-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) _o_ = (o); \
@@ -65,10 +65,10 @@
__ret; \
})

-#define cmpxchg64(ptr, o, n) \
+#define arch_cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
-	cmpxchg((ptr), (o), (n)); \
+	arch_cmpxchg((ptr), (o), (n)); \
})

#undef ____cmpxchg
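
[ Usage sketch, not part of the diff: arch_cmpxchg() is what the
  generic cmpxchg()/atomic_try_cmpxchg() layer bottoms out in, enabling
  the classic compare-and-swap retry loop; e.g. a saturating increment
  that never wraps past INT_MAX:

      #include <linux/atomic.h>
      #include <linux/limits.h>

      static int sat_inc(atomic_t *v)
      {
              int old = atomic_read(v);

              /* atomic_try_cmpxchg() reloads 'old' on failure, so the
               * loop re-checks the saturation bound each round. */
              do {
                      if (old == INT_MAX)
                              return old;
              } while (!atomic_try_cmpxchg(v, &old, old + 1));

              return old + 1;
      }
]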
…