Merge tag 'locking-core-2023-06-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:

 - Introduce cmpxchg128() -- aka. the demise of cmpxchg_double()

   The cmpxchg128() family of functions is basically & functionally the
   same as cmpxchg_double(), but with a saner interface.

   Instead of a 6-parameter horror that forced u128-as-u64/u64-halves
   layout details on the interface and exposed users to complexity,
   fragility & bugs, use a natural 3-parameter interface with u128
   types (a sketch of the two interfaces follows this list).

 - Restructure the generated atomic headers, and add kerneldoc comments
   for all of the generic atomic{,64,_long}_t operations.

   The generated definitions are much cleaner now, and come with
   documentation (a representative kerneldoc example appears before the
   Documentation/driver-api/basics.rst hunk below).

 - Implement lock_set_cmp_fn() on lockdep, for defining an ordering when
   taking multiple locks of the same type.

   This gets rid of one use of lockdep_set_novalidate_class() in the
   bcache code (see the annotation sketch after this list).

 - Fix a raw_cpu_generic_try_cmpxchg() bug where unintended variable
   shadowing generated garbage code with Clang on certain ARM builds
   (a distilled illustration follows the commit list below).
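
A minimal sketch of the cmpxchg_double() vs. cmpxchg128() interface
difference, using a hypothetical freelist/counters pair (the struct,
field and variable names here are illustrative only, not taken from this
series):

      /* old: two u64 halves, six parameters, boolean success result */
      ok = cmpxchg_double(&s->freelist, &s->counters,
                          old_freelist, old_counters,
                          new_freelist, new_counters);

      /* new: one naturally aligned u128, three parameters, returns the
       * previous value (try_cmpxchg128() variants exist as well) */
      u128 prev = cmpxchg128(&s->freelist_counters, old, new);
      ok = (prev == old);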

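A sketch of the lock_set_cmp_fn() annotation for ordering locks of the
same class (the struct and the ordering rule are hypothetical, and the
exact callback signature is an assumption to be checked against the
lockdep changes in this series):

      /* all nodes of one tree share a lock class; order them by level */
      static int mytree_lock_cmp_fn(const struct lockdep_map *_a,
                                    const struct lockdep_map *_b)
      {
              const struct mytree_node *a =
                      container_of(_a, struct mytree_node, lock.dep_map);
              const struct mytree_node *b =
                      container_of(_b, struct mytree_node, lock.dep_map);

              /* <0, 0, >0: lockdep then checks that same-class locks
               * are always taken in an order consistent with this */
              return a->level < b->level ? -1 :
                     a->level > b->level ?  1 : 0;
      }

      mutex_init(&node->lock);
      lock_set_cmp_fn(&node->lock, mytree_lock_cmp_fn);
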
* tag 'locking-core-2023-06-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (43 commits)
  locking/atomic: scripts: fix ${atomic}_dec_if_positive() kerneldoc
  percpu: Fix self-assignment of __old in raw_cpu_generic_try_cmpxchg()
  locking/atomic: treewide: delete arch_atomic_*() kerneldoc
  locking/atomic: docs: Add atomic operations to the driver basic API documentation
  locking/atomic: scripts: generate kerneldoc comments
  docs: scripts: kernel-doc: accept bitwise negation like ~@var
  locking/atomic: scripts: simplify raw_atomic*() definitions
  locking/atomic: scripts: simplify raw_atomic_long*() definitions
  locking/atomic: scripts: split pfx/name/sfx/order
  locking/atomic: scripts: restructure fallback ifdeffery
  locking/atomic: scripts: build raw_atomic_long*() directly
  locking/atomic: treewide: use raw_atomic*_<op>()
  locking/atomic: scripts: add trivial raw_atomic*_<op>()
  locking/atomic: scripts: factor out order template generation
  locking/atomic: scripts: remove leftover "${mult}"
  locking/atomic: scripts: remove bogus order parameter
  locking/atomic: xtensa: add preprocessor symbols
  locking/atomic: x86: add preprocessor symbols
  locking/atomic: sparc: add preprocessor symbols
  locking/atomic: sh: add preprocessor symbols
  ...
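
A distilled illustration of the shadowing pitfall behind the
raw_cpu_generic_try_cmpxchg() fix above (these macros are simplified,
non-atomic stand-ins, not the kernel's percpu definitions):

      /* the inner helper declares its own __old ... */
      #define TRY_CMPXCHG(ptr, oldp, new)                     \
      ({                                                      \
              typeof(*(ptr)) __old = *(oldp);                 \
              bool __ok = (*(ptr) == __old);                  \
              if (__ok)                                       \
                      *(ptr) = (new);                         \
              else                                            \
                      *(oldp) = *(ptr);                       \
              __ok;                                           \
      })

      /* ... and the outer wrapper also names its local __old, so the
       * inner initializer expands to "__old = *(&__old)" -- a self-
       * assignment of an uninitialized variable rather than a read of
       * the caller's value */
      #define CMPXCHG(ptr, old, new)                          \
      ({                                                      \
              typeof(*(ptr)) __old = (old);                   \
              TRY_CMPXCHG(ptr, &__old, new);                  \
              __old;                                          \
      })

The fix gives the inner temporary a distinct name so it can no longer
capture the caller's identifier.
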
torvalds committed Jun 27, 2023
2 parents ed3b792 + b33eb50 commit bc6cb4d
Showing 136 changed files with 9,922 additions and 4,161 deletions.
2 changes: 0 additions & 2 deletions Documentation/core-api/this_cpu_ops.rst
@@ -53,7 +53,6 @@ preemption and interrupts::
this_cpu_add_return(pcp, val)
this_cpu_xchg(pcp, nval)
this_cpu_cmpxchg(pcp, oval, nval)
this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
this_cpu_sub(pcp, val)
this_cpu_inc(pcp)
this_cpu_dec(pcp)
@@ -242,7 +241,6 @@ safe::
__this_cpu_add_return(pcp, val)
__this_cpu_xchg(pcp, nval)
__this_cpu_cmpxchg(pcp, oval, nval)
__this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
__this_cpu_sub(pcp, val)
__this_cpu_inc(pcp)
__this_cpu_dec(pcp)
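
The next hunk points the driver-api documentation at the generated
headers: the instrumented atomic_*() / atomic_long_*() wrappers layer
KASAN/KCSAN instrumentation on top of the raw_atomic*_<op>() operations,
which in turn fall back to arch_atomic_*() where an architecture does
not provide an op. For orientation, the generated kerneldoc comments are
roughly of this shape (paraphrased, not verbatim generator output):

      /**
       * raw_atomic_inc() - atomic increment with full ordering
       * @v: pointer to atomic_t
       *
       * Atomically updates @v to (@v + 1) with full ordering.
       *
       * Safe to use in noinstr code; prefer atomic_inc() elsewhere.
       *
       * Return: Nothing.
       */
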
8 changes: 7 additions & 1 deletion Documentation/driver-api/basics.rst
@@ -84,7 +84,13 @@ Reference counting
Atomics
-------

.. kernel-doc:: arch/x86/include/asm/atomic.h
.. kernel-doc:: include/linux/atomic/atomic-instrumented.h
:internal:

.. kernel-doc:: include/linux/atomic/atomic-arch-fallback.h
:internal:

.. kernel-doc:: include/linux/atomic/atomic-long.h
:internal:

Kernel objects manipulation
35 changes: 0 additions & 35 deletions arch/alpha/include/asm/atomic.h
@@ -200,25 +200,6 @@ ATOMIC_OPS(xor, xor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define arch_atomic64_cmpxchg(v, old, new) \
(arch_cmpxchg(&((v)->counter), old, new))
#define arch_atomic64_xchg(v, new) \
(arch_xchg(&((v)->counter), new))

#define arch_atomic_cmpxchg(v, old, new) \
(arch_cmpxchg(&((v)->counter), old, new))
#define arch_atomic_xchg(v, new) \
(arch_xchg(&((v)->counter), new))

/**
* arch_atomic_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int c, new, old;
@@ -242,15 +223,6 @@ static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless

/**
* arch_atomic64_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 c, new, old;
@@ -274,13 +246,6 @@ static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

/*
* arch_atomic64_dec_if_positive - decrement by 1 if old value positive
* @v: pointer of type atomic_t
*
* The function returns the old value of *v minus 1, even if
* the atomic variable, v, was not decremented.
*/
static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
s64 old, tmp;
9 changes: 9 additions & 0 deletions arch/arc/include/asm/atomic-spinlock.h
@@ -81,6 +81,11 @@ static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define arch_atomic_fetch_add arch_atomic_fetch_add
#define arch_atomic_fetch_sub arch_atomic_fetch_sub
#define arch_atomic_add_return arch_atomic_add_return
#define arch_atomic_sub_return arch_atomic_sub_return

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
@@ -92,7 +97,11 @@ ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)

#define arch_atomic_andnot arch_atomic_andnot

#define arch_atomic_fetch_and arch_atomic_fetch_and
#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
#define arch_atomic_fetch_or arch_atomic_fetch_or
#define arch_atomic_fetch_xor arch_atomic_fetch_xor

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
24 changes: 0 additions & 24 deletions arch/arc/include/asm/atomic.h
@@ -22,30 +22,6 @@
#include <asm/atomic-spinlock.h>
#endif

#define arch_atomic_cmpxchg(v, o, n) \
({ \
arch_cmpxchg(&((v)->counter), (o), (n)); \
})

#ifdef arch_cmpxchg_relaxed
#define arch_atomic_cmpxchg_relaxed(v, o, n) \
({ \
arch_cmpxchg_relaxed(&((v)->counter), (o), (n)); \
})
#endif

#define arch_atomic_xchg(v, n) \
({ \
arch_xchg(&((v)->counter), (n)); \
})

#ifdef arch_xchg_relaxed
#define arch_atomic_xchg_relaxed(v, n) \
({ \
arch_xchg_relaxed(&((v)->counter), (n)); \
})
#endif

/*
* 64-bit atomics
*/
19 changes: 2 additions & 17 deletions arch/arc/include/asm/atomic64-arcv2.h
@@ -159,6 +159,7 @@ arch_atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)

return prev;
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg

static inline s64 arch_atomic64_xchg(atomic64_t *ptr, s64 new)
{
@@ -179,14 +180,7 @@ static inline s64 arch_atomic64_xchg(atomic64_t *ptr, s64 new)

return prev;
}

/**
* arch_atomic64_dec_if_positive - decrement by 1 if old value positive
* @v: pointer of type atomic64_t
*
* The function returns the old value of *v minus 1, even if
* the atomic variable, v, was not decremented.
*/
#define arch_atomic64_xchg arch_atomic64_xchg

static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
@@ -212,15 +206,6 @@ static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

/**
* arch_atomic64_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, if it was not @u.
* Returns the old value of @v
*/
static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 old, temp;
17 changes: 17 additions & 0 deletions arch/arm/include/asm/assembler.h
@@ -394,6 +394,23 @@ ALT_UP_B(.L0_\@)
#endif
.endm

/*
* Raw SMP data memory barrier
*/
.macro __smp_dmb mode
#if __LINUX_ARM_ARCH__ >= 7
.ifeqs "\mode","arm"
dmb ish
.else
W(dmb) ish
.endif
#elif __LINUX_ARM_ARCH__ == 6
mcr p15, 0, r0, c7, c10, 5 @ dmb
#else
.error "Incompatible SMP platform"
#endif
.endm

#if defined(CONFIG_CPU_V7M)
/*
* setmode is used to assert to be in svc mode during boot. For v7-M
15 changes: 11 additions & 4 deletions arch/arm/include/asm/atomic.h
@@ -197,6 +197,16 @@ static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
return val; \
}

#define arch_atomic_add_return arch_atomic_add_return
#define arch_atomic_sub_return arch_atomic_sub_return
#define arch_atomic_fetch_add arch_atomic_fetch_add
#define arch_atomic_fetch_sub arch_atomic_fetch_sub

#define arch_atomic_fetch_and arch_atomic_fetch_and
#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
#define arch_atomic_fetch_or arch_atomic_fetch_or
#define arch_atomic_fetch_xor arch_atomic_fetch_xor

static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
@@ -210,8 +220,7 @@ static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)

return ret;
}

#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
#define arch_atomic_cmpxchg arch_atomic_cmpxchg

#endif /* __LINUX_ARM_ARCH__ */

@@ -240,8 +249,6 @@ ATOMIC_OPS(xor, ^=, eor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
s64 counter;
29 changes: 25 additions & 4 deletions arch/arm/include/asm/sync_bitops.h
@@ -14,14 +14,35 @@
* ops which are SMP safe even on a UP kernel.
*/

/*
* Unordered
*/

#define sync_set_bit(nr, p) _set_bit(nr, p)
#define sync_clear_bit(nr, p) _clear_bit(nr, p)
#define sync_change_bit(nr, p) _change_bit(nr, p)
#define sync_test_and_set_bit(nr, p) _test_and_set_bit(nr, p)
#define sync_test_and_clear_bit(nr, p) _test_and_clear_bit(nr, p)
#define sync_test_and_change_bit(nr, p) _test_and_change_bit(nr, p)
#define sync_test_bit(nr, addr) test_bit(nr, addr)
#define arch_sync_cmpxchg arch_cmpxchg

/*
* Fully ordered
*/

int _sync_test_and_set_bit(int nr, volatile unsigned long * p);
#define sync_test_and_set_bit(nr, p) _sync_test_and_set_bit(nr, p)

int _sync_test_and_clear_bit(int nr, volatile unsigned long * p);
#define sync_test_and_clear_bit(nr, p) _sync_test_and_clear_bit(nr, p)

int _sync_test_and_change_bit(int nr, volatile unsigned long * p);
#define sync_test_and_change_bit(nr, p) _sync_test_and_change_bit(nr, p)

#define arch_sync_cmpxchg(ptr, old, new) \
({ \
__typeof__(*(ptr)) __ret; \
__smp_mb__before_atomic(); \
__ret = arch_cmpxchg_relaxed((ptr), (old), (new)); \
__smp_mb__after_atomic(); \
__ret; \
})

#endif
14 changes: 11 additions & 3 deletions arch/arm/lib/bitops.h
@@ -28,7 +28,7 @@ UNWIND( .fnend )
ENDPROC(\name )
.endm

.macro testop, name, instr, store
.macro __testop, name, instr, store, barrier
ENTRY( \name )
UNWIND( .fnstart )
ands ip, r1, #3
@@ -38,7 +38,7 @@ UNWIND( .fnstart )
mov r0, r0, lsr #5
add r1, r1, r0, lsl #2 @ Get word offset
mov r3, r2, lsl r3 @ create mask
smp_dmb
\barrier
#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
.arch_extension mp
ALT_SMP(W(pldw) [r1])
@@ -50,13 +50,21 @@ UNWIND( .fnstart )
strex ip, r2, [r1]
cmp ip, #0
bne 1b
smp_dmb
\barrier
cmp r0, #0
movne r0, #1
2: bx lr
UNWIND( .fnend )
ENDPROC(\name )
.endm

.macro testop, name, instr, store
__testop \name, \instr, \store, smp_dmb
.endm

.macro sync_testop, name, instr, store
__testop \name, \instr, \store, __smp_dmb
.endm
#else
.macro bitop, name, instr
ENTRY( \name )
4 changes: 4 additions & 0 deletions arch/arm/lib/testchangebit.S
@@ -10,3 +10,7 @@
.text

testop _test_and_change_bit, eor, str

#if __LINUX_ARM_ARCH__ >= 6
sync_testop _sync_test_and_change_bit, eor, str
#endif
4 changes: 4 additions & 0 deletions arch/arm/lib/testclearbit.S
@@ -10,3 +10,7 @@
.text

testop _test_and_clear_bit, bicne, strne

#if __LINUX_ARM_ARCH__ >= 6
sync_testop _sync_test_and_clear_bit, bicne, strne
#endif
4 changes: 4 additions & 0 deletions arch/arm/lib/testsetbit.S
@@ -10,3 +10,7 @@
.text

testop _test_and_set_bit, orreq, streq

#if __LINUX_ARM_ARCH__ >= 6
sync_testop _sync_test_and_set_bit, orreq, streq
#endif
28 changes: 0 additions & 28 deletions arch/arm64/include/asm/atomic.h
@@ -142,24 +142,6 @@ static __always_inline long arch_atomic64_dec_if_positive(atomic64_t *v)
#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor_release
#define arch_atomic_fetch_xor arch_atomic_fetch_xor

#define arch_atomic_xchg_relaxed(v, new) \
arch_xchg_relaxed(&((v)->counter), (new))
#define arch_atomic_xchg_acquire(v, new) \
arch_xchg_acquire(&((v)->counter), (new))
#define arch_atomic_xchg_release(v, new) \
arch_xchg_release(&((v)->counter), (new))
#define arch_atomic_xchg(v, new) \
arch_xchg(&((v)->counter), (new))

#define arch_atomic_cmpxchg_relaxed(v, old, new) \
arch_cmpxchg_relaxed(&((v)->counter), (old), (new))
#define arch_atomic_cmpxchg_acquire(v, old, new) \
arch_cmpxchg_acquire(&((v)->counter), (old), (new))
#define arch_atomic_cmpxchg_release(v, old, new) \
arch_cmpxchg_release(&((v)->counter), (old), (new))
#define arch_atomic_cmpxchg(v, old, new) \
arch_cmpxchg(&((v)->counter), (old), (new))

#define arch_atomic_andnot arch_atomic_andnot

/*
@@ -209,16 +191,6 @@ static __always_inline long arch_atomic64_dec_if_positive(atomic64_t *v)
#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor_release
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor

#define arch_atomic64_xchg_relaxed arch_atomic_xchg_relaxed
#define arch_atomic64_xchg_acquire arch_atomic_xchg_acquire
#define arch_atomic64_xchg_release arch_atomic_xchg_release
#define arch_atomic64_xchg arch_atomic_xchg

#define arch_atomic64_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed
#define arch_atomic64_cmpxchg_acquire arch_atomic_cmpxchg_acquire
#define arch_atomic64_cmpxchg_release arch_atomic_cmpxchg_release
#define arch_atomic64_cmpxchg arch_atomic_cmpxchg

#define arch_atomic64_andnot arch_atomic64_andnot

#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
  ...
