
Commit bc6cb4d

Merge tag 'locking-core-2023-06-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:

 - Introduce cmpxchg128() -- aka. the demise of cmpxchg_double().

   The cmpxchg128() family of functions is basically & functionally the
   same as cmpxchg_double(), but with a saner interface. Instead of a
   6-parameter horror that forced u128 - u64/u64-halves layout details
   on the interface and exposed users to complexity, fragility & bugs,
   use a natural 3-parameter interface with u128 types.

 - Restructure the generated atomic headers, and add kerneldoc comments
   for all of the generic atomic{,64,_long}_t operations. The generated
   definitions are much cleaner now, and come with documentation.

 - Implement lock_set_cmp_fn() on lockdep, for defining an ordering when
   taking multiple locks of the same type. This gets rid of one use of
   lockdep_set_novalidate_class() in the bcache code.

 - Fix a raw_cpu_generic_try_cmpxchg() bug where unintended variable
   shadowing generated garbage code with Clang on certain ARM builds.

* tag 'locking-core-2023-06-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (43 commits)
  locking/atomic: scripts: fix ${atomic}_dec_if_positive() kerneldoc
  percpu: Fix self-assignment of __old in raw_cpu_generic_try_cmpxchg()
  locking/atomic: treewide: delete arch_atomic_*() kerneldoc
  locking/atomic: docs: Add atomic operations to the driver basic API documentation
  locking/atomic: scripts: generate kerneldoc comments
  docs: scripts: kernel-doc: accept bitwise negation like ~@var
  locking/atomic: scripts: simplify raw_atomic*() definitions
  locking/atomic: scripts: simplify raw_atomic_long*() definitions
  locking/atomic: scripts: split pfx/name/sfx/order
  locking/atomic: scripts: restructure fallback ifdeffery
  locking/atomic: scripts: build raw_atomic_long*() directly
  locking/atomic: treewide: use raw_atomic*_<op>()
  locking/atomic: scripts: add trivial raw_atomic*_<op>()
  locking/atomic: scripts: factor out order template generation
  locking/atomic: scripts: remove leftover "${mult}"
  locking/atomic: scripts: remove bogus order parameter
  locking/atomic: xtensa: add preprocessor symbols
  locking/atomic: x86: add preprocessor symbols
  locking/atomic: sparc: add preprocessor symbols
  locking/atomic: sh: add preprocessor symbols
  ...
2 parents ed3b792 + b33eb50 commit bc6cb4d
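
For illustration, a minimal sketch of the cmpxchg128() interface change described in the first item of the commit message. The struct, field and function names below are hypothetical and do not appear in the kernel tree; only cmpxchg_double(), cmpxchg128() and the u128 type come from the series itself, and real call sites may differ.

	/* Hypothetical 16-byte descriptor; all names here are illustrative. */
	struct pair {
		u64 lo;
		u64 hi;
	};

	/*
	 * Old interface: six parameters, the 128-bit value split into u64
	 * halves, and the return value is a success flag.
	 */
	static bool pair_update_old(struct pair *p, struct pair old, struct pair new)
	{
		return cmpxchg_double(&p->lo, &p->hi,
				      old.lo, old.hi,
				      new.lo, new.hi);
	}

	/*
	 * New interface: three parameters on a single u128, returning the
	 * previous value, matching the usual cmpxchg() idiom.
	 */
	static bool pair_update_new(u128 *p, u128 old, u128 new)
	{
		return cmpxchg128(p, old, new) == old;
	}

The same cleanup is why this_cpu_cmpxchg_double() disappears from Documentation/core-api/this_cpu_ops.rst in the diff below.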

136 files changed: +9922 -4161 lines changed


Documentation/core-api/this_cpu_ops.rst (-2)

@@ -53,7 +53,6 @@ preemption and interrupts::
 	this_cpu_add_return(pcp, val)
 	this_cpu_xchg(pcp, nval)
 	this_cpu_cmpxchg(pcp, oval, nval)
-	this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
 	this_cpu_sub(pcp, val)
 	this_cpu_inc(pcp)
 	this_cpu_dec(pcp)
@@ -242,7 +241,6 @@ safe::
 	__this_cpu_add_return(pcp, val)
 	__this_cpu_xchg(pcp, nval)
 	__this_cpu_cmpxchg(pcp, oval, nval)
-	__this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
 	__this_cpu_sub(pcp, val)
 	__this_cpu_inc(pcp)
 	__this_cpu_dec(pcp)

Documentation/driver-api/basics.rst (+7 -1)

@@ -84,7 +84,13 @@ Reference counting
 Atomics
 -------
 
-.. kernel-doc:: arch/x86/include/asm/atomic.h
+.. kernel-doc:: include/linux/atomic/atomic-instrumented.h
+   :internal:
+
+.. kernel-doc:: include/linux/atomic/atomic-arch-fallback.h
+   :internal:
+
+.. kernel-doc:: include/linux/atomic/atomic-long.h
    :internal:
 
 Kernel objects manipulation

arch/alpha/include/asm/atomic.h (-35)

@@ -200,25 +200,6 @@ ATOMIC_OPS(xor, xor)
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-#define arch_atomic64_cmpxchg(v, old, new) \
-	(arch_cmpxchg(&((v)->counter), old, new))
-#define arch_atomic64_xchg(v, new) \
-	(arch_xchg(&((v)->counter), new))
-
-#define arch_atomic_cmpxchg(v, old, new) \
-	(arch_cmpxchg(&((v)->counter), old, new))
-#define arch_atomic_xchg(v, new) \
-	(arch_xchg(&((v)->counter), new))
-
-/**
- * arch_atomic_fetch_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
 static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int c, new, old;
@@ -242,15 +223,6 @@ static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
 }
 #define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
 
-/**
- * arch_atomic64_fetch_add_unless - add unless the number is a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
 static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
 	s64 c, new, old;
@@ -274,13 +246,6 @@ static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u
 }
 #define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
 
-/*
- * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
- * @v: pointer of type atomic_t
- *
- * The function returns the old value of *v minus 1, even if
- * the atomic variable, v, was not decremented.
- */
 static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
 {
 	s64 old, tmp;

arch/arc/include/asm/atomic-spinlock.h (+9)

@@ -81,6 +81,11 @@ static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
 ATOMIC_OPS(add, +=, add)
 ATOMIC_OPS(sub, -=, sub)
 
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_sub_return arch_atomic_sub_return
+
 #undef ATOMIC_OPS
 #define ATOMIC_OPS(op, c_op, asm_op) \
 	ATOMIC_OP(op, c_op, asm_op) \
@@ -92,7 +97,11 @@ ATOMIC_OPS(or, |=, or)
 ATOMIC_OPS(xor, ^=, xor)
 
 #define arch_atomic_andnot arch_atomic_andnot
+
+#define arch_atomic_fetch_and arch_atomic_fetch_and
 #define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
 
 #undef ATOMIC_OPS
 #undef ATOMIC_FETCH_OP

arch/arc/include/asm/atomic.h (-24)

@@ -22,30 +22,6 @@
 #include <asm/atomic-spinlock.h>
 #endif
 
-#define arch_atomic_cmpxchg(v, o, n) \
-({ \
-	arch_cmpxchg(&((v)->counter), (o), (n)); \
-})
-
-#ifdef arch_cmpxchg_relaxed
-#define arch_atomic_cmpxchg_relaxed(v, o, n) \
-({ \
-	arch_cmpxchg_relaxed(&((v)->counter), (o), (n)); \
-})
-#endif
-
-#define arch_atomic_xchg(v, n) \
-({ \
-	arch_xchg(&((v)->counter), (n)); \
-})
-
-#ifdef arch_xchg_relaxed
-#define arch_atomic_xchg_relaxed(v, n) \
-({ \
-	arch_xchg_relaxed(&((v)->counter), (n)); \
-})
-#endif
-
 /*
  * 64-bit atomics
  */

arch/arc/include/asm/atomic64-arcv2.h (+2 -17)

@@ -159,6 +159,7 @@ arch_atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
 
 	return prev;
 }
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
 
 static inline s64 arch_atomic64_xchg(atomic64_t *ptr, s64 new)
 {
@@ -179,14 +180,7 @@ static inline s64 arch_atomic64_xchg(atomic64_t *ptr, s64 new)
 
 	return prev;
 }
-
-/**
- * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
- * @v: pointer of type atomic64_t
- *
- * The function returns the old value of *v minus 1, even if
- * the atomic variable, v, was not decremented.
- */
+#define arch_atomic64_xchg arch_atomic64_xchg
 
 static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
 {
@@ -212,15 +206,6 @@ static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
 }
 #define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
 
-/**
- * arch_atomic64_fetch_add_unless - add unless the number is a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, if it was not @u.
- * Returns the old value of @v
- */
 static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
 	s64 old, temp;

arch/arm/include/asm/assembler.h (+17)

@@ -394,6 +394,23 @@ ALT_UP_B(.L0_\@)
 #endif
 	.endm
 
+/*
+ * Raw SMP data memory barrier
+ */
+	.macro	__smp_dmb mode
+#if __LINUX_ARM_ARCH__ >= 7
+	.ifeqs "\mode","arm"
+	dmb	ish
+	.else
+	W(dmb)	ish
+	.endif
+#elif __LINUX_ARM_ARCH__ == 6
+	mcr	p15, 0, r0, c7, c10, 5	@ dmb
+#else
+	.error "Incompatible SMP platform"
+#endif
+	.endm
+
 #if defined(CONFIG_CPU_V7M)
 	/*
 	 * setmode is used to assert to be in svc mode during boot. For v7-M

arch/arm/include/asm/atomic.h (+11 -4)

@@ -197,6 +197,16 @@ static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
 	return val; \
 }
 
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_sub_return arch_atomic_sub_return
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+
 static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	int ret;
@@ -210,8 +220,7 @@ static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
 
 	return ret;
 }
-
-#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
 
 #endif /* __LINUX_ARM_ARCH__ */
 
@@ -240,8 +249,6 @@ ATOMIC_OPS(xor, ^=, eor)
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
-
 #ifndef CONFIG_GENERIC_ATOMIC64
 typedef struct {
 	s64 counter;

arch/arm/include/asm/sync_bitops.h (+25 -4)

@@ -14,14 +14,35 @@
  * ops which are SMP safe even on a UP kernel.
  */
 
+/*
+ * Unordered
+ */
+
 #define sync_set_bit(nr, p) _set_bit(nr, p)
 #define sync_clear_bit(nr, p) _clear_bit(nr, p)
 #define sync_change_bit(nr, p) _change_bit(nr, p)
-#define sync_test_and_set_bit(nr, p) _test_and_set_bit(nr, p)
-#define sync_test_and_clear_bit(nr, p) _test_and_clear_bit(nr, p)
-#define sync_test_and_change_bit(nr, p) _test_and_change_bit(nr, p)
 #define sync_test_bit(nr, addr) test_bit(nr, addr)
-#define arch_sync_cmpxchg arch_cmpxchg
 
+/*
+ * Fully ordered
+ */
+
+int _sync_test_and_set_bit(int nr, volatile unsigned long * p);
+#define sync_test_and_set_bit(nr, p) _sync_test_and_set_bit(nr, p)
+
+int _sync_test_and_clear_bit(int nr, volatile unsigned long * p);
+#define sync_test_and_clear_bit(nr, p) _sync_test_and_clear_bit(nr, p)
+
+int _sync_test_and_change_bit(int nr, volatile unsigned long * p);
+#define sync_test_and_change_bit(nr, p) _sync_test_and_change_bit(nr, p)
+
+#define arch_sync_cmpxchg(ptr, old, new) \
+({ \
+	__typeof__(*(ptr)) __ret; \
+	__smp_mb__before_atomic(); \
+	__ret = arch_cmpxchg_relaxed((ptr), (old), (new)); \
+	__smp_mb__after_atomic(); \
+	__ret; \
+})
 
 #endif

arch/arm/lib/bitops.h (+11 -3)

@@ -28,7 +28,7 @@ UNWIND( .fnend )
 ENDPROC(\name )
 	.endm
 
-	.macro	testop, name, instr, store
+	.macro	__testop, name, instr, store, barrier
 ENTRY(	\name )
 UNWIND(	.fnstart )
 	ands	ip, r1, #3
@@ -38,7 +38,7 @@ UNWIND( .fnstart )
 	mov	r0, r0, lsr #5
 	add	r1, r1, r0, lsl #2	@ Get word offset
 	mov	r3, r2, lsl r3		@ create mask
-	smp_dmb
+	\barrier
 #if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
 	.arch_extension	mp
 	ALT_SMP(W(pldw)	[r1])
@@ -50,13 +50,21 @@ UNWIND( .fnstart )
 	strex	ip, r2, [r1]
 	cmp	ip, #0
 	bne	1b
-	smp_dmb
+	\barrier
 	cmp	r0, #0
 	movne	r0, #1
 2:	bx	lr
 UNWIND(	.fnend )
 ENDPROC(\name )
 	.endm
+
+	.macro	testop, name, instr, store
+	__testop \name, \instr, \store, smp_dmb
+	.endm
+
+	.macro	sync_testop, name, instr, store
+	__testop \name, \instr, \store, __smp_dmb
+	.endm
 #else
 	.macro	bitop, name, instr
 ENTRY(	\name )

arch/arm/lib/testchangebit.S (+4)

@@ -10,3 +10,7 @@
 	.text
 
 testop	_test_and_change_bit, eor, str
+
+#if __LINUX_ARM_ARCH__ >= 6
+sync_testop	_sync_test_and_change_bit, eor, str
+#endif

arch/arm/lib/testclearbit.S (+4)

@@ -10,3 +10,7 @@
 	.text
 
 testop	_test_and_clear_bit, bicne, strne
+
+#if __LINUX_ARM_ARCH__ >= 6
+sync_testop	_sync_test_and_clear_bit, bicne, strne
+#endif

arch/arm/lib/testsetbit.S (+4)

@@ -10,3 +10,7 @@
 	.text
 
 testop	_test_and_set_bit, orreq, streq
+
+#if __LINUX_ARM_ARCH__ >= 6
+sync_testop	_sync_test_and_set_bit, orreq, streq
+#endif

arch/arm64/include/asm/atomic.h (-28)

@@ -142,24 +142,6 @@ static __always_inline long arch_atomic64_dec_if_positive(atomic64_t *v)
 #define arch_atomic_fetch_xor_release arch_atomic_fetch_xor_release
 #define arch_atomic_fetch_xor arch_atomic_fetch_xor
 
-#define arch_atomic_xchg_relaxed(v, new) \
-	arch_xchg_relaxed(&((v)->counter), (new))
-#define arch_atomic_xchg_acquire(v, new) \
-	arch_xchg_acquire(&((v)->counter), (new))
-#define arch_atomic_xchg_release(v, new) \
-	arch_xchg_release(&((v)->counter), (new))
-#define arch_atomic_xchg(v, new) \
-	arch_xchg(&((v)->counter), (new))
-
-#define arch_atomic_cmpxchg_relaxed(v, old, new) \
-	arch_cmpxchg_relaxed(&((v)->counter), (old), (new))
-#define arch_atomic_cmpxchg_acquire(v, old, new) \
-	arch_cmpxchg_acquire(&((v)->counter), (old), (new))
-#define arch_atomic_cmpxchg_release(v, old, new) \
-	arch_cmpxchg_release(&((v)->counter), (old), (new))
-#define arch_atomic_cmpxchg(v, old, new) \
-	arch_cmpxchg(&((v)->counter), (old), (new))
-
 #define arch_atomic_andnot arch_atomic_andnot
 
 /*
@@ -209,16 +191,6 @@ static __always_inline long arch_atomic64_dec_if_positive(atomic64_t *v)
 #define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor_release
 #define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
 
-#define arch_atomic64_xchg_relaxed arch_atomic_xchg_relaxed
-#define arch_atomic64_xchg_acquire arch_atomic_xchg_acquire
-#define arch_atomic64_xchg_release arch_atomic_xchg_release
-#define arch_atomic64_xchg arch_atomic_xchg
-
-#define arch_atomic64_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed
-#define arch_atomic64_cmpxchg_acquire arch_atomic_cmpxchg_acquire
-#define arch_atomic64_cmpxchg_release arch_atomic_cmpxchg_release
-#define arch_atomic64_cmpxchg arch_atomic_cmpxchg
-
 #define arch_atomic64_andnot arch_atomic64_andnot
 
 #define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
