Commit 48fc82c

Merge tag 'locking-core-2024-05-13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:

 - Over a dozen code generation micro-optimizations for the atomic
   and spinlock code

 - Add more __ro_after_init attributes (a brief usage sketch follows this list)

 - Robustify the lockevent_*() macros
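
To illustrate the __ro_after_init item above: a minimal, hypothetical sketch of how the attribute is typically used in kernel code. This is not code from this merge; the my_feature names are invented, and the attribute itself comes from <linux/cache.h>.

    /* Hypothetical example; requires <linux/init.h> and <linux/cache.h>. */
    static bool my_feature_enabled __ro_after_init;

    static int __init my_feature_setup(char *str)
    {
            /*
             * __ro_after_init places the flag in a section that the kernel
             * write-protects once boot-time initialization completes, so
             * this is the last point at which it may legally be written.
             */
            my_feature_enabled = true;
            return 1;
    }
    __setup("my_feature", my_feature_setup);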

* tag 'locking-core-2024-05-13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/pvqspinlock/x86: Use _Q_LOCKED_VAL in PV_UNLOCK_ASM macro
  locking/qspinlock/x86: Micro-optimize virt_spin_lock()
  locking/atomic/x86: Merge __arch{,_try}_cmpxchg64_emu_local() with __arch{,_try}_cmpxchg64_emu()
  locking/atomic/x86: Introduce arch_try_cmpxchg64_local()
  locking/pvqspinlock/x86: Remove redundant CMP after CMPXCHG in __raw_callee_save___pv_queued_spin_unlock()
  locking/pvqspinlock: Use try_cmpxchg() in qspinlock_paravirt.h
  locking/pvqspinlock: Use try_cmpxchg_acquire() in trylock_clear_pending()
  locking/qspinlock: Use atomic_try_cmpxchg_relaxed() in xchg_tail()
  locking/atomic/x86: Define arch_atomic_sub() family using arch_atomic_add() functions
  locking/atomic/x86: Rewrite x86_32 arch_atomic64_{,fetch}_{and,or,xor}() functions
  locking/atomic/x86: Introduce arch_atomic64_read_nonatomic() to x86_32
  locking/atomic/x86: Introduce arch_atomic64_try_cmpxchg() to x86_32
  locking/atomic/x86: Introduce arch_try_cmpxchg64() for !CONFIG_X86_CMPXCHG64
  locking/atomic/x86: Modernize x86_32 arch_{,try_}_cmpxchg64{,_local}()
  locking/atomic/x86: Correct the definition of __arch_try_cmpxchg128()
  x86/tsc: Make __use_tsc __ro_after_init
  x86/kvm: Make kvm_async_pf_enabled __ro_after_init
  context_tracking: Make context_tracking_key __ro_after_init
  jump_label,module: Don't alloc static_key_mod for __ro_after_init keys
  locking/qspinlock: Always evaluate lockevent* non-event parameter once
torvalds committed May 14, 2024
2 parents a7c840b + 532453e commit 48fc82c
Showing 17 changed files with 295 additions and 175 deletions.
12 changes: 2 additions & 10 deletions arch/x86/include/asm/atomic.h
@@ -86,23 +86,15 @@ static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
 }
 #define arch_atomic_add_return arch_atomic_add_return

-static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
-{
-	return arch_atomic_add_return(-i, v);
-}
-#define arch_atomic_sub_return arch_atomic_sub_return
+#define arch_atomic_sub_return(i, v) arch_atomic_add_return(-(i), v)

 static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
 {
 	return xadd(&v->counter, i);
 }
 #define arch_atomic_fetch_add arch_atomic_fetch_add

-static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
-{
-	return xadd(&v->counter, -i);
-}
-#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub(i, v) arch_atomic_fetch_add(-(i), v)

 static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
 {
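
A small point about the replacement macros above: the argument is negated as -(i) rather than -i. The following illustration uses hypothetical add_return()/sub_return() names (not kernel code) to show why the parentheses matter when a caller passes an expression:

    /* Hypothetical macros, shown only to illustrate the parenthesization. */
    #define sub_return_unsafe(i, v)  add_return(-i, v)     /* breaks on expressions */
    #define sub_return_safe(i, v)    add_return(-(i), v)   /* the form used above   */

    /*
     * With the argument a - b:
     *   sub_return_unsafe(a - b, v) expands to add_return(-a - b, v),
     *   i.e. (-a) - b, which negates only 'a';
     *   sub_return_safe(a - b, v)   expands to add_return(-(a - b), v),
     *   i.e. b - a, the intended negation of the whole expression.
     */
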
79 changes: 52 additions & 27 deletions arch/x86/include/asm/atomic64_32.h
@@ -14,6 +14,32 @@ typedef struct {

 #define ATOMIC64_INIT(val)	{ (val) }

+/*
+ * Read an atomic64_t non-atomically.
+ *
+ * This is intended to be used in cases where a subsequent atomic operation
+ * will handle the torn value, and can be used to prime the first iteration
+ * of unconditional try_cmpxchg() loops, e.g.:
+ *
+ *   s64 val = arch_atomic64_read_nonatomic(v);
+ *   do { } while (!arch_atomic64_try_cmpxchg(v, &val, val OP i));
+ *
+ * This is NOT safe to use where the value is not always checked by a
+ * subsequent atomic operation, such as in conditional try_cmpxchg() loops
+ * that can break before the atomic operation, e.g.:
+ *
+ *   s64 val = arch_atomic64_read_nonatomic(v);
+ *   do {
+ *       if (condition(val))
+ *           break;
+ *   } while (!arch_atomic64_try_cmpxchg(v, &val, val OP i));
+ */
+static __always_inline s64 arch_atomic64_read_nonatomic(const atomic64_t *v)
+{
+	/* See comment in arch_atomic_read(). */
+	return __READ_ONCE(v->counter);
+}
+
 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
 #ifndef ATOMIC64_EXPORT
 #define ATOMIC64_DECL_ONE	__ATOMIC64_DECL
@@ -61,12 +87,18 @@ ATOMIC64_DECL(add_unless);
 #undef __ATOMIC64_DECL
 #undef ATOMIC64_EXPORT

-static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
+static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
 {
-	return arch_cmpxchg64(&v->counter, o, n);
+	return arch_cmpxchg64(&v->counter, old, new);
 }
 #define arch_atomic64_cmpxchg arch_atomic64_cmpxchg

+static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+	return arch_try_cmpxchg64(&v->counter, old, new);
+}
+#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
+
 static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
 {
 	s64 o;
@@ -195,69 +227,62 @@ static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)

 static __always_inline void arch_atomic64_and(s64 i, atomic64_t *v)
 {
-	s64 old, c = 0;
+	s64 val = arch_atomic64_read_nonatomic(v);

-	while ((old = arch_atomic64_cmpxchg(v, c, c & i)) != c)
-		c = old;
+	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
 }

 static __always_inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
 {
-	s64 old, c = 0;
+	s64 val = arch_atomic64_read_nonatomic(v);

-	while ((old = arch_atomic64_cmpxchg(v, c, c & i)) != c)
-		c = old;
+	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val & i));

-	return old;
+	return val;
 }
 #define arch_atomic64_fetch_and arch_atomic64_fetch_and

 static __always_inline void arch_atomic64_or(s64 i, atomic64_t *v)
 {
-	s64 old, c = 0;
+	s64 val = arch_atomic64_read_nonatomic(v);

-	while ((old = arch_atomic64_cmpxchg(v, c, c | i)) != c)
-		c = old;
+	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
 }

 static __always_inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
 {
-	s64 old, c = 0;
+	s64 val = arch_atomic64_read_nonatomic(v);

-	while ((old = arch_atomic64_cmpxchg(v, c, c | i)) != c)
-		c = old;
+	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val | i));

-	return old;
+	return val;
 }
 #define arch_atomic64_fetch_or arch_atomic64_fetch_or

 static __always_inline void arch_atomic64_xor(s64 i, atomic64_t *v)
 {
-	s64 old, c = 0;
+	s64 val = arch_atomic64_read_nonatomic(v);

-	while ((old = arch_atomic64_cmpxchg(v, c, c ^ i)) != c)
-		c = old;
+	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
 }

 static __always_inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
 {
-	s64 old, c = 0;
+	s64 val = arch_atomic64_read_nonatomic(v);

-	while ((old = arch_atomic64_cmpxchg(v, c, c ^ i)) != c)
-		c = old;
+	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));

-	return old;
+	return val;
 }
 #define arch_atomic64_fetch_xor arch_atomic64_fetch_xor

 static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
 {
-	s64 old, c = 0;
+	s64 val = arch_atomic64_read_nonatomic(v);

-	while ((old = arch_atomic64_cmpxchg(v, c, c + i)) != c)
-		c = old;
+	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val + i));

-	return old;
+	return val;
 }
 #define arch_atomic64_fetch_add arch_atomic64_fetch_add

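The rewritten loops above all use the same try_cmpxchg() idiom: read the value once (on 32-bit the read may be torn, which is harmless because the cmpxchg validates it), then let the compare-and-exchange both detect a stale value and hand back the freshly observed one, so no separate reload or compare is needed. A self-contained user-space sketch of that pattern, using the GCC/Clang __atomic builtins instead of the kernel primitives (illustrative only):

    #include <stdint.h>

    /* Illustrative analogue of arch_atomic64_fetch_and() above. */
    static inline int64_t fetch_and_sketch(int64_t *p, int64_t mask)
    {
            int64_t val = *p;       /* plain read just primes the loop */

            /*
             * On failure, __atomic_compare_exchange_n() stores the value it
             * actually found in *p back into 'val', so no reload or separate
             * comparison is needed before retrying.
             */
            while (!__atomic_compare_exchange_n(p, &val, val & mask,
                                                0 /* strong */,
                                                __ATOMIC_SEQ_CST,
                                                __ATOMIC_RELAXED))
                    ;

            return val;     /* value observed just before the update */
    }
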
12 changes: 2 additions & 10 deletions arch/x86/include/asm/atomic64_64.h
@@ -80,23 +80,15 @@ static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
 }
 #define arch_atomic64_add_return arch_atomic64_add_return

-static __always_inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
-{
-	return arch_atomic64_add_return(-i, v);
-}
-#define arch_atomic64_sub_return arch_atomic64_sub_return
+#define arch_atomic64_sub_return(i, v) arch_atomic64_add_return(-(i), v)

 static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
 {
 	return xadd(&v->counter, i);
 }
 #define arch_atomic64_fetch_add arch_atomic64_fetch_add

-static __always_inline s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
-{
-	return xadd(&v->counter, -i);
-}
-#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub(i, v) arch_atomic64_fetch_add(-(i), v)

 static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
 {

(Diffs for the remaining 14 changed files are not shown here.)
