x86/cmpxchg: add a locked add() helper
Mostly to remove some conditional code in spinlock.h.

Signed-off-by: Jeremy Fitzhardinge <[email protected]>
jsgf committed Nov 25, 2011
1 parent 4a7f340 commit 3d94ae0
Showing 2 changed files with 43 additions and 14 deletions.
42 changes: 42 additions & 0 deletions arch/x86/include/asm/cmpxchg.h
@@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
         __compiletime_error("Bad argument size for cmpxchg");
 extern void __xadd_wrong_size(void)
         __compiletime_error("Bad argument size for xadd");
+extern void __add_wrong_size(void)
+        __compiletime_error("Bad argument size for add");
 
 /*
  * Constants for operation sizes. On 32-bit, the 64-bit size it set to
@@ -207,4 +209,44 @@ extern void __xadd_wrong_size(void)
 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
 
+#define __add(ptr, inc, lock) \
+({ \
+        __typeof__ (*(ptr)) __ret = (inc); \
+        switch (sizeof(*(ptr))) { \
+        case __X86_CASE_B: \
+                asm volatile (lock "addb %b1, %0\n" \
+                              : "+m" (*(ptr)) : "ri" (inc) \
+                              : "memory", "cc"); \
+                break; \
+        case __X86_CASE_W: \
+                asm volatile (lock "addw %w1, %0\n" \
+                              : "+m" (*(ptr)) : "ri" (inc) \
+                              : "memory", "cc"); \
+                break; \
+        case __X86_CASE_L: \
+                asm volatile (lock "addl %1, %0\n" \
+                              : "+m" (*(ptr)) : "ri" (inc) \
+                              : "memory", "cc"); \
+                break; \
+        case __X86_CASE_Q: \
+                asm volatile (lock "addq %1, %0\n" \
+                              : "+m" (*(ptr)) : "ri" (inc) \
+                              : "memory", "cc"); \
+                break; \
+        default: \
+                __add_wrong_size(); \
+        } \
+        __ret; \
+})
+
+/*
+ * add_*() adds "inc" to "*ptr"
+ *
+ * __add() takes a lock prefix
+ * add_smp() is locked when multiple CPUs are online
+ * add_sync() is always locked
+ */
+#define add_smp(ptr, inc) __add((ptr), (inc), LOCK_PREFIX)
+#define add_sync(ptr, inc) __add((ptr), (inc), "lock; ")
+
 #endif /* ASM_X86_CMPXCHG_H */
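The default: arm leans on the __add_wrong_size() declaration added in the first hunk: the function is declared with __compiletime_error() but never defined, so any call that survives optimization becomes a hard build error carrying that message. Below is a rough, self-contained user-space sketch of the same trick using GCC's underlying error attribute directly; demo_add() and demo_add_wrong_size() are names invented for this sketch, __sync_fetch_and_add() stands in for the inline asm, and it assumes an optimizing build (e.g. -O2, as kernels are built) so untaken switch arms are discarded.

/* Sketch only, not the kernel macro: the wrong-size compile-time error trick. */
#include <stdint.h>

/* Never defined; gcc's "error" attribute (what __compiletime_error() maps to
 * on gcc >= 4.3) rejects any call that is still reachable after optimization. */
extern void demo_add_wrong_size(void)
        __attribute__((error("Bad argument size for add")));

#define demo_add(ptr, inc) \
({ \
        switch (sizeof(*(ptr))) { \
        case 4: \
                __sync_fetch_and_add((uint32_t *)(ptr), (inc)); \
                break; \
        case 8: \
                __sync_fetch_and_add((uint64_t *)(ptr), (inc)); \
                break; \
        default: \
                demo_add_wrong_size(); /* build fails if this is reachable */ \
        } \
})

int main(void)
{
        uint32_t counter = 0;

        demo_add(&counter, 1); /* sizeof == 4: the uint32_t arm is selected */

        /*
         * Calling demo_add() on, say, a 2-byte object would leave the
         * default: call standing and abort the build with the message above.
         */
        return counter == 1 ? 0 : 1;
}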
15 changes: 1 addition & 14 deletions arch/x86/include/asm/spinlock.h
@@ -79,23 +79,10 @@ static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
         return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
 }
 
-#if (NR_CPUS < 256)
 static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
-        asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
-                     : "+m" (lock->head_tail)
-                     :
-                     : "memory", "cc");
+        __add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX);
 }
-#else
-static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
-{
-        asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
-                     : "+m" (lock->head_tail)
-                     :
-                     : "memory", "cc");
-}
-#endif
 
 static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
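This is the conditional code the commit message is about: the ticket head is one byte when NR_CPUS < 256 and two bytes otherwise, so the old unlock had to pick incb or incw with a preprocessor #if, whereas __add()'s switch on sizeof(*(ptr)) now makes that choice at compile time (and on most configurations UNLOCK_LOCK_PREFIX is empty, so the unlock remains an unlocked add). A rough user-space sketch of the same shape follows; MY_NR_CPUS, my_ticket_t, my_spinlock_t, my_add() and my_unlock() are invented for the illustration and omit everything else the real ticket lock does.

/* Sketch only (GCC/Clang on x86): why the ticket-width #if can go away. */
#include <stdint.h>
#include <stdio.h>

#define MY_NR_CPUS 64 /* raise above 255 to switch to 16-bit tickets */

#if (MY_NR_CPUS < 256)
typedef uint8_t  my_ticket_t;
typedef uint16_t my_ticketpair_t;
#else
typedef uint16_t my_ticket_t;
typedef uint32_t my_ticketpair_t;
#endif

typedef struct {
        union {
                my_ticketpair_t head_tail;
                struct { my_ticket_t head, tail; } tickets;
        };
} my_spinlock_t;

/* Same shape as __add(): sizeof() selects addb vs addw, so the caller doesn't. */
#define my_add(ptr, inc, lock) \
({ \
        switch (sizeof(*(ptr))) { \
        case 1: \
                asm volatile(lock "addb %b1, %0" \
                             : "+m" (*(ptr)) : "iq" (inc) \
                             : "memory", "cc"); \
                break; \
        case 2: \
                asm volatile(lock "addw %w1, %0" \
                             : "+m" (*(ptr)) : "ir" (inc) \
                             : "memory", "cc"); \
                break; \
        } \
})

static void my_unlock(my_spinlock_t *lock)
{
        /* one line for either ticket width, like the patched kernel unlock */
        my_add(&lock->tickets.head, 1, "");
}

int main(void)
{
        my_spinlock_t lock = { .tickets = { .head = 0, .tail = 1 } };

        my_unlock(&lock);
        printf("head=%u tail=%u\n",
               (unsigned)lock.tickets.head, (unsigned)lock.tickets.tail);
        return 0;
}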
