locking/atomic: openrisc: move to ARCH_ATOMIC
We'd like all architectures to convert to ARCH_ATOMIC, as once all
architectures are converted it will be possible to make significant
cleanups to the atomics headers, and this will make it much easier to
generically enable atomic functionality (e.g. debug logic in the
instrumented wrappers).

As a step towards that, this patch migrates openrisc to ARCH_ATOMIC. The
arch code provides arch_{atomic,atomic64,xchg,cmpxchg}*(), and common
code wraps these with optional instrumentation to provide the regular
functions.

Signed-off-by: Mark Rutland <[email protected]>
Acked-by: Stafford Horne <[email protected]>
Cc: Boqun Feng <[email protected]>
Cc: Jonas Bonn <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Stefan Kristiansson <[email protected]>
Cc: Will Deacon <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
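
The layering the commit message describes works roughly like this: the architecture supplies arch_atomic_*() (and arch_xchg()/arch_cmpxchg()), and once ARCH_ATOMIC is selected, common code (the generated instrumented wrappers under include/asm-generic/) defines the regular atomic_*() names on top of them, inserting KASAN/KCSAN instrumentation before forwarding. The userspace sketch below only illustrates that split; the names mirror the kernel's, but both bodies are stand-ins (a compiler builtin instead of OpenRISC assembly, a printf instead of the real instrumentation hooks), not the code this patch touches.

/* layering_sketch.c -- hypothetical standalone illustration, not kernel code */
#include <stdio.h>

typedef struct { int counter; } atomic_t;

/* Arch layer: what arch/openrisc now provides as arch_atomic_add().
 * A __atomic builtin stands in for the real inline assembly. */
static inline void arch_atomic_add(int i, atomic_t *v)
{
	__atomic_fetch_add(&v->counter, i, __ATOMIC_RELAXED);
}

/* Generic layer: the instrumented wrapper that common code generates,
 * with a printf standing in for the KASAN/KCSAN hooks. */
static inline void atomic_add(int i, atomic_t *v)
{
	printf("instrumenting %zu-byte atomic access at %p\n",
	       sizeof(*v), (void *)v);
	arch_atomic_add(i, v);
}

int main(void)
{
	atomic_t v = { .counter = 40 };

	atomic_add(2, &v);
	printf("counter = %d\n", v.counter);	/* 42 */
	return 0;
}

Because the wrapping happens once in common code, debug logic can be enabled or changed generically instead of being duplicated in every architecture's atomic.h, which is the cleanup the commit message is working towards.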
Mark Rutland authored and Peter Zijlstra committed May 26, 2021
1 parent 7e517b4 commit 3f1e931
Showing 3 changed files with 26 additions and 21 deletions.
1 change: 1 addition & 0 deletions arch/openrisc/Kconfig
@@ -7,6 +7,7 @@
 config OPENRISC
 	def_bool y
 	select ARCH_32BIT_OFF_T
+	select ARCH_ATOMIC
 	select ARCH_HAS_DMA_SET_UNCACHED
 	select ARCH_HAS_DMA_CLEAR_UNCACHED
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
42 changes: 23 additions & 19 deletions arch/openrisc/include/asm/atomic.h
@@ -13,7 +13,7 @@
 
 /* Atomically perform op with v->counter and i */
 #define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
 { \
 	int tmp; \
 	\
@@ -30,7 +30,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
 
 /* Atomically perform op with v->counter and i, return the result */
 #define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
 { \
 	int tmp; \
 	\
@@ -49,7 +49,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 
 /* Atomically perform op with v->counter and i, return orig v->counter */
 #define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
 { \
 	int tmp, old; \
 	\
@@ -75,6 +75,8 @@ ATOMIC_FETCH_OP(and)
 ATOMIC_FETCH_OP(or)
 ATOMIC_FETCH_OP(xor)
 
+ATOMIC_OP(add)
+ATOMIC_OP(sub)
 ATOMIC_OP(and)
 ATOMIC_OP(or)
 ATOMIC_OP(xor)
@@ -83,24 +85,26 @@ ATOMIC_OP(xor)
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-#define atomic_add_return atomic_add_return
-#define atomic_sub_return atomic_sub_return
-#define atomic_fetch_add atomic_fetch_add
-#define atomic_fetch_sub atomic_fetch_sub
-#define atomic_fetch_and atomic_fetch_and
-#define atomic_fetch_or atomic_fetch_or
-#define atomic_fetch_xor atomic_fetch_xor
-#define atomic_and atomic_and
-#define atomic_or atomic_or
-#define atomic_xor atomic_xor
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_sub_return arch_atomic_sub_return
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+#define arch_atomic_add arch_atomic_add
+#define arch_atomic_sub arch_atomic_sub
+#define arch_atomic_and arch_atomic_and
+#define arch_atomic_or arch_atomic_or
+#define arch_atomic_xor arch_atomic_xor
 
 /*
  * Atomically add a to v->counter as long as v is not already u.
  * Returns the original value at v->counter.
  *
  * This is often used through atomic_inc_not_zero()
  */
-static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int old, tmp;
 
@@ -119,14 +123,14 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 
 	return old;
 }
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
 
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
 
 #include <asm/cmpxchg.h>
 
-#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
+#define arch_atomic_xchg(ptr, v) (arch_xchg(&(ptr)->counter, (v)))
+#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), (old), (new)))
 
 #endif /* __ASM_OPENRISC_ATOMIC_H */
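
With the renames above, each ATOMIC_OP()/ATOMIC_OP_RETURN()/ATOMIC_FETCH_OP() instantiation now stamps out an arch_-prefixed function, and the new ATOMIC_OP(add)/ATOMIC_OP(sub) instantiations plus the #define list tell the generic fallback layer which operations this architecture provides. A rough, self-contained sketch of that stamping pattern; the real bodies, elided in the hunks above, are OpenRISC inline-assembly loops, and the extra c_op parameter and __atomic builtin here are stand-ins so the sketch compiles anywhere:

#include <stdio.h>

typedef struct { int counter; } atomic_t;

/* Same shape as the macro in the diff: ATOMIC_OP(op) now emits
 * arch_atomic_##op(). The builtin body replaces the arch assembly. */
#define ATOMIC_OP(op, c_op) \
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
	__atomic_fetch_##c_op(&v->counter, i, __ATOMIC_RELAXED); \
}

ATOMIC_OP(add, add)
ATOMIC_OP(sub, sub)

int main(void)
{
	atomic_t v = { .counter = 10 };

	arch_atomic_add(5, &v);
	arch_atomic_sub(3, &v);
	printf("counter = %d\n", v.counter);	/* 12 */
	return 0;
}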
4 changes: 2 additions & 2 deletions arch/openrisc/include/asm/cmpxchg.h
@@ -132,7 +132,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	}
 }
 
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
 ({ \
 	(__typeof__(*(ptr))) __cmpxchg((ptr), \
 			(unsigned long)(o), \
@@ -161,7 +161,7 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long with,
 	}
 }
 
-#define xchg(ptr, with) \
+#define arch_xchg(ptr, with) \
 ({ \
 	(__typeof__(*(ptr))) __xchg((ptr), \
 			(unsigned long)(with), \
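
The cmpxchg.h change is the same move one level down: the architecture now exports arch_cmpxchg()/arch_xchg(), and generic code supplies the instrumented cmpxchg()/xchg() wrappers that the rest of the kernel keeps calling. A minimal hypothetical sketch of that split, with a GCC builtin standing in for __cmpxchg() and a printf standing in for the instrumentation:

/* cmpxchg_sketch.c -- hypothetical, illustrative only */
#include <stdio.h>

/* Arch layer: stand-in for the renamed arch_cmpxchg(); the real one
 * dispatches to __cmpxchg() as shown in the hunk above. */
#define arch_cmpxchg(ptr, o, n) \
	__sync_val_compare_and_swap((ptr), (o), (n))

/* Generic layer: the wrapper kernel code keeps calling; a printf stands
 * in for the instrumentation hook. */
#define cmpxchg(ptr, o, n) \
({ \
	printf("instrumenting cmpxchg at %p\n", (void *)(ptr)); \
	arch_cmpxchg((ptr), (o), (n)); \
})

int main(void)
{
	int x = 1;
	int old = cmpxchg(&x, 1, 2);	/* succeeds: returns 1, x becomes 2 */

	printf("old = %d, x = %d\n", old, x);
	return 0;
}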
