Skip to content

Commit

Permalink
locking/atomic/x86: Use 's64 *' for 'old' argument of atomic64_try_cmpxchg()
Browse files Browse the repository at this point in the history

atomic64_try_cmpxchg() declares its 'old' argument as 'long *',
which makes it impossible to use in portable code.
If a caller passes 'long *', it becomes 32 bits wide on 32-bit arches.
If a caller passes 's64 *', it does not compile on x86_64.

Change the type of the 'old' argument to 's64 *' instead.

Signed-off-by: Dmitry Vyukov <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Andrey Ryabinin <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: [email protected]
Cc: [email protected]
Link: http://lkml.kernel.org/r/fa6f77f2375150d26ea796a77e8b59195fd2ab13.1497690003.git.dvyukov@google.com
Signed-off-by: Ingo Molnar <[email protected]>
  • Loading branch information
dvyukov authored and Ingo Molnar committed Jun 28, 2017
1 parent ba1c9f8 commit 007d185
Show file tree
Hide file tree
Showing 2 changed files with 7 additions and 7 deletions.
12 changes: 6 additions & 6 deletions arch/x86/include/asm/atomic64_64.h
Original file line number Diff line number Diff line change
Expand Up @@ -177,7 +177,7 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
}

#define atomic64_try_cmpxchg atomic64_try_cmpxchg
static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, long *old, long new)
/*
 * atomic64_try_cmpxchg - atomic compare-and-exchange on v->counter.
 *
 * Returns true if v->counter equalled *old and was replaced by 'new'.
 * On failure it returns false; presumably *old is then updated to the
 * value observed in v->counter (the usual try_cmpxchg() contract) --
 * confirm against __raw_try_cmpxchg() in arch/x86/include/asm/cmpxchg.h.
 *
 * 'old' is 's64 *' rather than 'long *' so that portable callers can
 * pass an 's64' on both 32-bit and 64-bit architectures.
 */
static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, long new)
{
	return try_cmpxchg(&v->counter, old, new);
}
Expand All @@ -198,7 +198,7 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
*/
static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
{
long c = atomic64_read(v);
s64 c = atomic64_read(v);
do {
if (unlikely(c == u))
return false;
Expand All @@ -217,7 +217,7 @@ static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
*/
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
long dec, c = atomic64_read(v);
s64 dec, c = atomic64_read(v);
do {
dec = c - 1;
if (unlikely(dec < 0))
Expand All @@ -236,7 +236,7 @@ static inline void atomic64_and(long i, atomic64_t *v)

static inline long atomic64_fetch_and(long i, atomic64_t *v)
{
long val = atomic64_read(v);
s64 val = atomic64_read(v);

do {
} while (!atomic64_try_cmpxchg(v, &val, val & i));
Expand All @@ -253,7 +253,7 @@ static inline void atomic64_or(long i, atomic64_t *v)

static inline long atomic64_fetch_or(long i, atomic64_t *v)
{
long val = atomic64_read(v);
s64 val = atomic64_read(v);

do {
} while (!atomic64_try_cmpxchg(v, &val, val | i));
Expand All @@ -270,7 +270,7 @@ static inline void atomic64_xor(long i, atomic64_t *v)

static inline long atomic64_fetch_xor(long i, atomic64_t *v)
{
long val = atomic64_read(v);
s64 val = atomic64_read(v);

do {
} while (!atomic64_try_cmpxchg(v, &val, val ^ i));
Expand Down
2 changes: 1 addition & 1 deletion arch/x86/include/asm/cmpxchg.h
Original file line number Diff line number Diff line change
Expand Up @@ -157,7 +157,7 @@ extern void __add_wrong_size(void)
#define __raw_try_cmpxchg(_ptr, _pold, _new, size, lock) \
({ \
bool success; \
__typeof__(_ptr) _old = (_pold); \
__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
__typeof__(*(_ptr)) __old = *_old; \
__typeof__(*(_ptr)) __new = (_new); \
switch (size) { \
Expand Down

0 comments on commit 007d185

Please sign in to comment.