forked from torvalds/linux
-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Blackfin: SMP: convert to common asm-generic/atomic.h
Now that common code supports SMP systems, switch our SMP atomic logic over to it to avoid code duplication. Signed-off-by: Mike Frysinger <[email protected]>
- Loading branch information
Showing 1 changed file with 10 additions and 94 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,117 +1,33 @@ | ||
/* | ||
* Copyright 2004-2009 Analog Devices Inc. | ||
* Copyright 2004-2011 Analog Devices Inc. | ||
* | ||
* Licensed under the GPL-2 or later. | ||
*/ | ||
|
||
#ifndef __ARCH_BLACKFIN_ATOMIC__ | ||
#define __ARCH_BLACKFIN_ATOMIC__ | ||
|
||
#ifndef CONFIG_SMP | ||
# include <asm-generic/atomic.h> | ||
#else | ||
#ifdef CONFIG_SMP | ||
|
||
#include <linux/types.h> | ||
#include <asm/system.h> /* local_irq_XXX() */ | ||
|
||
/* | ||
* Atomic operations that C can't guarantee us. Useful for | ||
* resource counting etc.. | ||
*/ | ||
|
||
#define ATOMIC_INIT(i) { (i) } | ||
#define atomic_set(v, i) (((v)->counter) = i) | ||
|
||
#define atomic_read(v) __raw_uncached_fetch_asm(&(v)->counter) | ||
#include <linux/linkage.h> | ||
|
||
asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr); | ||
|
||
asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value); | ||
|
||
asmlinkage int __raw_atomic_clear_asm(volatile int *ptr, int value); | ||
|
||
asmlinkage int __raw_atomic_set_asm(volatile int *ptr, int value); | ||
|
||
asmlinkage int __raw_atomic_xor_asm(volatile int *ptr, int value); | ||
|
||
asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value); | ||
|
||
/*
 * atomic_add - atomically add @i to @v->counter
 *
 * Delegates to the SMP-safe assembly helper; the updated value the
 * helper returns is deliberately discarded.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	(void)__raw_atomic_update_asm(&v->counter, i);
}
|
||
/*
 * atomic_sub - atomically subtract @i from @v->counter
 *
 * Implemented as an addition of the negated delta through the same
 * assembly helper used for atomic_add().
 */
static inline void atomic_sub(int i, atomic_t *v)
{
	int delta = -i;

	(void)__raw_atomic_update_asm(&v->counter, delta);
}
|
||
/*
 * atomic_add_return - atomically add @i to @v->counter and return the
 * resulting counter value (as produced by the assembly helper).
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	int *counter = &v->counter;

	return __raw_atomic_update_asm(counter, i);
}
|
||
/*
 * atomic_sub_return - atomically subtract @i from @v->counter and
 * return the resulting counter value (negated-add via the helper).
 */
static inline int atomic_sub_return(int i, atomic_t *v)
{
	int *counter = &v->counter;

	return __raw_atomic_update_asm(counter, -i);
}
#define atomic_read(v) __raw_uncached_fetch_asm(&(v)->counter) | ||
|
||
static inline void atomic_inc(volatile atomic_t *v) | ||
{ | ||
__raw_atomic_update_asm(&v->counter, 1); | ||
} | ||
|
||
static inline void atomic_dec(volatile atomic_t *v) | ||
{ | ||
__raw_atomic_update_asm(&v->counter, -1); | ||
} | ||
|
||
/*
 * atomic_clear_mask - atomically clear the bits of @mask in @v->counter
 */
static inline void atomic_clear_mask(int mask, atomic_t *v)
{
	int *counter = &v->counter;

	__raw_atomic_clear_asm(counter, mask);
}
|
||
/*
 * atomic_set_mask - atomically set the bits of @mask in @v->counter
 */
static inline void atomic_set_mask(int mask, atomic_t *v)
{
	int *counter = &v->counter;

	__raw_atomic_set_asm(counter, mask);
}
|
||
/* Atomic operations are already serializing */ | ||
#define smp_mb__before_atomic_dec() barrier() | ||
#define smp_mb__after_atomic_dec() barrier() | ||
#define smp_mb__before_atomic_inc() barrier() | ||
#define smp_mb__after_atomic_inc() barrier() | ||
|
||
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) | ||
#define atomic_dec_return(v) atomic_sub_return(1,(v)) | ||
#define atomic_inc_return(v) atomic_add_return(1,(v)) | ||
|
||
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) | ||
#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) | ||
|
||
#define __atomic_add_unless(v, a, u) \ | ||
({ \ | ||
int c, old; \ | ||
c = atomic_read(v); \ | ||
while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ | ||
c = old; \ | ||
c; \ | ||
}) | ||
|
||
/* | ||
* atomic_inc_and_test - increment and test | ||
* @v: pointer of type atomic_t | ||
* | ||
* Atomically increments @v by 1 | ||
* and returns true if the result is zero, or false for all | ||
* other cases. | ||
*/ | ||
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) | ||
|
||
#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0) | ||
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) | ||
#define atomic_add_return(i, v) __raw_atomic_update_asm(&(v)->counter, i) | ||
#define atomic_sub_return(i, v) __raw_atomic_update_asm(&(v)->counter, -(i)) | ||
|
||
#define atomic_clear_mask(m, v) __raw_atomic_clear_asm(&(v)->counter, m) | ||
#define atomic_set_mask(m, v) __raw_atomic_set_asm(&(v)->counter, m) | ||
|
||
#endif | ||
|
||
#include <asm-generic/atomic.h> | ||
|
||
#endif |