locking: Convert __raw_spin* functions to arch_spin*
Name space cleanup. No functional change.

Signed-off-by: Thomas Gleixner <[email protected]>
Acked-by: Peter Zijlstra <[email protected]>
Acked-by: David S. Miller <[email protected]>
Acked-by: Ingo Molnar <[email protected]>
Cc: [email protected]
KAGA-KOKO committed on Dec 14, 2009
Parent: edc35bd · Commit: 0199c4e
Showing 37 changed files with 319 additions and 319 deletions.
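
For orientation, here is a minimal usage sketch of the renamed API (an editorial illustration, not part of the commit: the demo caller is hypothetical, the arch_spin_* names and arch_spinlock_t are taken from the hunks below, and __ARCH_SPIN_LOCK_UNLOCKED is assumed to come from the parent commit edc35bd):

/* Hypothetical caller, post-rename; this commit changes only the names,
 * not the semantics, of the old __raw_spin_* entry points. */
#include <linux/spinlock.h>

static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void demo(void)
{
        arch_spin_lock(&demo_lock);             /* was __raw_spin_lock()    */
        /* ... critical section ... */
        arch_spin_unlock(&demo_lock);           /* was __raw_spin_unlock()  */

        if (arch_spin_trylock(&demo_lock))      /* was __raw_spin_trylock() */
                arch_spin_unlock(&demo_lock);
}

Note that only the spinlock side is renamed: the rwlock entry points (__raw_read_lock(), __raw_write_lock(), ...) and raw_rwlock_t keep their old names in this commit.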
18 changes: 9 additions & 9 deletions arch/alpha/include/asm/spinlock.h
@@ -12,18 +12,18 @@
  * We make no fairness assumptions. They have a cost.
  */

-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_spin_is_locked(x) ((x)->lock != 0)
-#define __raw_spin_unlock_wait(x) \
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_is_locked(x) ((x)->lock != 0)
+#define arch_spin_unlock_wait(x) \
                 do { cpu_relax(); } while ((x)->lock)

-static inline void __raw_spin_unlock(arch_spinlock_t * lock)
+static inline void arch_spin_unlock(arch_spinlock_t * lock)
 {
         mb();
         lock->lock = 0;
 }

-static inline void __raw_spin_lock(arch_spinlock_t * lock)
+static inline void arch_spin_lock(arch_spinlock_t * lock)
 {
         long tmp;

@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(arch_spinlock_t * lock)
         : "m"(lock->lock) : "memory");
 }

-static inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
         return !test_and_set_bit(0, &lock->lock);
 }
@@ -169,8 +169,8 @@ static inline void __raw_write_unlock(raw_rwlock_t * lock)
 #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
 #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)

-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()

 #endif /* _ALPHA_SPINLOCK_H */
20 changes: 10 additions & 10 deletions arch/arm/include/asm/spinlock.h
@@ -17,13 +17,13 @@
  * Locked value: 1
  */

-#define __raw_spin_is_locked(x) ((x)->lock != 0)
-#define __raw_spin_unlock_wait(lock) \
-        do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define arch_spin_is_locked(x) ((x)->lock != 0)
+#define arch_spin_unlock_wait(lock) \
+        do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

-static inline void __raw_spin_lock(arch_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
         unsigned long tmp;

@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock)
         smp_mb();
 }

-static inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
         unsigned long tmp;

@@ -63,7 +63,7 @@ static inline int __raw_spin_trylock(arch_spinlock_t *lock)
         }
 }

-static inline void __raw_spin_unlock(arch_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
         smp_mb();

@@ -220,8 +220,8 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
 #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
 #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)

-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()

 #endif /* __ASM_SPINLOCK_H */
20 changes: 10 additions & 10 deletions arch/blackfin/include/asm/spinlock.h
@@ -24,31 +24,31 @@ asmlinkage void __raw_write_lock_asm(volatile int *ptr);
 asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
 asmlinkage void __raw_write_unlock_asm(volatile int *ptr);

-static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
         return __raw_spin_is_locked_asm(&lock->lock);
 }

-static inline void __raw_spin_lock(arch_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
         __raw_spin_lock_asm(&lock->lock);
 }

-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

-static inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
         return __raw_spin_trylock_asm(&lock->lock);
 }

-static inline void __raw_spin_unlock(arch_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
         __raw_spin_unlock_asm(&lock->lock);
 }

-static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
-        while (__raw_spin_is_locked(lock))
+        while (arch_spin_is_locked(lock))
                 cpu_relax();
 }

@@ -92,9 +92,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
         __raw_write_unlock_asm(&rw->lock);
 }

-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()

 #endif
46 changes: 23 additions & 23 deletions arch/cris/include/arch-v32/arch/spinlock.h
@@ -9,39 +9,39 @@ extern void cris_spin_unlock(void *l, int val);
 extern void cris_spin_lock(void *l);
 extern int cris_spin_trylock(void *l);

-static inline int __raw_spin_is_locked(arch_spinlock_t *x)
+static inline int arch_spin_is_locked(arch_spinlock_t *x)
 {
         return *(volatile signed char *)(&(x)->slock) <= 0;
 }

-static inline void __raw_spin_unlock(arch_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
         __asm__ volatile ("move.d %1,%0" \
                           : "=m" (lock->slock) \
                           : "r" (1) \
                           : "memory");
 }

-static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
-        while (__raw_spin_is_locked(lock))
+        while (arch_spin_is_locked(lock))
                 cpu_relax();
 }

-static inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
         return cris_spin_trylock((void *)&lock->slock);
 }

-static inline void __raw_spin_lock(arch_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
         cris_spin_lock((void *)&lock->slock);
 }

 static inline void
-__raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
+arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
-        __raw_spin_lock(lock);
+        arch_spin_lock(lock);
 }

 /*
@@ -68,64 +68,64 @@ static inline int __raw_write_can_lock(raw_rwlock_t *x)

 static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
-        __raw_spin_lock(&rw->slock);
+        arch_spin_lock(&rw->slock);
         while (rw->lock == 0);
         rw->lock--;
-        __raw_spin_unlock(&rw->slock);
+        arch_spin_unlock(&rw->slock);
 }

 static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
-        __raw_spin_lock(&rw->slock);
+        arch_spin_lock(&rw->slock);
         while (rw->lock != RW_LOCK_BIAS);
         rw->lock = 0;
-        __raw_spin_unlock(&rw->slock);
+        arch_spin_unlock(&rw->slock);
 }

 static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
-        __raw_spin_lock(&rw->slock);
+        arch_spin_lock(&rw->slock);
         rw->lock++;
-        __raw_spin_unlock(&rw->slock);
+        arch_spin_unlock(&rw->slock);
 }

 static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
-        __raw_spin_lock(&rw->slock);
+        arch_spin_lock(&rw->slock);
         while (rw->lock != RW_LOCK_BIAS);
         rw->lock = RW_LOCK_BIAS;
-        __raw_spin_unlock(&rw->slock);
+        arch_spin_unlock(&rw->slock);
 }

 static inline int __raw_read_trylock(raw_rwlock_t *rw)
 {
         int ret = 0;
-        __raw_spin_lock(&rw->slock);
+        arch_spin_lock(&rw->slock);
         if (rw->lock != 0) {
                 rw->lock--;
                 ret = 1;
         }
-        __raw_spin_unlock(&rw->slock);
+        arch_spin_unlock(&rw->slock);
         return ret;
 }

 static inline int __raw_write_trylock(raw_rwlock_t *rw)
 {
         int ret = 0;
-        __raw_spin_lock(&rw->slock);
+        arch_spin_lock(&rw->slock);
         if (rw->lock == RW_LOCK_BIAS) {
                 rw->lock = 0;
                 ret = 1;
         }
-        __raw_spin_unlock(&rw->slock);
+        arch_spin_unlock(&rw->slock);
         return 1;
 }

 #define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
 #define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)

-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()

 #endif /* __ASM_ARCH_SPINLOCK_H */
2 changes: 1 addition & 1 deletion arch/ia64/include/asm/bitops.h
@@ -127,7 +127,7 @@ clear_bit_unlock (int nr, volatile void *addr)
  * @addr: Address to start counting from
  *
  * Similarly to clear_bit_unlock, the implementation uses a store
- * with release semantics. See also __raw_spin_unlock().
+ * with release semantics. See also arch_spin_unlock().
  */
 static __inline__ void
 __clear_bit_unlock(int nr, void *addr)
26 changes: 13 additions & 13 deletions arch/ia64/include/asm/spinlock.h
@@ -17,7 +17,7 @@
 #include <asm/intrinsics.h>
 #include <asm/system.h>

-#define __raw_spin_lock_init(x) ((x)->lock = 0)
+#define arch_spin_lock_init(x) ((x)->lock = 0)

 /*
  * Ticket locks are conceptually two parts, one indicating the current head of
@@ -103,39 +103,39 @@ static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
         return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
 }

-static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
         return __ticket_spin_is_locked(lock);
 }

-static inline int __raw_spin_is_contended(arch_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
         return __ticket_spin_is_contended(lock);
 }
-#define __raw_spin_is_contended __raw_spin_is_contended
+#define arch_spin_is_contended arch_spin_is_contended

-static __always_inline void __raw_spin_lock(arch_spinlock_t *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
 {
         __ticket_spin_lock(lock);
 }

-static __always_inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
         return __ticket_spin_trylock(lock);
 }

-static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
         __ticket_spin_unlock(lock);
 }

-static __always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock,
+static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
                                                   unsigned long flags)
 {
-        __raw_spin_lock(lock);
+        arch_spin_lock(lock);
 }

-static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
         __ticket_spin_unlock_wait(lock);
 }
@@ -285,8 +285,8 @@ static inline int __raw_read_trylock(raw_rwlock_t *x)
         return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
 }

-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()

 #endif /* _ASM_IA64_SPINLOCK_H */