locking: Convert raw_spinlock to arch_spinlock
The raw_spin* namespace was taken by lockdep for the architecture-specific
implementations. raw_spin_* would be the ideal namespace for the spinlocks
which are not converted to sleeping locks in preempt-rt.

Linus suggested converting the raw_ locks to arch_ locks and cleaning up
the namespace instead of using an artificial name like core_spin,
atomic_spin or whatever.

No functional change.

Signed-off-by: Thomas Gleixner <[email protected]>
Acked-by: Peter Zijlstra <[email protected]>
Acked-by: David S. Miller <[email protected]>
Acked-by: Ingo Molnar <[email protected]>
Cc: [email protected]
KAGA-KOKO committed Dec 14, 2009
1 parent 6b6b479 commit 445c895
Showing 51 changed files with 164 additions and 164 deletions.
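
To make the mechanical rename easier to follow, here is a minimal sketch of the naming layers involved. The arch_spinlock_t definition is simplified from the spinlock_types.h hunks in this diff; the wrapper type at the end is purely hypothetical, illustrating where the freed-up raw_ prefix is headed on preempt-rt, and is not part of this commit.

/* Sketch only -- simplified from this diff, not the kernel's real headers. */

/* The lowest-level, per-architecture lock type. Before this commit it
 * was named raw_spinlock_t; after it, arch_spinlock_t. */
typedef struct {
        volatile unsigned int lock;     /* 0 = unlocked on alpha, ARM, ... */
} arch_spinlock_t;

#define __RAW_SPIN_LOCK_UNLOCKED        { 0 }

/* Hypothetical illustration: the freed raw_ prefix can later name the
 * lock class that keeps spinning (never sleeps) on preempt-rt, wrapping
 * the architecture type. */
typedef struct {
        arch_spinlock_t raw_lock;       /* the arch implementation */
        /* lockdep bookkeeping fields would go here */
} raw_spinlock_illustration_t;
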
6 changes: 3 additions & 3 deletions arch/alpha/include/asm/spinlock.h
@@ -17,13 +17,13 @@
#define __raw_spin_unlock_wait(x) \
do { cpu_relax(); } while ((x)->lock)

-static inline void __raw_spin_unlock(raw_spinlock_t * lock)
+static inline void __raw_spin_unlock(arch_spinlock_t * lock)
{
mb();
lock->lock = 0;
}

-static inline void __raw_spin_lock(raw_spinlock_t * lock)
+static inline void __raw_spin_lock(arch_spinlock_t * lock)
{
long tmp;

@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(raw_spinlock_t * lock)
: "m"(lock->lock) : "memory");
}

-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
return !test_and_set_bit(0, &lock->lock);
}
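
The alpha code above is a plain test-and-set lock: __raw_spin_trylock() sets bit 0 and succeeds if it was previously clear, and __raw_spin_unlock() is a full barrier followed by a store of 0. A rough user-space analogue in C11 atomics, with all toy_* names invented for illustration:

#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
        atomic_uint lock;               /* 0 = unlocked, 1 = locked */
} toy_arch_spinlock_t;

static inline bool toy_spin_trylock(toy_arch_spinlock_t *l)
{
        /* like !test_and_set_bit(0, &lock->lock): true if we got it */
        return atomic_exchange_explicit(&l->lock, 1,
                                        memory_order_acquire) == 0;
}

static inline void toy_spin_lock(toy_arch_spinlock_t *l)
{
        while (!toy_spin_trylock(l))
                ;       /* the kernel spins with cpu_relax() here */
}

static inline void toy_spin_unlock(toy_arch_spinlock_t *l)
{
        /* mb(); lock->lock = 0; collapsed into one release store */
        atomic_store_explicit(&l->lock, 0, memory_order_release);
}
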
2 changes: 1 addition & 1 deletion arch/alpha/include/asm/spinlock_types.h
@@ -7,7 +7,7 @@

typedef struct {
volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;

#define __RAW_SPIN_LOCK_UNLOCKED { 0 }

6 changes: 3 additions & 3 deletions arch/arm/include/asm/spinlock.h
@@ -23,7 +23,7 @@

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;

@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
smp_mb();
}

-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
unsigned long tmp;

@@ -63,7 +63,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
}
}

-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
smp_mb();

2 changes: 1 addition & 1 deletion arch/arm/include/asm/spinlock_types.h
@@ -7,7 +7,7 @@

typedef struct {
volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;

#define __RAW_SPIN_LOCK_UNLOCKED { 0 }

10 changes: 5 additions & 5 deletions arch/blackfin/include/asm/spinlock.h
@@ -24,29 +24,29 @@ asmlinkage void __raw_write_lock_asm(volatile int *ptr);
asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
asmlinkage void __raw_write_unlock_asm(volatile int *ptr);

-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
{
return __raw_spin_is_locked_asm(&lock->lock);
}

-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
{
__raw_spin_lock_asm(&lock->lock);
}

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
return __raw_spin_trylock_asm(&lock->lock);
}

-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
__raw_spin_unlock_asm(&lock->lock);
}

-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
{
while (__raw_spin_is_locked(lock))
cpu_relax();
2 changes: 1 addition & 1 deletion arch/blackfin/include/asm/spinlock_types.h
@@ -15,7 +15,7 @@

typedef struct {
volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;

#define __RAW_SPIN_LOCK_UNLOCKED { 0 }

12 changes: 6 additions & 6 deletions arch/cris/include/arch-v32/arch/spinlock.h
@@ -9,37 +9,37 @@ extern void cris_spin_unlock(void *l, int val);
extern void cris_spin_lock(void *l);
extern int cris_spin_trylock(void *l);

-static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+static inline int __raw_spin_is_locked(arch_spinlock_t *x)
{
return *(volatile signed char *)(&(x)->slock) <= 0;
}

-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
__asm__ volatile ("move.d %1,%0" \
: "=m" (lock->slock) \
: "r" (1) \
: "memory");
}

-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
{
while (__raw_spin_is_locked(lock))
cpu_relax();
}

-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
return cris_spin_trylock((void *)&lock->slock);
}

-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
{
cris_spin_lock((void *)&lock->slock);
}

static inline void
-__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+__raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
__raw_spin_lock(lock);
}
26 changes: 13 additions & 13 deletions arch/ia64/include/asm/spinlock.h
@@ -38,7 +38,7 @@
#define TICKET_BITS 15
#define TICKET_MASK ((1 << TICKET_BITS) - 1)

-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
int *p = (int *)&lock->lock, ticket, serve;

@@ -58,7 +58,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
}
}

-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
int tmp = ACCESS_ONCE(lock->lock);

@@ -67,15 +67,15 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
return 0;
}

-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;

asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
ACCESS_ONCE(*p) = (tmp + 2) & ~1;
}

-static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
{
int *p = (int *)&lock->lock, ticket;

@@ -89,53 +89,53 @@ static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
}
}

-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
long tmp = ACCESS_ONCE(lock->lock);

return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
}

-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
long tmp = ACCESS_ONCE(lock->lock);

return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}

-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
{
return __ticket_spin_is_locked(lock);
}

-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int __raw_spin_is_contended(arch_spinlock_t *lock)
{
return __ticket_spin_is_contended(lock);
}
#define __raw_spin_is_contended __raw_spin_is_contended

-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_lock(arch_spinlock_t *lock)
{
__ticket_spin_lock(lock);
}

-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
return __ticket_spin_trylock(lock);
}

-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
__ticket_spin_unlock(lock);
}

-static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+static __always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock,
unsigned long flags)
{
__raw_spin_lock(lock);
}

-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
{
__ticket_spin_unlock_wait(lock);
}
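
ia64 implements its lock as a ticket lock: each contender atomically takes the next ticket, and unlock advances the "now serving" counter, so waiters acquire the lock in FIFO order. The kernel packs both counters into a single word (see TICKET_SHIFT above); the sketch below uses two separate words for clarity, and the toy_* names are invented:

#include <stdatomic.h>

typedef struct {
        atomic_uint next;       /* next ticket to hand out */
        atomic_uint serving;    /* ticket currently allowed to run */
} toy_ticket_lock_t;

static inline void toy_ticket_lock(toy_ticket_lock_t *l)
{
        /* take a ticket; fetch_add makes ticket-taking atomic */
        unsigned int me = atomic_fetch_add_explicit(&l->next, 1,
                                                    memory_order_relaxed);
        /* wait until it is our turn */
        while (atomic_load_explicit(&l->serving,
                                    memory_order_acquire) != me)
                ;       /* ia64 inserts a deschedule hint here */
}

static inline void toy_ticket_unlock(toy_ticket_lock_t *l)
{
        /* let the next waiter in */
        atomic_fetch_add_explicit(&l->serving, 1, memory_order_release);
}
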
2 changes: 1 addition & 1 deletion arch/ia64/include/asm/spinlock_types.h
@@ -7,7 +7,7 @@

typedef struct {
volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;

#define __RAW_SPIN_LOCK_UNLOCKED { 0 }

6 changes: 3 additions & 3 deletions arch/m32r/include/asm/spinlock.h
@@ -36,7 +36,7 @@
* __raw_spin_trylock() tries to get the lock and returns a result.
* On the m32r, the result value is 1 (= Success) or 0 (= Failure).
*/
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
{
int oldval;
unsigned long tmp1, tmp2;
@@ -69,7 +69,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
return (oldval > 0);
}

-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp0, tmp1;

@@ -111,7 +111,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
);
}

-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
mb();
lock->slock = 1;
2 changes: 1 addition & 1 deletion arch/m32r/include/asm/spinlock_types.h
@@ -7,7 +7,7 @@

typedef struct {
volatile int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;

#define __RAW_SPIN_LOCK_UNLOCKED { 1 }

10 changes: 5 additions & 5 deletions arch/mips/include/asm/spinlock.h
@@ -34,7 +34,7 @@
 * becomes equal to the initial value of the tail.
*/

-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
{
unsigned int counters = ACCESS_ONCE(lock->lock);

@@ -45,15 +45,15 @@ static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
#define __raw_spin_unlock_wait(x) \
while (__raw_spin_is_locked(x)) { cpu_relax(); }

-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int __raw_spin_is_contended(arch_spinlock_t *lock)
{
unsigned int counters = ACCESS_ONCE(lock->lock);

return (((counters >> 14) - counters) & 0x1fff) > 1;
}
#define __raw_spin_is_contended __raw_spin_is_contended

-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
{
int my_ticket;
int tmp;
@@ -134,7 +134,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
smp_llsc_mb();
}

-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
int tmp;

@@ -174,7 +174,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
}
}

-static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline unsigned int __raw_spin_trylock(arch_spinlock_t *lock)
{
int tmp, tmp2, tmp3;

2 changes: 1 addition & 1 deletion arch/mips/include/asm/spinlock_types.h
@@ -12,7 +12,7 @@ typedef struct {
* bits 15..28: ticket
*/
unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;

#define __RAW_SPIN_LOCK_UNLOCKED { 0 }

6 changes: 3 additions & 3 deletions arch/parisc/include/asm/atomic.h
@@ -27,18 +27,18 @@
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

-extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

/* Can't use raw_spin_lock_irq because of #include problems, so
* this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do { \
-raw_spinlock_t *s = ATOMIC_HASH(l); \
+arch_spinlock_t *s = ATOMIC_HASH(l); \
local_irq_save(f); \
__raw_spin_lock(s); \
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do { \
-raw_spinlock_t *s = ATOMIC_HASH(l); \
+arch_spinlock_t *s = ATOMIC_HASH(l); \
__raw_spin_unlock(s); \
local_irq_restore(f); \
} while(0)
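
parisc lacks the hardware atomics to implement most atomic_t operations directly, so it serializes them through a small hash table of spinlocks: ATOMIC_HASH() maps an address, by cache line, to one of ATOMIC_HASH_SIZE bucket locks, and the macros above additionally disable interrupts around the critical section. A self-contained C11 sketch of the hashing idea (toy_* names invented, IRQ handling omitted):

#include <stdatomic.h>

#define TOY_HASH_SIZE           4       /* like ATOMIC_HASH_SIZE */
#define TOY_L1_CACHE_BYTES      64      /* assumed cache line size */

static atomic_uint toy_atomic_hash[TOY_HASH_SIZE];      /* bucket locks */

/* Map an address to its bucket lock, mirroring ATOMIC_HASH(a). */
static inline atomic_uint *toy_hash_lock(const void *a)
{
        return &toy_atomic_hash[((unsigned long)a / TOY_L1_CACHE_BYTES)
                                & (TOY_HASH_SIZE - 1)];
}

/* Emulate an atomic add-return with a per-bucket lock. */
static inline int toy_atomic_add_return(int i, int *v)
{
        atomic_uint *l = toy_hash_lock(v);
        int ret;

        while (atomic_exchange_explicit(l, 1, memory_order_acquire))
                ;                       /* spin on the bucket lock */
        ret = (*v += i);                /* the protected update */
        atomic_store_explicit(l, 0, memory_order_release);
        return ret;
}
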