Merge tag 'openrisc-for-linus' of git://github.com/openrisc/linux
Pull OpenRISC updates from Stafford Horne:
 "Highlights include:

   - optimized memset and memcpy routines, ~20% boot time saving

   - support for cpu idling

   - adding support for l.swa and l.lwa atomic operations (in spec from 2014)

   - use atomics to implement: bitops, cmpxchg, futex

   - the atomics are in preparation for SMP support"

* tag 'openrisc-for-linus' of git://github.com/openrisc/linux: (25 commits)
  openrisc: head: Init r0 to 0 on start
  openrisc: Export ioremap symbols used by modules
  arch/openrisc/lib/memcpy.c: use correct OR1200 option
  openrisc: head: Remove unused strings
  openrisc: head: Move init strings to rodata section
  openrisc: entry: Fix delay slot detection
  openrisc: entry: Whitespace and comment cleanups
  scripts/checkstack.pl: Add openrisc support
  MAINTAINERS: Add the openrisc official repository
  openrisc: Add .gitignore
  openrisc: Add optimized memcpy routine
  openrisc: Add optimized memset
  openrisc: Initial support for the idle state
  openrisc: Fix the bitmask for the unit present register
  openrisc: remove unnecessary stddef.h include
  openrisc: add futex_atomic_* implementations
  openrisc: add optimized atomic operations
  openrisc: add cmpxchg and xchg implementations
  openrisc: add atomic bitops
  openrisc: add l.lwa/l.swa emulation
  ...
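For readers who have not met them, l.lwa and l.swa behave as a load-linked/store-conditional pair: l.lwa loads a word and places a reservation on it, l.swa stores only if the reservation is still intact and sets the flag on success, and l.bnf branches back to retry when the store failed. Every primitive in the new headers below is such a retry loop. As a rough, portable analogue (not part of the commit; plain C11 atomics, purely illustrative, and the function name is made up):

/* Illustrative only: a portable C11 analogue of the l.lwa/l.swa retry loop
 * used by the OpenRISC atomics below.  Not part of the commit. */
#include <stdatomic.h>

static int fetch_add_llsc_style(atomic_int *v, int i)
{
        int old = atomic_load(v);

        /* Retry until no other thread modified *v between the load and the
         * store, mirroring the l.lwa / l.swa / l.bnf loop. */
        while (!atomic_compare_exchange_weak(v, &old, old + i))
                ;       /* 'old' was refreshed by the failed CAS; just retry */

        return old;
}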
Showing 26 changed files with 1,064 additions and 187 deletions.
@@ -9315,6 +9315,7 @@ OPENRISC ARCHITECTURE
 M:	Jonas Bonn <[email protected]>
 M:	Stefan Kristiansson <[email protected]>
 M:	Stafford Horne <[email protected]>
+T:	git git://github.com/openrisc/linux.git
 L:	[email protected]
 W:	http://openrisc.io
 S:	Maintained
@@ -0,0 +1,126 @@
/*
 * Copyright (C) 2014 Stefan Kristiansson <[email protected]>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#ifndef __ASM_OPENRISC_ATOMIC_H
#define __ASM_OPENRISC_ATOMIC_H

#include <linux/types.h>

/* Atomically perform op with v->counter and i */
#define ATOMIC_OP(op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
        int tmp; \
 \
        __asm__ __volatile__( \
                "1:     l.lwa   %0,0(%1)        \n" \
                "       l." #op " %0,%0,%2     \n" \
                "       l.swa   0(%1),%0        \n" \
                "       l.bnf   1b              \n" \
                "       l.nop                   \n" \
                : "=&r"(tmp) \
                : "r"(&v->counter), "r"(i) \
                : "cc", "memory"); \
}

/* Atomically perform op with v->counter and i, return the result */
#define ATOMIC_OP_RETURN(op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
        int tmp; \
 \
        __asm__ __volatile__( \
                "1:     l.lwa   %0,0(%1)        \n" \
                "       l." #op " %0,%0,%2     \n" \
                "       l.swa   0(%1),%0        \n" \
                "       l.bnf   1b              \n" \
                "       l.nop                   \n" \
                : "=&r"(tmp) \
                : "r"(&v->counter), "r"(i) \
                : "cc", "memory"); \
 \
        return tmp; \
}

/* Atomically perform op with v->counter and i, return orig v->counter */
#define ATOMIC_FETCH_OP(op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
        int tmp, old; \
 \
        __asm__ __volatile__( \
                "1:     l.lwa   %0,0(%2)        \n" \
                "       l." #op " %1,%0,%3     \n" \
                "       l.swa   0(%2),%1        \n" \
                "       l.bnf   1b              \n" \
                "       l.nop                   \n" \
                : "=&r"(old), "=&r"(tmp) \
                : "r"(&v->counter), "r"(i) \
                : "cc", "memory"); \
 \
        return old; \
}

ATOMIC_OP_RETURN(add)
ATOMIC_OP_RETURN(sub)

ATOMIC_FETCH_OP(add)
ATOMIC_FETCH_OP(sub)
ATOMIC_FETCH_OP(and)
ATOMIC_FETCH_OP(or)
ATOMIC_FETCH_OP(xor)

ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_add_return	atomic_add_return
#define atomic_sub_return	atomic_sub_return
#define atomic_fetch_add	atomic_fetch_add
#define atomic_fetch_sub	atomic_fetch_sub
#define atomic_fetch_and	atomic_fetch_and
#define atomic_fetch_or		atomic_fetch_or
#define atomic_fetch_xor	atomic_fetch_xor
#define atomic_and		atomic_and
#define atomic_or		atomic_or
#define atomic_xor		atomic_xor

/*
 * Atomically add a to v->counter as long as v is not already u.
 * Returns the original value at v->counter.
 *
 * This is often used through atomic_inc_not_zero()
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int old, tmp;

        __asm__ __volatile__(
                "1:     l.lwa   %0, 0(%2)       \n"
                "       l.sfeq  %0, %4          \n"
                "       l.bf    2f              \n"
                "       l.add   %1, %0, %3      \n"
                "       l.swa   0(%2), %1       \n"
                "       l.bnf   1b              \n"
                "       l.nop                   \n"
                "2:                             \n"
                : "=&r"(old), "=&r"(tmp)
                : "r"(&v->counter), "r"(a), "r"(u)
                : "cc", "memory");

        return old;
}
#define __atomic_add_unless	__atomic_add_unless

#include <asm-generic/atomic.h>

#endif /* __ASM_OPENRISC_ATOMIC_H */
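A short usage sketch of what this header (judging by its include guard, the new arch/openrisc/include/asm/atomic.h) provides to generic kernel code. It is not part of the diff; the example_* names are hypothetical, and atomic_inc_not_zero() is the common generic wrapper over __atomic_add_unless() mentioned in the comment above:

/* Hypothetical usage sketch -- not part of the commit. */
#include <linux/atomic.h>
#include <linux/printk.h>
#include <linux/types.h>

static atomic_t example_stat = ATOMIC_INIT(0);          /* hypothetical counter */

/* Take a reference only while the object is still live (refcount != 0);
 * atomic_inc_not_zero() boils down to __atomic_add_unless(refcount, 1, 0). */
static bool example_get_ref(atomic_t *refcount)
{
        return atomic_inc_not_zero(refcount);
}

static void example_update(void)
{
        atomic_or(0x4, &example_stat);                  /* ATOMIC_OP(or) above */
        if (atomic_add_return(1, &example_stat) == 1)   /* ATOMIC_OP_RETURN(add) */
                pr_debug("first user of example_stat\n");
        (void)atomic_fetch_and(~0x4, &example_stat);    /* ATOMIC_FETCH_OP(and) */
}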
@@ -0,0 +1,123 @@
/*
 * Copyright (C) 2014 Stefan Kristiansson <[email protected]>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#ifndef __ASM_OPENRISC_BITOPS_ATOMIC_H
#define __ASM_OPENRISC_BITOPS_ATOMIC_H

static inline void set_bit(int nr, volatile unsigned long *addr)
{
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
        unsigned long tmp;

        __asm__ __volatile__(
                "1:     l.lwa   %0,0(%1)        \n"
                "       l.or    %0,%0,%2        \n"
                "       l.swa   0(%1),%0        \n"
                "       l.bnf   1b              \n"
                "       l.nop                   \n"
                : "=&r"(tmp)
                : "r"(p), "r"(mask)
                : "cc", "memory");
}

static inline void clear_bit(int nr, volatile unsigned long *addr)
{
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
        unsigned long tmp;

        __asm__ __volatile__(
                "1:     l.lwa   %0,0(%1)        \n"
                "       l.and   %0,%0,%2        \n"
                "       l.swa   0(%1),%0        \n"
                "       l.bnf   1b              \n"
                "       l.nop                   \n"
                : "=&r"(tmp)
                : "r"(p), "r"(~mask)
                : "cc", "memory");
}

static inline void change_bit(int nr, volatile unsigned long *addr)
{
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
        unsigned long tmp;

        __asm__ __volatile__(
                "1:     l.lwa   %0,0(%1)        \n"
                "       l.xor   %0,%0,%2        \n"
                "       l.swa   0(%1),%0        \n"
                "       l.bnf   1b              \n"
                "       l.nop                   \n"
                : "=&r"(tmp)
                : "r"(p), "r"(mask)
                : "cc", "memory");
}

static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
        unsigned long old;
        unsigned long tmp;

        __asm__ __volatile__(
                "1:     l.lwa   %0,0(%2)        \n"
                "       l.or    %1,%0,%3        \n"
                "       l.swa   0(%2),%1        \n"
                "       l.bnf   1b              \n"
                "       l.nop                   \n"
                : "=&r"(old), "=&r"(tmp)
                : "r"(p), "r"(mask)
                : "cc", "memory");

        return (old & mask) != 0;
}

static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
        unsigned long old;
        unsigned long tmp;

        __asm__ __volatile__(
                "1:     l.lwa   %0,0(%2)        \n"
                "       l.and   %1,%0,%3        \n"
                "       l.swa   0(%2),%1        \n"
                "       l.bnf   1b              \n"
                "       l.nop                   \n"
                : "=&r"(old), "=&r"(tmp)
                : "r"(p), "r"(~mask)
                : "cc", "memory");

        return (old & mask) != 0;
}

static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
        unsigned long old;
        unsigned long tmp;

        __asm__ __volatile__(
                "1:     l.lwa   %0,0(%2)        \n"
                "       l.xor   %1,%0,%3        \n"
                "       l.swa   0(%2),%1        \n"
                "       l.bnf   1b              \n"
                "       l.nop                   \n"
                : "=&r"(old), "=&r"(tmp)
                : "r"(p), "r"(mask)
                : "cc", "memory");

        return (old & mask) != 0;
}

#endif /* __ASM_OPENRISC_BITOPS_ATOMIC_H */
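A usage sketch of the atomic bitops above (the guard suggests arch/openrisc/include/asm/bitops/atomic.h). Again, this is not in the diff; EXAMPLE_NR_FLAGS and the example_* helpers are made up for illustration:

/* Hypothetical usage sketch -- not part of the commit. */
#include <linux/bitops.h>
#include <linux/types.h>

#define EXAMPLE_NR_FLAGS 64                      /* hypothetical flag count */
static DECLARE_BITMAP(example_flags, EXAMPLE_NR_FLAGS);

/* Claim flag 'nr' exactly once across CPUs; returns true for the winner. */
static bool example_claim(int nr)
{
        /* test_and_set_bit() returns the old bit value, so only the first
         * caller to flip 0 -> 1 sees false here. */
        return !test_and_set_bit(nr, example_flags);
}

static void example_release(int nr)
{
        clear_bit(nr, example_flags);            /* the l.lwa/l.and/l.swa loop above */
}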
@@ -0,0 +1,83 @@
/*
 * Copyright (C) 2014 Stefan Kristiansson <[email protected]>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#ifndef __ASM_OPENRISC_CMPXCHG_H
#define __ASM_OPENRISC_CMPXCHG_H

#include <linux/types.h>

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
 */
extern void __cmpxchg_called_with_bad_pointer(void);

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
        if (size != 4) {
                __cmpxchg_called_with_bad_pointer();
                return old;
        }

        __asm__ __volatile__(
                "1:     l.lwa %0, 0(%1)         \n"
                "       l.sfeq %0, %2           \n"
                "       l.bnf 2f                \n"
                "       l.nop                   \n"
                "       l.swa 0(%1), %3         \n"
                "       l.bnf 1b                \n"
                "       l.nop                   \n"
                "2:                             \n"
                : "=&r"(old)
                : "r"(ptr), "r"(old), "r"(new)
                : "cc", "memory");

        return old;
}

#define cmpxchg(ptr, o, n) \
({ \
        (__typeof__(*(ptr))) __cmpxchg((ptr), \
                                       (unsigned long)(o), \
                                       (unsigned long)(n), \
                                       sizeof(*(ptr))); \
})

/*
 * This function doesn't exist, so you'll get a linker error if
 * something tries to do an invalidly-sized xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
                                   int size)
{
        if (size != 4) {
                __xchg_called_with_bad_pointer();
                return val;
        }

        __asm__ __volatile__(
                "1:     l.lwa %0, 0(%1)         \n"
                "       l.swa 0(%1), %2         \n"
                "       l.bnf 1b                \n"
                "       l.nop                   \n"
                : "=&r"(val)
                : "r"(ptr), "r"(val)
                : "cc", "memory");

        return val;
}

#define xchg(ptr, with) \
        ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), sizeof(*(ptr))))

#endif /* __ASM_OPENRISC_CMPXCHG_H */
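Finally, a small sketch of how cmpxchg()/xchg() from this header (guard suggests arch/openrisc/include/asm/cmpxchg.h) are typically used. Not part of the commit; example_state and the helpers are hypothetical:

/* Hypothetical usage sketch -- not part of the commit. */
#include <linux/atomic.h>        /* pulls in asm/cmpxchg.h */
#include <linux/compiler.h>
#include <linux/types.h>

static unsigned long example_state;             /* hypothetical shared word */

/* Increment example_state but never beyond 'max', without taking a lock. */
static bool example_inc_capped(unsigned long max)
{
        unsigned long old, new;

        do {
                old = READ_ONCE(example_state);
                if (old >= max)
                        return false;
                new = old + 1;
                /* cmpxchg() stores 'new' only if the word still holds 'old';
                 * otherwise it returns the current value and we retry. */
        } while (cmpxchg(&example_state, old, new) != old);

        return true;
}

/* Swap in a new value and get the previous one back in a single step. */
static unsigned long example_reset(void)
{
        return xchg(&example_state, 0);
}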