Merge tag 'openrisc-for-linus' of git://github.com/openrisc/linux
Pull OpenRISC updates from Stafford Horne:
 "Highlights include:

   - optimized memset and memcpy routines, ~20% boot time saving

   - support for CPU idling

   - support for the l.swa and l.lwa atomic operations (in the spec
     since 2014)

   - use atomics to implement bitops, cmpxchg and futex

   - the atomics are in preparation for SMP support"

* tag 'openrisc-for-linus' of git://github.com/openrisc/linux: (25 commits)
  openrisc: head: Init r0 to 0 on start
  openrisc: Export ioremap symbols used by modules
  arch/openrisc/lib/memcpy.c: use correct OR1200 option
  openrisc: head: Remove unused strings
  openrisc: head: Move init strings to rodata section
  openrisc: entry: Fix delay slot detection
  openrisc: entry: Whitespace and comment cleanups
  scripts/checkstack.pl: Add openrisc support
  MAINTAINERS: Add the openrisc official repository
  openrisc: Add .gitignore
  openrisc: Add optimized memcpy routine
  openrisc: Add optimized memset
  openrisc: Initial support for the idle state
  openrisc: Fix the bitmask for the unit present register
  openrisc: remove unnecessary stddef.h include
  openrisc: add futex_atomic_* implementations
  openrisc: add optimized atomic operations
  openrisc: add cmpxchg and xchg implementations
  openrisc: add atomic bitops
  openrisc: add l.lwa/l.swa emulation
  ...
torvalds committed Feb 25, 2017
2 parents f8e6859 + a4d4426 commit 9e31489
Showing 26 changed files with 1,064 additions and 187 deletions.
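
For orientation (the following sketch is editorial illustration, not part of the merge): the new l.lwa/l.swa-backed primitives sit behind the kernel's generic atomic_t, bitop, cmpxchg and futex interfaces, so existing kernel code picks them up without any changes. A minimal, hypothetical kernel-style example of code that would now compile down to the LL/SC retry loops added in this series (the example_* names are made up):

/*
 * Illustrative sketch only (hypothetical example_* names): generic kernel
 * code keeps calling the usual atomic_t helpers; on OpenRISC they now
 * expand to the l.lwa/l.swa retry loops introduced by this series.
 */
#include <linux/atomic.h>

static atomic_t example_users = ATOMIC_INIT(0);

static int example_open(void)
{
	/* A single load-linked/store-conditional loop on OpenRISC. */
	return atomic_add_return(1, &example_users);
}

static void example_close(void)
{
	atomic_dec(&example_users);
}
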
1 change: 1 addition & 0 deletions MAINTAINERS
@@ -9315,6 +9315,7 @@ OPENRISC ARCHITECTURE
M: Jonas Bonn <[email protected]>
M: Stefan Kristiansson <[email protected]>
M: Stafford Horne <[email protected]>
T: git git://github.com/openrisc/linux.git
L: [email protected]
W: http://openrisc.io
S: Maintained
1 change: 1 addition & 0 deletions arch/openrisc/Kconfig
@@ -12,6 +12,7 @@ config OPENRISC
select HAVE_MEMBLOCK
select GPIOLIB
select HAVE_ARCH_TRACEHOOK
select SPARSE_IRQ
select GENERIC_IRQ_CHIP
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
1 change: 0 additions & 1 deletion arch/openrisc/TODO.openrisc
@@ -10,4 +10,3 @@ that are due for investigation shortly, i.e. our TODO list:
or1k and this change is slowly trickling through the stack. For the time
being, or32 is equivalent to or1k.

-- Implement optimized version of memcpy and memset
5 changes: 1 addition & 4 deletions arch/openrisc/include/asm/Kbuild
@@ -1,7 +1,6 @@

header-y += ucontext.h

generic-y += atomic.h
generic-y += auxvec.h
generic-y += barrier.h
generic-y += bitsperlong.h
@@ -10,8 +9,6 @@ generic-y += bugs.h
generic-y += cacheflush.h
generic-y += checksum.h
generic-y += clkdev.h
generic-y += cmpxchg-local.h
generic-y += cmpxchg.h
generic-y += current.h
generic-y += device.h
generic-y += div64.h
@@ -22,12 +19,12 @@ generic-y += exec.h
generic-y += fb.h
generic-y += fcntl.h
generic-y += ftrace.h
generic-y += futex.h
generic-y += hardirq.h
generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ioctls.h
generic-y += ipcbuf.h
generic-y += irq.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
126 changes: 126 additions & 0 deletions arch/openrisc/include/asm/atomic.h
@@ -0,0 +1,126 @@
/*
* Copyright (C) 2014 Stefan Kristiansson <[email protected]>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/

#ifndef __ASM_OPENRISC_ATOMIC_H
#define __ASM_OPENRISC_ATOMIC_H

#include <linux/types.h>

/* Atomically perform op with v->counter and i */
#define ATOMIC_OP(op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
int tmp; \
\
__asm__ __volatile__( \
"1: l.lwa %0,0(%1) \n" \
" l." #op " %0,%0,%2 \n" \
" l.swa 0(%1),%0 \n" \
" l.bnf 1b \n" \
" l.nop \n" \
: "=&r"(tmp) \
: "r"(&v->counter), "r"(i) \
: "cc", "memory"); \
}

/* Atomically perform op with v->counter and i, return the result */
#define ATOMIC_OP_RETURN(op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
int tmp; \
\
__asm__ __volatile__( \
"1: l.lwa %0,0(%1) \n" \
" l." #op " %0,%0,%2 \n" \
" l.swa 0(%1),%0 \n" \
" l.bnf 1b \n" \
" l.nop \n" \
: "=&r"(tmp) \
: "r"(&v->counter), "r"(i) \
: "cc", "memory"); \
\
return tmp; \
}

/* Atomically perform op with v->counter and i, return orig v->counter */
#define ATOMIC_FETCH_OP(op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
int tmp, old; \
\
__asm__ __volatile__( \
"1: l.lwa %0,0(%2) \n" \
" l." #op " %1,%0,%3 \n" \
" l.swa 0(%2),%1 \n" \
" l.bnf 1b \n" \
" l.nop \n" \
: "=&r"(old), "=&r"(tmp) \
: "r"(&v->counter), "r"(i) \
: "cc", "memory"); \
\
return old; \
}

ATOMIC_OP_RETURN(add)
ATOMIC_OP_RETURN(sub)

ATOMIC_FETCH_OP(add)
ATOMIC_FETCH_OP(sub)
ATOMIC_FETCH_OP(and)
ATOMIC_FETCH_OP(or)
ATOMIC_FETCH_OP(xor)

ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_add_return atomic_add_return
#define atomic_sub_return atomic_sub_return
#define atomic_fetch_add atomic_fetch_add
#define atomic_fetch_sub atomic_fetch_sub
#define atomic_fetch_and atomic_fetch_and
#define atomic_fetch_or atomic_fetch_or
#define atomic_fetch_xor atomic_fetch_xor
#define atomic_and atomic_and
#define atomic_or atomic_or
#define atomic_xor atomic_xor

/*
* Atomically add a to v->counter as long as v is not already u.
* Returns the original value at v->counter.
*
* This is often used through atomic_inc_not_zero()
*/
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int old, tmp;

__asm__ __volatile__(
"1: l.lwa %0, 0(%2) \n"
" l.sfeq %0, %4 \n"
" l.bf 2f \n"
" l.add %1, %0, %3 \n"
" l.swa 0(%2), %1 \n"
" l.bnf 1b \n"
" l.nop \n"
"2: \n"
: "=&r"(old), "=&r" (tmp)
: "r"(&v->counter), "r"(a), "r"(u)
: "cc", "memory");

return old;
}
#define __atomic_add_unless __atomic_add_unless

#include <asm-generic/atomic.h>

#endif /* __ASM_OPENRISC_ATOMIC_H */
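
To make the token pasting concrete, this is approximately what ATOMIC_OP_RETURN(add) above expands to; l.bnf branches back to the l.lwa as long as the l.swa store-conditional keeps failing:

/* Manual expansion of ATOMIC_OP_RETURN(add), shown for illustration only. */
static inline int atomic_add_return(int i, atomic_t *v)
{
	int tmp;

	__asm__ __volatile__(
		"1:	l.lwa	%0,0(%1)	\n"
		"	l.add	%0,%0,%2	\n"
		"	l.swa	0(%1),%0	\n"
		"	l.bnf	1b		\n"
		"	l.nop			\n"
		: "=&r"(tmp)
		: "r"(&v->counter), "r"(i)
		: "cc", "memory");

	return tmp;
}
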
2 changes: 1 addition & 1 deletion arch/openrisc/include/asm/bitops.h
@@ -45,7 +45,7 @@
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

#include <asm-generic/bitops/atomic.h>
#include <asm/bitops/atomic.h>
#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>
123 changes: 123 additions & 0 deletions arch/openrisc/include/asm/bitops/atomic.h
@@ -0,0 +1,123 @@
/*
* Copyright (C) 2014 Stefan Kristiansson <[email protected]>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/

#ifndef __ASM_OPENRISC_BITOPS_ATOMIC_H
#define __ASM_OPENRISC_BITOPS_ATOMIC_H

static inline void set_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long tmp;

__asm__ __volatile__(
"1: l.lwa %0,0(%1) \n"
" l.or %0,%0,%2 \n"
" l.swa 0(%1),%0 \n"
" l.bnf 1b \n"
" l.nop \n"
: "=&r"(tmp)
: "r"(p), "r"(mask)
: "cc", "memory");
}

static inline void clear_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long tmp;

__asm__ __volatile__(
"1: l.lwa %0,0(%1) \n"
" l.and %0,%0,%2 \n"
" l.swa 0(%1),%0 \n"
" l.bnf 1b \n"
" l.nop \n"
: "=&r"(tmp)
: "r"(p), "r"(~mask)
: "cc", "memory");
}

static inline void change_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long tmp;

__asm__ __volatile__(
"1: l.lwa %0,0(%1) \n"
" l.xor %0,%0,%2 \n"
" l.swa 0(%1),%0 \n"
" l.bnf 1b \n"
" l.nop \n"
: "=&r"(tmp)
: "r"(p), "r"(mask)
: "cc", "memory");
}

static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long old;
unsigned long tmp;

__asm__ __volatile__(
"1: l.lwa %0,0(%2) \n"
" l.or %1,%0,%3 \n"
" l.swa 0(%2),%1 \n"
" l.bnf 1b \n"
" l.nop \n"
: "=&r"(old), "=&r"(tmp)
: "r"(p), "r"(mask)
: "cc", "memory");

return (old & mask) != 0;
}

static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long old;
unsigned long tmp;

__asm__ __volatile__(
"1: l.lwa %0,0(%2) \n"
" l.and %1,%0,%3 \n"
" l.swa 0(%2),%1 \n"
" l.bnf 1b \n"
" l.nop \n"
: "=&r"(old), "=&r"(tmp)
: "r"(p), "r"(~mask)
: "cc", "memory");

return (old & mask) != 0;
}

static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long old;
unsigned long tmp;

__asm__ __volatile__(
"1: l.lwa %0,0(%2) \n"
" l.xor %1,%0,%3 \n"
" l.swa 0(%2),%1 \n"
" l.bnf 1b \n"
" l.nop \n"
: "=&r"(old), "=&r"(tmp)
: "r"(p), "r"(mask)
: "cc", "memory");

return (old & mask) != 0;
}

#endif /* __ASM_OPENRISC_BITOPS_ATOMIC_H */
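
A short, hypothetical usage sketch (not part of the patch) of the bitops above, treating test_and_set_bit()/clear_bit() as a one-bit claim flag:

/* Illustrative only; the example_* names are hypothetical. */
#include <linux/bitops.h>
#include <linux/types.h>

#define EXAMPLE_BUSY	0		/* bit index within example_flags */

static unsigned long example_flags;

static bool example_try_claim(void)
{
	/* Atomic read-modify-write via the l.lwa/l.swa loop above. */
	return !test_and_set_bit(EXAMPLE_BUSY, &example_flags);
}

static void example_release(void)
{
	clear_bit(EXAMPLE_BUSY, &example_flags);
}
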
83 changes: 83 additions & 0 deletions arch/openrisc/include/asm/cmpxchg.h
@@ -0,0 +1,83 @@
/*
* Copyright (C) 2014 Stefan Kristiansson <[email protected]>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/

#ifndef __ASM_OPENRISC_CMPXCHG_H
#define __ASM_OPENRISC_CMPXCHG_H

#include <linux/types.h>

/*
* This function doesn't exist, so you'll get a linker error
* if something tries to do an invalid cmpxchg().
*/
extern void __cmpxchg_called_with_bad_pointer(void);

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
if (size != 4) {
__cmpxchg_called_with_bad_pointer();
return old;
}

__asm__ __volatile__(
"1: l.lwa %0, 0(%1) \n"
" l.sfeq %0, %2 \n"
" l.bnf 2f \n"
" l.nop \n"
" l.swa 0(%1), %3 \n"
" l.bnf 1b \n"
" l.nop \n"
"2: \n"
: "=&r"(old)
: "r"(ptr), "r"(old), "r"(new)
: "cc", "memory");

return old;
}

#define cmpxchg(ptr, o, n) \
({ \
(__typeof__(*(ptr))) __cmpxchg((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
sizeof(*(ptr))); \
})

/*
* This function doesn't exist, so you'll get a linker error if
* something tries to do an invalidly-sized xchg().
*/
extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
int size)
{
if (size != 4) {
__xchg_called_with_bad_pointer();
return val;
}

__asm__ __volatile__(
"1: l.lwa %0, 0(%1) \n"
" l.swa 0(%1), %2 \n"
" l.bnf 1b \n"
" l.nop \n"
: "=&r"(val)
: "r"(ptr), "r"(val)
: "cc", "memory");

return val;
}

#define xchg(ptr, with) \
((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), sizeof(*(ptr))))

#endif /* __ASM_OPENRISC_CMPXCHG_H */
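
And a hypothetical caller-side sketch (not part of the patch) of the cmpxchg() retry idiom these definitions enable, here recording a running maximum without a lock:

/* Illustrative only; the example_* names are hypothetical. */
#include <linux/compiler.h>

static unsigned int example_max;

static void example_note_value(unsigned int val)
{
	unsigned int old;

	do {
		old = READ_ONCE(example_max);
		if (val <= old)
			return;
		/* cmpxchg() stores val only if example_max still equals old. */
	} while (cmpxchg(&example_max, old, val) != old);
}
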