Skip to content

Commit

Permalink
sh: nommu: Support building without an uncached mapping.
Browse files Browse the repository at this point in the history
Now that nommu selects 32BIT we run into the situation where SH-2A
supports an uncached identity mapping by way of the BSC, while the SH-2
does not. This provides stubs for the PC manglers and tidies up some of
the system*.h mess in the process.

Signed-off-by: Paul Mundt <[email protected]>
  • Loading branch information
pmundt committed Nov 4, 2010
1 parent e2fcf74 commit edc9a95
Show file tree
Hide file tree
Showing 4 changed files with 41 additions and 42 deletions.
4 changes: 1 addition & 3 deletions arch/sh/include/asm/system.h
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <asm/types.h>
#include <asm/uncached.h>

#define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */

Expand Down Expand Up @@ -137,9 +138,6 @@ extern unsigned int instruction_size(unsigned int insn);
#define instruction_size(insn) (4)
#endif

extern unsigned long cached_to_uncached;
extern unsigned long uncached_size;

void per_cpu_trap_init(void);
void default_idle(void);
void cpu_idle_wait(void);
Expand Down
36 changes: 0 additions & 36 deletions arch/sh/include/asm/system_32.h
Original file line number Diff line number Diff line change
Expand Up @@ -145,42 +145,6 @@ do { \
__restore_dsp(prev); \
} while (0)

/*
 * Jump to uncached area.
 * When handling TLB or caches, we need to do it from an uncached area.
 *
 * "mova 1f, %0" materializes the PC-relative (cached) address of the
 * local label, "add %1, %0" rebases it by cached_to_uncached, and the
 * jmp (with its delay-slot nop) resumes execution at the uncached
 * alias of the instruction following the branch.  The "z" constraint
 * pins the scratch register to r0, which mova requires.
 */
#define jump_to_uncached() \
do { \
unsigned long __dummy; \
\
__asm__ __volatile__( \
"mova 1f, %0\n\t" \
"add %1, %0\n\t" \
"jmp @%0\n\t" \
" nop\n\t" \
".balign 4\n" \
"1:" \
: "=&z" (__dummy) \
: "r" (cached_to_uncached)); \
} while (0)

/*
 * Back to cached area.
 *
 * ctrl_barrier() serializes first; then "mov.l 1f, %0" loads the
 * absolute (link-time, i.e. cached) address of label 2 from the
 * inline literal pool ("1: .long 2f") and jumps there, dropping
 * execution back onto the cached mapping.
 */
#define back_to_cached() \
do { \
unsigned long __dummy; \
ctrl_barrier(); \
__asm__ __volatile__( \
"mov.l 1f, %0\n\t" \
"jmp @%0\n\t" \
" nop\n\t" \
".balign 4\n" \
"1: .long 2f\n" \
"2:" \
: "=&r" (__dummy)); \
} while (0)

#ifdef CONFIG_CPU_HAS_SR_RB
#define lookup_exception_vector() \
({ \
Expand Down
3 changes: 0 additions & 3 deletions arch/sh/include/asm/system_64.h
Original file line number Diff line number Diff line change
Expand Up @@ -34,9 +34,6 @@ do { \
&next->thread); \
} while (0)

/* No cached/uncached trampoline is performed here; both hooks are no-ops. */
#define jump_to_uncached() do { } while (0)
#define back_to_cached() do { } while (0)

#define __icbi(addr) __asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr))
#define __ocbp(addr) __asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr))
#define __ocbi(addr) __asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr))
Expand Down
40 changes: 40 additions & 0 deletions arch/sh/include/asm/uncached.h
Original file line number Diff line number Diff line change
Expand Up @@ -4,15 +4,55 @@
#include <linux/bug.h>

#ifdef CONFIG_UNCACHED_MAPPING
/*
 * Byte offset that, added to a cached text address, yields its uncached
 * alias -- this is exactly what jump_to_uncached() adds to the PC.
 */
extern unsigned long cached_to_uncached;
/* Size and [start, end) extent of the uncached mapping.
 * NOTE(review): presumably established by uncached_init() -- confirm
 * against arch/sh/mm. */
extern unsigned long uncached_size;
extern unsigned long uncached_start, uncached_end;

/* Nonzero iff kaddr lies within the uncached mapping (stubbed to 0 below). */
extern int virt_addr_uncached(unsigned long kaddr);
extern void uncached_init(void);
extern void uncached_resize(unsigned long size);

/*
 * Jump to uncached area.
 * When handling TLB or caches, we need to do it from an uncached area.
 *
 * "mova 1f, %0" materializes the PC-relative (cached) address of the
 * local label, "add %1, %0" rebases it by cached_to_uncached, and the
 * jmp (with its delay-slot nop) resumes execution at the uncached
 * alias of the instruction following the branch.  The "z" constraint
 * pins the scratch register to r0, which mova requires.
 */
#define jump_to_uncached() \
do { \
unsigned long __dummy; \
\
__asm__ __volatile__( \
"mova 1f, %0\n\t" \
"add %1, %0\n\t" \
"jmp @%0\n\t" \
" nop\n\t" \
".balign 4\n" \
"1:" \
: "=&z" (__dummy) \
: "r" (cached_to_uncached)); \
} while (0)

/*
 * Back to cached area.
 *
 * ctrl_barrier() serializes first; then "mov.l 1f, %0" loads the
 * absolute (link-time, i.e. cached) address of label 2 from the
 * inline literal pool ("1: .long 2f") and jumps there, dropping
 * execution back onto the cached mapping.
 */
#define back_to_cached() \
do { \
unsigned long __dummy; \
ctrl_barrier(); \
__asm__ __volatile__( \
"mov.l 1f, %0\n\t" \
"jmp @%0\n\t" \
" nop\n\t" \
".balign 4\n" \
"1: .long 2f\n" \
"2:" \
: "=&r" (__dummy)); \
} while (0)
#else
/*
 * No uncached mapping on this configuration: the query and the PC
 * manglers collapse to no-ops, and resizing the (nonexistent) mapping
 * is a BUG.
 */
#define virt_addr_uncached(kaddr) (0)
#define uncached_init() do { } while (0)
#define uncached_resize(size) BUG()
#define jump_to_uncached() do { } while (0)
#define back_to_cached() do { } while (0)
#endif

#endif /* __ASM_SH_UNCACHED_H */

0 comments on commit edc9a95

Please sign in to comment.