Disintegrate asm/system.h for PowerPC

Signed-off-by: David Howells <[email protected]>
Acked-by: Benjamin Herrenschmidt <[email protected]>
cc: [email protected]
dhowells committed Mar 28, 2012
1 parent 527dcdc commit ae3a197
Showing 133 changed files with 687 additions and 701 deletions.
8 changes: 2 additions & 6 deletions arch/powerpc/include/asm/atomic.h
@@ -5,13 +5,9 @@
  * PowerPC atomic operations
  */
 
-#include <linux/types.h>
-
 #ifdef __KERNEL__
-#include <linux/compiler.h>
-#include <asm/synch.h>
-#include <asm/asm-compat.h>
-#include <asm/system.h>
+#include <linux/types.h>
+#include <asm/cmpxchg.h>
 
 #define ATOMIC_INIT(i) { (i) }
 
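A note on the hunk above: the include shuffle means asm/atomic.h now gets its compare-and-exchange machinery from the newly split-out asm/cmpxchg.h rather than from asm/system.h, and the ATOMIC_INIT() context line is the kernel's static initialiser for atomic_t. A minimal usage sketch with the standard kernel atomics API (the example itself is hypothetical, not part of this commit):

#include <linux/atomic.h>	/* pulls in asm/atomic.h and, through it, asm/cmpxchg.h */

/* Hypothetical example: a statically initialised reference count. */
static atomic_t refs = ATOMIC_INIT(1);

static void get_ref(void)
{
	atomic_inc(&refs);			/* atomically bump the count */
}

static int put_ref(void)
{
	return atomic_dec_and_test(&refs);	/* nonzero once the count reaches zero */
}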
2 changes: 2 additions & 0 deletions arch/powerpc/include/asm/auxvec.h
@@ -16,4 +16,6 @@
  */
 #define AT_SYSINFO_EHDR 33
 
+#define AT_VECTOR_SIZE_ARCH 6 /* entries in ARCH_DLINFO */
+
 #endif
68 changes: 68 additions & 0 deletions arch/powerpc/include/asm/barrier.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 1999 Cort Dougan <[email protected]>
+ */
+#ifndef _ASM_POWERPC_BARRIER_H
+#define _ASM_POWERPC_BARRIER_H
+
+/*
+ * Memory barrier.
+ * The sync instruction guarantees that all memory accesses initiated
+ * by this processor have been performed (with respect to all other
+ * mechanisms that access memory). The eieio instruction is a barrier
+ * providing an ordering (separately) for (a) cacheable stores and (b)
+ * loads and stores to non-cacheable memory (e.g. I/O devices).
+ *
+ * mb() prevents loads and stores being reordered across this point.
+ * rmb() prevents loads being reordered across this point.
+ * wmb() prevents stores being reordered across this point.
+ * read_barrier_depends() prevents data-dependent loads being reordered
+ * across this point (nop on PPC).
+ *
+ * *mb() variants without smp_ prefix must order all types of memory
+ * operations with one another. sync is the only instruction sufficient
+ * to do this.
+ *
+ * For the smp_ barriers, ordering is for cacheable memory operations
+ * only. We have to use the sync instruction for smp_mb(), since lwsync
+ * doesn't order loads with respect to previous stores. Lwsync can be
+ * used for smp_rmb() and smp_wmb().
+ *
+ * However, on CPUs that don't support lwsync, lwsync actually maps to a
+ * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio.
+ */
+#define mb() __asm__ __volatile__ ("sync" : : : "memory")
+#define rmb() __asm__ __volatile__ ("sync" : : : "memory")
+#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
+#define read_barrier_depends() do { } while(0)
+
+#define set_mb(var, value) do { var = value; mb(); } while (0)
+
+#ifdef CONFIG_SMP
+
+#ifdef __SUBARCH_HAS_LWSYNC
+# define SMPWMB LWSYNC
+#else
+# define SMPWMB eieio
+#endif
+
+#define smp_mb() mb()
+#define smp_rmb() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
+#define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
+#define smp_read_barrier_depends() read_barrier_depends()
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() do { } while(0)
+#endif /* CONFIG_SMP */
+
+/*
+ * This is a barrier which prevents following instructions from being
+ * started until the value of the argument x is known. For example, if
+ * x is a variable loaded from memory, this prevents following
+ * instructions from being executed until the load has been performed.
+ */
+#define data_barrier(x) \
+	asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
+
+#endif /* _ASM_POWERPC_BARRIER_H */
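The comment block in this new header explains which instruction each barrier maps to; in use, the smp_ variants come in pairs, one on the producing CPU and one on the consuming CPU. A hypothetical producer/consumer sketch (illustrative only, not part of this commit):

/* Hypothetical illustration of paired barriers; not from this commit. */
static int payload;
static int ready;

static void producer(void)			/* e.g. runs on CPU 0 */
{
	payload = 42;
	smp_wmb();	/* order the payload store before the flag store (LWSYNC or eieio) */
	ready = 1;
}

static void consumer(void)			/* e.g. runs on CPU 1 */
{
	while (!ACCESS_ONCE(ready))
		cpu_relax();
	smp_rmb();	/* order the flag load before the payload load (lwsync) */
	/* payload is now guaranteed to read as 42 */
}

On !CONFIG_SMP kernels both barriers collapse to barrier(), a pure compiler barrier, which is sufficient because no second CPU exists to observe the window between the two stores.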
11 changes: 11 additions & 0 deletions arch/powerpc/include/asm/bug.h
@@ -126,5 +126,16 @@
 
 #include <asm-generic/bug.h>
 
+#ifndef __ASSEMBLY__
+
+struct pt_regs;
+extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
+extern void bad_page_fault(struct pt_regs *, unsigned long, int);
+extern void _exception(int, struct pt_regs *, int, unsigned long);
+extern void die(const char *, struct pt_regs *, long);
+extern void print_backtrace(unsigned long *);
+
+#endif /* !__ASSEMBLY__ */
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_BUG_H */
16 changes: 16 additions & 0 deletions arch/powerpc/include/asm/cache.h
@@ -42,8 +42,24 @@ extern struct ppc64_caches ppc64_caches;
 #endif /* __powerpc64__ && ! __ASSEMBLY__ */
 
 #if !defined(__ASSEMBLY__)
+
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+
+#ifdef CONFIG_6xx
+extern long _get_L2CR(void);
+extern long _get_L3CR(void);
+extern void _set_L2CR(unsigned long);
+extern void _set_L3CR(unsigned long);
+#else
+#define _get_L2CR() 0L
+#define _get_L3CR() 0L
+#define _set_L2CR(val) do { } while(0)
+#define _set_L3CR(val) do { } while(0)
+#endif
+
+extern void cacheable_memzero(void *p, unsigned int nb);
+extern void *cacheable_memcpy(void *, const void *, unsigned int);
 
 #endif /* !__ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_CACHE_H */
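Note the #else branch in this hunk: when CONFIG_6xx is not set, the L2CR/L3CR accessors degenerate to the constant 0L and to empty statements, so platform code can call them unconditionally instead of wrapping every call site in #ifdef CONFIG_6xx. A hypothetical call site (the function name is invented for illustration):

/* Hypothetical caller; not part of this commit. */
static void platform_setup_l2(unsigned long l2cr_val)
{
	if (_get_L2CR() != l2cr_val)	/* always 0L when CONFIG_6xx is unset */
		_set_L2CR(l2cr_val);	/* expands to an empty do-while there */
}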
(The diff for the remaining 128 changed files is not shown.)