Disintegrate asm/system.h for X86
Disintegrate asm/system.h for X86.

Signed-off-by: David Howells <[email protected]>
Acked-by: H. Peter Anvin <[email protected]>
cc: [email protected]
dhowells committed Mar 28, 2012
1 parent 778aae8 commit f05e798
Showing 50 changed files with 554 additions and 562 deletions.
1 change: 0 additions & 1 deletion arch/x86/ia32/ia32_aout.c
@@ -26,7 +26,6 @@
#include <linux/init.h>
#include <linux/jiffies.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
1 change: 0 additions & 1 deletion arch/x86/include/asm/apic.h
@@ -11,7 +11,6 @@
#include <linux/atomic.h>
#include <asm/fixmap.h>
#include <asm/mpspec.h>
#include <asm/system.h>
#include <asm/msr.h>

#define ARCH_APICTIMER_STOPS_ON_C3 1
7 changes: 7 additions & 0 deletions arch/x86/include/asm/auxvec.h
@@ -9,4 +9,11 @@
#endif
#define AT_SYSINFO_EHDR 33

/* entries in ARCH_DLINFO: */
#if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64)
# define AT_VECTOR_SIZE_ARCH 2
#else /* else it's non-compat x86-64 */
# define AT_VECTOR_SIZE_ARCH 1
#endif

#endif /* _ASM_X86_AUXVEC_H */
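
For context (not part of the diff): AT_VECTOR_SIZE_ARCH counts the architecture-specific entries that ARCH_DLINFO adds to the ELF auxiliary vector — 32-bit and compat tasks get both AT_SYSINFO and AT_SYSINFO_EHDR, while native 64-bit tasks get only AT_SYSINFO_EHDR. A rough sketch of how the generic headers consume the constant follows; the AT_VECTOR_SIZE_BASE value shown is illustrative and version-dependent.

/* Illustrative sketch, modelled on include/linux/auxvec.h of this era:
 * the per-arch count helps size the auxv snapshot kept in mm_struct
 * and exposed through /proc/PID/auxv. */
#define AT_VECTOR_SIZE_BASE 19  /* generic AT_* entries (illustrative value) */
#define AT_VECTOR_SIZE (2 * (AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))

/* ... and, inside struct mm_struct: */
unsigned long saved_auxv[AT_VECTOR_SIZE];       /* backs /proc/PID/auxv */
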
116 changes: 116 additions & 0 deletions arch/x86/include/asm/barrier.h
@@ -0,0 +1,116 @@
#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
* to devices.
*/

#ifdef CONFIG_X86_32
/*
* Some non-Intel clones support out of order store. wmb() ceases to be a
* nop for these.
*/
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
#define wmb() asm volatile("sfence" ::: "memory")
#endif

/**
* read_barrier_depends - Flush all pending reads that subsequent reads
* depend on.
*
* No data-dependent reads from memory-like regions are ever reordered
* over this barrier. All reads preceding this primitive are guaranteed
* to access memory (but not necessarily other CPUs' caches) before any
* reads following this primitive that depend on the data returned by
* any of the preceding reads. This primitive is much lighter weight than
* rmb() on most CPUs, and is never heavier weight than is
* rmb().
*
* These ordering constraints are respected by both the local CPU
* and the compiler.
*
* Ordering is not guaranteed by anything other than these primitives,
* not even by data dependencies. See the documentation for
* memory_barrier() for examples and URLs to more information.
*
* For example, the following code would force ordering (the initial
* value of "a" is zero, "b" is one, and "p" is "&a"):
*
* <programlisting>
* CPU 0 CPU 1
*
* b = 2;
* memory_barrier();
* p = &b; q = p;
* read_barrier_depends();
* d = *q;
* </programlisting>
*
* because the read of "*q" depends on the read of "p" and these
* two reads are separated by a read_barrier_depends(). However,
* the following code, with the same initial values for "a" and "b":
*
* <programlisting>
* CPU 0 CPU 1
*
* a = 2;
* memory_barrier();
* b = 3; y = b;
* read_barrier_depends();
* x = a;
* </programlisting>
*
* does not enforce ordering, since there is no data dependency between
* the read of "a" and the read of "b". Therefore, on some CPUs, such
* as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
* in cases like this where there are no data dependencies.
**/

#define read_barrier_depends() do { } while (0)

#ifdef CONFIG_SMP
#define smp_mb() mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb() rmb()
#else
# define smp_rmb() barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb() wmb()
#else
# define smp_wmb() barrier()
#endif
#define smp_read_barrier_depends() read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif

/*
* Stop RDTSC speculation. This is needed when you need to use RDTSC
* (or get_cycles or vread that possibly accesses the TSC) in a defined
* code region.
*
* (Could use a three-way alternative for this if there was one.)
*/
static __always_inline void rdtsc_barrier(void)
{
alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}

#endif /* _ASM_X86_BARRIER_H */
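
For readers new to these primitives, here is a minimal sketch of how the macros introduced in barrier.h are typically paired (illustrative only, not part of the commit). The names payload, ready and timed_region are invented for the example; smp_wmb(), smp_rmb(), rdtsc_barrier(), get_cycles() and pr_info() are existing kernel interfaces.

#include <linux/kernel.h>       /* pr_info() */
#include <asm/barrier.h>        /* smp_wmb(), smp_rmb(), rdtsc_barrier() */
#include <asm/tsc.h>            /* get_cycles(), cycles_t */

/* Illustrative sketch: publishing data with smp_wmb()/smp_rmb().
 * On x86 the write barrier usually compiles down to a compiler barrier,
 * but the pairing is what makes the pattern portable. */
static int payload;
static int ready;

static void producer(void)
{
        payload = 42;           /* write the data ...                     */
        smp_wmb();              /* ... before making the flag observable  */
        ready = 1;
}

static void consumer(void)
{
        if (ready) {            /* a real caller would use ACCESS_ONCE() here */
                smp_rmb();      /* order the flag read before the data read   */
                pr_info("payload=%d\n", payload);
        }
}

/* Illustrative sketch: rdtsc_barrier() keeps RDTSC from speculating
 * into or out of the region being timed. */
static void timed_region(void)
{
        cycles_t t0, t1;

        rdtsc_barrier();
        t0 = get_cycles();
        /* ... code under test ... */
        rdtsc_barrier();
        t1 = get_cycles();
        pr_info("took %llu cycles\n", (unsigned long long)(t1 - t0));
}
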
4 changes: 4 additions & 0 deletions arch/x86/include/asm/bug.h
@@ -36,4 +36,8 @@ do { \
#endif /* !CONFIG_BUG */

#include <asm-generic/bug.h>


extern void show_regs_common(void);

#endif /* _ASM_X86_BUG_H */
1 change: 1 addition & 0 deletions arch/x86/include/asm/cacheflush.h
@@ -3,6 +3,7 @@

/* Caches aren't brain-dead on the intel. */
#include <asm-generic/cacheflush.h>
#include <asm/special_insns.h>

#ifdef CONFIG_X86_PAT
/*
1 change: 0 additions & 1 deletion arch/x86/include/asm/elf.h
@@ -84,7 +84,6 @@ extern unsigned int vdso_enabled;
(((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))

#include <asm/processor.h>
#include <asm/system.h>

#ifdef CONFIG_X86_32
#include <asm/desc.h>
1 change: 1 addition & 0 deletions arch/x86/include/asm/exec.h
@@ -0,0 +1 @@
/* define arch_align_stack() here */
1 change: 0 additions & 1 deletion arch/x86/include/asm/futex.h
@@ -9,7 +9,6 @@
#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/system.h>

#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
asm volatile("1:\t" insn "\n" \
1 change: 0 additions & 1 deletion arch/x86/include/asm/i387.h
@@ -14,7 +14,6 @@

#include <linux/sched.h>
#include <linux/hardirq.h>
#include <asm/system.h>

struct pt_regs;
struct user_i387_struct;
1 change: 0 additions & 1 deletion arch/x86/include/asm/local.h
@@ -3,7 +3,6 @@

#include <linux/percpu.h>

#include <asm/system.h>
#include <linux/atomic.h>
#include <asm/asm.h>

1 change: 0 additions & 1 deletion arch/x86/include/asm/mc146818rtc.h
@@ -5,7 +5,6 @@
#define _ASM_X86_MC146818RTC_H

#include <asm/io.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <linux/mc146818rtc.h>

31 changes: 30 additions & 1 deletion arch/x86/include/asm/processor.h
@@ -14,13 +14,13 @@ struct mm_struct;
#include <asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>

#include <linux/personality.h>
#include <linux/cpumask.h>
@@ -29,6 +29,15 @@ struct mm_struct;
#include <linux/math64.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/irqflags.h>

/*
* We handle most unaligned accesses in hardware. On the other hand
* unaligned DMA can be quite expensive on some Nehalem processors.
*
* Based on this we disable the IP header alignment in network drivers.
*/
#define NET_IP_ALIGN 0

#define HBP_NUM 4
/*
@@ -1022,4 +1031,24 @@ extern bool cpu_has_amd_erratum(const int *);
#define cpu_has_amd_erratum(x) (false)
#endif /* CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_X86_32
/*
* disable hlt during certain critical i/o operations
*/
#define HAVE_DISABLE_HLT
#endif

void disable_hlt(void);
void enable_hlt(void);

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);
bool set_pm_idle_to_default(void);

void stop_this_cpu(void *dummy);

#endif /* _ASM_X86_PROCESSOR_H */
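
Two of the additions above are easier to read with a typical call site in mind. The sketch below is illustrative only and not part of the commit; dev, frame_len and the critical-I/O placeholder are invented, while netdev_alloc_skb(), skb_reserve(), disable_hlt() and enable_hlt() are the real interfaces involved.

#include <linux/netdevice.h>    /* netdev_alloc_skb() */
#include <linux/skbuff.h>       /* skb_reserve() */
#include <asm/processor.h>      /* NET_IP_ALIGN, disable_hlt(), enable_hlt() */

/* NET_IP_ALIGN: drivers reserve this much headroom so the IP header ends up
 * aligned.  x86 defines it as 0, so the reserve is a no-op here, but driver
 * code is written against the generic value. */
static struct sk_buff *rx_alloc(struct net_device *dev, unsigned int frame_len)
{
        struct sk_buff *skb = netdev_alloc_skb(dev, frame_len + NET_IP_ALIGN);

        if (skb)
                skb_reserve(skb, NET_IP_ALIGN);
        return skb;
}

/* disable_hlt()/enable_hlt(): bracket I/O that must not see the CPU halted;
 * the x86_32 case advertises the facility via HAVE_DISABLE_HLT. */
static void critical_io(void)
{
        disable_hlt();
        /* ... perform the I/O that must not race a halted CPU ... */
        enable_hlt();
}
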
58 changes: 56 additions & 2 deletions arch/x86/include/asm/segment.h
@@ -212,7 +212,61 @@
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][10];
#endif
#endif

/*
* Load a segment. Fall back on loading the zero
* segment if something goes wrong..
*/
#define loadsegment(seg, value) \
do { \
unsigned short __val = (value); \
\
asm volatile(" \n" \
"1: movl %k0,%%" #seg " \n" \
\
".section .fixup,\"ax\" \n" \
"2: xorl %k0,%k0 \n" \
" jmp 1b \n" \
".previous \n" \
\
_ASM_EXTABLE(1b, 2b) \
\
: "+r" (__val) : : "memory"); \
} while (0)

/*
* Save a segment register away
*/
#define savesegment(seg, value) \
asm("mov %%" #seg ",%0":"=r" (value) : : "memory")

/*
* x86_32 user gs accessors.
*/
#ifdef CONFIG_X86_32
#ifdef CONFIG_X86_32_LAZY_GS
#define get_user_gs(regs) (u16)({unsigned long v; savesegment(gs, v); v;})
#define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v))
#define task_user_gs(tsk) ((tsk)->thread.gs)
#define lazy_save_gs(v) savesegment(gs, (v))
#define lazy_load_gs(v) loadsegment(gs, (v))
#else /* X86_32_LAZY_GS */
#define get_user_gs(regs) (u16)((regs)->gs)
#define set_user_gs(regs, v) do { (regs)->gs = (v); } while (0)
#define task_user_gs(tsk) (task_pt_regs(tsk)->gs)
#define lazy_save_gs(v) do { } while (0)
#define lazy_load_gs(v) do { } while (0)
#endif /* X86_32_LAZY_GS */
#endif /* X86_32 */

static inline unsigned long get_limit(unsigned long segment)
{
unsigned long __limit;
asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
return __limit + 1;
}

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */

#endif /* _ASM_X86_SEGMENT_H */
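
To make the fixup-based loadsegment() easier to follow, here is a minimal usage sketch (illustrative only, not from the commit): a selector is saved with savesegment() and later reloaded with loadsegment(); if the reload faults, the .fixup stub zeroes the register so the kernel ends up with the null selector rather than an oops. The function name juggle_fs is invented; loadsegment(), savesegment() and get_limit() are the macros and helper moved into this header.

#include <asm/segment.h>        /* loadsegment(), savesegment(), get_limit() */

static void juggle_fs(void)
{
        unsigned short sel;
        unsigned long limit;

        savesegment(fs, sel);   /* snapshot the current %fs selector          */
        /* ... run code that may clobber %fs ... */
        loadsegment(fs, sel);   /* reload it; a bad selector becomes 0        */

        limit = get_limit(sel); /* descriptor limit in bytes, plus one        */
        (void)limit;
}
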
