Merge branch 'x86-smap-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86/smap support from Ingo Molnar:
 "This adds support for the SMAP (Supervisor Mode Access Prevention) CPU
  feature on Intel CPUs: a hardware feature that prevents unintended
  user-space data access from kernel privileged code.

  It's turned on automatically when possible.

  This, in combination with SMEP, makes it even harder to exploit kernel
  bugs such as NULL pointer dereferences."

Fix up trivial conflict in arch/x86/kernel/entry_64.S due to newly added
includes right next to each other.
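
For background: with CR4.SMAP set, any load or store from kernel mode to a user-mode page faults unless EFLAGS.AC is set. This series therefore brackets every deliberate user access with the new STAC (set AC) and CLAC (clear AC) instructions; on CPUs without SMAP the alternatives mechanism patches them down to NOPs. A minimal sketch of the pattern, using the ASM_STAC/ASM_CLAC macros the series adds in <asm/smap.h> (real uaccess code also attaches an exception-table fixup, as the hunks below do):

/* Illustrative sketch only -- not code from this series. */
static inline u32 read_user_word(const u32 __user *p)
{
	u32 val;

	asm volatile(ASM_STAC "\n\t"	/* set EFLAGS.AC: user access permitted */
		     "movl %1, %0\n\t"	/* the one deliberate user-space load   */
		     ASM_CLAC		/* clear EFLAGS.AC: user access faults  */
		     : "=r" (val)
		     : "m" (*p));
	return val;
}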

* 'x86-smap-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, smep, smap: Make the switching functions one-way
  x86, suspend: On wakeup always initialize cr4 and EFER
  x86-32: Start out eflags and cr4 clean
  x86, smap: Do not abuse the [f][x]rstor_checking() functions for user space
  x86-32, smap: Add STAC/CLAC instructions to 32-bit kernel entry
  x86, smap: Reduce the SMAP overhead for signal handling
  x86, smap: A page fault due to SMAP is an oops
  x86, smap: Turn on Supervisor Mode Access Prevention
  x86, smap: Add STAC and CLAC instructions to control user space access
  x86, uaccess: Merge prototypes for clear_user/__clear_user
  x86, smap: Add a header file with macros for STAC/CLAC
  x86, alternative: Add header guards to <asm/alternative-asm.h>
  x86, alternative: Use .pushsection/.popsection
  x86, smap: Add CR4 bit for SMAP
  x86-32, mm: The WP test should be done on a kernel page
torvalds committed Oct 1, 2012
2 parents a57d985 + b2cc2a0 commit 15385df
Showing 31 changed files with 410 additions and 116 deletions.
6 changes: 5 additions & 1 deletion Documentation/kernel-parameters.txt
@@ -1812,8 +1812,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
noexec=on: enable non-executable mappings (default)
noexec=off: disable non-executable mappings

nosmap [X86]
Disable SMAP (Supervisor Mode Access Prevention)
even if it is supported by processor.

nosmep [X86]
Disable SMEP (Supervisor Mode Execution Protection)
Disable SMEP (Supervisor Mode Execution Prevention)
even if it is supported by processor.

noexec32 [X86-64]
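
Usage note: the new nosmap switch is parsed at early boot and simply masks the CPUID feature bit, so later CPU setup never enables CR4.SMAP. A sketch of how such a handler is typically wired up (the real one lives in arch/x86/kernel/cpu/common.c and may differ in detail):

/* Sketch only; see arch/x86/kernel/cpu/common.c for the real handler. */
static int __init setup_disable_smap(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMAP);	/* hide SMAP from all feature checks */
	return 1;
}
__setup("nosmap", setup_disable_smap);
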
11 changes: 11 additions & 0 deletions arch/x86/Kconfig
@@ -1493,6 +1493,17 @@ config ARCH_RANDOM
If supported, this is a high bandwidth, cryptographically
secure hardware random number generator.

config X86_SMAP
def_bool y
prompt "Supervisor Mode Access Prevention" if EXPERT
---help---
Supervisor Mode Access Prevention (SMAP) is a security
feature in newer Intel processors. There is a small
performance cost if this is enabled and turned on; there is
also a small increase in the kernel size if this is enabled.

If unsure, say Y.

config EFI
bool "EFI runtime service support"
depends on ACPI
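
Aside: CPU support for these features is reported by CPUID leaf 7, subleaf 0 — SMEP is EBX bit 7 and SMAP is EBX bit 20. A small user-space probe using GCC's <cpuid.h> (assumes the CPU implements leaf 7):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	__cpuid_count(7, 0, eax, ebx, ecx, edx);	/* structured extended feature flags */
	printf("SMEP: %s\n", (ebx & (1u <<  7)) ? "yes" : "no");
	printf("SMAP: %s\n", (ebx & (1u << 20)) ? "yes" : "no");
	return 0;
}
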
13 changes: 8 additions & 5 deletions arch/x86/ia32/ia32_signal.c
@@ -32,6 +32,7 @@
#include <asm/sigframe.h>
#include <asm/sighandling.h>
#include <asm/sys_ia32.h>
#include <asm/smap.h>

#define FIX_EFLAGS __FIX_EFLAGS

@@ -251,11 +252,12 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,

get_user_ex(tmp, &sc->fpstate);
buf = compat_ptr(tmp);
err |= restore_xstate_sig(buf, 1);

get_user_ex(*pax, &sc->ax);
} get_user_catch(err);

err |= restore_xstate_sig(buf, 1);

return err;
}

@@ -506,7 +508,6 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
put_user_ex(sig, &frame->sig);
put_user_ex(ptr_to_compat(&frame->info), &frame->pinfo);
put_user_ex(ptr_to_compat(&frame->uc), &frame->puc);
err |= copy_siginfo_to_user32(&frame->info, info);

/* Create the ucontext. */
if (cpu_has_xsave)
@@ -518,9 +519,6 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
put_user_ex(sas_ss_flags(regs->sp),
&frame->uc.uc_stack.ss_flags);
put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

if (ka->sa.sa_flags & SA_RESTORER)
restorer = ka->sa.sa_restorer;
@@ -536,6 +534,11 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
} put_user_catch(err);

err |= copy_siginfo_to_user32(&frame->info, info);
err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

if (err)
return -EFAULT;

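
The reordering above is the "Reduce the SMAP overhead for signal handling" patch at work: a get_user_try/put_user_try ... catch block is treated as one user-access region (a single STAC/CLAC pair), so helpers such as copy_siginfo_to_user32() and __copy_to_user(), which do their own bracketing internally, are hoisted out of the block instead of nesting a second region inside it. Schematically (illustrative shape only, not the actual frame-setup code):

/* Before: helper call nested inside the user-access region. */
put_user_try {
	put_user_ex(sig, &frame->sig);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
} put_user_catch(err);

/* After: the region holds only put_user_ex() accesses; helpers that
 * manage their own STAC/CLAC run outside it. */
put_user_try {
	put_user_ex(sig, &frame->sig);
} put_user_catch(err);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
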
6 changes: 6 additions & 0 deletions arch/x86/ia32/ia32entry.S
@@ -14,6 +14,7 @@
#include <asm/segment.h>
#include <asm/irqflags.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <linux/linkage.h>
#include <linux/err.h>

@@ -146,8 +147,10 @@ ENTRY(ia32_sysenter_target)
SAVE_ARGS 0,1,0
/* no need to do an access_ok check here because rbp has been
32bit zero extended */
ASM_STAC
1: movl (%rbp),%ebp
_ASM_EXTABLE(1b,ia32_badarg)
ASM_CLAC
orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
CFI_REMEMBER_STATE
@@ -301,8 +304,10 @@ ENTRY(ia32_cstar_target)
/* no need to do an access_ok check here because r8 has been
32bit zero extended */
/* hardware stack frame is complete now */
ASM_STAC
1: movl (%r8),%r9d
_ASM_EXTABLE(1b,ia32_badarg)
ASM_CLAC
orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
CFI_REMEMBER_STATE
@@ -365,6 +370,7 @@ cstar_tracesys:
END(ia32_cstar_target)

ia32_badarg:
ASM_CLAC
movq $-EFAULT,%rax
jmp ia32_sysret
CFI_ENDPROC
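
In both entry-path hunks the kernel fetches the sixth system-call argument from the user stack (via %rbp or %r8), so that single load is bracketed with ASM_STAC/ASM_CLAC and covered by an exception-table entry; ia32_badarg gains its own ASM_CLAC because a fault can arrive there with AC still set. Annotated sketch of the pattern above (not new code):

	ASM_STAC			/* open the user-access window         */
1:	movl (%rbp),%ebp		/* the one permitted user-space load   */
	_ASM_EXTABLE(1b,ia32_badarg)	/* route a fault at 1: to the stub     */
	ASM_CLAC			/* close the window again              */
	...
ia32_badarg:
	ASM_CLAC			/* fault path: AC would leak if not cleared */
	movq $-EFAULT,%rax
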
9 changes: 7 additions & 2 deletions arch/x86/include/asm/alternative-asm.h
@@ -1,14 +1,17 @@
#ifndef _ASM_X86_ALTERNATIVE_ASM_H
#define _ASM_X86_ALTERNATIVE_ASM_H

#ifdef __ASSEMBLY__

#include <asm/asm.h>

#ifdef CONFIG_SMP
.macro LOCK_PREFIX
672: lock
.section .smp_locks,"a"
.pushsection .smp_locks,"a"
.balign 4
.long 672b - .
.previous
.popsection
.endm
#else
.macro LOCK_PREFIX
@@ -24,3 +27,5 @@
.endm

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_ALTERNATIVE_ASM_H */
32 changes: 16 additions & 16 deletions arch/x86/include/asm/alternative.h
@@ -29,10 +29,10 @@

#ifdef CONFIG_SMP
#define LOCK_PREFIX_HERE \
".section .smp_locks,\"a\"\n" \
".balign 4\n" \
".long 671f - .\n" /* offset */ \
".previous\n" \
".pushsection .smp_locks,\"a\"\n" \
".balign 4\n" \
".long 671f - .\n" /* offset */ \
".popsection\n" \
"671:"

#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "
@@ -99,30 +99,30 @@ static inline int alternatives_text_reserved(void *start, void *end)
/* alternative assembly primitive: */
#define ALTERNATIVE(oldinstr, newinstr, feature) \
OLDINSTR(oldinstr) \
".section .altinstructions,\"a\"\n" \
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(feature, 1) \
".previous\n" \
".section .discard,\"aw\",@progbits\n" \
".popsection\n" \
".pushsection .discard,\"aw\",@progbits\n" \
DISCARD_ENTRY(1) \
".previous\n" \
".section .altinstr_replacement, \"ax\"\n" \
".popsection\n" \
".pushsection .altinstr_replacement, \"ax\"\n" \
ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
".previous"
".popsection"

#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
OLDINSTR(oldinstr) \
".section .altinstructions,\"a\"\n" \
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(feature1, 1) \
ALTINSTR_ENTRY(feature2, 2) \
".previous\n" \
".section .discard,\"aw\",@progbits\n" \
".popsection\n" \
".pushsection .discard,\"aw\",@progbits\n" \
DISCARD_ENTRY(1) \
DISCARD_ENTRY(2) \
".previous\n" \
".section .altinstr_replacement, \"ax\"\n" \
".popsection\n" \
".pushsection .altinstr_replacement, \"ax\"\n" \
ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
".previous"
".popsection"

/*
* This must be included *after* the definition of ALTERNATIVE due to
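
The .section/.previous to .pushsection/.popsection conversions matter because .previous only swaps between the current section and the immediately preceding one. If an ALTERNATIVE() — or ASM_STAC/ASM_CLAC, which expand to alternatives — is emitted while the assembler is already inside a pushed section, .previous lands in the wrong place; .pushsection/.popsection keep a real stack instead. Schematically:

.pushsection .data
	# ...inside .data; now a nested macro switches sections:
	.pushsection .altinstructions, "a"
	# ...emit an entry...
	.popsection		# returns exactly to .data
.popsection			# returns to whatever preceded .data

# With .section/.previous, the inner pair would merely bounce between
# the two most recent sections, so later output could land in the
# wrong section.
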
42 changes: 38 additions & 4 deletions arch/x86/include/asm/fpu-internal.h
@@ -21,6 +22,7 @@
#include <asm/user.h>
#include <asm/uaccess.h>
#include <asm/xsave.h>
#include <asm/smap.h>

#ifdef CONFIG_X86_64
# include <asm/sigcontext32.h>
@@ -121,6 +122,22 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
__sanitize_i387_state(tsk);
}

#define user_insn(insn, output, input...) \
({ \
int err; \
asm volatile(ASM_STAC "\n" \
"1:" #insn "\n\t" \
"2: " ASM_CLAC "\n" \
".section .fixup,\"ax\"\n" \
"3: movl $-1,%[err]\n" \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE(1b, 3b) \
: [err] "=r" (err), output \
: "0"(0), input); \
err; \
})

#define check_insn(insn, output, input...) \
({ \
int err; \
@@ -138,18 +155,18 @@ static inline void sanitize_i387_state(struct task_struct *tsk)

static inline int fsave_user(struct i387_fsave_struct __user *fx)
{
return check_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx));
return user_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx));
}

static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
if (config_enabled(CONFIG_X86_32))
return check_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
else if (config_enabled(CONFIG_AS_FXSAVEQ))
return check_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));

/* See comment in fpu_fxsave() below. */
return check_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
}

static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
@@ -164,11 +181,28 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
"m" (*fx));
}

static inline int fxrstor_user(struct i387_fxsave_struct __user *fx)
{
if (config_enabled(CONFIG_X86_32))
return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
else if (config_enabled(CONFIG_AS_FXSAVEQ))
return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

/* See comment in fpu_fxsave() below. */
return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
"m" (*fx));
}

static inline int frstor_checking(struct i387_fsave_struct *fx)
{
return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int frstor_user(struct i387_fsave_struct __user *fx)
{
return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void fpu_fxsave(struct fpu *fpu)
{
if (config_enabled(CONFIG_X86_32))
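
The new user_insn() variant differs from check_insn() only by the ASM_STAC/ASM_CLAC bracketing, needed because these FPU save/restore instructions read or write user-space buffers. Instantiated by hand for fxsave_user() it expands to roughly this (sketch, modulo exact operand numbering):

/* Hand-expanded sketch of user_insn(fxsave %[fx], ...). */
int err;
asm volatile(ASM_STAC "\n"		/* permit the user-space access   */
	     "1: fxsave %[fx]\n\t"
	     "2: " ASM_CLAC "\n"	/* forbid user access again       */
	     ".section .fixup,\"ax\"\n"
	     "3: movl $-1,%[err]\n"	/* faulted: report failure        */
	     "   jmp 2b\n"		/* resume at 2:, so the CLAC still runs */
	     ".previous\n"
	     _ASM_EXTABLE(1b, 3b)	/* route faults at 1: to 3:       */
	     : [err] "=r" (err), [fx] "=m" (*fx)
	     : "0" (0), "m" (*fx));
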
19 changes: 13 additions & 6 deletions arch/x86/include/asm/futex.h
@@ -9,10 +9,13 @@
#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/smap.h>

#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
asm volatile("1:\t" insn "\n" \
"2:\t.section .fixup,\"ax\"\n" \
asm volatile("\t" ASM_STAC "\n" \
"1:\t" insn "\n" \
"2:\t" ASM_CLAC "\n" \
"\t.section .fixup,\"ax\"\n" \
"3:\tmov\t%3, %1\n" \
"\tjmp\t2b\n" \
"\t.previous\n" \
@@ -21,12 +24,14 @@
: "i" (-EFAULT), "0" (oparg), "1" (0))

#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
asm volatile("1:\tmovl %2, %0\n" \
asm volatile("\t" ASM_STAC "\n" \
"1:\tmovl %2, %0\n" \
"\tmovl\t%0, %3\n" \
"\t" insn "\n" \
"2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
"\tjnz\t1b\n" \
"3:\t.section .fixup,\"ax\"\n" \
"3:\t" ASM_CLAC "\n" \
"\t.section .fixup,\"ax\"\n" \
"4:\tmov\t%5, %1\n" \
"\tjmp\t3b\n" \
"\t.previous\n" \
@@ -122,8 +127,10 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;

asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
"2:\t.section .fixup, \"ax\"\n"
asm volatile("\t" ASM_STAC "\n"
"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
"2:\t" ASM_CLAC "\n"
"\t.section .fixup, \"ax\"\n"
"3:\tmov %3, %0\n"
"\tjmp 2b\n"
"\t.previous\n"
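
The futex helpers follow the same discipline: each atomic read-modify-write on the user-space futex word becomes one STAC ... CLAC region, and every fixup path jumps back in at the CLAC, so EFLAGS.AC can never leak out set. An illustrative caller (the variable names here are invented; the helper signature is the one in the hunk above):

/* Hypothetical caller sketch; `uaddr` and `new_owner` are made up. */
u32 cur;
int ret = futex_atomic_cmpxchg_inatomic(&cur, uaddr, 0, new_owner);
if (ret == -EFAULT) {
	/* The access faulted even with EFLAGS.AC set, i.e. a genuinely
	 * bad user pointer rather than an SMAP violation. */
}
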
1 change: 1 addition & 0 deletions arch/x86/include/asm/processor-flags.h
@@ -65,6 +65,7 @@
#define X86_CR4_PCIDE 0x00020000 /* enable PCID support */
#define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */
#define X86_CR4_SMEP 0x00100000 /* enable SMEP support */
#define X86_CR4_SMAP 0x00200000 /* enable SMAP support */

/*
* x86-64 Task Priority Register, CR8
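
With X86_CR4_SMAP defined, the "Turn on Supervisor Mode Access Prevention" patch in this series can set the bit during CPU setup whenever CPUID reports the feature and nosmap was not given. A sketch of that enable path (the real setup code in arch/x86/kernel/cpu/common.c may differ in detail):

/* Sketch only; a hedged guess at the shape of the real setup code. */
static void setup_smap(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_SMAP))
		set_in_cr4(X86_CR4_SMAP);	/* CR4 bit 21, defined above */
}
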
(diffs for the remaining 22 of 31 changed files are not shown)
