Merge branch 'core-objtool-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull objtool updates from Ingo Molnar:
 "This is a series from Peter Zijlstra that adds x86 build-time uaccess
  validation of SMAP to objtool, which will detect and warn about the
  following uaccess API usage bugs and weirdnesses:

   - call to %s() with UACCESS enabled
   - return with UACCESS enabled
   - return with UACCESS disabled from a UACCESS-safe function
   - recursive UACCESS enable
   - redundant UACCESS disable
   - UACCESS-safe disables UACCESS

  As it turns out, not leaking uaccess permissions outside the intended
  uaccess functionality is hard when the interfaces are complex and when
  such bugs are mostly dormant.
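
  As an illustration, the first warning above fires whenever a function
  is called while the uaccess window is open. A hedged sketch (identifiers
  hypothetical, not code from this series): objtool would reject the
  memset() call below with "call to memset() with UACCESS enabled",
  since the callee (and anything it calls) would run with AC=1:

	static int clear_and_store(unsigned int __user *uptr)
	{
		unsigned int val;

		if (!user_access_begin(uptr, sizeof(*uptr)))
			return -EFAULT;
		memset(&val, 0, sizeof(val));	/* BAD: function call with AC=1 */
		unsafe_put_user(val, uptr, efault);
		user_access_end();
		return 0;

	efault:
		user_access_end();
		return -EFAULT;
	}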

  As a bonus we now also check the DF flag. We had at least one
  high-profile bug in that area in the early days of Linux, and the
  checking is fairly simple. The checks performed and warnings emitted
  are:

   - call to %s() with DF set
   - return with DF set
   - return with modified stack frame
   - recursive STD
   - redundant CLD
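
  A hedged illustration of the DF checks (not code from this series):
  the kernel ABI assumes DF=0 at every function entry and exit, so that
  string instructions run forward. A helper that sets DF must clear it
  before returning, or objtool now emits "return with DF set":

	/* Copies n bytes downward; callers pass pointers to the LAST byte. */
	static void copy_backwards(char *dst, const char *src, unsigned long n)
	{
		asm volatile("std\n\t"		/* DF=1: rep moves downward */
			     "rep movsb\n\t"
			     "cld"		/* required: omit this and objtool
						   warns "return with DF set" */
			     : "+D" (dst), "+S" (src), "+c" (n)
			     : : "memory", "cc");
	}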

  It's all x86-only for now, but later on this can also be used for PAN
  on ARM; objtool is fairly cross-platform in principle.

  While all warnings emitted by this new checking facility that were
  reported to us have been fixed, there might be GCC-version-dependent
  warnings that have not been reported yet; we'll address those, should
  they trigger.

  The warnings are non-fatal build warnings"

* 'core-objtool-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (27 commits)
  mm/uaccess: Use 'unsigned long' to placate UBSAN warnings on older GCC versions
  x86/uaccess: Dont leak the AC flag into __put_user() argument evaluation
  sched/x86_64: Don't save flags on context switch
  objtool: Add Direction Flag validation
  objtool: Add UACCESS validation
  objtool: Fix sibling call detection
  objtool: Rewrite alt->skip_orig
  objtool: Add --backtrace support
  objtool: Rewrite add_ignores()
  objtool: Handle function aliases
  objtool: Set insn->func for alternatives
  x86/uaccess, kcov: Disable stack protector
  x86/uaccess, ftrace: Fix ftrace_likely_update() vs. SMAP
  x86/uaccess, ubsan: Fix UBSAN vs. SMAP
  x86/uaccess, kasan: Fix KASAN vs SMAP
  x86/smap: Ditch __stringify()
  x86/uaccess: Introduce user_access_{save,restore}()
  x86/uaccess, signal: Fix AC=1 bloat
  x86/uaccess: Always inline user_access_begin()
  x86/uaccess, xen: Suppress SMAP warnings
  ...
torvalds committed May 6, 2019
2 parents 171c2bc + 29da93f commit 6ec6296
Showing 41 changed files with 602 additions and 219 deletions.
2 changes: 2 additions & 0 deletions arch/x86/entry/entry_32.S
@@ -650,6 +650,7 @@ ENTRY(__switch_to_asm)
 	pushl	%ebx
 	pushl	%edi
 	pushl	%esi
+	pushfl
 
 	/* switch stack */
 	movl	%esp, TASK_threadsp(%eax)
@@ -672,6 +673,7 @@ ENTRY(__switch_to_asm)
 #endif
 
 	/* restore callee-saved registers */
+	popfl
 	popl	%esi
 	popl	%edi
 	popl	%ebx
29 changes: 17 additions & 12 deletions arch/x86/ia32/ia32_signal.c
@@ -61,9 +61,8 @@
 } while (0)
 
 #define RELOAD_SEG(seg)		{		\
-	unsigned int pre = GET_SEG(seg);	\
+	unsigned int pre = (seg) | 3;		\
 	unsigned int cur = get_user_seg(seg);	\
-	pre |= 3;				\
 	if (pre != cur)				\
 		set_user_seg(seg, pre);		\
 }
@@ -72,23 +71,18 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
 				   struct sigcontext_32 __user *sc)
 {
 	unsigned int tmpflags, err = 0;
+	u16 gs, fs, es, ds;
 	void __user *buf;
 	u32 tmp;
 
 	/* Always make any pending restarted system calls return -EINTR */
 	current->restart_block.fn = do_no_restart_syscall;
 
 	get_user_try {
-		/*
-		 * Reload fs and gs if they have changed in the signal
-		 * handler. This does not handle long fs/gs base changes in
-		 * the handler, but does not clobber them at least in the
-		 * normal case.
-		 */
-		RELOAD_SEG(gs);
-		RELOAD_SEG(fs);
-		RELOAD_SEG(ds);
-		RELOAD_SEG(es);
+		gs = GET_SEG(gs);
+		fs = GET_SEG(fs);
+		ds = GET_SEG(ds);
+		es = GET_SEG(es);
 
 		COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
 		COPY(dx); COPY(cx); COPY(ip); COPY(ax);
@@ -106,6 +100,17 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
 		buf = compat_ptr(tmp);
 	} get_user_catch(err);
 
+	/*
+	 * Reload fs and gs if they have changed in the signal
+	 * handler. This does not handle long fs/gs base changes in
+	 * the handler, but does not clobber them at least in the
+	 * normal case.
+	 */
+	RELOAD_SEG(gs);
+	RELOAD_SEG(fs);
+	RELOAD_SEG(ds);
+	RELOAD_SEG(es);
+
 	err |= fpu__restore_sig(buf, 1);
 
 	force_iret();
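
Note the macro trick that makes this reshuffle possible: under get_user_try
the code now only fetches the selectors into local u16 variables, and
RELOAD_SEG(gs) afterwards expands against the local variable named gs, so the
set_user_seg() call happens with the uaccess window already closed. Roughly
(expansion sketched by hand from the macro above):

	/* RELOAD_SEG(gs), after this change, expands to: */
	{
		unsigned int pre = (gs) | 3;		/* local u16 gs; force RPL 3 */
		unsigned int cur = get_user_seg(gs);	/* currently loaded selector */
		if (pre != cur)
			set_user_seg(gs, pre);		/* function call, now with AC=0 */
	}
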
11 changes: 11 additions & 0 deletions arch/x86/include/asm/alternative-asm.h
@@ -19,6 +19,17 @@
 .endm
 #endif
 
+/*
+ * objtool annotation to ignore the alternatives and only consider the original
+ * instruction(s).
+ */
+.macro ANNOTATE_IGNORE_ALTERNATIVE
+	.Lannotate_\@:
+	.pushsection .discard.ignore_alts
+	.long .Lannotate_\@ - .
+	.popsection
+.endm
+
 /*
  * Issue one struct alt_instr descriptor entry (need to put it into
  * the section .altinstructions, see below). This entry contains
10 changes: 10 additions & 0 deletions arch/x86/include/asm/alternative.h
@@ -45,6 +45,16 @@
 #define LOCK_PREFIX ""
 #endif
 
+/*
+ * objtool annotation to ignore the alternatives and only consider the original
+ * instruction(s).
+ */
+#define ANNOTATE_IGNORE_ALTERNATIVE			\
+	"999:\n\t"					\
+	".pushsection .discard.ignore_alts\n\t"		\
+	".long 999b - .\n\t"				\
+	".popsection\n\t"
+
 struct alt_instr {
 	s32 instr_offset;	/* original instruction */
 	s32 repl_offset;	/* offset to replacement instruction */
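
Both variants of ANNOTATE_IGNORE_ALTERNATIVE emit a self-relative entry into
the .discard.ignore_alts section, which objtool consumes and the kernel's
linker script later throws away. The Xen hypercall change further down in
this diff shows the intended C-level usage: place the annotation at the head
of the asm() whose alternative objtool should not follow:

	asm volatile(ANNOTATE_IGNORE_ALTERNATIVE
		     ASM_STAC ::: "memory", "flags");
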
24 changes: 0 additions & 24 deletions arch/x86/include/asm/asm.h
@@ -148,30 +148,6 @@
 	_ASM_PTR (entry);					\
 	.popsection
 
-.macro ALIGN_DESTINATION
-	/* check for bad alignment of destination */
-	movl %edi,%ecx
-	andl $7,%ecx
-	jz 102f				/* already aligned */
-	subl $8,%ecx
-	negl %ecx
-	subl %ecx,%edx
-100:	movb (%rsi),%al
-101:	movb %al,(%rdi)
-	incq %rsi
-	incq %rdi
-	decl %ecx
-	jnz 100b
-102:
-	.section .fixup,"ax"
-103:	addl %ecx,%edx			/* ecx is zerorest also */
-	jmp copy_user_handle_tail
-	.previous
-
-	_ASM_EXTABLE_UA(100b, 103b)
-	_ASM_EXTABLE_UA(101b, 103b)
-.endm
-
 #else
 # define _EXPAND_EXTABLE_HANDLE(x)	#x
 # define _ASM_EXTABLE_HANDLE(from, to, handler)			\
28 changes: 9 additions & 19 deletions arch/x86/include/asm/nospec-branch.h
@@ -10,6 +10,15 @@
 #include <asm/cpufeatures.h>
 #include <asm/msr-index.h>
 
+/*
+ * This should be used immediately before a retpoline alternative. It tells
+ * objtool where the retpolines are so that it can make sense of the control
+ * flow by just reading the original instruction(s) and ignoring the
+ * alternatives.
+ */
+#define ANNOTATE_NOSPEC_ALTERNATIVE	\
+	ANNOTATE_IGNORE_ALTERNATIVE
+
 /*
  * Fill the CPU return stack buffer.
  *
@@ -56,19 +65,6 @@
 
 #ifdef __ASSEMBLY__
 
-/*
- * This should be used immediately before a retpoline alternative. It tells
- * objtool where the retpolines are so that it can make sense of the control
- * flow by just reading the original instruction(s) and ignoring the
- * alternatives.
- */
-.macro ANNOTATE_NOSPEC_ALTERNATIVE
-	.Lannotate_\@:
-	.pushsection .discard.nospec
-	.long .Lannotate_\@ - .
-	.popsection
-.endm
-
 /*
  * This should be used immediately before an indirect jump/call. It tells
  * objtool the subsequent indirect jump/call is vouched safe for retpoline
@@ -152,12 +148,6 @@
 
 #else /* __ASSEMBLY__ */
 
-#define ANNOTATE_NOSPEC_ALTERNATIVE			\
-	"999:\n\t"					\
-	".pushsection .discard.nospec\n\t"		\
-	".long 999b - .\n\t"				\
-	".popsection\n\t"
-
 #define ANNOTATE_RETPOLINE_SAFE				\
 	"999:\n\t"					\
 	".pushsection .discard.retpoline_safe\n\t"	\
37 changes: 28 additions & 9 deletions arch/x86/include/asm/smap.h
@@ -13,13 +13,12 @@
 #ifndef _ASM_X86_SMAP_H
 #define _ASM_X86_SMAP_H
 
-#include <linux/stringify.h>
 #include <asm/nops.h>
 #include <asm/cpufeatures.h>
 
 /* "Raw" instruction opcodes */
-#define __ASM_CLAC	.byte 0x0f,0x01,0xca
-#define __ASM_STAC	.byte 0x0f,0x01,0xcb
+#define __ASM_CLAC	".byte 0x0f,0x01,0xca"
+#define __ASM_STAC	".byte 0x0f,0x01,0xcb"
 
 #ifdef __ASSEMBLY__
 
@@ -28,10 +27,10 @@
 #ifdef CONFIG_X86_SMAP
 
 #define ASM_CLAC \
-	ALTERNATIVE "", __stringify(__ASM_CLAC), X86_FEATURE_SMAP
+	ALTERNATIVE "", __ASM_CLAC, X86_FEATURE_SMAP
 
 #define ASM_STAC \
-	ALTERNATIVE "", __stringify(__ASM_STAC), X86_FEATURE_SMAP
+	ALTERNATIVE "", __ASM_STAC, X86_FEATURE_SMAP
 
 #else /* CONFIG_X86_SMAP */
 
@@ -49,26 +48,46 @@
 static __always_inline void clac(void)
 {
 	/* Note: a barrier is implicit in alternative() */
-	alternative("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP);
+	alternative("", __ASM_CLAC, X86_FEATURE_SMAP);
 }
 
 static __always_inline void stac(void)
 {
 	/* Note: a barrier is implicit in alternative() */
-	alternative("", __stringify(__ASM_STAC), X86_FEATURE_SMAP);
+	alternative("", __ASM_STAC, X86_FEATURE_SMAP);
 }
 
+static __always_inline unsigned long smap_save(void)
+{
+	unsigned long flags;
+
+	asm volatile (ALTERNATIVE("", "pushf; pop %0; " __ASM_CLAC,
+				  X86_FEATURE_SMAP)
+		      : "=rm" (flags) : : "memory", "cc");
+
+	return flags;
+}
+
+static __always_inline void smap_restore(unsigned long flags)
+{
+	asm volatile (ALTERNATIVE("", "push %0; popf", X86_FEATURE_SMAP)
+		      : : "g" (flags) : "memory", "cc");
+}
+
 /* These macros can be used in asm() statements */
 #define ASM_CLAC \
-	ALTERNATIVE("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP)
+	ALTERNATIVE("", __ASM_CLAC, X86_FEATURE_SMAP)
 #define ASM_STAC \
-	ALTERNATIVE("", __stringify(__ASM_STAC), X86_FEATURE_SMAP)
+	ALTERNATIVE("", __ASM_STAC, X86_FEATURE_SMAP)
 
 #else /* CONFIG_X86_SMAP */
 
 static inline void clac(void) { }
 static inline void stac(void) { }
 
+static inline unsigned long smap_save(void) { return 0; }
+static inline void smap_restore(unsigned long flags) { }
+
 #define ASM_CLAC
 #define ASM_STAC
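
smap_save() snapshots EFLAGS (including AC) and closes the user-access
window; smap_restore() reopens it only if it was open when saved. This is
what lets code that may be entered with AC=1 (the KASAN/UBSAN/ftrace fixes in
the shortlog) do real work safely. A hedged sketch of the pattern
(identifiers hypothetical; user_access_save/restore are defined in uaccess.h
below):

	void check_access_hook(const volatile void *addr, unsigned long size)
	{
		/* May be reached from inside a user_access_begin() region. */
		unsigned long flags = user_access_save();	/* smap_save() */

		validate_access(addr, size);	/* hypothetical helper, runs with AC=0 */

		user_access_restore(flags);			/* smap_restore() */
	}
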
1 change: 1 addition & 0 deletions arch/x86/include/asm/switch_to.h
@@ -46,6 +46,7 @@ struct inactive_task_frame {
 	unsigned long r13;
 	unsigned long r12;
 #else
+	unsigned long flags;
 	unsigned long si;
 	unsigned long di;
 #endif
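
The new flags member sits first in the 32-bit branch because struct
inactive_task_frame describes the frame laid down by __switch_to_asm, and the
stack grows downward: the last push lands at the lowest address. A hedged
sketch of the correspondence (assuming the usual push sequence; only the
pushes visible in the entry_32.S hunk above are certain):

	/*
	 * __switch_to_asm pushes:	struct inactive_task_frame (low -> high):
	 *	pushl %ebp		unsigned long flags;	<- pushfl
	 *	pushl %ebx		unsigned long si;	<- pushl %esi
	 *	pushl %edi		unsigned long di;	<- pushl %edi
	 *	pushl %esi		unsigned long bx;	<- pushl %ebx
	 *	pushfl			unsigned long bp;	<- pushl %ebp
	 */
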
12 changes: 8 additions & 4 deletions arch/x86/include/asm/uaccess.h
@@ -427,10 +427,11 @@ do {								\
 ({									\
 	__label__ __pu_label;						\
 	int __pu_err = -EFAULT;						\
-	__typeof__(*(ptr)) __pu_val;					\
-	__pu_val = x;							\
+	__typeof__(*(ptr)) __pu_val = (x);				\
+	__typeof__(ptr) __pu_ptr = (ptr);				\
+	__typeof__(size) __pu_size = (size);				\
 	__uaccess_begin();						\
-	__put_user_size(__pu_val, (ptr), (size), __pu_label);		\
+	__put_user_size(__pu_val, __pu_ptr, __pu_size, __pu_label);	\
 	__pu_err = 0;							\
 __pu_label:								\
 	__uaccess_end();						\
@@ -705,7 +706,7 @@ extern struct movsl_mask {
  * checking before using them, but you have to surround them with the
  * user_access_begin/end() pair.
  */
-static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
+static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
 {
 	if (unlikely(!access_ok(ptr,len)))
 		return 0;
@@ -715,6 +716,9 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
 #define user_access_begin(a,b)	user_access_begin(a,b)
 #define user_access_end()	__uaccess_end()
 
+#define user_access_save()	smap_save()
+#define user_access_restore(x)	smap_restore(x)
+
 #define unsafe_put_user(x, ptr, label)	\
 	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)
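
For context, the begin/end API these hunks extend batches the access_ok()
check so the unsafe_*_user() accessors can skip per-access checks; a hedged
usage sketch (function name illustrative):

	int put_two_words(u32 __user *dst, u32 a, u32 b)
	{
		if (!user_access_begin(dst, 2 * sizeof(u32)))
			return -EFAULT;			/* access_ok() failed */
		unsafe_put_user(a, &dst[0], efault);
		unsafe_put_user(b, &dst[1], efault);
		user_access_end();
		return 0;

	efault:
		user_access_end();	/* always close the AC=1 window */
		return -EFAULT;
	}

user_access_save()/user_access_restore(), by contrast, are for code that may
be entered while someone else's window is open (see the smap.h note above).
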
3 changes: 0 additions & 3 deletions arch/x86/include/asm/uaccess_64.h
@@ -207,9 +207,6 @@ __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
 	return __copy_user_flushcache(dst, src, size);
 }
 
-unsigned long
-copy_user_handle_tail(char *to, char *from, unsigned len);
-
 unsigned long
 mcsafe_handle_tail(char *to, char *from, unsigned len);
24 changes: 20 additions & 4 deletions arch/x86/include/asm/xen/hypercall.h
@@ -217,6 +217,22 @@ xen_single_call(unsigned int call,
 	return (long)__res;
 }
 
+static __always_inline void __xen_stac(void)
+{
+	/*
+	 * Suppress objtool seeing the STAC/CLAC and getting confused about it
+	 * calling random code with AC=1.
+	 */
+	asm volatile(ANNOTATE_IGNORE_ALTERNATIVE
+		     ASM_STAC ::: "memory", "flags");
+}
+
+static __always_inline void __xen_clac(void)
+{
+	asm volatile(ANNOTATE_IGNORE_ALTERNATIVE
+		     ASM_CLAC ::: "memory", "flags");
+}
+
 static inline long
 privcmd_call(unsigned int call,
 	     unsigned long a1, unsigned long a2,
@@ -225,9 +241,9 @@ privcmd_call(unsigned int call,
 {
 	long res;
 
-	stac();
+	__xen_stac();
 	res = xen_single_call(call, a1, a2, a3, a4, a5);
-	clac();
+	__xen_clac();
 
 	return res;
 }
@@ -424,9 +440,9 @@ HYPERVISOR_dm_op(
 	domid_t dom, unsigned int nr_bufs, struct xen_dm_op_buf *bufs)
 {
 	int ret;
-	stac();
+	__xen_stac();
 	ret = _hypercall3(int, dm_op, dom, nr_bufs, bufs);
-	clac();
+	__xen_clac();
 	return ret;
 }
7 changes: 7 additions & 0 deletions arch/x86/kernel/process_32.c
@@ -127,6 +127,13 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
 	struct task_struct *tsk;
 	int err;
 
+	/*
+	 * For a new task use the RESET flags value since there is no before.
+	 * All the status flags are zero; DF and all the system flags must also
+	 * be 0, specifically IF must be 0 because we context switch to the new
+	 * task with interrupts disabled.
+	 */
+	frame->flags = X86_EFLAGS_FIXED;
 	frame->bp = 0;
 	frame->ret_addr = (unsigned long) ret_from_fork;
 	p->thread.sp = (unsigned long) fork_frame;
1 change: 1 addition & 0 deletions arch/x86/kernel/process_64.c
@@ -392,6 +392,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
 	childregs = task_pt_regs(p);
 	fork_frame = container_of(childregs, struct fork_frame, regs);
 	frame = &fork_frame->frame;
+
 	frame->bp = 0;
 	frame->ret_addr = (unsigned long) ret_from_fork;
 	p->thread.sp = (unsigned long) fork_frame;
(Diffs for the remaining changed files are not shown.)