Merge branch 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull core fixes from Thomas Gleixner:

 - Unbreak BPF compilation, which was broken by the unconditional
   requirement of asm-goto, a feature clang does not support.

 - Prevent probing on exception masking instructions (MOV SS, POP SS) in
   uprobes and kprobes, avoiding the problems caused by delayed exceptions
   outright instead of papering over them with an ugly workaround.

 - Prevent a double free_page() in the error path of do_kexec_load()

 - A set of objtool updates addressing various issues, mostly related to
   switch tables and "noreturn" detection for recursive sibling calls (a
   brief switch-table illustration follows the commit metadata below)

 - Header sync for tools.

* 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  objtool: Detect RIP-relative switch table references, part 2
  objtool: Detect RIP-relative switch table references
  objtool: Support GCC 8 switch tables
  objtool: Support GCC 8's cold subfunctions
  objtool: Fix "noreturn" detection for recursive sibling calls
  objtool, kprobes/x86: Sync the latest <asm/insn.h> header with tools/objtool/arch/x86/include/asm/insn.h
  x86/cpufeature: Guard asm_volatile_goto usage for BPF compilation
  uprobes/x86: Prohibit probing on MOV SS instruction
  kprobes/x86: Prohibit probing on exception masking instructions
  x86/kexec: Avoid double free_page() upon do_kexec_load() failure
torvalds committed May 20, 2018
2 parents 203ec2f + 7dec80c, commit 583dbad
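
The objtool updates in this pull deal with the rodata jump tables GCC emits for dense C switch statements; GCC 8 started referencing those tables RIP-relatively and moving rarely-executed code into cold subfunctions, both of which objtool must understand to reconstruct the control-flow graph. As a hypothetical illustration (not code from this commit), a switch of this shape is the kind of construct that typically becomes a jump table:

/*
 * Hypothetical example: a dense switch like this is typically lowered
 * by GCC at -O2 into an indirect jump through a table of code pointers
 * in .rodata, which objtool must detect to follow the control flow.
 * Whether a table is emitted depends on compiler version and flags.
 */
int classify(int x)
{
	switch (x) {
	case 0: return 10;
	case 1: return 21;
	case 2: return 32;
	case 3: return 43;
	case 4: return 54;
	case 5: return 65;
	default: return -1;
	}
}

Compiling with gcc -O2 -S makes the table visible; with position-independent code the table is addressed through a RIP-relative lea, which is the addressing form the "RIP-relative switch table references" patches teach objtool to recognize.
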
Showing 11 changed files with 206 additions and 77 deletions.

arch/x86/include/asm/cpufeature.h (15 additions, 0 deletions)

@@ -140,6 +140,20 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
 
 #define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)
 
+#if defined(__clang__) && !defined(CC_HAVE_ASM_GOTO)
+
+/*
+ * Workaround for the sake of BPF compilation which utilizes kernel
+ * headers, but clang does not support ASM GOTO and fails the build.
+ */
+#ifndef __BPF_TRACING__
+#warning "Compiler lacks ASM_GOTO support. Add -D __BPF_TRACING__ to your compiler arguments"
+#endif
+
+#define static_cpu_has(bit) boot_cpu_has(bit)
+
+#else
+
 /*
  * Static testing of CPU features. Used the same as boot_cpu_has().
  * These will statically patch the target code for additional
@@ -195,6 +209,7 @@ static __always_inline __pure bool _static_cpu_has(u16 bit)
 		boot_cpu_has(bit) : \
 		_static_cpu_has(bit) \
 )
+#endif
 
 #define cpu_has_bug(c, bit) cpu_has(c, (bit))
 #define set_cpu_bug(c, bit) set_cpu_cap(c, (bit))
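
The hunk above is the BPF fix from the first bullet: when clang (which lacked asm-goto support at the time) compiles BPF programs against kernel headers and __BPF_TRACING__ is defined, static_cpu_has() degrades to a plain runtime boot_cpu_has() check instead of the asm-goto construct clang cannot parse. For readers unfamiliar with the extension, here is a minimal, self-contained sketch of asm goto; it is illustrative only, since the kernel's real static_cpu_has() additionally patches the jump at boot via alternatives:

#include <stdbool.h>
#include <stdio.h>

/*
 * Minimal sketch of the GCC "asm goto" extension that clang could not
 * compile at the time of this commit. Illustrative only: the kernel's
 * real static_cpu_has() also patches the branch at boot so the feature
 * check costs nothing at runtime.
 */
static inline bool feature_enabled(void)
{
	/* An asm statement that may transfer control to a C label. */
	asm goto("jmp %l[yes]"
		 : /* no outputs */
		 : /* no inputs */
		 : /* no clobbers */
		 : yes);
	return false;
yes:
	return true;
}

int main(void)
{
	printf("feature_enabled() = %d\n", feature_enabled());
	return 0;
}
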
arch/x86/include/asm/insn.h (18 additions, 0 deletions)

@@ -208,4 +208,22 @@ static inline int insn_offset_immediate(struct insn *insn)
 	return insn_offset_displacement(insn) + insn->displacement.nbytes;
 }
 
+#define POP_SS_OPCODE 0x1f
+#define MOV_SREG_OPCODE 0x8e
+
+/*
+ * Intel SDM Vol.3A 6.8.3 states;
+ * "Any single-step trap that would be delivered following the MOV to SS
+ * instruction or POP to SS instruction (because EFLAGS.TF is 1) is
+ * suppressed."
+ * This function returns true if @insn is MOV SS or POP SS. On these
+ * instructions, single stepping is suppressed.
+ */
+static inline int insn_masking_exception(struct insn *insn)
+{
+	return insn->opcode.bytes[0] == POP_SS_OPCODE ||
+	       (insn->opcode.bytes[0] == MOV_SREG_OPCODE &&
+		X86_MODRM_REG(insn->modrm.bytes[0]) == 2);
+}
+
 #endif /* _ASM_X86_INSN_H */
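
The check keys on instruction encodings from the Intel SDM: POP SS is the one-byte opcode 0x1f, and MOV to a segment register is opcode 0x8e with the destination selected by bits 5:3 of the following ModRM byte, where SS is encoding 2. A self-contained sketch of the same predicate applied to a hand-encoded mov %ax, %ss (X86_MODRM_REG is reproduced inline for the example):

#include <stdio.h>

/* Bits 5:3 of a ModRM byte select the register operand (Intel SDM). */
#define X86_MODRM_REG(modrm) (((modrm) >> 3) & 0x7)

#define POP_SS_OPCODE   0x1f
#define MOV_SREG_OPCODE 0x8e

/* Same predicate as insn_masking_exception(), applied to raw bytes. */
static int masks_exceptions(const unsigned char *insn)
{
	return insn[0] == POP_SS_OPCODE ||
	       (insn[0] == MOV_SREG_OPCODE && X86_MODRM_REG(insn[1]) == 2);
}

int main(void)
{
	/* "mov %ax, %ss" encodes as 8e d0: mod=11, reg=010 (SS), rm=000 (AX). */
	unsigned char mov_ss[] = { 0x8e, 0xd0 };

	if (masks_exceptions(mov_ss))
		printf("single-step trap after this instruction is suppressed\n");
	return 0;
}

The kprobes and uprobes hunks below simply refuse to plant a probe on such instructions, since the suppressed debug trap would otherwise fire one instruction late, in an unexpected context.
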
arch/x86/kernel/kprobes/core.c (4 additions, 0 deletions)

@@ -370,6 +370,10 @@ int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
 	if (insn->opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
 		return 0;
 
+	/* We should not singlestep on the exception masking instructions */
+	if (insn_masking_exception(insn))
+		return 0;
+
 #ifdef CONFIG_X86_64
 	/* Only x86_64 has RIP relative instructions */
 	if (insn_rip_relative(insn)) {
arch/x86/kernel/machine_kexec_32.c (5 additions, 1 deletion)

@@ -57,12 +57,17 @@ static void load_segments(void)
 static void machine_kexec_free_page_tables(struct kimage *image)
 {
 	free_page((unsigned long)image->arch.pgd);
+	image->arch.pgd = NULL;
 #ifdef CONFIG_X86_PAE
 	free_page((unsigned long)image->arch.pmd0);
+	image->arch.pmd0 = NULL;
 	free_page((unsigned long)image->arch.pmd1);
+	image->arch.pmd1 = NULL;
 #endif
 	free_page((unsigned long)image->arch.pte0);
+	image->arch.pte0 = NULL;
 	free_page((unsigned long)image->arch.pte1);
+	image->arch.pte1 = NULL;
 }
 
 static int machine_kexec_alloc_page_tables(struct kimage *image)
@@ -79,7 +84,6 @@ static int machine_kexec_alloc_page_tables(struct kimage *image)
 	    !image->arch.pmd0 || !image->arch.pmd1 ||
 #endif
 	    !image->arch.pte0 || !image->arch.pte1) {
-		machine_kexec_free_page_tables(image);
 		return -ENOMEM;
 	}
 	return 0;
arch/x86/kernel/machine_kexec_64.c (4 additions, 1 deletion)

@@ -39,9 +39,13 @@ const struct kexec_file_ops * const kexec_file_loaders[] = {
 static void free_transition_pgtable(struct kimage *image)
 {
 	free_page((unsigned long)image->arch.p4d);
+	image->arch.p4d = NULL;
 	free_page((unsigned long)image->arch.pud);
+	image->arch.pud = NULL;
 	free_page((unsigned long)image->arch.pmd);
+	image->arch.pmd = NULL;
 	free_page((unsigned long)image->arch.pte);
+	image->arch.pte = NULL;
 }
 
 static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
@@ -91,7 +95,6 @@ static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
 	set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC_NOENC));
 	return 0;
 err:
-	free_transition_pgtable(image);
 	return result;
 }
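
Both kexec hunks above fix the same bug shape: on allocation or mapping failure the helper freed the page tables itself, and the caller's cleanup path then freed the same pages again, a double free_page(). The fix frees in exactly one place and resets each pointer to NULL after freeing, so a later cleanup pass is harmless (free_page(0) is a no-op, much like free(NULL) in user space). A toy user-space illustration of the idiom, not kernel code:

#include <stdlib.h>

/*
 * Toy user-space illustration (not kernel code) of the idiom used in
 * the fix: free in one place only, and NULL each pointer after freeing
 * so that running the cleanup twice frees nothing twice.
 */
struct tables {
	void *pgd;
	void *pte;
};

static void free_tables(struct tables *t)
{
	free(t->pgd);
	t->pgd = NULL;
	free(t->pte);
	t->pte = NULL;
}

static int alloc_tables(struct tables *t)
{
	t->pgd = malloc(4096);
	t->pte = malloc(4096);
	if (!t->pgd || !t->pte)
		return -1;	/* do not free here; the caller's cleanup will */
	return 0;
}

int main(void)
{
	struct tables t = { NULL, NULL };
	int err = alloc_tables(&t);

	free_tables(&t);	/* frees whatever was allocated */
	free_tables(&t);	/* second pass is harmless: pointers are NULL */
	return err ? 1 : 0;
}
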
arch/x86/kernel/uprobes.c (4 additions, 0 deletions)

@@ -299,6 +299,10 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool x86_64)
 	if (is_prefix_bad(insn))
 		return -ENOTSUPP;
 
+	/* We should not singlestep on the exception masking instructions */
+	if (insn_masking_exception(insn))
+		return -ENOTSUPP;
+
 	if (x86_64)
 		good_insns = good_insns_64;
 	else
samples/bpf/Makefile (1 addition, 1 deletion)

@@ -255,7 +255,7 @@ $(obj)/tracex5_kern.o: $(obj)/syscall_nrs.h
 $(obj)/%.o: $(src)/%.c
 	$(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) -I$(obj) \
 		-I$(srctree)/tools/testing/selftests/bpf/ \
-		-D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \
+		-D__KERNEL__ -D__BPF_TRACING__ -Wno-unused-value -Wno-pointer-sign \
 		-D__TARGET_ARCH_$(ARCH) -Wno-compare-distinct-pointer-types \
 		-Wno-gnu-variable-sized-type-not-at-end \
 		-Wno-address-of-packed-member -Wno-tautological-compare \
tools/objtool/arch/x86/include/asm/insn.h (18 additions, 0 deletions)

@@ -208,4 +208,22 @@ static inline int insn_offset_immediate(struct insn *insn)
 	return insn_offset_displacement(insn) + insn->displacement.nbytes;
 }
 
+#define POP_SS_OPCODE 0x1f
+#define MOV_SREG_OPCODE 0x8e
+
+/*
+ * Intel SDM Vol.3A 6.8.3 states;
+ * "Any single-step trap that would be delivered following the MOV to SS
+ * instruction or POP to SS instruction (because EFLAGS.TF is 1) is
+ * suppressed."
+ * This function returns true if @insn is MOV SS or POP SS. On these
+ * instructions, single stepping is suppressed.
+ */
+static inline int insn_masking_exception(struct insn *insn)
+{
+	return insn->opcode.bytes[0] == POP_SS_OPCODE ||
+	       (insn->opcode.bytes[0] == MOV_SREG_OPCODE &&
+		X86_MODRM_REG(insn->modrm.bytes[0]) == 2);
+}
+
 #endif /* _ASM_X86_INSN_H */
[The remaining three changed files in this commit, the tools/objtool sources, did not load on this page and are not shown.]
