Merge tags 'objtool-urgent-2021-06-28' and 'objtool-core-2021-06-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull objtool fix and updates from Ingo Molnar:
 "An ELF format fix for a section flags mismatch bug that breaks kernel
  tooling such as kpatch-build.

  The biggest change in this cycle is the new code to handle and rewrite
  variable sized jump labels - which results in slightly tighter code
  generation in hot paths, through the use of short(er) NOPs.

  Also a number of cleanups and fixes, and a change to the generic
  include/linux/compiler.h to handle a s390 GCC quirk"
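
For context, the "tighter code generation" above comes from emitting a 2-byte JMP/NOP at a static-branch site whenever the target is reachable with a signed 8-bit displacement, instead of the previously fixed 5-byte forms. A rough stand-alone sketch of that reachability test (illustrative only; `fits_rel8` is not a kernel function):

```c
#include <stdbool.h>
#include <stdint.h>

/* Can a 2-byte "jmp rel8" (opcode 0xeb) at 'addr' reach 'dest'?
 * The displacement is measured from the end of the instruction. */
static bool fits_rel8(uint64_t addr, uint64_t dest)
{
	int64_t disp = (int64_t)dest - (int64_t)(addr + 2);

	return disp >= INT8_MIN && disp <= INT8_MAX;
}
```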

* tag 'objtool-urgent-2021-06-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  objtool: Don't make .altinstructions writable

* tag 'objtool-core-2021-06-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  objtool: Improve reloc hash size guestimate
  instrumentation.h: Avoid using inline asm operand modifiers
  compiler.h: Avoid using inline asm operand modifiers
  kbuild: Fix objtool dependency for 'OBJECT_FILES_NON_STANDARD_<obj> := n'
  objtool: Reflow handle_jump_alt()
  jump_label/x86: Remove unused JUMP_LABEL_NOP_SIZE
  jump_label, x86: Allow short NOPs
  objtool: Provide stats for jump_labels
  objtool: Rewrite jump_label instructions
  objtool: Decode jump_entry::key addend
  jump_label, x86: Emit short JMP
  jump_label: Free jump_entry::key bit1 for build use
  jump_label, x86: Add variable length patching support
  jump_label, x86: Introduce jump_entry_size()
  jump_label, x86: Improve error when we fail expected text
  jump_label, x86: Factor out the __jump_table generation
  jump_label, x86: Strip ASM jump_label support
  x86, objtool: Dont exclude arch/x86/realmode/
  objtool: Rewrite hashtable sizing
torvalds committed Jun 28, 2021
3 parents 6796355 + e31694e + d33b903 commit b89c07d
Showing 16 changed files with 268 additions and 155 deletions.
79 changes: 30 additions & 49 deletions arch/x86/include/asm/jump_label.h
@@ -4,8 +4,6 @@

#define HAVE_JUMP_LABEL_BATCH

#define JUMP_LABEL_NOP_SIZE 5

#include <asm/asm.h>
#include <asm/nops.h>

@@ -14,74 +12,57 @@
#include <linux/stringify.h>
#include <linux/types.h>

#define JUMP_TABLE_ENTRY \
".pushsection __jump_table, \"aw\" \n\t" \
_ASM_ALIGN "\n\t" \
".long 1b - . \n\t" \
".long %l[l_yes] - . \n\t" \
_ASM_PTR "%c0 + %c1 - .\n\t" \
".popsection \n\t"

#ifdef CONFIG_STACK_VALIDATION

static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
asm_volatile_goto("1:"
"jmp %l[l_yes] # objtool NOPs this \n\t"
JUMP_TABLE_ENTRY
: : "i" (key), "i" (2 | branch) : : l_yes);

return false;
l_yes:
return true;
}

#else

static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch)
{
asm_volatile_goto("1:"
".byte " __stringify(BYTES_NOP5) "\n\t"
".pushsection __jump_table, \"aw\" \n\t"
_ASM_ALIGN "\n\t"
".long 1b - ., %l[l_yes] - . \n\t"
_ASM_PTR "%c0 + %c1 - .\n\t"
".popsection \n\t"
JUMP_TABLE_ENTRY
: : "i" (key), "i" (branch) : : l_yes);

return false;
l_yes:
return true;
}

#endif /* STACK_VALIDATION */

static __always_inline bool arch_static_branch_jump(struct static_key * const key, const bool branch)
{
asm_volatile_goto("1:"
".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t"
"2:\n\t"
".pushsection __jump_table, \"aw\" \n\t"
_ASM_ALIGN "\n\t"
".long 1b - ., %l[l_yes] - . \n\t"
_ASM_PTR "%c0 + %c1 - .\n\t"
".popsection \n\t"
"jmp %l[l_yes]\n\t"
JUMP_TABLE_ENTRY
: : "i" (key), "i" (branch) : : l_yes);

return false;
l_yes:
return true;
}

#else /* __ASSEMBLY__ */

.macro STATIC_JUMP_IF_TRUE target, key, def
.Lstatic_jump_\@:
.if \def
/* Equivalent to "jmp.d32 \target" */
.byte 0xe9
.long \target - .Lstatic_jump_after_\@
.Lstatic_jump_after_\@:
.else
.byte BYTES_NOP5
.endif
.pushsection __jump_table, "aw"
_ASM_ALIGN
.long .Lstatic_jump_\@ - ., \target - .
_ASM_PTR \key - .
.popsection
.endm

.macro STATIC_JUMP_IF_FALSE target, key, def
.Lstatic_jump_\@:
.if \def
.byte BYTES_NOP5
.else
/* Equivalent to "jmp.d32 \target" */
.byte 0xe9
.long \target - .Lstatic_jump_after_\@
.Lstatic_jump_after_\@:
.endif
.pushsection __jump_table, "aw"
_ASM_ALIGN
.long .Lstatic_jump_\@ - ., \target - .
_ASM_PTR \key + 1 - .
.popsection
.endm
extern int arch_jump_entry_size(struct jump_entry *entry);

#endif /* __ASSEMBLY__ */

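The `JUMP_TABLE_ENTRY` string above emits one record per branch site into the `__jump_table` section. On a relative-key architecture like x86 it corresponds to the generic layout below (from include/linux/jump_label.h); `%c0 + %c1` folds the constant `"i"` operands into the low bits of the key offset, where bit 1 (`2 | branch`) is consumed by objtool at build time and bit 0 records the branch default:

```c
/* Record produced by JUMP_TABLE_ENTRY when the arch selects
 * CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE: every field is an offset
 * relative to its own address. */
struct jump_entry {
	s32 code;	/* ".long 1b - ."          : the patched instruction */
	s32 target;	/* ".long %l[l_yes] - ."   : the jump destination    */
	long key;	/* "_ASM_PTR %c0 + %c1 - .": key address | flag bits */
};
```
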
81 changes: 52 additions & 29 deletions arch/x86/kernel/jump_label.c
@@ -15,50 +15,75 @@
#include <asm/kprobes.h>
#include <asm/alternative.h>
#include <asm/text-patching.h>
#include <asm/insn.h>

static void bug_at(const void *ip, int line)
int arch_jump_entry_size(struct jump_entry *entry)
{
/*
* The location is not an op that we were expecting.
* Something went wrong. Crash the box, as something could be
* corrupting the kernel.
*/
pr_crit("jump_label: Fatal kernel bug, unexpected op at %pS [%p] (%5ph) %d\n", ip, ip, ip, line);
BUG();
struct insn insn = {};

insn_decode_kernel(&insn, (void *)jump_entry_code(entry));
BUG_ON(insn.length != 2 && insn.length != 5);

return insn.length;
}

static const void *
__jump_label_set_jump_code(struct jump_entry *entry, enum jump_label_type type)
struct jump_label_patch {
const void *code;
int size;
};

static struct jump_label_patch
__jump_label_patch(struct jump_entry *entry, enum jump_label_type type)
{
const void *expect, *code;
const void *expect, *code, *nop;
const void *addr, *dest;
int line;
int size;

addr = (void *)jump_entry_code(entry);
dest = (void *)jump_entry_target(entry);

code = text_gen_insn(JMP32_INSN_OPCODE, addr, dest);
size = arch_jump_entry_size(entry);
switch (size) {
case JMP8_INSN_SIZE:
code = text_gen_insn(JMP8_INSN_OPCODE, addr, dest);
nop = x86_nops[size];
break;

if (type == JUMP_LABEL_JMP) {
expect = x86_nops[5]; line = __LINE__;
} else {
expect = code; line = __LINE__;
case JMP32_INSN_SIZE:
code = text_gen_insn(JMP32_INSN_OPCODE, addr, dest);
nop = x86_nops[size];
break;

default: BUG();
}

if (memcmp(addr, expect, JUMP_LABEL_NOP_SIZE))
bug_at(addr, line);
if (type == JUMP_LABEL_JMP)
expect = nop;
else
expect = code;

if (memcmp(addr, expect, size)) {
/*
* The location is not an op that we were expecting.
* Something went wrong. Crash the box, as something could be
* corrupting the kernel.
*/
pr_crit("jump_label: Fatal kernel bug, unexpected op at %pS [%p] (%5ph != %5ph)) size:%d type:%d\n",
addr, addr, addr, expect, size, type);
BUG();
}

if (type == JUMP_LABEL_NOP)
code = x86_nops[5];
code = nop;

return code;
return (struct jump_label_patch){.code = code, .size = size};
}

static inline void __jump_label_transform(struct jump_entry *entry,
enum jump_label_type type,
int init)
{
const void *opcode = __jump_label_set_jump_code(entry, type);
const struct jump_label_patch jlp = __jump_label_patch(entry, type);

/*
* As long as only a single processor is running and the code is still
@@ -72,12 +97,11 @@ static inline void __jump_label_transform(struct jump_entry *entry,
* always nop being the 'currently valid' instruction
*/
if (init || system_state == SYSTEM_BOOTING) {
text_poke_early((void *)jump_entry_code(entry), opcode,
JUMP_LABEL_NOP_SIZE);
text_poke_early((void *)jump_entry_code(entry), jlp.code, jlp.size);
return;
}

text_poke_bp((void *)jump_entry_code(entry), opcode, JUMP_LABEL_NOP_SIZE, NULL);
text_poke_bp((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
}

static void __ref jump_label_transform(struct jump_entry *entry,
@@ -98,7 +122,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
bool arch_jump_label_transform_queue(struct jump_entry *entry,
enum jump_label_type type)
{
const void *opcode;
struct jump_label_patch jlp;

if (system_state == SYSTEM_BOOTING) {
/*
@@ -109,9 +133,8 @@ bool arch_jump_label_transform_queue(struct jump_entry *entry,
}

mutex_lock(&text_mutex);
opcode = __jump_label_set_jump_code(entry, type);
text_poke_queue((void *)jump_entry_code(entry),
opcode, JUMP_LABEL_NOP_SIZE, NULL);
jlp = __jump_label_patch(entry, type);
text_poke_queue((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
mutex_unlock(&text_mutex);
return true;
}
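
With this change, arch_jump_entry_size() discovers the size of each site by decoding the instruction in place, and __jump_label_patch() asks text_gen_insn() for a matching 2- or 5-byte JMP. A simplified stand-alone model of the two encodings involved (illustrative; not the kernel's text_gen_insn() implementation):

```c
#include <stdint.h>
#include <string.h>

/* Model of the JMP8/JMP32 encodings requested above; buf must hold
 * at least 5 bytes. Returns the instruction length. */
static int gen_jmp(uint8_t *buf, uint64_t addr, uint64_t dest, int size)
{
	if (size == 2) {			/* JMP8: eb <rel8> */
		buf[0] = 0xeb;
		buf[1] = (uint8_t)(int8_t)(dest - (addr + 2));
	} else {				/* JMP32: e9 <rel32> */
		int32_t rel32 = (int32_t)(dest - (addr + 5));

		buf[0] = 0xe9;
		memcpy(&buf[1], &rel32, sizeof(rel32));
	}
	return size;
}
```
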
1 change: 0 additions & 1 deletion arch/x86/realmode/Makefile
@@ -10,7 +10,6 @@
# Sanitizer runtimes are unavailable and cannot be linked here.
KASAN_SANITIZE := n
KCSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y

subdir- := rm

22 changes: 14 additions & 8 deletions include/linux/compiler.h
@@ -115,18 +115,24 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
* The __COUNTER__ based labels are a hack to make each instance of the macros
* unique, to convince GCC not to merge duplicate inline asm statements.
*/
#define annotate_reachable() ({ \
asm volatile("%c0:\n\t" \
#define __stringify_label(n) #n

#define __annotate_reachable(c) ({ \
asm volatile(__stringify_label(c) ":\n\t" \
".pushsection .discard.reachable\n\t" \
".long %c0b - .\n\t" \
".popsection\n\t" : : "i" (__COUNTER__)); \
".long " __stringify_label(c) "b - .\n\t" \
".popsection\n\t"); \
})
#define annotate_unreachable() ({ \
asm volatile("%c0:\n\t" \
#define annotate_reachable() __annotate_reachable(__COUNTER__)

#define __annotate_unreachable(c) ({ \
asm volatile(__stringify_label(c) ":\n\t" \
".pushsection .discard.unreachable\n\t" \
".long %c0b - .\n\t" \
".popsection\n\t" : : "i" (__COUNTER__)); \
".long " __stringify_label(c) "b - .\n\t" \
".popsection\n\t"); \
})
#define annotate_unreachable() __annotate_unreachable(__COUNTER__)

#define ASM_UNREACHABLE \
"999:\n\t" \
".pushsection .discard.unreachable\n\t" \
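
The point of `__stringify_label()` plus the extra macro level is that `__COUNTER__` is expanded to a number before being pasted into the asm template, eliminating the `"i" (__COUNTER__)` operand and its `%c0` modifier (the s390 GCC quirk the merge message mentions). A minimal stand-alone demonstration of that expansion order (demo macro names are illustrative):

```c
#include <stdio.h>

#define __stringify_label(n) #n
/* One level of indirection so the argument is macro-expanded first;
 * stringifying __COUNTER__ directly would yield "__COUNTER__". */
#define expand_and_stringify(n) __stringify_label(n)

int main(void)
{
	puts(expand_and_stringify(__COUNTER__));	/* e.g. "0" */
	puts(expand_and_stringify(__COUNTER__));	/* "1": each use increments */
	return 0;
}
```
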
20 changes: 12 additions & 8 deletions include/linux/instrumentation.h
@@ -4,13 +4,16 @@

#if defined(CONFIG_DEBUG_ENTRY) && defined(CONFIG_STACK_VALIDATION)

#include <linux/stringify.h>

/* Begin/end of an instrumentation safe region */
#define instrumentation_begin() ({ \
asm volatile("%c0: nop\n\t" \
#define __instrumentation_begin(c) ({ \
asm volatile(__stringify(c) ": nop\n\t" \
".pushsection .discard.instr_begin\n\t" \
".long %c0b - .\n\t" \
".popsection\n\t" : : "i" (__COUNTER__)); \
".long " __stringify(c) "b - .\n\t" \
".popsection\n\t"); \
})
#define instrumentation_begin() __instrumentation_begin(__COUNTER__)

/*
* Because instrumentation_{begin,end}() can nest, objtool validation considers
@@ -43,12 +46,13 @@
* To avoid this, have _end() be a NOP instruction, this ensures it will be
* part of the condition block and does not escape.
*/
#define instrumentation_end() ({ \
asm volatile("%c0: nop\n\t" \
#define __instrumentation_end(c) ({ \
asm volatile(__stringify(c) ": nop\n\t" \
".pushsection .discard.instr_end\n\t" \
".long %c0b - .\n\t" \
".popsection\n\t" : : "i" (__COUNTER__)); \
".long " __stringify(c) "b - .\n\t" \
".popsection\n\t"); \
})
#define instrumentation_end() __instrumentation_end(__COUNTER__)
#else
# define instrumentation_begin() do { } while(0)
# define instrumentation_end() do { } while(0)
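
For reference, these markers delimit where `noinstr` code may legitimately call instrumentable code, so objtool can flag anything outside the brackets. A minimal usage sketch in the kernel's idiom (the function and callee names here are hypothetical):

```c
#include <linux/compiler_types.h>	/* noinstr */
#include <linux/instrumentation.h>

static void do_instrumentable_work(void);	/* hypothetical callee */

/* Hypothetical noinstr entry helper: instrumentation is forbidden by
 * default in noinstr code and re-allowed only inside the bracket. */
static noinstr void hypothetical_entry_work(void)
{
	/* instrumentation-unsafe prologue ... */

	instrumentation_begin();
	do_instrumentable_work();	/* tracing/sanitizers allowed here */
	instrumentation_end();

	/* instrumentation-unsafe epilogue ... */
}
```
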
16 changes: 14 additions & 2 deletions include/linux/jump_label.h
@@ -171,9 +171,21 @@ static inline bool jump_entry_is_init(const struct jump_entry *entry)
return (unsigned long)entry->key & 2UL;
}

static inline void jump_entry_set_init(struct jump_entry *entry)
static inline void jump_entry_set_init(struct jump_entry *entry, bool set)
{
entry->key |= 2;
if (set)
entry->key |= 2;
else
entry->key &= ~2;
}

static inline int jump_entry_size(struct jump_entry *entry)
{
#ifdef JUMP_LABEL_NOP_SIZE
return JUMP_LABEL_NOP_SIZE;
#else
return arch_jump_entry_size(entry);
#endif
}

#endif
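jump_entry_is_init()/jump_entry_set_init() above operate on bit 1 of entry->key; bit 0 likewise encodes the branch's default direction, and on relative-key configurations the real key pointer is recovered by masking both bits off and applying the offset. Roughly, mirroring the relative-key accessors already in this header:

```c
static inline bool jump_entry_is_branch(const struct jump_entry *entry)
{
	return (unsigned long)entry->key & 1UL;	/* bit 0: default true/false */
}

static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
{
	long offset = entry->key & ~3L;		/* strip both flag bits */

	return (struct static_key *)((unsigned long)&entry->key + offset);
}
```
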
12 changes: 7 additions & 5 deletions kernel/jump_label.c
@@ -309,7 +309,7 @@ EXPORT_SYMBOL_GPL(jump_label_rate_limit);
static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
if (jump_entry_code(entry) <= (unsigned long)end &&
jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
jump_entry_code(entry) + jump_entry_size(entry) > (unsigned long)start)
return 1;

return 0;
@@ -483,13 +483,14 @@ void __init jump_label_init(void)

for (iter = iter_start; iter < iter_stop; iter++) {
struct static_key *iterk;
bool in_init;

/* rewrite NOPs */
if (jump_label_type(iter) == JUMP_LABEL_NOP)
arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

if (init_section_contains((void *)jump_entry_code(iter), 1))
jump_entry_set_init(iter);
in_init = init_section_contains((void *)jump_entry_code(iter), 1);
jump_entry_set_init(iter, in_init);

iterk = jump_entry_key(iter);
if (iterk == key)
@@ -634,9 +635,10 @@ static int jump_label_add_module(struct module *mod)

for (iter = iter_start; iter < iter_stop; iter++) {
struct static_key *iterk;
bool in_init;

if (within_module_init(jump_entry_code(iter), mod))
jump_entry_set_init(iter);
in_init = within_module_init(jump_entry_code(iter), mod);
jump_entry_set_init(iter, in_init);

iterk = jump_entry_key(iter);
if (iterk == key)
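
The addr_conflict() change above is the generic-code payoff: the overlap test against a patched region now uses the per-entry size instead of a fixed JUMP_LABEL_NOP_SIZE. Expressed as a stand-alone predicate:

```c
#include <stdbool.h>

/* Same comparison addr_conflict() performs: does the patched range
 * [code, code + size) overlap the region from 'start' through 'end'? */
static bool entry_overlaps(unsigned long code, int size,
			   unsigned long start, unsigned long end)
{
	return code <= end && code + size > start;
}
```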