x86/alternatives: Get rid of __optimize_nops()
There's no need to carve out bits of the NOP optimization functionality
and look at JMP opcodes - simply do one more NOP optimization pass
at the end of patching.

A lot simpler code.

Signed-off-by: Borislav Petkov (AMD) <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
bp3tk0v committed Apr 9, 2024
1 parent f796c75 commit da8f9cf
Showing 1 changed file with 16 additions and 43 deletions.
59 changes: 16 additions & 43 deletions arch/x86/kernel/alternative.c
@@ -216,47 +216,12 @@ static int skip_nops(u8 *buf, int offset, int len)
 	return offset;
 }
 
-/*
- * Optimize a sequence of NOPs, possibly preceded by an unconditional jump
- * to the end of the NOP sequence into a single NOP.
- */
-static bool
-__optimize_nops(const u8 * const instr, u8 *buf, size_t len, struct insn *insn, int *next, int *prev, int *target)
-{
-	int i = *next - insn->length;
-
-	switch (insn->opcode.bytes[0]) {
-	case JMP8_INSN_OPCODE:
-	case JMP32_INSN_OPCODE:
-		*prev = i;
-		*target = *next + insn->immediate.value;
-		return false;
-	}
-
-	if (insn_is_nop(insn)) {
-		int nop = i;
-
-		*next = skip_nops(buf, *next, len);
-		if (*target && *next == *target)
-			nop = *prev;
-
-		add_nop(buf + nop, *next - nop);
-		DUMP_BYTES(ALT, buf, len, "%px: [%d:%d) optimized NOPs: ", instr, nop, *next);
-		return true;
-	}
-
-	*target = 0;
-	return false;
-}
-
 /*
  * "noinline" to cause control flow change and thus invalidate I$ and
  * cause refetch after modification.
  */
-static void __init_or_module noinline optimize_nops(const u8 * const instr, u8 *buf, size_t len)
+static void noinline optimize_nops(const u8 * const instr, u8 *buf, size_t len)
 {
-	int prev, target = 0;
-
 	for (int next, i = 0; i < len; i = next) {
 		struct insn insn;
 
@@ -265,7 +230,14 @@ static void __init_or_module noinline optimize_nops(const u8 * const instr, u8 *
 
 		next = i + insn.length;
 
-		__optimize_nops(instr, buf, len, &insn, &next, &prev, &target);
+		if (insn_is_nop(&insn)) {
+			int nop = i;
+
+			next = skip_nops(buf, next, len);
+
+			add_nop(buf + nop, next - nop);
+			DUMP_BYTES(ALT, buf, len, "%px: [%d:%d) optimized NOPs: ", instr, nop, next);
+		}
 	}
 }
 
@@ -339,10 +311,8 @@ bool need_reloc(unsigned long offset, u8 *src, size_t src_len)
 	return (target < src || target > src + src_len);
 }
 
-void apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len)
+static void __apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len)
 {
-	int prev, target = 0;
-
 	for (int next, i = 0; i < instrlen; i = next) {
 		struct insn insn;
 
@@ -351,9 +321,6 @@ void apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl
 
 		next = i + insn.length;
 
-		if (__optimize_nops(instr, buf, instrlen, &insn, &next, &prev, &target))
-			continue;
-
 		switch (insn.opcode.bytes[0]) {
 		case 0x0f:
 			if (insn.opcode.bytes[1] < 0x80 ||
@@ -398,6 +365,12 @@ void apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl
 	}
 }
 
+void apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len)
+{
+	__apply_relocation(buf, instr, instrlen, repl, repl_len);
+	optimize_nops(instr, buf, repl_len);
+}
+
 /* Low-level backend functions usable from alternative code replacements. */
 DEFINE_ASM_FUNC(nop_func, "", .entry.text);
 EXPORT_SYMBOL_GPL(nop_func);
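
For illustration, here is a minimal user-space sketch of the "one more NOP optimization pass" idea. Assumptions not taken from the commit: the names optimize_nops_sketch(), fill_nops() and nop_tbl are made up for this example; NOP detection is reduced to the single-byte NOP (0x90) instead of the kernel's insn_decode_kernel()/insn_is_nop() machinery; and fill_nops() stands in for add_nop(), emitting the recommended x86 multi-byte NOP encodings. This is a sketch, not the kernel implementation.

/*
 * Minimal user-space model of the final NOP pass. Simplification: non-NOP
 * bytes are skipped one at a time here, whereas the kernel decodes whole
 * instructions before deciding what to skip.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Recommended x86 single-instruction NOPs, lengths 1..8. */
static const uint8_t nop_tbl[8][8] = {
	{ 0x90 },
	{ 0x66, 0x90 },
	{ 0x0f, 0x1f, 0x00 },
	{ 0x0f, 0x1f, 0x40, 0x00 },
	{ 0x0f, 0x1f, 0x44, 0x00, 0x00 },
	{ 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 },
	{ 0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00 },
	{ 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00 },
};

/* Stand-in for add_nop(): rewrite @len bytes as the fewest long NOPs. */
static void fill_nops(uint8_t *buf, int len)
{
	while (len > 0) {
		int n = len > 8 ? 8 : len;

		memcpy(buf, nop_tbl[n - 1], n);
		buf += n;
		len -= n;
	}
}

/* The "one more pass": collapse each run of NOPs into long NOPs. */
static void optimize_nops_sketch(uint8_t *buf, int len)
{
	for (int i = 0; i < len; ) {
		int start = i;

		while (i < len && buf[i] == 0x90)
			i++;

		if (i > start)
			fill_nops(buf + start, i - start);
		else
			i++;
	}
}

int main(void)
{
	/* Five single-byte NOPs followed by a RET. */
	uint8_t buf[] = { 0x90, 0x90, 0x90, 0x90, 0x90, 0xc3 };

	optimize_nops_sketch(buf, sizeof(buf));

	/* Expect "0f 1f 44 00 00 c3": one 5-byte NOPL, then the RET. */
	for (size_t i = 0; i < sizeof(buf); i++)
		printf("%02x ", buf[i]);
	printf("\n");

	return 0;
}

The commit has the same shape: __apply_relocation() only relocates, and a single trailing optimize_nops() pass collapses whatever NOP runs remain in the patched buffer, which is what lets the prev/target jump bookkeeping disappear from both loops.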
