
Commit

Merge branch 'x86-paravirt-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 paravirt updates from Ingo Molnar:
 "A handful of paravirt patching code enhancements to make it more
  robust against patching failures, and related cleanups and not so
  related cleanups - by Thomas Gleixner and myself"

* 'x86-paravirt-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/paravirt: Rename paravirt_patch_site::instrtype to paravirt_patch_site::type
  x86/paravirt: Standardize 'insn_buff' variable names
  x86/paravirt: Match paravirt patchlet field definition ordering to initialization ordering
  x86/paravirt: Replace the paravirt patch asm magic
  x86/paravirt: Unify the 32/64 bit paravirt patching code
  x86/paravirt: Detect over-sized patching bugs in paravirt_patch_call()
  x86/paravirt: Detect over-sized patching bugs in paravirt_patch_insns()
  x86/paravirt: Remove bogus extern declarations
torvalds committed Jul 9, 2019
2 parents 3431a94 + 46938cc commit da17702
Showing 11 changed files with 214 additions and 238 deletions.
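Before the per-file diffs, a minimal userspace sketch of the patching flow that the arch/x86/kernel/alternative.c hunk below implements: each .parainstructions entry records the original bytes, a type and a length; apply_paravirt() copies the original bytes into a scratch buffer, asks pv_ops.init.patch() how many bytes it emitted, NOP-pads the rest and writes the buffer back. The demo_patch() callback, the byte values and the main() driver are invented for illustration and are not part of the commit.

/* Simplified model of the apply_paravirt() loop; illustrative only. */
#include <stdio.h>
#include <string.h>

#define MAX_PATCH_LEN 255

struct paravirt_patch_site {
	unsigned char *instr;	/* original instructions */
	unsigned char type;	/* type of this instruction */
	unsigned char len;	/* length of original instruction */
};

/* Stand-in for pv_ops.init.patch(): emit a 5-byte relative CALL if it fits. */
static unsigned demo_patch(unsigned char type, void *insn_buff,
			   unsigned long addr, unsigned len)
{
	unsigned char call[5] = { 0xe8, 0x00, 0x00, 0x00, 0x00 };

	if (len < sizeof(call))
		return len;	/* patch site too small, leave the site alone */
	memcpy(insn_buff, call, sizeof(call));
	return sizeof(call);
}

static void apply_site(struct paravirt_patch_site *p)
{
	unsigned char insn_buff[MAX_PATCH_LEN];
	unsigned used;

	memcpy(insn_buff, p->instr, p->len);		/* prep with the original bytes */
	used = demo_patch(p->type, insn_buff, (unsigned long)p->instr, p->len);
	memset(insn_buff + used, 0x90, p->len - used);	/* pad the rest with NOPs */
	memcpy(p->instr, insn_buff, p->len);		/* text_poke_early() equivalent */
}

int main(void)
{
	unsigned char site_bytes[7] = { 0xff, 0xd0, 0x90, 0x90, 0x90, 0x90, 0x90 };
	struct paravirt_patch_site p = { site_bytes, 0, sizeof(site_bytes) };
	unsigned i;

	apply_site(&p);
	for (i = 0; i < p.len; i++)
		printf("%02x ", site_bytes[i]);
	printf("\n");
	return 0;
}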
8 changes: 4 additions & 4 deletions arch/x86/events/intel/ds.c
@@ -337,7 +337,7 @@ static int alloc_pebs_buffer(int cpu)
struct debug_store *ds = hwev->ds;
size_t bsiz = x86_pmu.pebs_buffer_size;
int max, node = cpu_to_node(cpu);
void *buffer, *ibuffer, *cea;
void *buffer, *insn_buff, *cea;

if (!x86_pmu.pebs)
return 0;
@@ -351,12 +351,12 @@ static int alloc_pebs_buffer(int cpu)
* buffer then.
*/
if (x86_pmu.intel_cap.pebs_format < 2) {
ibuffer = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node);
if (!ibuffer) {
insn_buff = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node);
if (!insn_buff) {
dsfree_pages(buffer, bsiz);
return -ENOMEM;
}
per_cpu(insn_buffer, cpu) = ibuffer;
per_cpu(insn_buffer, cpu) = insn_buff;
}
hwev->ds_pebs_vaddr = buffer;
/* Update the cpu entry area mapping */
21 changes: 7 additions & 14 deletions arch/x86/include/asm/paravirt_types.h
@@ -88,7 +88,7 @@ struct pv_init_ops {
* the number of bytes of code generated, as we nop pad the
* rest in generic code.
*/
unsigned (*patch)(u8 type, void *insnbuf,
unsigned (*patch)(u8 type, void *insn_buff,
unsigned long addr, unsigned len);
} __no_randomize_layout;

@@ -370,18 +370,11 @@ extern struct paravirt_patch_template pv_ops;
/* Simple instruction patching code. */
#define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"

#define DEF_NATIVE(ops, name, code) \
__visible extern const char start_##ops##_##name[], end_##ops##_##name[]; \
asm(NATIVE_LABEL("start_", ops, name) code NATIVE_LABEL("end_", ops, name))
unsigned paravirt_patch_ident_64(void *insn_buff, unsigned len);
unsigned paravirt_patch_default(u8 type, void *insn_buff, unsigned long addr, unsigned len);
unsigned paravirt_patch_insns(void *insn_buff, unsigned len, const char *start, const char *end);

unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
unsigned paravirt_patch_default(u8 type, void *insnbuf,
unsigned long addr, unsigned len);

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
const char *start, const char *end);

unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len);
unsigned native_patch(u8 type, void *insn_buff, unsigned long addr, unsigned len);

int paravirt_disable_iospace(void);

@@ -679,8 +672,8 @@ u64 _paravirt_ident_64(u64);

/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
u8 *instr; /* original instructions */
u8 instrtype; /* type of this instruction */
u8 *instr; /* original instructions */
u8 type; /* type of this instruction */
u8 len; /* length of original instruction */
};

4 changes: 2 additions & 2 deletions arch/x86/kernel/Makefile
@@ -30,7 +30,7 @@ KASAN_SANITIZE_paravirt.o := n

OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o := y
OBJECT_FILES_NON_STANDARD_test_nx.o := y
OBJECT_FILES_NON_STANDARD_paravirt_patch_$(BITS).o := y
OBJECT_FILES_NON_STANDARD_paravirt_patch.o := y

ifdef CONFIG_FRAME_POINTER
OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o := y
@@ -112,7 +112,7 @@ obj-$(CONFIG_AMD_NB) += amd_nb.o
obj-$(CONFIG_DEBUG_NMI_SELFTEST) += nmi_selftest.o

obj-$(CONFIG_KVM_GUEST) += kvm.o kvmclock.o
obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch_$(BITS).o
obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch.o
obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= paravirt-spinlocks.o
obj-$(CONFIG_PARAVIRT_CLOCK) += pvclock.o
obj-$(CONFIG_X86_PMEM_LEGACY_DEVICE) += pmem.o
53 changes: 26 additions & 27 deletions arch/x86/kernel/alternative.c
@@ -278,7 +278,7 @@ static inline bool is_jmp(const u8 opcode)
}

static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insn_buff)
{
u8 *next_rip, *tgt_rip;
s32 n_dspl, o_dspl;
@@ -287,7 +287,7 @@ recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
if (a->replacementlen != 5)
return;

o_dspl = *(s32 *)(insnbuf + 1);
o_dspl = *(s32 *)(insn_buff + 1);

/* next_rip of the replacement JMP */
next_rip = repl_insn + a->replacementlen;
@@ -313,18 +313,18 @@ recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
two_byte_jmp:
n_dspl -= 2;

insnbuf[0] = 0xeb;
insnbuf[1] = (s8)n_dspl;
add_nops(insnbuf + 2, 3);
insn_buff[0] = 0xeb;
insn_buff[1] = (s8)n_dspl;
add_nops(insn_buff + 2, 3);

repl_len = 2;
goto done;

five_byte_jmp:
n_dspl -= 5;

insnbuf[0] = 0xe9;
*(s32 *)&insnbuf[1] = n_dspl;
insn_buff[0] = 0xe9;
*(s32 *)&insn_buff[1] = n_dspl;

repl_len = 5;

@@ -371,7 +371,7 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
{
struct alt_instr *a;
u8 *instr, *replacement;
u8 insnbuf[MAX_PATCH_LEN];
u8 insn_buff[MAX_PATCH_LEN];

DPRINTK("alt table %px, -> %px", start, end);
/*
@@ -384,11 +384,11 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
* order.
*/
for (a = start; a < end; a++) {
int insnbuf_sz = 0;
int insn_buff_sz = 0;

instr = (u8 *)&a->instr_offset + a->instr_offset;
replacement = (u8 *)&a->repl_offset + a->repl_offset;
BUG_ON(a->instrlen > sizeof(insnbuf));
BUG_ON(a->instrlen > sizeof(insn_buff));
BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
if (!boot_cpu_has(a->cpuid)) {
if (a->padlen > 1)
@@ -406,33 +406,33 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);

memcpy(insnbuf, replacement, a->replacementlen);
insnbuf_sz = a->replacementlen;
memcpy(insn_buff, replacement, a->replacementlen);
insn_buff_sz = a->replacementlen;

/*
* 0xe8 is a relative jump; fix the offset.
*
* Instruction length is checked before the opcode to avoid
* accessing uninitialized bytes for zero-length replacements.
*/
if (a->replacementlen == 5 && *insnbuf == 0xe8) {
*(s32 *)(insnbuf + 1) += replacement - instr;
if (a->replacementlen == 5 && *insn_buff == 0xe8) {
*(s32 *)(insn_buff + 1) += replacement - instr;
DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
*(s32 *)(insnbuf + 1),
(unsigned long)instr + *(s32 *)(insnbuf + 1) + 5);
*(s32 *)(insn_buff + 1),
(unsigned long)instr + *(s32 *)(insn_buff + 1) + 5);
}

if (a->replacementlen && is_jmp(replacement[0]))
recompute_jump(a, instr, replacement, insnbuf);
recompute_jump(a, instr, replacement, insn_buff);

if (a->instrlen > a->replacementlen) {
add_nops(insnbuf + a->replacementlen,
add_nops(insn_buff + a->replacementlen,
a->instrlen - a->replacementlen);
insnbuf_sz += a->instrlen - a->replacementlen;
insn_buff_sz += a->instrlen - a->replacementlen;
}
DUMP_BYTES(insnbuf, insnbuf_sz, "%px: final_insn: ", instr);
DUMP_BYTES(insn_buff, insn_buff_sz, "%px: final_insn: ", instr);

text_poke_early(instr, insnbuf, insnbuf_sz);
text_poke_early(instr, insn_buff, insn_buff_sz);
}
}

@@ -594,22 +594,21 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
struct paravirt_patch_site *end)
{
struct paravirt_patch_site *p;
char insnbuf[MAX_PATCH_LEN];
char insn_buff[MAX_PATCH_LEN];

for (p = start; p < end; p++) {
unsigned int used;

BUG_ON(p->len > MAX_PATCH_LEN);
/* prep the buffer with the original instructions */
memcpy(insnbuf, p->instr, p->len);
used = pv_ops.init.patch(p->instrtype, insnbuf,
(unsigned long)p->instr, p->len);
memcpy(insn_buff, p->instr, p->len);
used = pv_ops.init.patch(p->type, insn_buff, (unsigned long)p->instr, p->len);

BUG_ON(used > p->len);

/* Pad the rest with nops */
add_nops(insnbuf + used, p->len - used);
text_poke_early(p->instr, insnbuf, p->len);
add_nops(insn_buff + used, p->len - used);
text_poke_early(p->instr, insn_buff, p->len);
}
}
extern struct paravirt_patch_site __start_parainstructions[],
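A worked example of the relative-CALL displacement fix-up done by apply_alternatives() above (the *(s32 *)(insn_buff + 1) += replacement - instr; line): a rel32 is relative to the end of the 5-byte instruction, so moving the CALL from the replacement area to the patch site shifts the displacement by (replacement - instr). The three addresses below are made up for the example.

/* Illustrative only: displacement arithmetic for a copied 5-byte CALL. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t target      = 0xffffffff81000800ULL;	/* CALL destination (assumed) */
	uint64_t replacement = 0xffffffff81000100ULL;	/* where the CALL bytes live */
	uint64_t instr       = 0xffffffff81000500ULL;	/* where they get copied to */

	int32_t old_disp = (int32_t)(target - (replacement + 5));
	/* Same adjustment as: *(s32 *)(insn_buff + 1) += replacement - instr; */
	int32_t new_disp = old_disp + (int32_t)(replacement - instr);

	printf("old 0x%x, new 0x%x, expected 0x%x\n",
	       (unsigned)old_disp, (unsigned)new_disp,
	       (unsigned)(int32_t)(target - (instr + 5)));
	return 0;
}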
16 changes: 8 additions & 8 deletions arch/x86/kernel/kprobes/opt.c
@@ -422,7 +422,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
void arch_optimize_kprobes(struct list_head *oplist)
{
struct optimized_kprobe *op, *tmp;
u8 insn_buf[RELATIVEJUMP_SIZE];
u8 insn_buff[RELATIVEJUMP_SIZE];

list_for_each_entry_safe(op, tmp, oplist, list) {
s32 rel = (s32)((long)op->optinsn.insn -
@@ -434,10 +434,10 @@ void arch_optimize_kprobes(struct list_head *oplist)
memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
RELATIVE_ADDR_SIZE);

insn_buf[0] = RELATIVEJUMP_OPCODE;
*(s32 *)(&insn_buf[1]) = rel;
insn_buff[0] = RELATIVEJUMP_OPCODE;
*(s32 *)(&insn_buff[1]) = rel;

text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
text_poke_bp(op->kp.addr, insn_buff, RELATIVEJUMP_SIZE,
op->optinsn.insn);

list_del_init(&op->list);
@@ -447,12 +447,12 @@ void arch_optimize_kprobes(struct list_head *oplist)
/* Replace a relative jump with a breakpoint (int3). */
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
u8 insn_buf[RELATIVEJUMP_SIZE];
u8 insn_buff[RELATIVEJUMP_SIZE];

/* Set int3 to first byte for kprobes */
insn_buf[0] = BREAKPOINT_INSTRUCTION;
memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
insn_buff[0] = BREAKPOINT_INSTRUCTION;
memcpy(insn_buff + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
text_poke_bp(op->kp.addr, insn_buff, RELATIVEJUMP_SIZE,
op->optinsn.insn);
}

46 changes: 23 additions & 23 deletions arch/x86/kernel/paravirt.c
@@ -58,24 +58,24 @@ struct branch {
u32 delta;
} __attribute__((packed));

static unsigned paravirt_patch_call(void *insnbuf, const void *target,
static unsigned paravirt_patch_call(void *insn_buff, const void *target,
unsigned long addr, unsigned len)
{
struct branch *b = insnbuf;
unsigned long delta = (unsigned long)target - (addr+5);

if (len < 5) {
#ifdef CONFIG_RETPOLINE
WARN_ONCE(1, "Failing to patch indirect CALL in %ps\n", (void *)addr);
#endif
return len; /* call too long for patch site */
const int call_len = 5;
struct branch *b = insn_buff;
unsigned long delta = (unsigned long)target - (addr+call_len);

if (len < call_len) {
pr_warn("paravirt: Failed to patch indirect CALL at %ps\n", (void *)addr);
/* Kernel might not be viable if patching fails, bail out: */
BUG_ON(1);
}

b->opcode = 0xe8; /* call */
b->delta = delta;
BUILD_BUG_ON(sizeof(*b) != 5);
BUILD_BUG_ON(sizeof(*b) != call_len);

return 5;
return call_len;
}

#ifdef CONFIG_PARAVIRT_XXL
@@ -85,10 +85,10 @@ u64 notrace _paravirt_ident_64(u64 x)
return x;
}

static unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
static unsigned paravirt_patch_jmp(void *insn_buff, const void *target,
unsigned long addr, unsigned len)
{
struct branch *b = insnbuf;
struct branch *b = insn_buff;
unsigned long delta = (unsigned long)target - (addr+5);

if (len < 5) {
@@ -113,7 +113,7 @@ void __init native_pv_lock_init(void)
static_branch_disable(&virt_spin_lock_key);
}

unsigned paravirt_patch_default(u8 type, void *insnbuf,
unsigned paravirt_patch_default(u8 type, void *insn_buff,
unsigned long addr, unsigned len)
{
/*
@@ -125,36 +125,36 @@ unsigned paravirt_patch_default(u8 type, void *insn_buff,

if (opfunc == NULL)
/* If there's no function, patch it with a ud2a (BUG) */
ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
ret = paravirt_patch_insns(insn_buff, len, ud2a, ud2a+sizeof(ud2a));
else if (opfunc == _paravirt_nop)
ret = 0;

#ifdef CONFIG_PARAVIRT_XXL
/* identity functions just return their single argument */
else if (opfunc == _paravirt_ident_64)
ret = paravirt_patch_ident_64(insnbuf, len);
ret = paravirt_patch_ident_64(insn_buff, len);

else if (type == PARAVIRT_PATCH(cpu.iret) ||
type == PARAVIRT_PATCH(cpu.usergs_sysret64))
/* If operation requires a jmp, then jmp */
ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
ret = paravirt_patch_jmp(insn_buff, opfunc, addr, len);
#endif
else
/* Otherwise call the function. */
ret = paravirt_patch_call(insnbuf, opfunc, addr, len);
ret = paravirt_patch_call(insn_buff, opfunc, addr, len);

return ret;
}

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
unsigned paravirt_patch_insns(void *insn_buff, unsigned len,
const char *start, const char *end)
{
unsigned insn_len = end - start;

if (insn_len > len || start == NULL)
insn_len = len;
else
memcpy(insnbuf, start, insn_len);
/* Alternative instruction is too large for the patch site and we cannot continue: */
BUG_ON(insn_len > len || start == NULL);

memcpy(insn_buff, start, insn_len);

return insn_len;
}
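For reference, a minimal sketch of the 5-byte CALL that paravirt_patch_call() above assembles through the packed struct branch: opcode 0xe8 followed by a rel32 equal to target - (addr + 5). The two addresses are invented; only the encoding and the struct layout mirror the hunk above.

/* Illustrative only: the CALL encoding built by paravirt_patch_call(). */
#include <stdio.h>
#include <stdint.h>

struct branch {
	uint8_t  opcode;
	uint32_t delta;
} __attribute__((packed));

int main(void)
{
	unsigned char insn_buff[5];
	uint64_t addr   = 0xffffffff81001000ULL;	/* patch site (assumed) */
	uint64_t target = 0xffffffff81002000ULL;	/* pv op target (assumed) */
	struct branch *b = (struct branch *)insn_buff;
	int i;

	b->opcode = 0xe8;				/* CALL rel32 */
	b->delta  = (uint32_t)(target - (addr + 5));	/* relative to the next insn */

	for (i = 0; i < 5; i++)
		printf("%02x ", insn_buff[i]);
	printf("\n");	/* prints: e8 fb 0f 00 00 */
	return 0;
}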
(diffs for the remaining changed files are not shown)
