Skip to content

Commit

Permalink
mm, kprobes: generalize and rename notify_page_fault() as kprobe_page_fault()
Browse files Browse the repository at this point in the history

Architectures which support kprobes have very similar boilerplate around
calling kprobe_fault_handler().  Use a helper function in kprobes.h to
unify them, based on the x86 code.

This changes the behaviour for other architectures when preemption is
enabled.  Previously, they would have disabled preemption while calling
the kprobe handler.  However, if the fault had been raised by a kprobe,
preemption would already have been disabled; so when preemption is found
to be enabled we know the fault was not due to a kprobe handler and can
simply return failure.

This behaviour was introduced in commit a980c0e ("x86/kprobes:
Refactor kprobes_fault() like kprobe_exceptions_notify()")

[[email protected]: export kprobe_fault_handler()]
  Link: http://lkml.kernel.org/r/[email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Anshuman Khandual <[email protected]>
Reviewed-by: Dave Hansen <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Christophe Leroy <[email protected]>
Cc: Stephen Rothwell <[email protected]>
Cc: Andrey Konovalov <[email protected]>
Cc: Michael Ellerman <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Russell King <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: Fenghua Yu <[email protected]>
Cc: Martin Schwidefsky <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: Yoshinori Sato <[email protected]>
Cc: "David S. Miller" <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Vineet Gupta <[email protected]>
Cc: James Hogan <[email protected]>
Cc: Paul Burton <[email protected]>
Cc: Ralf Baechle <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
  • Loading branch information
Anshuman Khandual authored and torvalds committed Jul 17, 2019
1 parent 92bae78 commit b98cca4
Show file tree
Hide file tree
Showing 11 changed files with 32 additions and 156 deletions.
24 changes: 1 addition & 23 deletions arch/arm/mm/fault.c
Original file line number Diff line number Diff line change
Expand Up @@ -27,28 +27,6 @@

#ifdef CONFIG_MMU

#ifdef CONFIG_KPROBES
/*
 * Return 1 if a kernel-mode fault was consumed by the currently-running
 * kprobe's fault handler, 0 otherwise.  (Removed by this commit in favour
 * of the generic kprobe_page_fault() helper.)
 */
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
int ret = 0;

/* Kprobes never probe user-mode faults; only check kernel-mode ones. */
if (!user_mode(regs)) {
/* kprobe_running() needs smp_processor_id() */
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, fsr))
ret = 1;
preempt_enable();
}

return ret;
}
#else
/* Stub when kprobes are compiled out: never claims the fault. */
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
return 0;
}
#endif

/*
* This is useful to dump out the page tables associated with
* 'addr' in mm 'mm'.
Expand Down Expand Up @@ -265,7 +243,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
vm_fault_t fault;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

if (notify_page_fault(regs, fsr))
if (kprobe_page_fault(regs, fsr))
return 0;

tsk = current;
Expand Down
24 changes: 1 addition & 23 deletions arch/arm64/mm/fault.c
Original file line number Diff line number Diff line change
Expand Up @@ -59,28 +59,6 @@ static inline const struct fault_info *esr_to_debug_fault_info(unsigned int esr)
return debug_fault_info + DBG_ESR_EVT(esr);
}

#ifdef CONFIG_KPROBES
/*
 * Return 1 if a kernel-mode fault (described by ESR) was consumed by the
 * currently-running kprobe's fault handler, 0 otherwise.  (Removed by
 * this commit in favour of the generic kprobe_page_fault() helper.)
 */
static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
{
int ret = 0;

/* kprobe_running() needs smp_processor_id() */
if (!user_mode(regs)) {
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, esr))
ret = 1;
preempt_enable();
}

return ret;
}
#else
/* Stub when kprobes are compiled out: never claims the fault. */
static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
{
return 0;
}
#endif

static void data_abort_decode(unsigned int esr)
{
pr_alert("Data abort info:\n");
Expand Down Expand Up @@ -434,7 +412,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
unsigned long vm_flags = VM_READ | VM_WRITE;
unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

if (notify_page_fault(regs, esr))
if (kprobe_page_fault(regs, esr))
return 0;

/*
Expand Down
24 changes: 1 addition & 23 deletions arch/ia64/mm/fault.c
Original file line number Diff line number Diff line change
Expand Up @@ -21,28 +21,6 @@

extern int die(char *, struct pt_regs *, long);

#ifdef CONFIG_KPROBES
/*
 * Return 1 if a kernel-mode fault (trap number 'trap') was consumed by
 * the currently-running kprobe's fault handler, 0 otherwise.  (Removed
 * by this commit in favour of the generic kprobe_page_fault() helper.)
 */
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
int ret = 0;

/* Kprobes never probe user-mode faults; only check kernel-mode ones. */
if (!user_mode(regs)) {
/* kprobe_running() needs smp_processor_id() */
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, trap))
ret = 1;
preempt_enable();
}

return ret;
}
#else
/* Stub when kprobes are compiled out: never claims the fault. */
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
return 0;
}
#endif

/*
* Return TRUE if ADDRESS points at a page in the kernel's mapped segment
* (inside region 5, on ia64) and that page is present.
Expand Down Expand Up @@ -116,7 +94,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
/*
* This is to handle the kprobes on user space access instructions
*/
if (notify_page_fault(regs, TRAP_BRKPT))
if (kprobe_page_fault(regs, TRAP_BRKPT))
return;

if (user_mode(regs))
Expand Down
1 change: 1 addition & 0 deletions arch/mips/include/asm/kprobes.h
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@ do { \
#define kretprobe_blacklist_size 0

void arch_remove_kprobe(struct kprobe *p);
int kprobe_fault_handler(struct pt_regs *regs, int trapnr);

/* Architecture specific copy of original instruction*/
struct arch_specific_insn {
Expand Down
2 changes: 1 addition & 1 deletion arch/mips/kernel/kprobes.c
Original file line number Diff line number Diff line change
Expand Up @@ -398,7 +398,7 @@ static inline int post_kprobe_handler(struct pt_regs *regs)
return 1;
}

static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
Expand Down
23 changes: 2 additions & 21 deletions arch/powerpc/mm/fault.c
Original file line number Diff line number Diff line change
Expand Up @@ -42,26 +42,6 @@
#include <asm/debug.h>
#include <asm/kup.h>

/*
 * Return true if the fault was consumed either by a running kprobe's
 * fault handler (trap number 11 on powerpc) or by the kernel debugger.
 * (Removed by this commit; callers now combine kprobe_page_fault() with
 * debugger_fault_handler() inline.)
 */
static inline bool notify_page_fault(struct pt_regs *regs)
{
bool ret = false;

#ifdef CONFIG_KPROBES
/* kprobe_running() needs smp_processor_id() */
if (!user_mode(regs)) {
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, 11))
ret = true;
preempt_enable();
}
#endif /* CONFIG_KPROBES */

/* The debugger hook is checked even when kprobes are compiled out. */
if (unlikely(debugger_fault_handler(regs)))
ret = true;

return ret;
}

/*
* Check whether the instruction inst is a store using
* an update addressing form which will update r1.
Expand Down Expand Up @@ -461,8 +441,9 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
int is_write = page_fault_is_write(error_code);
vm_fault_t fault, major = 0;
bool must_retry = false;
bool kprobe_fault = kprobe_page_fault(regs, 11);

if (notify_page_fault(regs))
if (unlikely(debugger_fault_handler(regs) || kprobe_fault))
return 0;

if (unlikely(page_fault_is_bad(error_code))) {
Expand Down
16 changes: 1 addition & 15 deletions arch/s390/mm/fault.c
Original file line number Diff line number Diff line change
Expand Up @@ -67,20 +67,6 @@ static int __init fault_init(void)
}
early_initcall(fault_init);

/*
 * Return 1 if a kernel-mode fault (trap number 14 on s390) was consumed
 * by the currently-running kprobe's fault handler, 0 otherwise.
 * kprobes_built_in() folds the whole check away when CONFIG_KPROBES is
 * off.  (Removed by this commit in favour of kprobe_page_fault().)
 */
static inline int notify_page_fault(struct pt_regs *regs)
{
int ret = 0;

/* kprobe_running() needs smp_processor_id() */
if (kprobes_built_in() && !user_mode(regs)) {
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, 14))
ret = 1;
preempt_enable();
}
return ret;
}

/*
* Find out which address space caused the exception.
*/
Expand Down Expand Up @@ -412,7 +398,7 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
*/
clear_pt_regs_flag(regs, PIF_PER_TRAP);

if (notify_page_fault(regs))
if (kprobe_page_fault(regs, 14))
return 0;

mm = tsk->mm;
Expand Down
18 changes: 2 additions & 16 deletions arch/sh/mm/fault.c
Original file line number Diff line number Diff line change
Expand Up @@ -24,20 +24,6 @@
#include <asm/tlbflush.h>
#include <asm/traps.h>

/*
 * Return 1 if a kernel-mode fault (trap number 'trap') was consumed by
 * the currently-running kprobe's fault handler, 0 otherwise.
 * kprobes_built_in() folds the whole check away when CONFIG_KPROBES is
 * off.  (Removed by this commit in favour of kprobe_page_fault().)
 */
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
int ret = 0;

/* kprobe_running() needs smp_processor_id(), hence preempt_disable(). */
if (kprobes_built_in() && !user_mode(regs)) {
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, trap))
ret = 1;
preempt_enable();
}

return ret;
}

static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address)
{
Expand Down Expand Up @@ -412,14 +398,14 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
if (unlikely(fault_in_kernel_space(address))) {
if (vmalloc_fault(address) >= 0)
return;
if (notify_page_fault(regs, vec))
if (kprobe_page_fault(regs, vec))
return;

bad_area_nosemaphore(regs, error_code, address);
return;
}

if (unlikely(notify_page_fault(regs, vec)))
if (unlikely(kprobe_page_fault(regs, vec)))
return;

/* Only enable interrupts if they were on before the fault */
Expand Down
16 changes: 1 addition & 15 deletions arch/sparc/mm/fault_64.c
Original file line number Diff line number Diff line change
Expand Up @@ -38,20 +38,6 @@

int show_unhandled_signals = 1;

/*
 * Return 1 if a kernel-mode fault (trap number 0 on sparc64) was
 * consumed by the currently-running kprobe's fault handler, 0 otherwise.
 * kprobes_built_in() folds the whole check away when CONFIG_KPROBES is
 * off.  (Removed by this commit in favour of kprobe_page_fault().)
 */
static inline __kprobes int notify_page_fault(struct pt_regs *regs)
{
int ret = 0;

/* kprobe_running() needs smp_processor_id() */
if (kprobes_built_in() && !user_mode(regs)) {
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, 0))
ret = 1;
preempt_enable();
}
return ret;
}

static void __kprobes unhandled_fault(unsigned long address,
struct task_struct *tsk,
struct pt_regs *regs)
Expand Down Expand Up @@ -285,7 +271,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)

fault_code = get_thread_fault_code();

if (notify_page_fault(regs))
if (kprobe_page_fault(regs, 0))
goto exit_exception;

si_code = SEGV_MAPERR;
Expand Down
21 changes: 2 additions & 19 deletions arch/x86/mm/fault.c
Original file line number Diff line number Diff line change
Expand Up @@ -46,23 +46,6 @@ kmmio_fault(struct pt_regs *regs, unsigned long addr)
return 0;
}

/*
 * Return nonzero if the page fault was consumed by the currently-running
 * kprobe's fault handler.  This is the x86 version the new generic
 * kprobe_page_fault() helper is modelled on: instead of disabling
 * preemption around kprobe_running(), it bails out early whenever
 * preemption is enabled, since a fault raised from a kprobe handler
 * always runs with preemption disabled.  (Removed by this commit in
 * favour of kprobe_page_fault().)
 */
static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
{
if (!kprobes_built_in())
return 0;
if (user_mode(regs))
return 0;
/*
 * To be potentially processing a kprobe fault and to be allowed to call
 * kprobe_running(), we have to be non-preemptible.
 */
if (preemptible())
return 0;
if (!kprobe_running())
return 0;
return kprobe_fault_handler(regs, X86_TRAP_PF);
}

/*
* Prefetch quirks:
*
Expand Down Expand Up @@ -1282,7 +1265,7 @@ do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
return;

/* kprobes don't want to hook the spurious faults: */
if (kprobes_fault(regs))
if (kprobe_page_fault(regs, X86_TRAP_PF))
return;

/*
Expand Down Expand Up @@ -1313,7 +1296,7 @@ void do_user_addr_fault(struct pt_regs *regs,
mm = tsk->mm;

/* kprobes don't want to hook the spurious faults: */
if (unlikely(kprobes_fault(regs)))
if (unlikely(kprobe_page_fault(regs, X86_TRAP_PF)))
return;

/*
Expand Down
19 changes: 19 additions & 0 deletions include/linux/kprobes.h
Original file line number Diff line number Diff line change
Expand Up @@ -458,4 +458,23 @@ static inline bool is_kprobe_optinsn_slot(unsigned long addr)
}
#endif

/*
 * Returns true if kprobes handled the fault.
 *
 * Generic replacement for the per-architecture notify_page_fault()
 * boilerplate, based on the x86 kprobes_fault() implementation.  'trap'
 * is the architecture-specific trap/fault number forwarded to
 * kprobe_fault_handler().  Faults from user mode, or taken while
 * preemptible, can never belong to a kprobe handler (kprobe handlers run
 * with preemption disabled), so those cases return false without
 * touching any kprobe state.
 */
static nokprobe_inline bool kprobe_page_fault(struct pt_regs *regs,
unsigned int trap)
{
if (!kprobes_built_in())
return false;
if (user_mode(regs))
return false;
/*
 * To be potentially processing a kprobe fault and to be allowed
 * to call kprobe_running(), we have to be non-preemptible.
 */
if (preemptible())
return false;
if (!kprobe_running())
return false;
return kprobe_fault_handler(regs, trap);
}

#endif /* _LINUX_KPROBES_H */

0 comments on commit b98cca4

Please sign in to comment.