From 6a5022a56ac37da7bffece043331a101ed0040b1 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 17 Apr 2014 17:16:51 +0900 Subject: [PATCH 001/101] kprobes/x86: Allow to handle reentered kprobe on single-stepping Since NMI handlers (e.g. perf) can interrupt single-stepping (or the preparation of single-stepping, do_debug() etc.), we must consider the case where a kprobe is hit inside an NMI handler. Even in that case, the kprobe is allowed to be reentered, just like kprobes hit in kprobe handlers (KPROBE_HIT_ACTIVE or KPROBE_HIT_SSDONE). The real issue arises when a kprobe is hit while another reentered kprobe is being processed (KPROBE_REENTER), because we have already consumed the save area for the previous kprobe. Signed-off-by: Masami Hiramatsu Reviewed-by: Steven Rostedt Cc: Jiri Kosina Cc: Jonathan Lebon Link: http://lkml.kernel.org/r/20140417081651.26341.10593.stgit@ltc230.yrl.intra.hitachi.co.jp Signed-off-by: Ingo Molnar --- arch/x86/kernel/kprobes/core.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 61b17dc2c27734..da7bdaa3ce159e 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -531,10 +531,11 @@ reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb switch (kcb->kprobe_status) { case KPROBE_HIT_SSDONE: case KPROBE_HIT_ACTIVE: + case KPROBE_HIT_SS: kprobes_inc_nmissed_count(p); setup_singlestep(p, regs, kcb, 1); break; - case KPROBE_HIT_SS: + case KPROBE_REENTER: /* A probe has been hit in the codepath leading up to, or just * after, single-stepping of a probed instruction. This entire * codepath should strictly reside in .kprobes.text section. From be8f274323c26ddc7e6fd6c44254b7abcdbe6389 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 17 Apr 2014 17:16:58 +0900 Subject: [PATCH 002/101] kprobes: Prohibit probing on .entry.text code .entry.text is a code area used for interrupt/syscall entries, and it contains a lot of sensitive code. Thus, it is better to prohibit probing on all of this code instead of only a part of it. Since some symbols are already registered on the kprobe blacklist, this also removes them from the blacklist. Signed-off-by: Masami Hiramatsu Reviewed-by: Steven Rostedt Cc: Ananth N Mavinakayanahalli Cc: Anil S Keshavamurthy Cc: Borislav Petkov Cc: David S.
Miller Cc: Frederic Weisbecker Cc: Jan Kiszka Cc: Jiri Kosina Cc: Jonathan Lebon Cc: Seiji Aguchi Link: http://lkml.kernel.org/r/20140417081658.26341.57354.stgit@ltc230.yrl.intra.hitachi.co.jp Signed-off-by: Ingo Molnar --- arch/x86/kernel/entry_32.S | 33 --------------------------------- arch/x86/kernel/entry_64.S | 20 -------------------- arch/x86/kernel/kprobes/core.c | 8 ++++++++ include/linux/kprobes.h | 1 + kernel/kprobes.c | 13 ++++++++----- 5 files changed, 17 insertions(+), 58 deletions(-) diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index a2a4f469788965..0ca5bf1697bb85 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -314,10 +314,6 @@ ENTRY(ret_from_kernel_thread) CFI_ENDPROC ENDPROC(ret_from_kernel_thread) -/* - * Interrupt exit functions should be protected against kprobes - */ - .pushsection .kprobes.text, "ax" /* * Return to user mode is not as complex as all this looks, * but we want the default path for a system call return to @@ -372,10 +368,6 @@ need_resched: END(resume_kernel) #endif CFI_ENDPROC -/* - * End of kprobes section - */ - .popsection /* SYSENTER_RETURN points to after the "sysenter" instruction in the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */ @@ -495,10 +487,6 @@ sysexit_audit: PTGS_TO_GS_EX ENDPROC(ia32_sysenter_target) -/* - * syscall stub including irq exit should be protected against kprobes - */ - .pushsection .kprobes.text, "ax" # system call handler stub ENTRY(system_call) RING0_INT_FRAME # can't unwind into user space anyway @@ -691,10 +679,6 @@ syscall_badsys: jmp resume_userspace END(syscall_badsys) CFI_ENDPROC -/* - * End of kprobes section - */ - .popsection .macro FIXUP_ESPFIX_STACK /* @@ -781,10 +765,6 @@ common_interrupt: ENDPROC(common_interrupt) CFI_ENDPROC -/* - * Irq entries should be protected against kprobes - */ - .pushsection .kprobes.text, "ax" #define BUILD_INTERRUPT3(name, nr, fn) \ ENTRY(name) \ RING0_INT_FRAME; \ @@ -961,10 +941,6 @@ ENTRY(spurious_interrupt_bug) jmp error_code CFI_ENDPROC END(spurious_interrupt_bug) -/* - * End of kprobes section - */ - .popsection #ifdef CONFIG_XEN /* Xen doesn't set %esp to be precisely what the normal sysenter @@ -1239,11 +1215,6 @@ return_to_handler: jmp *%ecx #endif -/* - * Some functions should be protected against kprobes - */ - .pushsection .kprobes.text, "ax" - #ifdef CONFIG_TRACING ENTRY(trace_page_fault) RING0_EC_FRAME @@ -1453,7 +1424,3 @@ ENTRY(async_page_fault) END(async_page_fault) #endif -/* - * End of kprobes section - */ - .popsection diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 1e96c3628bf24f..43bb3895166016 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -487,8 +487,6 @@ ENDPROC(native_usergs_sysret64) TRACE_IRQS_OFF .endm -/* save complete stack frame */ - .pushsection .kprobes.text, "ax" ENTRY(save_paranoid) XCPT_FRAME 1 RDI+8 cld @@ -517,7 +515,6 @@ ENTRY(save_paranoid) 1: ret CFI_ENDPROC END(save_paranoid) - .popsection /* * A newly forked process directly context switches into this address. @@ -975,10 +972,6 @@ END(interrupt) call \func .endm -/* - * Interrupt entry/exit should be protected against kprobes - */ - .pushsection .kprobes.text, "ax" /* * The interrupt stubs push (~vector+0x80) onto the stack and * then jump to common_interrupt. @@ -1113,10 +1106,6 @@ ENTRY(retint_kernel) CFI_ENDPROC END(common_interrupt) -/* - * End of kprobes section - */ - .popsection /* * APIC interrupts. 
@@ -1477,11 +1466,6 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ hyperv_callback_vector hyperv_vector_handler #endif /* CONFIG_HYPERV */ -/* - * Some functions should be protected against kprobes - */ - .pushsection .kprobes.text, "ax" - paranoidzeroentry_ist debug do_debug DEBUG_STACK paranoidzeroentry_ist int3 do_int3 DEBUG_STACK paranoiderrorentry stack_segment do_stack_segment @@ -1898,7 +1882,3 @@ ENTRY(ignore_sysret) CFI_ENDPROC END(ignore_sysret) -/* - * End of kprobes section - */ - .popsection diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index da7bdaa3ce159e..7751b3dee53a08 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -1065,6 +1065,14 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) return 0; } +bool arch_within_kprobe_blacklist(unsigned long addr) +{ + return (addr >= (unsigned long)__kprobes_text_start && + addr < (unsigned long)__kprobes_text_end) || + (addr >= (unsigned long)__entry_text_start && + addr < (unsigned long)__entry_text_end); +} + int __init arch_init_kprobes(void) { return 0; diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 925eaf28fca9cc..cdf9251f82496b 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -265,6 +265,7 @@ extern void arch_disarm_kprobe(struct kprobe *p); extern int arch_init_kprobes(void); extern void show_registers(struct pt_regs *regs); extern void kprobes_inc_nmissed_count(struct kprobe *p); +extern bool arch_within_kprobe_blacklist(unsigned long addr); struct kprobe_insn_cache { struct mutex mutex; diff --git a/kernel/kprobes.c b/kernel/kprobes.c index ceeadfcabb7611..5b5ac76671e7c3 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -96,9 +96,6 @@ static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash) static struct kprobe_blackpoint kprobe_blacklist[] = { {"preempt_schedule",}, {"native_get_debugreg",}, - {"irq_entries_start",}, - {"common_interrupt",}, - {"mcount",}, /* mcount can be called from everywhere */ {NULL} /* Terminator */ }; @@ -1324,12 +1321,18 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p, return ret; } +bool __weak arch_within_kprobe_blacklist(unsigned long addr) +{ + /* The __kprobes marked functions and entry code must not be probed */ + return addr >= (unsigned long)__kprobes_text_start && + addr < (unsigned long)__kprobes_text_end; +} + static int __kprobes in_kprobes_functions(unsigned long addr) { struct kprobe_blackpoint *kb; - if (addr >= (unsigned long)__kprobes_text_start && - addr < (unsigned long)__kprobes_text_end) + if (arch_within_kprobe_blacklist(addr)) return -EINVAL; /* * If there exists a kprobe_blacklist, verify and From 376e242429bf8539ef39a080ac113c8799840b13 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 17 Apr 2014 17:17:05 +0900 Subject: [PATCH 003/101] kprobes: Introduce NOKPROBE_SYMBOL() macro to maintain kprobes blacklist MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduce NOKPROBE_SYMBOL() macro which builds a kprobes blacklist at kernel build time. The usage of this macro is similar to EXPORT_SYMBOL(), placed after the function definition: NOKPROBE_SYMBOL(function); Since this macro will inhibit inlining of static/inline functions, this patch also introduces a nokprobe_inline macro for static/inline functions. In this case, we must use NOKPROBE_SYMBOL() for the inline function caller. 
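For illustration, here is a minimal usage sketch (the function names are hypothetical, not part of this patch): the static helper is marked nokprobe_inline so that, with CONFIG_KPROBES=y, it is always inlined and leaves no probeable symbol of its own, while the non-inline caller gets a blacklist entry after its definition:

#include <linux/errno.h>
#include <linux/kprobes.h>

/*
 * Hypothetical helper: always inlined under CONFIG_KPROBES=y, so the
 * blacklist entry must go on its callers instead.
 */
static nokprobe_inline int my_check_fault_addr(unsigned long addr)
{
        return addr != 0;
}

/* Hypothetical caller running in an exception path. */
int my_fault_dispatcher(unsigned long addr)
{
        if (!my_check_fault_addr(addr))
                return -EINVAL;
        return 0;
}
/* Placed after the function definition, like EXPORT_SYMBOL(). */
NOKPROBE_SYMBOL(my_fault_dispatcher);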
When CONFIG_KPROBES=y, the macro stores the given function address in the "_kprobe_blacklist" section. Since the data structures are not fully initialized by the macro (because there is no "size" information), those are re-initialized at boot time by using kallsyms. Signed-off-by: Masami Hiramatsu Link: http://lkml.kernel.org/r/20140417081705.26341.96719.stgit@ltc230.yrl.intra.hitachi.co.jp Cc: Alok Kataria Cc: Ananth N Mavinakayanahalli Cc: Andrew Morton Cc: Anil S Keshavamurthy Cc: Arnd Bergmann Cc: Christopher Li Cc: Chris Wright Cc: David S. Miller Cc: Jan-Simon Möller Cc: Jeremy Fitzhardinge Cc: Linus Torvalds Cc: Randy Dunlap Cc: Rusty Russell Cc: linux-arch@vger.kernel.org Cc: linux-doc@vger.kernel.org Cc: linux-sparse@vger.kernel.org Cc: virtualization@lists.linux-foundation.org Signed-off-by: Ingo Molnar --- Documentation/kprobes.txt | 16 ++++- arch/x86/include/asm/asm.h | 7 +++ arch/x86/kernel/paravirt.c | 4 ++ include/asm-generic/vmlinux.lds.h | 9 +++ include/linux/compiler.h | 2 + include/linux/kprobes.h | 20 +++++- kernel/kprobes.c | 100 ++++++++++++++++-------------- kernel/sched/core.c | 1 + 8 files changed, 107 insertions(+), 52 deletions(-) diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt index 0cfb00fd86ffd7..4bbeca8483ed33 100644 --- a/Documentation/kprobes.txt +++ b/Documentation/kprobes.txt @@ -22,8 +22,9 @@ Appendix B: The kprobes sysctl interface Kprobes enables you to dynamically break into any kernel routine and collect debugging and performance information non-disruptively. You -can trap at almost any kernel code address, specifying a handler +can trap at almost any kernel code address(*), specifying a handler routine to be invoked when the breakpoint is hit. +(*: some parts of the kernel code can not be trapped, see 1.5 Blacklist) There are currently three types of probes: kprobes, jprobes, and kretprobes (also called return probes). A kprobe can be inserted @@ -273,6 +274,19 @@ using one of the following techniques: or - Execute 'sysctl -w debug.kprobes_optimization=n' +1.5 Blacklist + +Kprobes can probe most of the kernel except itself. This means +that there are some functions where kprobes cannot probe. Probing +(trapping) such functions can cause a recursive trap (e.g. double +fault) or the nested probe handler may never be called. +Kprobes manages such functions as a blacklist. +If you want to add a function into the blacklist, you just need +to (1) include linux/kprobes.h and (2) use NOKPROBE_SYMBOL() macro +to specify a blacklisted function. +Kprobes checks the given probe address against the blacklist and +rejects registering it, if the given address is in the blacklist. + 2. Architectures Supported Kprobes, jprobes, and return probes are implemented on the following diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h index 4582e8e1cd1ad4..7730c1c5c83aa7 100644 --- a/arch/x86/include/asm/asm.h +++ b/arch/x86/include/asm/asm.h @@ -57,6 +57,12 @@ .long (from) - . ; \ .long (to) - . + 0x7ffffff0 ; \ .popsection + +# define _ASM_NOKPROBE(entry) \ + .pushsection "_kprobe_blacklist","aw" ; \ + _ASM_ALIGN ; \ + _ASM_PTR (entry); \ + .popsection #else # define _ASM_EXTABLE(from,to) \ " .pushsection \"__ex_table\",\"a\"\n" \ @@ -71,6 +77,7 @@ " .long (" #from ") - .\n" \ " .long (" #to ") - . 
+ 0x7ffffff0\n" \ " .popsection\n" +/* For C file, we already have NOKPROBE_SYMBOL macro */ #endif #endif /* _ASM_X86_ASM_H */ diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 1b10af835c31d9..e136869ae42e09 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -389,6 +390,9 @@ __visible struct pv_cpu_ops pv_cpu_ops = { .end_context_switch = paravirt_nop, }; +/* At this point, native_get_debugreg has a real function entry */ +NOKPROBE_SYMBOL(native_get_debugreg); + struct pv_apic_ops pv_apic_ops = { #ifdef CONFIG_X86_LOCAL_APIC .startup_ipi_hook = paravirt_nop, diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 146e4fffd710ca..40ceb3ceba79e2 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -109,6 +109,14 @@ #define BRANCH_PROFILE() #endif +#ifdef CONFIG_KPROBES +#define KPROBE_BLACKLIST() VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \ + *(_kprobe_blacklist) \ + VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .; +#else +#define KPROBE_BLACKLIST() +#endif + #ifdef CONFIG_EVENT_TRACING #define FTRACE_EVENTS() . = ALIGN(8); \ VMLINUX_SYMBOL(__start_ftrace_events) = .; \ @@ -507,6 +515,7 @@ *(.init.rodata) \ FTRACE_EVENTS() \ TRACE_SYSCALLS() \ + KPROBE_BLACKLIST() \ MEM_DISCARD(init.rodata) \ CLK_OF_TABLES() \ RESERVEDMEM_OF_TABLES() \ diff --git a/include/linux/compiler.h b/include/linux/compiler.h index ee7239ea158348..0300c0f5c88b03 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -374,7 +374,9 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */ #ifdef CONFIG_KPROBES # define __kprobes __attribute__((__section__(".kprobes.text"))) +# define nokprobe_inline __always_inline #else # define __kprobes +# define nokprobe_inline inline #endif #endif /* __LINUX_COMPILER_H */ diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index cdf9251f82496b..e059507c465dcc 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -205,10 +205,10 @@ struct kretprobe_blackpoint { void *addr; }; -struct kprobe_blackpoint { - const char *name; +struct kprobe_blacklist_entry { + struct list_head list; unsigned long start_addr; - unsigned long range; + unsigned long end_addr; }; #ifdef CONFIG_KPROBES @@ -477,4 +477,18 @@ static inline int enable_jprobe(struct jprobe *jp) return enable_kprobe(&jp->kp); } +#ifdef CONFIG_KPROBES +/* + * Blacklist ganerating macro. Specify functions which is not probed + * by using this macro. + */ +#define __NOKPROBE_SYMBOL(fname) \ +static unsigned long __used \ + __attribute__((section("_kprobe_blacklist"))) \ + _kbl_addr_##fname = (unsigned long)fname; +#define NOKPROBE_SYMBOL(fname) __NOKPROBE_SYMBOL(fname) +#else +#define NOKPROBE_SYMBOL(fname) +#endif + #endif /* _LINUX_KPROBES_H */ diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 5b5ac76671e7c3..5ffc6875d2a7d4 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -86,18 +86,8 @@ static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash) return &(kretprobe_table_locks[hash].lock); } -/* - * Normally, functions that we'd want to prohibit kprobes in, are marked - * __kprobes. 
But, there are cases where such functions already belong to - * a different section (__sched for preempt_schedule) - * - * For such cases, we now have a blacklist - */ -static struct kprobe_blackpoint kprobe_blacklist[] = { - {"preempt_schedule",}, - {"native_get_debugreg",}, - {NULL} /* Terminator */ -}; +/* Blacklist -- list of struct kprobe_blacklist_entry */ +static LIST_HEAD(kprobe_blacklist); #ifdef __ARCH_WANT_KPROBES_INSN_SLOT /* @@ -1328,24 +1318,22 @@ bool __weak arch_within_kprobe_blacklist(unsigned long addr) addr < (unsigned long)__kprobes_text_end; } -static int __kprobes in_kprobes_functions(unsigned long addr) +static bool __kprobes within_kprobe_blacklist(unsigned long addr) { - struct kprobe_blackpoint *kb; + struct kprobe_blacklist_entry *ent; if (arch_within_kprobe_blacklist(addr)) - return -EINVAL; + return true; /* * If there exists a kprobe_blacklist, verify and * fail any probe registration in the prohibited area */ - for (kb = kprobe_blacklist; kb->name != NULL; kb++) { - if (kb->start_addr) { - if (addr >= kb->start_addr && - addr < (kb->start_addr + kb->range)) - return -EINVAL; - } + list_for_each_entry(ent, &kprobe_blacklist, list) { + if (addr >= ent->start_addr && addr < ent->end_addr) + return true; } - return 0; + + return false; } /* @@ -1436,7 +1424,7 @@ static __kprobes int check_kprobe_address_safe(struct kprobe *p, /* Ensure it is not in reserved area nor out of text */ if (!kernel_text_address((unsigned long) p->addr) || - in_kprobes_functions((unsigned long) p->addr) || + within_kprobe_blacklist((unsigned long) p->addr) || jump_label_text_reserved(p->addr, p->addr)) { ret = -EINVAL; goto out; @@ -2022,6 +2010,38 @@ void __kprobes dump_kprobe(struct kprobe *kp) kp->symbol_name, kp->addr, kp->offset); } +/* + * Lookup and populate the kprobe_blacklist. + * + * Unlike the kretprobe blacklist, we'll need to determine + * the range of addresses that belong to the said functions, + * since a kprobe need not necessarily be at the beginning + * of a function. + */ +static int __init populate_kprobe_blacklist(unsigned long *start, + unsigned long *end) +{ + unsigned long *iter; + struct kprobe_blacklist_entry *ent; + unsigned long offset = 0, size = 0; + + for (iter = start; iter < end; iter++) { + if (!kallsyms_lookup_size_offset(*iter, &size, &offset)) { + pr_err("Failed to find blacklist %p\n", (void *)*iter); + continue; + } + + ent = kmalloc(sizeof(*ent), GFP_KERNEL); + if (!ent) + return -ENOMEM; + ent->start_addr = *iter; + ent->end_addr = *iter + size; + INIT_LIST_HEAD(&ent->list); + list_add_tail(&ent->list, &kprobe_blacklist); + } + return 0; +} + /* Module notifier call back, checking kprobes on the module */ static int __kprobes kprobes_module_callback(struct notifier_block *nb, unsigned long val, void *data) @@ -2065,14 +2085,13 @@ static struct notifier_block kprobe_module_nb = { .priority = 0 }; +/* Markers of _kprobe_blacklist section */ +extern unsigned long __start_kprobe_blacklist[]; +extern unsigned long __stop_kprobe_blacklist[]; + static int __init init_kprobes(void) { int i, err = 0; - unsigned long offset = 0, size = 0; - char *modname, namebuf[KSYM_NAME_LEN]; - const char *symbol_name; - void *addr; - struct kprobe_blackpoint *kb; /* FIXME allocate the probe table, currently defined statically */ /* initialize all list heads */ @@ -2082,26 +2101,11 @@ static int __init init_kprobes(void) raw_spin_lock_init(&(kretprobe_table_locks[i].lock)); } - /* - * Lookup and populate the kprobe_blacklist. 
- * - * Unlike the kretprobe blacklist, we'll need to determine - * the range of addresses that belong to the said functions, - * since a kprobe need not necessarily be at the beginning - * of a function. - */ - for (kb = kprobe_blacklist; kb->name != NULL; kb++) { - kprobe_lookup_name(kb->name, addr); - if (!addr) - continue; - - kb->start_addr = (unsigned long)addr; - symbol_name = kallsyms_lookup(kb->start_addr, - &size, &offset, &modname, namebuf); - if (!symbol_name) - kb->range = 0; - else - kb->range = size; + err = populate_kprobe_blacklist(__start_kprobe_blacklist, + __stop_kprobe_blacklist); + if (err) { + pr_err("kprobes: failed to populate blacklist: %d\n", err); + pr_err("Please take care of using kprobes.\n"); } if (kretprobe_blacklist_size) { diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 268a45ea238cc8..6863631e8cd0e9 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2804,6 +2804,7 @@ asmlinkage void __sched notrace preempt_schedule(void) barrier(); } while (need_resched()); } +NOKPROBE_SYMBOL(preempt_schedule); EXPORT_SYMBOL(preempt_schedule); #endif /* CONFIG_PREEMPT */ From 0f46efeb44e360b78f54a968b4d92e6877c35891 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 17 Apr 2014 17:17:12 +0900 Subject: [PATCH 004/101] kprobes, x86: Prohibit probing on debug_stack_*() Prohibit probing on debug_stack_reset and debug_stack_set_zero. Since both functions are called from the TRACE_IRQS_ON/OFF_DEBUG macros, which run in the int3 IST entry, probing them may cause a soft lockup. This happens when the kernel is built with CONFIG_DYNAMIC_FTRACE=y and CONFIG_TRACE_IRQFLAGS=y. Signed-off-by: Masami Hiramatsu Reviewed-by: Steven Rostedt Cc: Borislav Petkov Cc: Jan Beulich Cc: Paul Gortmaker Cc: Seiji Aguchi Link: http://lkml.kernel.org/r/20140417081712.26341.32994.stgit@ltc230.yrl.intra.hitachi.co.jp Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/common.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index a135239badb7fd..5af696dddd1d28 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -1160,6 +1161,7 @@ int is_debug_stack(unsigned long addr) (addr <= __get_cpu_var(debug_stack_addr) && addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ)); } +NOKPROBE_SYMBOL(is_debug_stack); DEFINE_PER_CPU(u32, debug_idt_ctr); @@ -1168,6 +1170,7 @@ void debug_stack_set_zero(void) this_cpu_inc(debug_idt_ctr); load_current_idt(); } +NOKPROBE_SYMBOL(debug_stack_set_zero); void debug_stack_reset(void) { @@ -1176,6 +1179,7 @@ void debug_stack_reset(void) if (this_cpu_dec_return(debug_idt_ctr) == 0) load_current_idt(); } +NOKPROBE_SYMBOL(debug_stack_reset); #else /* CONFIG_X86_64 */ From 8027197220e02d5cebbbfdff36c2827661fbc692 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 17 Apr 2014 17:17:19 +0900 Subject: [PATCH 005/101] kprobes, x86: Prohibit probing on native_set_debugreg()/load_idt() Since kprobes uses do_debug() for single-stepping, functions called from do_debug() before notify_die() must not be probed. Likewise, native_load_idt() is called from paranoid_exit when returning from int3, so it must not be probed either.
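As a hedged illustration of the hazard (the helper below is hypothetical, not taken from the patch): planting a probe on anything that runs inside do_debug() before notify_die() would raise a new trap while the original debug exception is still being handled, so such functions have to be blacklisted:

#include <linux/kprobes.h>

/*
 * Hypothetical hook invoked from do_debug() before notify_die(). A
 * breakpoint planted here would fire while the debug exception is
 * still in progress, so the function must never be probed.
 */
void my_predebug_hook(unsigned long dr6)
{
        /* inspect dr6; the outer debug exception is not finished yet */
}
NOKPROBE_SYMBOL(my_predebug_hook);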
Signed-off-by: Masami Hiramatsu Reviewed-by: Steven Rostedt Cc: Alok Kataria Cc: Chris Wright Cc: Jeremy Fitzhardinge Cc: Rusty Russell Cc: virtualization@lists.linux-foundation.org Link: http://lkml.kernel.org/r/20140417081719.26341.65542.stgit@ltc230.yrl.intra.hitachi.co.jp Signed-off-by: Ingo Molnar --- arch/x86/kernel/paravirt.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index e136869ae42e09..548d25f00c90ad 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -390,8 +390,10 @@ __visible struct pv_cpu_ops pv_cpu_ops = { .end_context_switch = paravirt_nop, }; -/* At this point, native_get_debugreg has a real function entry */ +/* At this point, native_get/set_debugreg has real function entries */ NOKPROBE_SYMBOL(native_get_debugreg); +NOKPROBE_SYMBOL(native_set_debugreg); +NOKPROBE_SYMBOL(native_load_idt); struct pv_apic_ops pv_apic_ops = { #ifdef CONFIG_X86_LOCAL_APIC From 98def1dedd00f42ded8423c418c971751f46aad2 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 17 Apr 2014 17:17:26 +0900 Subject: [PATCH 006/101] kprobes, x86: Prohibit probing on thunk functions and restore The thunk/restore functions are also used for irqoff tracing etc., and they are involved in kprobes' exception handling. Prohibit probing on them to avoid a kernel crash. Signed-off-by: Masami Hiramatsu Reviewed-by: Steven Rostedt Link: http://lkml.kernel.org/r/20140417081726.26341.3872.stgit@ltc230.yrl.intra.hitachi.co.jp Signed-off-by: Ingo Molnar --- arch/x86/lib/thunk_32.S | 3 ++- arch/x86/lib/thunk_64.S | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/x86/lib/thunk_32.S b/arch/x86/lib/thunk_32.S index 2930ae05d77305..28f85c91671223 100644 --- a/arch/x86/lib/thunk_32.S +++ b/arch/x86/lib/thunk_32.S @@ -4,8 +4,8 @@ * (inspired by Andi Kleen's thunk_64.S) * Subject to the GNU public license, v.2. No warranty of any kind. */ - #include + #include #ifdef CONFIG_TRACE_IRQFLAGS /* put return address in eax (arg1) */ @@ -22,6 +22,7 @@ popl %ecx popl %eax ret + _ASM_NOKPROBE(\name) .endm thunk_ra trace_hardirqs_on_thunk,trace_hardirqs_on_caller diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S index a63efd6bb6a5a2..92d9feaff42b04 100644 --- a/arch/x86/lib/thunk_64.S +++ b/arch/x86/lib/thunk_64.S @@ -8,6 +8,7 @@ #include #include #include +#include /* rdi: arg1 ... normal C conventions. rax is saved/restored. */ .macro THUNK name, func, put_ret_addr_in_rdi=0 @@ -25,6 +26,7 @@ call \func jmp restore CFI_ENDPROC + _ASM_NOKPROBE(\name) .endm #ifdef CONFIG_TRACE_IRQFLAGS @@ -43,3 +45,4 @@ restore: RESTORE_ARGS ret CFI_ENDPROC + _ASM_NOKPROBE(restore) From 6f6343f53d133bae516caf3d254bce37d8774625 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 17 Apr 2014 17:17:33 +0900 Subject: [PATCH 007/101] kprobes/x86: Call exception handlers directly from do_int3/do_debug To avoid a kernel crash caused by probing lockdep code, call kprobe_int3_handler() and kprobe_debug_handler() (formerly called post_kprobe_handler()) directly from do_int3 and do_debug. Currently kprobes uses notify_die() to hook the int3/debug exceptions. Since notify_die() contains locking code, it can invoke the lockdep code, and because lockdep involves printk()-related functions, we would theoretically have to prohibit probing on all such code as well, which would make the blacklist much longer. Instead, hooking int3/debug for kprobes before notify_die() avoids this problem.
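The resulting dispatch order can be sketched as follows (a simplified illustration only; the real change is the arch/x86/kernel/traps.c hunk later in this patch, and error handling plus CONFIG guards are omitted here):

dotraplinkage void do_int3(struct pt_regs *regs, long error_code)
{
        /*
         * kprobes first: handled without the locking notifier chain,
         * so lockdep/printk are never entered for kprobe breakpoints.
         */
        if (kprobe_int3_handler(regs))
                return;

        /* only non-kprobe int3 users reach notify_die() and its locks */
        notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, SIGTRAP);
}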
Anyway, most of the int3 handlers in the kernel are already called from do_int3 directly, e.g. ftrace_int3_handler, poke_int3_handler, kgdb_ll_trap. Actually only kprobe_exceptions_notify is on the notifier_call_chain. Signed-off-by: Masami Hiramatsu Reviewed-by: Steven Rostedt Cc: Andrew Morton Cc: Borislav Petkov Cc: Jiri Kosina Cc: Jonathan Lebon Cc: Kees Cook Cc: Rusty Russell Cc: Seiji Aguchi Link: http://lkml.kernel.org/r/20140417081733.26341.24423.stgit@ltc230.yrl.intra.hitachi.co.jp Signed-off-by: Ingo Molnar --- arch/x86/include/asm/kprobes.h | 2 ++ arch/x86/kernel/kprobes/core.c | 24 +++--------------------- arch/x86/kernel/traps.c | 10 ++++++++++ 3 files changed, 15 insertions(+), 21 deletions(-) diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h index 9454c167629ff1..53cdfb2857abe4 100644 --- a/arch/x86/include/asm/kprobes.h +++ b/arch/x86/include/asm/kprobes.h @@ -116,4 +116,6 @@ struct kprobe_ctlblk { extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr); extern int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data); +extern int kprobe_int3_handler(struct pt_regs *regs); +extern int kprobe_debug_handler(struct pt_regs *regs); #endif /* _ASM_X86_KPROBES_H */ diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 7751b3dee53a08..9b80aec1ea1a4a 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -559,7 +559,7 @@ reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb * Interrupts are disabled on entry as trap3 is an interrupt gate and they * remain disabled throughout this function. */ -static int __kprobes kprobe_handler(struct pt_regs *regs) +int __kprobes kprobe_int3_handler(struct pt_regs *regs) { kprobe_opcode_t *addr; struct kprobe *p; @@ -857,7 +857,7 @@ resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k * Interrupts are disabled on entry as trap1 is an interrupt gate and they * remain disabled throughout this function. 
*/ -static int __kprobes post_kprobe_handler(struct pt_regs *regs) +int __kprobes kprobe_debug_handler(struct pt_regs *regs) { struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); @@ -963,22 +963,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d if (args->regs && user_mode_vm(args->regs)) return ret; - switch (val) { - case DIE_INT3: - if (kprobe_handler(args->regs)) - ret = NOTIFY_STOP; - break; - case DIE_DEBUG: - if (post_kprobe_handler(args->regs)) { - /* - * Reset the BS bit in dr6 (pointed by args->err) to - * denote completion of processing - */ - (*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP; - ret = NOTIFY_STOP; - } - break; - case DIE_GPF: + if (val == DIE_GPF) { /* * To be potentially processing a kprobe fault and to * trust the result from kprobe_running(), we have @@ -987,9 +972,6 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d if (!preemptible() && kprobe_running() && kprobe_fault_handler(args->regs, args->trapnr)) ret = NOTIFY_STOP; - break; - default: - break; } return ret; } diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 57409f6b8c623e..e5d4a70814d72b 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -334,6 +334,11 @@ dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_co goto exit; #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ +#ifdef CONFIG_KPROBES + if (kprobe_int3_handler(regs)) + return; +#endif + if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, SIGTRAP) == NOTIFY_STOP) goto exit; @@ -440,6 +445,11 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) /* Store the virtualized DR6 value */ tsk->thread.debugreg6 = dr6; +#ifdef CONFIG_KPROBES + if (kprobe_debug_handler(regs)) + goto exit; +#endif + if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code, SIGTRAP) == NOTIFY_STOP) goto exit; From ecd50f714c421c759354632dd00f70c718c95b10 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 17 Apr 2014 17:17:40 +0900 Subject: [PATCH 008/101] kprobes, x86: Call exception_enter after kprobes handled Move the exception_enter() call to after the kprobes handler is done. Since exception_enter() involves many other functions (like printk), it can cause a recursive int3/break loop when kprobes probe such functions.
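In other words (a simplified sketch, not the literal traps.c hunk that follows): the kprobe handler must run before exception_enter(), so only the non-kprobe path ever enters the exception context:

dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
{
        enum ctx_state prev_state;
        unsigned long dr6;

        get_debugreg(dr6, 6);

        if (kprobe_debug_handler(regs))
                return;                 /* exception_enter() never runs */

        prev_state = exception_enter(); /* may itself hit probed code */
        notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code, SIGTRAP);
        exception_exit(prev_state);
}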
Signed-off-by: Masami Hiramatsu Reviewed-by: Steven Rostedt Cc: Andrew Morton Cc: Borislav Petkov Cc: Jiri Kosina Cc: Kees Cook Cc: Rusty Russell Cc: Seiji Aguchi Link: http://lkml.kernel.org/r/20140417081740.26341.10894.stgit@ltc230.yrl.intra.hitachi.co.jp Signed-off-by: Ingo Molnar --- arch/x86/kernel/traps.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index e5d4a70814d72b..ba9abe9cbce338 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -327,7 +327,6 @@ dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_co if (poke_int3_handler(regs)) return; - prev_state = exception_enter(); #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, SIGTRAP) == NOTIFY_STOP) @@ -338,6 +337,7 @@ dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_co if (kprobe_int3_handler(regs)) return; #endif + prev_state = exception_enter(); if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, SIGTRAP) == NOTIFY_STOP) @@ -415,8 +415,6 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) unsigned long dr6; int si_code; - prev_state = exception_enter(); - get_debugreg(dr6, 6); /* Filter out all the reserved bits which are preset to 1 */ @@ -449,6 +447,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) if (kprobe_debug_handler(regs)) goto exit; #endif + prev_state = exception_enter(); if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code, SIGTRAP) == NOTIFY_STOP) goto exit; From 7ec8a97a990da8e3ba87175a757731e17f74072e Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 17 Apr 2014 17:17:47 +0900 Subject: [PATCH 009/101] kprobes/x86: Allow probe on some kprobe preparation functions There is no need to prohibit probing on the functions used in the preparation phase. They are safely probed because they are not invoked from breakpoint/fault/debug handlers, so there is no chance of causing recursive exceptions. The following functions are now removed from the kprobes blacklist: can_boost can_probe can_optimize is_IF_modifier __copy_instruction copy_optimized_instructions arch_copy_kprobe arch_prepare_kprobe arch_arm_kprobe arch_disarm_kprobe arch_remove_kprobe arch_trampoline_kprobe arch_prepare_kprobe_ftrace arch_prepare_optimized_kprobe arch_check_optimized_kprobe arch_within_optimized_kprobe __arch_remove_optimized_kprobe arch_remove_optimized_kprobe arch_optimize_kprobes arch_unoptimize_kprobe I tested those functions by putting kprobes on all instructions in the functions with the bash script I sent to LKML. See: https://lkml.org/lkml/2014/3/27/33 Signed-off-by: Masami Hiramatsu Cc: Jiri Kosina Cc: Jonathan Lebon Link: http://lkml.kernel.org/r/20140417081747.26341.36065.stgit@ltc230.yrl.intra.hitachi.co.jp Signed-off-by: Ingo Molnar --- arch/x86/kernel/kprobes/core.c | 20 ++++++++++---------- arch/x86/kernel/kprobes/ftrace.c | 2 +- arch/x86/kernel/kprobes/opt.c | 24 ++++++++++++------------ 3 files changed, 23 insertions(+), 23 deletions(-) diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 9b80aec1ea1a4a..bd717137ae7716 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -159,7 +159,7 @@ static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn) * Returns non-zero if opcode is boostable.
* RIP relative instructions are adjusted at copying time in 64 bits mode */ -int __kprobes can_boost(kprobe_opcode_t *opcodes) +int can_boost(kprobe_opcode_t *opcodes) { kprobe_opcode_t opcode; kprobe_opcode_t *orig_opcodes = opcodes; @@ -260,7 +260,7 @@ unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long add } /* Check if paddr is at an instruction boundary */ -static int __kprobes can_probe(unsigned long paddr) +static int can_probe(unsigned long paddr) { unsigned long addr, __addr, offset = 0; struct insn insn; @@ -299,7 +299,7 @@ static int __kprobes can_probe(unsigned long paddr) /* * Returns non-zero if opcode modifies the interrupt flag. */ -static int __kprobes is_IF_modifier(kprobe_opcode_t *insn) +static int is_IF_modifier(kprobe_opcode_t *insn) { /* Skip prefixes */ insn = skip_prefixes(insn); @@ -322,7 +322,7 @@ static int __kprobes is_IF_modifier(kprobe_opcode_t *insn) * If not, return null. * Only applicable to 64-bit x86. */ -int __kprobes __copy_instruction(u8 *dest, u8 *src) +int __copy_instruction(u8 *dest, u8 *src) { struct insn insn; kprobe_opcode_t buf[MAX_INSN_SIZE]; @@ -365,7 +365,7 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src) return insn.length; } -static int __kprobes arch_copy_kprobe(struct kprobe *p) +static int arch_copy_kprobe(struct kprobe *p) { int ret; @@ -392,7 +392,7 @@ static int __kprobes arch_copy_kprobe(struct kprobe *p) return 0; } -int __kprobes arch_prepare_kprobe(struct kprobe *p) +int arch_prepare_kprobe(struct kprobe *p) { if (alternatives_text_reserved(p->addr, p->addr)) return -EINVAL; @@ -407,17 +407,17 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) return arch_copy_kprobe(p); } -void __kprobes arch_arm_kprobe(struct kprobe *p) +void arch_arm_kprobe(struct kprobe *p) { text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1); } -void __kprobes arch_disarm_kprobe(struct kprobe *p) +void arch_disarm_kprobe(struct kprobe *p) { text_poke(p->addr, &p->opcode, 1); } -void __kprobes arch_remove_kprobe(struct kprobe *p) +void arch_remove_kprobe(struct kprobe *p) { if (p->ainsn.insn) { free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1)); @@ -1060,7 +1060,7 @@ int __init arch_init_kprobes(void) return 0; } -int __kprobes arch_trampoline_kprobe(struct kprobe *p) +int arch_trampoline_kprobe(struct kprobe *p) { return 0; } diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c index 23ef5c556f0614..dcaa1310ccfda1 100644 --- a/arch/x86/kernel/kprobes/ftrace.c +++ b/arch/x86/kernel/kprobes/ftrace.c @@ -85,7 +85,7 @@ void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, local_irq_restore(flags); } -int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p) +int arch_prepare_kprobe_ftrace(struct kprobe *p) { p->ainsn.insn = NULL; p->ainsn.boostable = -1; diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 898160b42e4392..fba7fb075e8a76 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -77,7 +77,7 @@ unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr) } /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). 
*/ -static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val) +static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val) { #ifdef CONFIG_X86_64 *addr++ = 0x48; @@ -169,7 +169,7 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op, struct pt_ local_irq_restore(flags); } -static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src) +static int copy_optimized_instructions(u8 *dest, u8 *src) { int len = 0, ret; @@ -189,7 +189,7 @@ static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src) } /* Check whether insn is indirect jump */ -static int __kprobes insn_is_indirect_jump(struct insn *insn) +static int insn_is_indirect_jump(struct insn *insn) { return ((insn->opcode.bytes[0] == 0xff && (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */ @@ -224,7 +224,7 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len) } /* Decode whole function to ensure any instructions don't jump into target */ -static int __kprobes can_optimize(unsigned long paddr) +static int can_optimize(unsigned long paddr) { unsigned long addr, size = 0, offset = 0; struct insn insn; @@ -275,7 +275,7 @@ static int __kprobes can_optimize(unsigned long paddr) } /* Check optimized_kprobe can actually be optimized. */ -int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op) +int arch_check_optimized_kprobe(struct optimized_kprobe *op) { int i; struct kprobe *p; @@ -290,15 +290,15 @@ int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op) } /* Check the addr is within the optimized instructions. */ -int __kprobes -arch_within_optimized_kprobe(struct optimized_kprobe *op, unsigned long addr) +int arch_within_optimized_kprobe(struct optimized_kprobe *op, + unsigned long addr) { return ((unsigned long)op->kp.addr <= addr && (unsigned long)op->kp.addr + op->optinsn.size > addr); } /* Free optimized instruction slot */ -static __kprobes +static void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty) { if (op->optinsn.insn) { @@ -308,7 +308,7 @@ void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty) } } -void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op) +void arch_remove_optimized_kprobe(struct optimized_kprobe *op) { __arch_remove_optimized_kprobe(op, 1); } @@ -318,7 +318,7 @@ void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op) * Target instructions MUST be relocatable (checked inside) * This is called when new aggr(opt)probe is allocated or reused. */ -int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op) +int arch_prepare_optimized_kprobe(struct optimized_kprobe *op) { u8 *buf; int ret; @@ -372,7 +372,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op) * Replace breakpoints (int3) with relative jumps. * Caller must call with locking kprobe_mutex and text_mutex. */ -void __kprobes arch_optimize_kprobes(struct list_head *oplist) +void arch_optimize_kprobes(struct list_head *oplist) { struct optimized_kprobe *op, *tmp; u8 insn_buf[RELATIVEJUMP_SIZE]; @@ -398,7 +398,7 @@ void __kprobes arch_optimize_kprobes(struct list_head *oplist) } /* Replace a relative jump with a breakpoint (int3). 
*/ -void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op) +void arch_unoptimize_kprobe(struct optimized_kprobe *op) { u8 insn_buf[RELATIVEJUMP_SIZE]; From 55479f64756fc508182a05e35e52f01395a50d4d Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 17 Apr 2014 17:17:54 +0900 Subject: [PATCH 010/101] kprobes: Allow probe on some kprobe functions There is no need to prohibit probing on the functions used for preparation, registration, optimization, control etc. They are safely probed because they are not invoked from breakpoint/fault/debug handlers, so there is no chance of causing recursive exceptions. The following functions are now removed from the kprobes blacklist: add_new_kprobe aggr_kprobe_disabled alloc_aggr_kprobe alloc_aggr_kprobe arm_all_kprobes __arm_kprobe arm_kprobe arm_kprobe_ftrace check_kprobe_address_safe collect_garbage_slots collect_garbage_slots collect_one_slot debugfs_kprobe_init __disable_kprobe disable_kprobe disarm_all_kprobes __disarm_kprobe disarm_kprobe disarm_kprobe_ftrace do_free_cleaned_kprobes do_optimize_kprobes do_unoptimize_kprobes enable_kprobe force_unoptimize_kprobe free_aggr_kprobe free_aggr_kprobe __free_insn_slot __get_insn_slot get_optimized_kprobe __get_valid_kprobe init_aggr_kprobe init_aggr_kprobe in_nokprobe_functions kick_kprobe_optimizer kill_kprobe kill_optimized_kprobe kprobe_addr kprobe_optimizer kprobe_queued kprobe_seq_next kprobe_seq_start kprobe_seq_stop kprobes_module_callback kprobes_open optimize_all_kprobes optimize_kprobe prepare_kprobe prepare_optimized_kprobe register_aggr_kprobe register_jprobe register_jprobes register_kprobe register_kprobes register_kretprobe register_kretprobe register_kretprobes register_kretprobes report_probe show_kprobe_addr try_to_optimize_kprobe unoptimize_all_kprobes unoptimize_kprobe unregister_jprobe unregister_jprobes unregister_kprobe __unregister_kprobe_bottom unregister_kprobes __unregister_kprobe_top unregister_kretprobe unregister_kretprobe unregister_kretprobes unregister_kretprobes wait_for_kprobe_optimizer I tested those functions by putting kprobes on all instructions in the functions with the bash script I sent to LKML. See: https://lkml.org/lkml/2014/3/27/33 Signed-off-by: Masami Hiramatsu Link: http://lkml.kernel.org/r/20140417081753.26341.57889.stgit@ltc230.yrl.intra.hitachi.co.jp Cc: Ananth N Mavinakayanahalli Cc: Anil S Keshavamurthy Cc: David S. Miller Cc: fche@redhat.com Cc: systemtap@sourceware.org Signed-off-by: Ingo Molnar --- kernel/kprobes.c | 153 +++++++++++++++++++++++------------------------ 1 file changed, 76 insertions(+), 77 deletions(-) diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 5ffc6875d2a7d4..4db2cc616f5016 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -138,13 +138,13 @@ struct kprobe_insn_cache kprobe_insn_slots = { .insn_size = MAX_INSN_SIZE, .nr_garbage = 0, }; -static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c); +static int collect_garbage_slots(struct kprobe_insn_cache *c); /** * __get_insn_slot() - Find a slot on an executable page for an instruction. * We allocate an executable page if there's no room on existing ones. */ -kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c) +kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c) { struct kprobe_insn_page *kip; kprobe_opcode_t *slot = NULL; @@ -201,7 +201,7 @@ kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c) } /* Return 1 if all garbages are collected, otherwise 0.
*/ -static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx) +static int collect_one_slot(struct kprobe_insn_page *kip, int idx) { kip->slot_used[idx] = SLOT_CLEAN; kip->nused--; @@ -222,7 +222,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx) return 0; } -static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c) +static int collect_garbage_slots(struct kprobe_insn_cache *c) { struct kprobe_insn_page *kip, *next; @@ -244,8 +244,8 @@ static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c) return 0; } -void __kprobes __free_insn_slot(struct kprobe_insn_cache *c, - kprobe_opcode_t *slot, int dirty) +void __free_insn_slot(struct kprobe_insn_cache *c, + kprobe_opcode_t *slot, int dirty) { struct kprobe_insn_page *kip; @@ -361,7 +361,7 @@ void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs) } /* Free optimized instructions and optimized_kprobe */ -static __kprobes void free_aggr_kprobe(struct kprobe *p) +static void free_aggr_kprobe(struct kprobe *p) { struct optimized_kprobe *op; @@ -399,7 +399,7 @@ static inline int kprobe_disarmed(struct kprobe *p) } /* Return true(!0) if the probe is queued on (un)optimizing lists */ -static int __kprobes kprobe_queued(struct kprobe *p) +static int kprobe_queued(struct kprobe *p) { struct optimized_kprobe *op; @@ -415,7 +415,7 @@ static int __kprobes kprobe_queued(struct kprobe *p) * Return an optimized kprobe whose optimizing code replaces * instructions including addr (exclude breakpoint). */ -static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr) +static struct kprobe *get_optimized_kprobe(unsigned long addr) { int i; struct kprobe *p = NULL; @@ -447,7 +447,7 @@ static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer); * Optimize (replace a breakpoint with a jump) kprobes listed on * optimizing_list. */ -static __kprobes void do_optimize_kprobes(void) +static void do_optimize_kprobes(void) { /* Optimization never be done when disarmed */ if (kprobes_all_disarmed || !kprobes_allow_optimization || @@ -475,7 +475,7 @@ static __kprobes void do_optimize_kprobes(void) * Unoptimize (replace a jump with a breakpoint and remove the breakpoint * if need) kprobes listed on unoptimizing_list. 
*/ -static __kprobes void do_unoptimize_kprobes(void) +static void do_unoptimize_kprobes(void) { struct optimized_kprobe *op, *tmp; @@ -507,7 +507,7 @@ static __kprobes void do_unoptimize_kprobes(void) } /* Reclaim all kprobes on the free_list */ -static __kprobes void do_free_cleaned_kprobes(void) +static void do_free_cleaned_kprobes(void) { struct optimized_kprobe *op, *tmp; @@ -519,13 +519,13 @@ static __kprobes void do_free_cleaned_kprobes(void) } /* Start optimizer after OPTIMIZE_DELAY passed */ -static __kprobes void kick_kprobe_optimizer(void) +static void kick_kprobe_optimizer(void) { schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY); } /* Kprobe jump optimizer */ -static __kprobes void kprobe_optimizer(struct work_struct *work) +static void kprobe_optimizer(struct work_struct *work) { mutex_lock(&kprobe_mutex); /* Lock modules while optimizing kprobes */ @@ -561,7 +561,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work) } /* Wait for completing optimization and unoptimization */ -static __kprobes void wait_for_kprobe_optimizer(void) +static void wait_for_kprobe_optimizer(void) { mutex_lock(&kprobe_mutex); @@ -580,7 +580,7 @@ static __kprobes void wait_for_kprobe_optimizer(void) } /* Optimize kprobe if p is ready to be optimized */ -static __kprobes void optimize_kprobe(struct kprobe *p) +static void optimize_kprobe(struct kprobe *p) { struct optimized_kprobe *op; @@ -614,7 +614,7 @@ static __kprobes void optimize_kprobe(struct kprobe *p) } /* Short cut to direct unoptimizing */ -static __kprobes void force_unoptimize_kprobe(struct optimized_kprobe *op) +static void force_unoptimize_kprobe(struct optimized_kprobe *op) { get_online_cpus(); arch_unoptimize_kprobe(op); @@ -624,7 +624,7 @@ static __kprobes void force_unoptimize_kprobe(struct optimized_kprobe *op) } /* Unoptimize a kprobe if p is optimized */ -static __kprobes void unoptimize_kprobe(struct kprobe *p, bool force) +static void unoptimize_kprobe(struct kprobe *p, bool force) { struct optimized_kprobe *op; @@ -684,7 +684,7 @@ static void reuse_unused_kprobe(struct kprobe *ap) } /* Remove optimized instructions */ -static void __kprobes kill_optimized_kprobe(struct kprobe *p) +static void kill_optimized_kprobe(struct kprobe *p) { struct optimized_kprobe *op; @@ -710,7 +710,7 @@ static void __kprobes kill_optimized_kprobe(struct kprobe *p) } /* Try to prepare optimized instructions */ -static __kprobes void prepare_optimized_kprobe(struct kprobe *p) +static void prepare_optimized_kprobe(struct kprobe *p) { struct optimized_kprobe *op; @@ -719,7 +719,7 @@ static __kprobes void prepare_optimized_kprobe(struct kprobe *p) } /* Allocate new optimized_kprobe and try to prepare optimized instructions */ -static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p) +static struct kprobe *alloc_aggr_kprobe(struct kprobe *p) { struct optimized_kprobe *op; @@ -734,13 +734,13 @@ static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p) return &op->kp; } -static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p); +static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p); /* * Prepare an optimized_kprobe and optimize it * NOTE: p must be a normal registered kprobe */ -static __kprobes void try_to_optimize_kprobe(struct kprobe *p) +static void try_to_optimize_kprobe(struct kprobe *p) { struct kprobe *ap; struct optimized_kprobe *op; @@ -774,7 +774,7 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p) } #ifdef CONFIG_SYSCTL -static void __kprobes 
optimize_all_kprobes(void) +static void optimize_all_kprobes(void) { struct hlist_head *head; struct kprobe *p; @@ -797,7 +797,7 @@ static void __kprobes optimize_all_kprobes(void) mutex_unlock(&kprobe_mutex); } -static void __kprobes unoptimize_all_kprobes(void) +static void unoptimize_all_kprobes(void) { struct hlist_head *head; struct kprobe *p; @@ -848,7 +848,7 @@ int proc_kprobes_optimization_handler(struct ctl_table *table, int write, #endif /* CONFIG_SYSCTL */ /* Put a breakpoint for a probe. Must be called with text_mutex locked */ -static void __kprobes __arm_kprobe(struct kprobe *p) +static void __arm_kprobe(struct kprobe *p) { struct kprobe *_p; @@ -863,7 +863,7 @@ static void __kprobes __arm_kprobe(struct kprobe *p) } /* Remove the breakpoint of a probe. Must be called with text_mutex locked */ -static void __kprobes __disarm_kprobe(struct kprobe *p, bool reopt) +static void __disarm_kprobe(struct kprobe *p, bool reopt) { struct kprobe *_p; @@ -898,13 +898,13 @@ static void reuse_unused_kprobe(struct kprobe *ap) BUG_ON(kprobe_unused(ap)); } -static __kprobes void free_aggr_kprobe(struct kprobe *p) +static void free_aggr_kprobe(struct kprobe *p) { arch_remove_kprobe(p); kfree(p); } -static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p) +static struct kprobe *alloc_aggr_kprobe(struct kprobe *p) { return kzalloc(sizeof(struct kprobe), GFP_KERNEL); } @@ -918,7 +918,7 @@ static struct ftrace_ops kprobe_ftrace_ops __read_mostly = { static int kprobe_ftrace_enabled; /* Must ensure p->addr is really on ftrace */ -static int __kprobes prepare_kprobe(struct kprobe *p) +static int prepare_kprobe(struct kprobe *p) { if (!kprobe_ftrace(p)) return arch_prepare_kprobe(p); @@ -927,7 +927,7 @@ static int __kprobes prepare_kprobe(struct kprobe *p) } /* Caller must lock kprobe_mutex */ -static void __kprobes arm_kprobe_ftrace(struct kprobe *p) +static void arm_kprobe_ftrace(struct kprobe *p) { int ret; @@ -942,7 +942,7 @@ static void __kprobes arm_kprobe_ftrace(struct kprobe *p) } /* Caller must lock kprobe_mutex */ -static void __kprobes disarm_kprobe_ftrace(struct kprobe *p) +static void disarm_kprobe_ftrace(struct kprobe *p) { int ret; @@ -962,7 +962,7 @@ static void __kprobes disarm_kprobe_ftrace(struct kprobe *p) #endif /* Arm a kprobe with text_mutex */ -static void __kprobes arm_kprobe(struct kprobe *kp) +static void arm_kprobe(struct kprobe *kp) { if (unlikely(kprobe_ftrace(kp))) { arm_kprobe_ftrace(kp); @@ -979,7 +979,7 @@ static void __kprobes arm_kprobe(struct kprobe *kp) } /* Disarm a kprobe with text_mutex */ -static void __kprobes disarm_kprobe(struct kprobe *kp, bool reopt) +static void disarm_kprobe(struct kprobe *kp, bool reopt) { if (unlikely(kprobe_ftrace(kp))) { disarm_kprobe_ftrace(kp); @@ -1189,7 +1189,7 @@ static void __kprobes cleanup_rp_inst(struct kretprobe *rp) * Add the new probe to ap->list. Fail if this is the * second jprobe at the address - two jprobes can't coexist */ -static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p) +static int add_new_kprobe(struct kprobe *ap, struct kprobe *p) { BUG_ON(kprobe_gone(ap) || kprobe_gone(p)); @@ -1213,7 +1213,7 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p) * Fill in the required fields of the "manager kprobe". 
Replace the * earlier kprobe in the hlist with the manager kprobe */ -static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p) +static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p) { /* Copy p's insn slot to ap */ copy_kprobe(p, ap); @@ -1239,8 +1239,7 @@ static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p) * This is the second or subsequent kprobe at the address - handle * the intricacies */ -static int __kprobes register_aggr_kprobe(struct kprobe *orig_p, - struct kprobe *p) +static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p) { int ret = 0; struct kprobe *ap = orig_p; @@ -1318,7 +1317,7 @@ bool __weak arch_within_kprobe_blacklist(unsigned long addr) addr < (unsigned long)__kprobes_text_end; } -static bool __kprobes within_kprobe_blacklist(unsigned long addr) +static bool within_kprobe_blacklist(unsigned long addr) { struct kprobe_blacklist_entry *ent; @@ -1342,7 +1341,7 @@ static bool __kprobes within_kprobe_blacklist(unsigned long addr) * This returns encoded errors if it fails to look up symbol or invalid * combination of parameters. */ -static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p) +static kprobe_opcode_t *kprobe_addr(struct kprobe *p) { kprobe_opcode_t *addr = p->addr; @@ -1365,7 +1364,7 @@ static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p) } /* Check passed kprobe is valid and return kprobe in kprobe_table. */ -static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p) +static struct kprobe *__get_valid_kprobe(struct kprobe *p) { struct kprobe *ap, *list_p; @@ -1397,8 +1396,8 @@ static inline int check_kprobe_rereg(struct kprobe *p) return ret; } -static __kprobes int check_kprobe_address_safe(struct kprobe *p, - struct module **probed_mod) +static int check_kprobe_address_safe(struct kprobe *p, + struct module **probed_mod) { int ret = 0; unsigned long ftrace_addr; @@ -1460,7 +1459,7 @@ static __kprobes int check_kprobe_address_safe(struct kprobe *p, return ret; } -int __kprobes register_kprobe(struct kprobe *p) +int register_kprobe(struct kprobe *p) { int ret; struct kprobe *old_p; @@ -1522,7 +1521,7 @@ int __kprobes register_kprobe(struct kprobe *p) EXPORT_SYMBOL_GPL(register_kprobe); /* Check if all probes on the aggrprobe are disabled */ -static int __kprobes aggr_kprobe_disabled(struct kprobe *ap) +static int aggr_kprobe_disabled(struct kprobe *ap) { struct kprobe *kp; @@ -1538,7 +1537,7 @@ static int __kprobes aggr_kprobe_disabled(struct kprobe *ap) } /* Disable one kprobe: Make sure called under kprobe_mutex is locked */ -static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p) +static struct kprobe *__disable_kprobe(struct kprobe *p) { struct kprobe *orig_p; @@ -1565,7 +1564,7 @@ static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p) /* * Unregister a kprobe without a scheduler synchronization. */ -static int __kprobes __unregister_kprobe_top(struct kprobe *p) +static int __unregister_kprobe_top(struct kprobe *p) { struct kprobe *ap, *list_p; @@ -1622,7 +1621,7 @@ static int __kprobes __unregister_kprobe_top(struct kprobe *p) return 0; } -static void __kprobes __unregister_kprobe_bottom(struct kprobe *p) +static void __unregister_kprobe_bottom(struct kprobe *p) { struct kprobe *ap; @@ -1638,7 +1637,7 @@ static void __kprobes __unregister_kprobe_bottom(struct kprobe *p) /* Otherwise, do nothing. 
*/ } -int __kprobes register_kprobes(struct kprobe **kps, int num) +int register_kprobes(struct kprobe **kps, int num) { int i, ret = 0; @@ -1656,13 +1655,13 @@ int __kprobes register_kprobes(struct kprobe **kps, int num) } EXPORT_SYMBOL_GPL(register_kprobes); -void __kprobes unregister_kprobe(struct kprobe *p) +void unregister_kprobe(struct kprobe *p) { unregister_kprobes(&p, 1); } EXPORT_SYMBOL_GPL(unregister_kprobe); -void __kprobes unregister_kprobes(struct kprobe **kps, int num) +void unregister_kprobes(struct kprobe **kps, int num) { int i; @@ -1691,7 +1690,7 @@ unsigned long __weak arch_deref_entry_point(void *entry) return (unsigned long)entry; } -int __kprobes register_jprobes(struct jprobe **jps, int num) +int register_jprobes(struct jprobe **jps, int num) { struct jprobe *jp; int ret = 0, i; @@ -1722,19 +1721,19 @@ int __kprobes register_jprobes(struct jprobe **jps, int num) } EXPORT_SYMBOL_GPL(register_jprobes); -int __kprobes register_jprobe(struct jprobe *jp) +int register_jprobe(struct jprobe *jp) { return register_jprobes(&jp, 1); } EXPORT_SYMBOL_GPL(register_jprobe); -void __kprobes unregister_jprobe(struct jprobe *jp) +void unregister_jprobe(struct jprobe *jp) { unregister_jprobes(&jp, 1); } EXPORT_SYMBOL_GPL(unregister_jprobe); -void __kprobes unregister_jprobes(struct jprobe **jps, int num) +void unregister_jprobes(struct jprobe **jps, int num) { int i; @@ -1799,7 +1798,7 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p, return 0; } -int __kprobes register_kretprobe(struct kretprobe *rp) +int register_kretprobe(struct kretprobe *rp) { int ret = 0; struct kretprobe_instance *inst; @@ -1852,7 +1851,7 @@ int __kprobes register_kretprobe(struct kretprobe *rp) } EXPORT_SYMBOL_GPL(register_kretprobe); -int __kprobes register_kretprobes(struct kretprobe **rps, int num) +int register_kretprobes(struct kretprobe **rps, int num) { int ret = 0, i; @@ -1870,13 +1869,13 @@ int __kprobes register_kretprobes(struct kretprobe **rps, int num) } EXPORT_SYMBOL_GPL(register_kretprobes); -void __kprobes unregister_kretprobe(struct kretprobe *rp) +void unregister_kretprobe(struct kretprobe *rp) { unregister_kretprobes(&rp, 1); } EXPORT_SYMBOL_GPL(unregister_kretprobe); -void __kprobes unregister_kretprobes(struct kretprobe **rps, int num) +void unregister_kretprobes(struct kretprobe **rps, int num) { int i; @@ -1899,24 +1898,24 @@ void __kprobes unregister_kretprobes(struct kretprobe **rps, int num) EXPORT_SYMBOL_GPL(unregister_kretprobes); #else /* CONFIG_KRETPROBES */ -int __kprobes register_kretprobe(struct kretprobe *rp) +int register_kretprobe(struct kretprobe *rp) { return -ENOSYS; } EXPORT_SYMBOL_GPL(register_kretprobe); -int __kprobes register_kretprobes(struct kretprobe **rps, int num) +int register_kretprobes(struct kretprobe **rps, int num) { return -ENOSYS; } EXPORT_SYMBOL_GPL(register_kretprobes); -void __kprobes unregister_kretprobe(struct kretprobe *rp) +void unregister_kretprobe(struct kretprobe *rp) { } EXPORT_SYMBOL_GPL(unregister_kretprobe); -void __kprobes unregister_kretprobes(struct kretprobe **rps, int num) +void unregister_kretprobes(struct kretprobe **rps, int num) { } EXPORT_SYMBOL_GPL(unregister_kretprobes); @@ -1930,7 +1929,7 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p, #endif /* CONFIG_KRETPROBES */ /* Set the kprobe gone and remove its instruction buffer. 
*/ -static void __kprobes kill_kprobe(struct kprobe *p) +static void kill_kprobe(struct kprobe *p) { struct kprobe *kp; @@ -1954,7 +1953,7 @@ static void __kprobes kill_kprobe(struct kprobe *p) } /* Disable one kprobe */ -int __kprobes disable_kprobe(struct kprobe *kp) +int disable_kprobe(struct kprobe *kp) { int ret = 0; @@ -1970,7 +1969,7 @@ int __kprobes disable_kprobe(struct kprobe *kp) EXPORT_SYMBOL_GPL(disable_kprobe); /* Enable one kprobe */ -int __kprobes enable_kprobe(struct kprobe *kp) +int enable_kprobe(struct kprobe *kp) { int ret = 0; struct kprobe *p; @@ -2043,8 +2042,8 @@ static int __init populate_kprobe_blacklist(unsigned long *start, } /* Module notifier call back, checking kprobes on the module */ -static int __kprobes kprobes_module_callback(struct notifier_block *nb, - unsigned long val, void *data) +static int kprobes_module_callback(struct notifier_block *nb, + unsigned long val, void *data) { struct module *mod = data; struct hlist_head *head; @@ -2145,7 +2144,7 @@ static int __init init_kprobes(void) } #ifdef CONFIG_DEBUG_FS -static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p, +static void report_probe(struct seq_file *pi, struct kprobe *p, const char *sym, int offset, char *modname, struct kprobe *pp) { char *kprobe_type; @@ -2174,12 +2173,12 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p, (kprobe_ftrace(pp) ? "[FTRACE]" : "")); } -static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos) +static void *kprobe_seq_start(struct seq_file *f, loff_t *pos) { return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL; } -static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos) +static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos) { (*pos)++; if (*pos >= KPROBE_TABLE_SIZE) @@ -2187,12 +2186,12 @@ static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos) return pos; } -static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v) +static void kprobe_seq_stop(struct seq_file *f, void *v) { /* Nothing to do */ } -static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v) +static int show_kprobe_addr(struct seq_file *pi, void *v) { struct hlist_head *head; struct kprobe *p, *kp; @@ -2223,7 +2222,7 @@ static const struct seq_operations kprobes_seq_ops = { .show = show_kprobe_addr }; -static int __kprobes kprobes_open(struct inode *inode, struct file *filp) +static int kprobes_open(struct inode *inode, struct file *filp) { return seq_open(filp, &kprobes_seq_ops); } @@ -2235,7 +2234,7 @@ static const struct file_operations debugfs_kprobes_operations = { .release = seq_release, }; -static void __kprobes arm_all_kprobes(void) +static void arm_all_kprobes(void) { struct hlist_head *head; struct kprobe *p; @@ -2263,7 +2262,7 @@ static void __kprobes arm_all_kprobes(void) return; } -static void __kprobes disarm_all_kprobes(void) +static void disarm_all_kprobes(void) { struct hlist_head *head; struct kprobe *p; @@ -2347,7 +2346,7 @@ static const struct file_operations fops_kp = { .llseek = default_llseek, }; -static int __kprobes debugfs_kprobe_init(void) +static int __init debugfs_kprobe_init(void) { struct dentry *dir, *file; unsigned int value = 1; From fbc1963d2c1c4eb4651132a2c5c9d6111ada17d3 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 17 Apr 2014 17:18:00 +0900 Subject: [PATCH 011/101] kprobes, ftrace: Allow probing on some functions There is no need to prohibit probing on the functions used for preparation and uprobe only fetch 
functions. Those are safely probed because those are not invoked from kprobe's breakpoint/fault/debug handlers. So there is no chance to cause recursive exceptions. Following functions are now removed from the kprobes blacklist: update_bitfield_fetch_param free_bitfield_fetch_param kprobe_register FETCH_FUNC_NAME(stack, type) in trace_uprobe.c FETCH_FUNC_NAME(memory, type) in trace_uprobe.c FETCH_FUNC_NAME(memory, string) in trace_uprobe.c FETCH_FUNC_NAME(memory, string_size) in trace_uprobe.c FETCH_FUNC_NAME(file_offset, type) in trace_uprobe.c Signed-off-by: Masami Hiramatsu Cc: Frederic Weisbecker Cc: Steven Rostedt Link: http://lkml.kernel.org/r/20140417081800.26341.56504.stgit@ltc230.yrl.intra.hitachi.co.jp Signed-off-by: Ingo Molnar --- kernel/trace/trace_kprobe.c | 5 ++--- kernel/trace/trace_probe.c | 4 ++-- kernel/trace/trace_uprobe.c | 20 ++++++++++---------- 3 files changed, 14 insertions(+), 15 deletions(-) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 903ae28962be7f..aa5f0bfcdf7b13 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1196,9 +1196,8 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe * lockless, but we can't race with this __init function. */ -static __kprobes -int kprobe_register(struct ftrace_event_call *event, - enum trace_reg type, void *data) +static int kprobe_register(struct ftrace_event_call *event, + enum trace_reg type, void *data) { struct trace_kprobe *tk = (struct trace_kprobe *)event->data; struct ftrace_event_file *file = data; diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index 8364a421b4dfc7..d3a91e40a6595e 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -183,7 +183,7 @@ DEFINE_BASIC_FETCH_FUNCS(bitfield) #define fetch_bitfield_string NULL #define fetch_bitfield_string_size NULL -static __kprobes void +static void update_bitfield_fetch_param(struct bitfield_fetch_param *data) { /* @@ -196,7 +196,7 @@ update_bitfield_fetch_param(struct bitfield_fetch_param *data) update_symbol_cache(data->orig.data); } -static __kprobes void +static void free_bitfield_fetch_param(struct bitfield_fetch_param *data) { /* diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index c082a74413455d..991e3b7c4edb3d 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -108,8 +108,8 @@ static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n) * Uprobes-specific fetch functions */ #define DEFINE_FETCH_stack(type) \ -static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\ - void *offset, void *dest) \ +static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs, \ + void *offset, void *dest) \ { \ *(type *)dest = (type)get_user_stack_nth(regs, \ ((unsigned long)offset)); \ @@ -120,8 +120,8 @@ DEFINE_BASIC_FETCH_FUNCS(stack) #define fetch_stack_string_size NULL #define DEFINE_FETCH_memory(type) \ -static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\ - void *addr, void *dest) \ +static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs, \ + void *addr, void *dest) \ { \ type retval; \ void __user *vaddr = (void __force __user *) addr; \ @@ -136,8 +136,8 @@ DEFINE_BASIC_FETCH_FUNCS(memory) * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max * length and relative data location. 
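To make the safety claim above concrete, a sketch (not code from this
series) of the recursion that the blacklist exists to prevent, and why
uprobe-only fetch functions sit outside it:

/*
 * Hazard the blacklist guards against, sketched:
 *
 *   int3 (probe hit)
 *     -> kprobe_int3_handler()
 *          -> helper function that is itself probed
 *               -> int3 again, re-entering while the saved-kprobe
 *                  slot is still in use
 *
 * The fetch functions removed from the blacklist here are reached only
 * from uprobe handlers, i.e. from user-space breakpoints, never from the
 * kernel int3/debug/fault path, so they cannot appear in such a chain.
 */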
*/ -static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs, - void *addr, void *dest) +static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs, + void *addr, void *dest) { long ret; u32 rloc = *(u32 *)dest; @@ -158,8 +158,8 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs, } } -static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs, - void *addr, void *dest) +static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs, + void *addr, void *dest) { int len; void __user *vaddr = (void __force __user *) addr; @@ -184,8 +184,8 @@ static unsigned long translate_user_vaddr(void *file_offset) } #define DEFINE_FETCH_file_offset(type) \ -static __kprobes void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs,\ - void *offset, void *dest) \ +static void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs, \ + void *offset, void *dest)\ { \ void *vaddr = (void *)translate_user_vaddr(offset); \ \ From 9c54b6164eeb292a0eac86c6913bd8daaff35e62 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 17 Apr 2014 17:18:07 +0900 Subject: [PATCH 012/101] kprobes, x86: Allow kprobes on text_poke/hw_breakpoint Allow kprobes on text_poke/hw_breakpoint because those are not related to the critical int3-debug recursive path of kprobes at this moment. Signed-off-by: Masami Hiramatsu Reviewed-by: Steven Rostedt Cc: Andrew Morton Cc: Borislav Petkov Cc: Fengguang Wu Cc: Jiri Kosina Cc: Oleg Nesterov Cc: Paul Gortmaker Link: http://lkml.kernel.org/r/20140417081807.26341.73219.stgit@ltc230.yrl.intra.hitachi.co.jp Signed-off-by: Ingo Molnar --- arch/x86/kernel/alternative.c | 3 +-- arch/x86/kernel/hw_breakpoint.c | 5 ++--- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index df94598ad05a84..703130f469ecf7 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -5,7 +5,6 @@ #include #include #include -#include #include #include #include @@ -551,7 +550,7 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode, * * Note: Must be called under text_mutex. */ -void *__kprobes text_poke(void *addr, const void *opcode, size_t len) +void *text_poke(void *addr, const void *opcode, size_t len) { unsigned long flags; char *vaddr; diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index a67b47c31314ba..5f9cf20cdb6802 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -32,7 +32,6 @@ #include #include #include -#include #include #include #include @@ -424,7 +423,7 @@ EXPORT_SYMBOL_GPL(hw_breakpoint_restore); * NOTIFY_STOP returned for all other cases * */ -static int __kprobes hw_breakpoint_handler(struct die_args *args) +static int hw_breakpoint_handler(struct die_args *args) { int i, cpu, rc = NOTIFY_STOP; struct perf_event *bp; @@ -511,7 +510,7 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args) /* * Handle debug exception notifications. 
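With text_poke() off the blacklist per the hunk above, it becomes a legal
probe target. A hedged sketch of what that newly permits; the handler and
registration are illustrative, following the module pattern shown earlier:

static int poke_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("text_poke() invoked\n");
	return 0;
}

static struct kprobe poke_kp = {
	.symbol_name	= "text_poke",
	.pre_handler	= poke_pre_handler,
};

/* Before this patch, register_kprobe(&poke_kp) would be rejected because
 * __kprobes placed text_poke in .kprobes.text; with that annotation gone
 * it registers like any other probe. */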
*/ -int __kprobes hw_breakpoint_exceptions_notify( +int hw_breakpoint_exceptions_notify( struct notifier_block *unused, unsigned long val, void *data) { if (val != DIE_DEBUG) From 9326638cbee2d36b051ed2a69f3e4e107e5f86bd Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 17 Apr 2014 17:18:14 +0900 Subject: [PATCH 013/101] kprobes, x86: Use NOKPROBE_SYMBOL() instead of __kprobes annotation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use NOKPROBE_SYMBOL macro for protecting functions from kprobes instead of __kprobes annotation under arch/x86. This applies nokprobe_inline annotation for some cases, because NOKPROBE_SYMBOL() will inhibit inlining by referring the symbol address. This just folds a bunch of previous NOKPROBE_SYMBOL() cleanup patches for x86 to one patch. Signed-off-by: Masami Hiramatsu Link: http://lkml.kernel.org/r/20140417081814.26341.51656.stgit@ltc230.yrl.intra.hitachi.co.jp Cc: Andrew Morton Cc: Arnaldo Carvalho de Melo Cc: Borislav Petkov Cc: Dave Hansen Cc: Fernando Luis Vázquez Cao Cc: Gleb Natapov Cc: Jason Wang Cc: Jesper Nilsson Cc: Jiri Kosina Cc: Jiri Olsa Cc: Jiri Slaby Cc: Johannes Weiner Cc: Jonathan Lebon Cc: Kees Cook Cc: Matt Fleming Cc: Michel Lespinasse Cc: Paolo Bonzini Cc: Paul E. McKenney Cc: Paul Gortmaker Cc: Paul Mackerras Cc: Raghavendra K T Cc: Rusty Russell Cc: Seiji Aguchi Cc: Srivatsa Vaddagiri Cc: Tejun Heo Cc: Vineet Gupta Signed-off-by: Ingo Molnar --- arch/x86/include/asm/traps.h | 2 +- arch/x86/kernel/apic/hw_nmi.c | 3 +- arch/x86/kernel/cpu/perf_event.c | 3 +- arch/x86/kernel/cpu/perf_event_amd_ibs.c | 3 +- arch/x86/kernel/dumpstack.c | 9 ++- arch/x86/kernel/kprobes/core.c | 77 +++++++++++++++--------- arch/x86/kernel/kprobes/ftrace.c | 15 +++-- arch/x86/kernel/kprobes/opt.c | 8 ++- arch/x86/kernel/kvm.c | 4 +- arch/x86/kernel/nmi.c | 18 ++++-- arch/x86/kernel/traps.c | 20 +++--- arch/x86/mm/fault.c | 29 +++++---- 12 files changed, 122 insertions(+), 69 deletions(-) diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index 58d66fe06b6170..ca32508c58c5b6 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h @@ -68,7 +68,7 @@ dotraplinkage void do_segment_not_present(struct pt_regs *, long); dotraplinkage void do_stack_segment(struct pt_regs *, long); #ifdef CONFIG_X86_64 dotraplinkage void do_double_fault(struct pt_regs *, long); -asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *); +asmlinkage struct pt_regs *sync_regs(struct pt_regs *); #endif dotraplinkage void do_general_protection(struct pt_regs *, long); dotraplinkage void do_page_fault(struct pt_regs *, unsigned long); diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c index a698d7165c96a5..73eb5b336f63cb 100644 --- a/arch/x86/kernel/apic/hw_nmi.c +++ b/arch/x86/kernel/apic/hw_nmi.c @@ -60,7 +60,7 @@ void arch_trigger_all_cpu_backtrace(void) smp_mb__after_clear_bit(); } -static int __kprobes +static int arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs) { int cpu; @@ -80,6 +80,7 @@ arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs) return NMI_DONE; } +NOKPROBE_SYMBOL(arch_trigger_all_cpu_backtrace_handler); static int __init register_trigger_all_cpu_backtrace(void) { diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index ae407f7226c899..5fc8771979ba3f 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1292,7 +1292,7 @@ void 
perf_events_lapic_init(void) apic_write(APIC_LVTPC, APIC_DM_NMI); } -static int __kprobes +static int perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs) { u64 start_clock; @@ -1310,6 +1310,7 @@ perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs) return ret; } +NOKPROBE_SYMBOL(perf_event_nmi_handler); struct event_constraint emptyconstraint; struct event_constraint unconstrained; diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c index 4c36bbe3173aa0..cbb1be3ed9e432 100644 --- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c +++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c @@ -593,7 +593,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs) return 1; } -static int __kprobes +static int perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs) { int handled = 0; @@ -606,6 +606,7 @@ perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs) return handled; } +NOKPROBE_SYMBOL(perf_ibs_nmi_handler); static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name) { diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index d9c12d3022a70c..b74ebc7c4402e7 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -200,7 +200,7 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; static int die_owner = -1; static unsigned int die_nest_count; -unsigned __kprobes long oops_begin(void) +unsigned long oops_begin(void) { int cpu; unsigned long flags; @@ -223,8 +223,9 @@ unsigned __kprobes long oops_begin(void) return flags; } EXPORT_SYMBOL_GPL(oops_begin); +NOKPROBE_SYMBOL(oops_begin); -void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) +void oops_end(unsigned long flags, struct pt_regs *regs, int signr) { if (regs && kexec_should_crash(current)) crash_kexec(regs); @@ -247,8 +248,9 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) panic("Fatal exception"); do_exit(signr); } +NOKPROBE_SYMBOL(oops_end); -int __kprobes __die(const char *str, struct pt_regs *regs, long err) +int __die(const char *str, struct pt_regs *regs, long err) { #ifdef CONFIG_X86_32 unsigned short ss; @@ -291,6 +293,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err) #endif return 0; } +NOKPROBE_SYMBOL(__die); /* * This is gone through when something in the kernel has done something bad diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index bd717137ae7716..7596df664901ee 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -112,7 +112,8 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = { const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist); -static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op) +static nokprobe_inline void +__synthesize_relative_insn(void *from, void *to, u8 op) { struct __arch_relative_insn { u8 op; @@ -125,21 +126,23 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op) } /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/ -void __kprobes synthesize_reljump(void *from, void *to) +void synthesize_reljump(void *from, void *to) { __synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE); } +NOKPROBE_SYMBOL(synthesize_reljump); /* Insert a call instruction at address 'from', which calls address 'to'.*/ -void __kprobes synthesize_relcall(void *from, void *to) +void synthesize_relcall(void *from, void *to) { 
__synthesize_relative_insn(from, to, RELATIVECALL_OPCODE); } +NOKPROBE_SYMBOL(synthesize_relcall); /* * Skip the prefixes of the instruction. */ -static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn) +static kprobe_opcode_t *skip_prefixes(kprobe_opcode_t *insn) { insn_attr_t attr; @@ -154,6 +157,7 @@ static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn) #endif return insn; } +NOKPROBE_SYMBOL(skip_prefixes); /* * Returns non-zero if opcode is boostable. @@ -425,7 +429,8 @@ void arch_remove_kprobe(struct kprobe *p) } } -static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) +static nokprobe_inline void +save_previous_kprobe(struct kprobe_ctlblk *kcb) { kcb->prev_kprobe.kp = kprobe_running(); kcb->prev_kprobe.status = kcb->kprobe_status; @@ -433,7 +438,8 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags; } -static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) +static nokprobe_inline void +restore_previous_kprobe(struct kprobe_ctlblk *kcb) { __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); kcb->kprobe_status = kcb->prev_kprobe.status; @@ -441,8 +447,9 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags; } -static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb) +static nokprobe_inline void +set_current_kprobe(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb) { __this_cpu_write(current_kprobe, p); kcb->kprobe_saved_flags = kcb->kprobe_old_flags @@ -451,7 +458,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF; } -static void __kprobes clear_btf(void) +static nokprobe_inline void clear_btf(void) { if (test_thread_flag(TIF_BLOCKSTEP)) { unsigned long debugctl = get_debugctlmsr(); @@ -461,7 +468,7 @@ static void __kprobes clear_btf(void) } } -static void __kprobes restore_btf(void) +static nokprobe_inline void restore_btf(void) { if (test_thread_flag(TIF_BLOCKSTEP)) { unsigned long debugctl = get_debugctlmsr(); @@ -471,8 +478,7 @@ static void __kprobes restore_btf(void) } } -void __kprobes -arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) +void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { unsigned long *sara = stack_addr(regs); @@ -481,9 +487,10 @@ arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) /* Replace the return addr with trampoline addr */ *sara = (unsigned long) &kretprobe_trampoline; } +NOKPROBE_SYMBOL(arch_prepare_kretprobe); -static void __kprobes -setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb, int reenter) +static void setup_singlestep(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb, int reenter) { if (setup_detour_execution(p, regs, reenter)) return; @@ -519,14 +526,15 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k else regs->ip = (unsigned long)p->ainsn.insn; } +NOKPROBE_SYMBOL(setup_singlestep); /* * We have reentered the kprobe_handler(), since another probe was hit while * within the handler. We save the original kprobes variables and just single * step on the instruction of the new probe without calling any user handlers. 
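The conversions in this file rest on the NOKPROBE_SYMBOL() machinery added
earlier in the series. A simplified sketch of the mechanism (the
authoritative definition lives in include/linux/kprobes.h):

/* Each use emits the function's address into a dedicated ELF section;
 * populate_kprobe_blacklist() walks that section at boot and feeds the
 * entries into kprobe_blacklist. Simplified from the real macro. */
#define NOKPROBE_SYMBOL(fname)						\
	static unsigned long __used					\
		__attribute__((section("_kprobe_blacklist")))		\
		_kbl_addr_##fname = (unsigned long)fname;

Because the macro takes the symbol's address, attaching it to a small
static helper would force the compiler to emit the helper out of line;
that is why such helpers are converted to nokprobe_inline instead, staying
inlined inside callers that are already protected.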
*/ -static int __kprobes -reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) +static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb) { switch (kcb->kprobe_status) { case KPROBE_HIT_SSDONE: @@ -554,12 +562,13 @@ reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb return 1; } +NOKPROBE_SYMBOL(reenter_kprobe); /* * Interrupts are disabled on entry as trap3 is an interrupt gate and they * remain disabled throughout this function. */ -int __kprobes kprobe_int3_handler(struct pt_regs *regs) +int kprobe_int3_handler(struct pt_regs *regs) { kprobe_opcode_t *addr; struct kprobe *p; @@ -622,12 +631,13 @@ int __kprobes kprobe_int3_handler(struct pt_regs *regs) preempt_enable_no_resched(); return 0; } +NOKPROBE_SYMBOL(kprobe_int3_handler); /* * When a retprobed function returns, this code saves registers and * calls trampoline_handler() runs, which calls the kretprobe's handler. */ -static void __used __kprobes kretprobe_trampoline_holder(void) +static void __used kretprobe_trampoline_holder(void) { asm volatile ( ".global kretprobe_trampoline\n" @@ -658,11 +668,13 @@ static void __used __kprobes kretprobe_trampoline_holder(void) #endif " ret\n"); } +NOKPROBE_SYMBOL(kretprobe_trampoline_holder); +NOKPROBE_SYMBOL(kretprobe_trampoline); /* * Called from kretprobe_trampoline */ -__visible __used __kprobes void *trampoline_handler(struct pt_regs *regs) +__visible __used void *trampoline_handler(struct pt_regs *regs) { struct kretprobe_instance *ri = NULL; struct hlist_head *head, empty_rp; @@ -748,6 +760,7 @@ __visible __used __kprobes void *trampoline_handler(struct pt_regs *regs) } return (void *)orig_ret_address; } +NOKPROBE_SYMBOL(trampoline_handler); /* * Called after single-stepping. p->addr is the address of the @@ -776,8 +789,8 @@ __visible __used __kprobes void *trampoline_handler(struct pt_regs *regs) * jump instruction after the copied instruction, that jumps to the next * instruction after the probepoint. */ -static void __kprobes -resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) +static void resume_execution(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb) { unsigned long *tos = stack_addr(regs); unsigned long copy_ip = (unsigned long)p->ainsn.insn; @@ -852,12 +865,13 @@ resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k no_change: restore_btf(); } +NOKPROBE_SYMBOL(resume_execution); /* * Interrupts are disabled on entry as trap1 is an interrupt gate and they * remain disabled throughout this function. */ -int __kprobes kprobe_debug_handler(struct pt_regs *regs) +int kprobe_debug_handler(struct pt_regs *regs) { struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); @@ -892,8 +906,9 @@ int __kprobes kprobe_debug_handler(struct pt_regs *regs) return 1; } +NOKPROBE_SYMBOL(kprobe_debug_handler); -int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) +int kprobe_fault_handler(struct pt_regs *regs, int trapnr) { struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); @@ -950,12 +965,13 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) return 0; } +NOKPROBE_SYMBOL(kprobe_fault_handler); /* * Wrapper routine for handling exceptions. 
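As a reference for the pattern repeated throughout this patch, a condensed
before/after of the three annotation styles (function names hypothetical):

/* Old style: __kprobes places the function in .kprobes.text, which the
 * blacklist check matches by address range. */
static int __kprobes old_handler(struct pt_regs *regs) { return 0; }

/* New style for functions that keep their own symbol: plain definition
 * plus a NOKPROBE_SYMBOL() entry recording the address. */
static int new_handler(struct pt_regs *regs) { return 0; }
NOKPROBE_SYMBOL(new_handler);

/* New style for tiny helpers: force inlining so the helper has no address
 * of its own and inherits its caller's protection. */
static nokprobe_inline int tiny_helper(struct pt_regs *regs) { return 0; }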
*/ -int __kprobes -kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data) +int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, + void *data) { struct die_args *args = data; int ret = NOTIFY_DONE; @@ -975,8 +991,9 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d } return ret; } +NOKPROBE_SYMBOL(kprobe_exceptions_notify); -int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) +int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) { struct jprobe *jp = container_of(p, struct jprobe, kp); unsigned long addr; @@ -1000,8 +1017,9 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) regs->ip = (unsigned long)(jp->entry); return 1; } +NOKPROBE_SYMBOL(setjmp_pre_handler); -void __kprobes jprobe_return(void) +void jprobe_return(void) { struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); @@ -1017,8 +1035,10 @@ void __kprobes jprobe_return(void) " nop \n"::"b" (kcb->jprobe_saved_sp):"memory"); } +NOKPROBE_SYMBOL(jprobe_return); +NOKPROBE_SYMBOL(jprobe_return_end); -int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) +int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) { struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); u8 *addr = (u8 *) (regs->ip - 1); @@ -1046,6 +1066,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) } return 0; } +NOKPROBE_SYMBOL(longjmp_break_handler); bool arch_within_kprobe_blacklist(unsigned long addr) { diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c index dcaa1310ccfda1..717b02a22e6763 100644 --- a/arch/x86/kernel/kprobes/ftrace.c +++ b/arch/x86/kernel/kprobes/ftrace.c @@ -25,8 +25,9 @@ #include "common.h" -static int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb) +static nokprobe_inline +int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb) { /* * Emulate singlestep (and also recover regs->ip) @@ -41,18 +42,19 @@ static int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, return 1; } -int __kprobes skip_singlestep(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb) +int skip_singlestep(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb) { if (kprobe_ftrace(p)) return __skip_singlestep(p, regs, kcb); else return 0; } +NOKPROBE_SYMBOL(skip_singlestep); /* Ftrace callback handler for kprobes */ -void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, - struct ftrace_ops *ops, struct pt_regs *regs) +void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *ops, struct pt_regs *regs) { struct kprobe *p; struct kprobe_ctlblk *kcb; @@ -84,6 +86,7 @@ void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, end: local_irq_restore(flags); } +NOKPROBE_SYMBOL(kprobe_ftrace_handler); int arch_prepare_kprobe_ftrace(struct kprobe *p) { diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index fba7fb075e8a76..f304773285ae36 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -138,7 +138,8 @@ asm ( #define INT3_SIZE sizeof(kprobe_opcode_t) /* Optimized kprobe call back function: called from optinsn */ -static void __kprobes optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs) +static void +optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs) { struct kprobe_ctlblk *kcb = 
get_kprobe_ctlblk(); unsigned long flags; @@ -168,6 +169,7 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op, struct pt_ } local_irq_restore(flags); } +NOKPROBE_SYMBOL(optimized_callback); static int copy_optimized_instructions(u8 *dest, u8 *src) { @@ -424,8 +426,7 @@ extern void arch_unoptimize_kprobes(struct list_head *oplist, } } -int __kprobes -setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter) +int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter) { struct optimized_kprobe *op; @@ -441,3 +442,4 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter) } return 0; } +NOKPROBE_SYMBOL(setup_detour_execution); diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 0331cb389d6861..d81abcbfe50142 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -251,8 +251,9 @@ u32 kvm_read_and_reset_pf_reason(void) return reason; } EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason); +NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason); -dotraplinkage void __kprobes +dotraplinkage void do_async_page_fault(struct pt_regs *regs, unsigned long error_code) { enum ctx_state prev_state; @@ -276,6 +277,7 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code) break; } } +NOKPROBE_SYMBOL(do_async_page_fault); static void __init paravirt_ops_setup(void) { diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index b4872b999a713d..c3e985d1751ced 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -110,7 +110,7 @@ static void nmi_max_handler(struct irq_work *w) a->handler, whole_msecs, decimal_msecs); } -static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b) +static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b) { struct nmi_desc *desc = nmi_to_desc(type); struct nmiaction *a; @@ -146,6 +146,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2 /* return total number of NMI events handled */ return handled; } +NOKPROBE_SYMBOL(nmi_handle); int __register_nmi_handler(unsigned int type, struct nmiaction *action) { @@ -208,7 +209,7 @@ void unregister_nmi_handler(unsigned int type, const char *name) } EXPORT_SYMBOL_GPL(unregister_nmi_handler); -static __kprobes void +static void pci_serr_error(unsigned char reason, struct pt_regs *regs) { /* check to see if anyone registered against these types of errors */ @@ -238,8 +239,9 @@ pci_serr_error(unsigned char reason, struct pt_regs *regs) reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR; outb(reason, NMI_REASON_PORT); } +NOKPROBE_SYMBOL(pci_serr_error); -static __kprobes void +static void io_check_error(unsigned char reason, struct pt_regs *regs) { unsigned long i; @@ -269,8 +271,9 @@ io_check_error(unsigned char reason, struct pt_regs *regs) reason &= ~NMI_REASON_CLEAR_IOCHK; outb(reason, NMI_REASON_PORT); } +NOKPROBE_SYMBOL(io_check_error); -static __kprobes void +static void unknown_nmi_error(unsigned char reason, struct pt_regs *regs) { int handled; @@ -298,11 +301,12 @@ unknown_nmi_error(unsigned char reason, struct pt_regs *regs) pr_emerg("Dazed and confused, but trying to continue\n"); } +NOKPROBE_SYMBOL(unknown_nmi_error); static DEFINE_PER_CPU(bool, swallow_nmi); static DEFINE_PER_CPU(unsigned long, last_nmi_rip); -static __kprobes void default_do_nmi(struct pt_regs *regs) +static void default_do_nmi(struct pt_regs *regs) { unsigned char reason = 0; int handled; @@ -401,6 +405,7 @@ static __kprobes void default_do_nmi(struct 
pt_regs *regs) else unknown_nmi_error(reason, regs); } +NOKPROBE_SYMBOL(default_do_nmi); /* * NMIs can hit breakpoints which will cause it to lose its @@ -520,7 +525,7 @@ static inline void nmi_nesting_postprocess(void) } #endif -dotraplinkage notrace __kprobes void +dotraplinkage notrace void do_nmi(struct pt_regs *regs, long error_code) { nmi_nesting_preprocess(regs); @@ -537,6 +542,7 @@ do_nmi(struct pt_regs *regs, long error_code) /* On i386, may loop back to preprocess */ nmi_nesting_postprocess(); } +NOKPROBE_SYMBOL(do_nmi); void stop_nmi(void) { diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index ba9abe9cbce338..3c8ae7d83820bd 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -106,7 +106,7 @@ static inline void preempt_conditional_cli(struct pt_regs *regs) preempt_count_dec(); } -static int __kprobes +static nokprobe_inline int do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str, struct pt_regs *regs, long error_code) { @@ -136,7 +136,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str, return -1; } -static void __kprobes +static void do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, long error_code, siginfo_t *info) { @@ -173,6 +173,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, else force_sig(signr, tsk); } +NOKPROBE_SYMBOL(do_trap); #define DO_ERROR(trapnr, signr, str, name) \ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ @@ -263,7 +264,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) } #endif -dotraplinkage void __kprobes +dotraplinkage void do_general_protection(struct pt_regs *regs, long error_code) { struct task_struct *tsk; @@ -309,9 +310,10 @@ do_general_protection(struct pt_regs *regs, long error_code) exit: exception_exit(prev_state); } +NOKPROBE_SYMBOL(do_general_protection); /* May run on IST stack. */ -dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_code) +dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code) { enum ctx_state prev_state; @@ -355,6 +357,7 @@ dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_co exit: exception_exit(prev_state); } +NOKPROBE_SYMBOL(do_int3); #ifdef CONFIG_X86_64 /* @@ -362,7 +365,7 @@ dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_co * for scheduling or signal handling. The actual stack switch is done in * entry.S */ -asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) +asmlinkage struct pt_regs *sync_regs(struct pt_regs *eregs) { struct pt_regs *regs = eregs; /* Did already sync */ @@ -381,6 +384,7 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) *regs = *eregs; return regs; } +NOKPROBE_SYMBOL(sync_regs); #endif /* @@ -407,7 +411,7 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) * * May run on IST stack. 
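The NMI callbacks above stay unprobeable because NMIs can fire in the
middle of kprobes' own int3/single-step handling, exactly the reentrancy
the first patch of this series hardens. A sketch of a client following the
new convention; the handler name and registration site are illustrative:

static int demo_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	/* Claim nothing; let other registered handlers inspect the NMI. */
	return NMI_DONE;
}
NOKPROBE_SYMBOL(demo_nmi_handler);

/* From init code: */
register_nmi_handler(NMI_LOCAL, demo_nmi_handler, 0, "demo");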
*/ -dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) +dotraplinkage void do_debug(struct pt_regs *regs, long error_code) { struct task_struct *tsk = current; enum ctx_state prev_state; @@ -491,6 +495,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) exit: exception_exit(prev_state); } +NOKPROBE_SYMBOL(do_debug); /* * Note that we play around with the 'TS' bit in an attempt to get @@ -662,7 +667,7 @@ void math_state_restore(void) } EXPORT_SYMBOL_GPL(math_state_restore); -dotraplinkage void __kprobes +dotraplinkage void do_device_not_available(struct pt_regs *regs, long error_code) { enum ctx_state prev_state; @@ -688,6 +693,7 @@ do_device_not_available(struct pt_regs *regs, long error_code) #endif exception_exit(prev_state); } +NOKPROBE_SYMBOL(do_device_not_available); #ifdef CONFIG_X86_32 dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 8e57229926779e..f83bd0de5eefe6 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -8,7 +8,7 @@ #include /* oops_begin/end, ... */ #include /* search_exception_table */ #include /* max_low_pfn */ -#include /* __kprobes, ... */ +#include /* NOKPROBE_SYMBOL, ... */ #include /* kmmio_handler, ... */ #include /* perf_sw_event */ #include /* hstate_index_to_shift */ @@ -45,7 +45,7 @@ enum x86_pf_error_code { * Returns 0 if mmiotrace is disabled, or if the fault is not * handled by mmiotrace: */ -static inline int __kprobes +static nokprobe_inline int kmmio_fault(struct pt_regs *regs, unsigned long addr) { if (unlikely(is_kmmio_active())) @@ -54,7 +54,7 @@ kmmio_fault(struct pt_regs *regs, unsigned long addr) return 0; } -static inline int __kprobes kprobes_fault(struct pt_regs *regs) +static nokprobe_inline int kprobes_fault(struct pt_regs *regs) { int ret = 0; @@ -261,7 +261,7 @@ void vmalloc_sync_all(void) * * Handle a fault on the vmalloc or module mapping area */ -static noinline __kprobes int vmalloc_fault(unsigned long address) +static noinline int vmalloc_fault(unsigned long address) { unsigned long pgd_paddr; pmd_t *pmd_k; @@ -291,6 +291,7 @@ static noinline __kprobes int vmalloc_fault(unsigned long address) return 0; } +NOKPROBE_SYMBOL(vmalloc_fault); /* * Did it hit the DOS screen memory VA from vm86 mode? @@ -358,7 +359,7 @@ void vmalloc_sync_all(void) * * This assumes no large pages in there. */ -static noinline __kprobes int vmalloc_fault(unsigned long address) +static noinline int vmalloc_fault(unsigned long address) { pgd_t *pgd, *pgd_ref; pud_t *pud, *pud_ref; @@ -425,6 +426,7 @@ static noinline __kprobes int vmalloc_fault(unsigned long address) return 0; } +NOKPROBE_SYMBOL(vmalloc_fault); #ifdef CONFIG_CPU_SUP_AMD static const char errata93_warning[] = @@ -927,7 +929,7 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte) * There are no security implications to leaving a stale TLB when * increasing the permissions on a page. */ -static noinline __kprobes int +static noinline int spurious_fault(unsigned long error_code, unsigned long address) { pgd_t *pgd; @@ -975,6 +977,7 @@ spurious_fault(unsigned long error_code, unsigned long address) return ret; } +NOKPROBE_SYMBOL(spurious_fault); int show_unhandled_signals = 1; @@ -1030,7 +1033,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs) * {,trace_}do_page_fault() have notrace on. Having this an actual function * guarantees there's a function trace entry. 
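For orientation, the fault-path conversions above protect the dispatch
that lets a fault raised inside a kprobe handler be resolved without
recursing into another probe hit. A condensed sketch of that check, not a
verbatim copy of the file above (the real helper also brackets the test
with preempt_disable/enable, since kprobe_running() is per-CPU):

static nokprobe_inline int kprobes_fault_sketch(struct pt_regs *regs)
{
	/* Only kernel-mode faults taken while a kprobe is active are
	 * diverted to kprobe_fault_handler(); 14 is the page-fault trap. */
	if (kprobes_built_in() && !user_mode_vm(regs) &&
	    kprobe_running() && kprobe_fault_handler(regs, 14))
		return 1;
	return 0;
}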
*/ -static void __kprobes noinline +static noinline void __do_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address) { @@ -1253,8 +1256,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, up_read(&mm->mmap_sem); } +NOKPROBE_SYMBOL(__do_page_fault); -dotraplinkage void __kprobes notrace +dotraplinkage void notrace do_page_fault(struct pt_regs *regs, unsigned long error_code) { unsigned long address = read_cr2(); /* Get the faulting address */ @@ -1272,10 +1276,12 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code) __do_page_fault(regs, error_code, address); exception_exit(prev_state); } +NOKPROBE_SYMBOL(do_page_fault); #ifdef CONFIG_TRACING -static void trace_page_fault_entries(unsigned long address, struct pt_regs *regs, - unsigned long error_code) +static nokprobe_inline void +trace_page_fault_entries(unsigned long address, struct pt_regs *regs, + unsigned long error_code) { if (user_mode(regs)) trace_page_fault_user(address, regs, error_code); @@ -1283,7 +1289,7 @@ static void trace_page_fault_entries(unsigned long address, struct pt_regs *regs trace_page_fault_kernel(address, regs, error_code); } -dotraplinkage void __kprobes notrace +dotraplinkage void notrace trace_do_page_fault(struct pt_regs *regs, unsigned long error_code) { /* @@ -1300,4 +1306,5 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code) __do_page_fault(regs, error_code, address); exception_exit(prev_state); } +NOKPROBE_SYMBOL(trace_do_page_fault); #endif /* CONFIG_TRACING */ From 820aede0209a51549e8a014c8030e29229920e4e Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 17 Apr 2014 17:18:21 +0900 Subject: [PATCH 014/101] kprobes: Use NOKPROBE_SYMBOL macro instead of __kprobes Use NOKPROBE_SYMBOL macro to protect functions from kprobes instead of __kprobes annotation. Signed-off-by: Masami Hiramatsu Reviewed-by: Steven Rostedt Cc: Ananth N Mavinakayanahalli Cc: Anil S Keshavamurthy Cc: David S. Miller Link: http://lkml.kernel.org/r/20140417081821.26341.40362.stgit@ltc230.yrl.intra.hitachi.co.jp Signed-off-by: Ingo Molnar --- kernel/kprobes.c | 67 +++++++++++++++++++++++++++++------------------- 1 file changed, 41 insertions(+), 26 deletions(-) diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 4db2cc616f5016..a21b4e67fd971f 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -301,7 +301,7 @@ static inline void reset_kprobe_instance(void) * OR * - with preemption disabled - from arch/xxx/kernel/kprobes.c */ -struct kprobe __kprobes *get_kprobe(void *addr) +struct kprobe *get_kprobe(void *addr) { struct hlist_head *head; struct kprobe *p; @@ -314,8 +314,9 @@ struct kprobe __kprobes *get_kprobe(void *addr) return NULL; } +NOKPROBE_SYMBOL(get_kprobe); -static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs); +static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs); /* Return true if the kprobe is an aggregator */ static inline int kprobe_aggrprobe(struct kprobe *p) @@ -347,7 +348,7 @@ static bool kprobes_allow_optimization; * Call all pre_handler on the list, but ignores its return value. * This must be called from arch-dep optimized caller. 
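When two probes land on the same address, the core wraps them in an
aggregator and walks the member list on each hit; opt_pre_handler() above
is the optimized-path version of that walk. A sketch with hypothetical
handlers and an illustrative target:

static int h1(struct kprobe *p, struct pt_regs *regs) { return 0; }
static int h2(struct kprobe *p, struct pt_regs *regs) { return 0; }

static struct kprobe first  = { .symbol_name = "vfs_read", .pre_handler = h1 };
static struct kprobe second = { .symbol_name = "vfs_read", .pre_handler = h2 };

/* register_kprobe(&first) installs an ordinary probe. register_kprobe(&second)
 * finds the address occupied and goes through register_aggr_kprobe(), after
 * which one int3 hit runs both h1 and h2 via aggr_pre_handler(). */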
*/ -void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs) +void opt_pre_handler(struct kprobe *p, struct pt_regs *regs) { struct kprobe *kp; @@ -359,6 +360,7 @@ void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs) reset_kprobe_instance(); } } +NOKPROBE_SYMBOL(opt_pre_handler); /* Free optimized instructions and optimized_kprobe */ static void free_aggr_kprobe(struct kprobe *p) @@ -995,7 +997,7 @@ static void disarm_kprobe(struct kprobe *kp, bool reopt) * Aggregate handlers for multiple kprobes support - these handlers * take care of invoking the individual kprobe handlers on p->list */ -static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs) +static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs) { struct kprobe *kp; @@ -1009,9 +1011,10 @@ static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs) } return 0; } +NOKPROBE_SYMBOL(aggr_pre_handler); -static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs, - unsigned long flags) +static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs, + unsigned long flags) { struct kprobe *kp; @@ -1023,9 +1026,10 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs, } } } +NOKPROBE_SYMBOL(aggr_post_handler); -static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, - int trapnr) +static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, + int trapnr) { struct kprobe *cur = __this_cpu_read(kprobe_instance); @@ -1039,8 +1043,9 @@ static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, } return 0; } +NOKPROBE_SYMBOL(aggr_fault_handler); -static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs) +static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs) { struct kprobe *cur = __this_cpu_read(kprobe_instance); int ret = 0; @@ -1052,9 +1057,10 @@ static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs) reset_kprobe_instance(); return ret; } +NOKPROBE_SYMBOL(aggr_break_handler); /* Walks the list and increments nmissed count for multiprobe case */ -void __kprobes kprobes_inc_nmissed_count(struct kprobe *p) +void kprobes_inc_nmissed_count(struct kprobe *p) { struct kprobe *kp; if (!kprobe_aggrprobe(p)) { @@ -1065,9 +1071,10 @@ void __kprobes kprobes_inc_nmissed_count(struct kprobe *p) } return; } +NOKPROBE_SYMBOL(kprobes_inc_nmissed_count); -void __kprobes recycle_rp_inst(struct kretprobe_instance *ri, - struct hlist_head *head) +void recycle_rp_inst(struct kretprobe_instance *ri, + struct hlist_head *head) { struct kretprobe *rp = ri->rp; @@ -1082,8 +1089,9 @@ void __kprobes recycle_rp_inst(struct kretprobe_instance *ri, /* Unregistering */ hlist_add_head(&ri->hlist, head); } +NOKPROBE_SYMBOL(recycle_rp_inst); -void __kprobes kretprobe_hash_lock(struct task_struct *tsk, +void kretprobe_hash_lock(struct task_struct *tsk, struct hlist_head **head, unsigned long *flags) __acquires(hlist_lock) { @@ -1094,17 +1102,19 @@ __acquires(hlist_lock) hlist_lock = kretprobe_table_lock_ptr(hash); raw_spin_lock_irqsave(hlist_lock, *flags); } +NOKPROBE_SYMBOL(kretprobe_hash_lock); -static void __kprobes kretprobe_table_lock(unsigned long hash, - unsigned long *flags) +static void kretprobe_table_lock(unsigned long hash, + unsigned long *flags) __acquires(hlist_lock) { raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); raw_spin_lock_irqsave(hlist_lock, *flags); } +NOKPROBE_SYMBOL(kretprobe_table_lock); 
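The kretprobe plumbing above (instance recycling and the per-task hash
locks) backs the return-probe API. A minimal client sketch under the same
hypothetical-module assumptions as the earlier kprobe example:

static int demo_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	pr_info("probed function returned %ld\n", regs_return_value(regs));
	return 0;
}

static struct kretprobe demo_rp = {
	.handler	 = demo_ret_handler,
	.kp.symbol_name	 = "do_fork",	/* illustrative target */
	.maxactive	 = 20,	/* instances tracked in the hash above */
};

/* register_kretprobe(&demo_rp) plants a kprobe at function entry whose
 * pre-handler (pre_handler_kretprobe above) swaps the return address for
 * kretprobe_trampoline; demo_ret_handler then runs on function return. */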
-void __kprobes kretprobe_hash_unlock(struct task_struct *tsk, - unsigned long *flags) +void kretprobe_hash_unlock(struct task_struct *tsk, + unsigned long *flags) __releases(hlist_lock) { unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); @@ -1113,14 +1123,16 @@ __releases(hlist_lock) hlist_lock = kretprobe_table_lock_ptr(hash); raw_spin_unlock_irqrestore(hlist_lock, *flags); } +NOKPROBE_SYMBOL(kretprobe_hash_unlock); -static void __kprobes kretprobe_table_unlock(unsigned long hash, - unsigned long *flags) +static void kretprobe_table_unlock(unsigned long hash, + unsigned long *flags) __releases(hlist_lock) { raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); raw_spin_unlock_irqrestore(hlist_lock, *flags); } +NOKPROBE_SYMBOL(kretprobe_table_unlock); /* * This function is called from finish_task_switch when task tk becomes dead, @@ -1128,7 +1140,7 @@ __releases(hlist_lock) * with this task. These left over instances represent probed functions * that have been called but will never return. */ -void __kprobes kprobe_flush_task(struct task_struct *tk) +void kprobe_flush_task(struct task_struct *tk) { struct kretprobe_instance *ri; struct hlist_head *head, empty_rp; @@ -1153,6 +1165,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk) kfree(ri); } } +NOKPROBE_SYMBOL(kprobe_flush_task); static inline void free_rp_inst(struct kretprobe *rp) { @@ -1165,7 +1178,7 @@ static inline void free_rp_inst(struct kretprobe *rp) } } -static void __kprobes cleanup_rp_inst(struct kretprobe *rp) +static void cleanup_rp_inst(struct kretprobe *rp) { unsigned long flags, hash; struct kretprobe_instance *ri; @@ -1184,6 +1197,7 @@ static void __kprobes cleanup_rp_inst(struct kretprobe *rp) } free_rp_inst(rp); } +NOKPROBE_SYMBOL(cleanup_rp_inst); /* * Add the new probe to ap->list. Fail if this is the @@ -1758,8 +1772,7 @@ EXPORT_SYMBOL_GPL(unregister_jprobes); * This kprobe pre_handler is registered with every kretprobe. When probe * hits it will set up the return probe. */ -static int __kprobes pre_handler_kretprobe(struct kprobe *p, - struct pt_regs *regs) +static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs) { struct kretprobe *rp = container_of(p, struct kretprobe, kp); unsigned long hash, flags = 0; @@ -1797,6 +1810,7 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p, } return 0; } +NOKPROBE_SYMBOL(pre_handler_kretprobe); int register_kretprobe(struct kretprobe *rp) { @@ -1920,11 +1934,11 @@ void unregister_kretprobes(struct kretprobe **rps, int num) } EXPORT_SYMBOL_GPL(unregister_kretprobes); -static int __kprobes pre_handler_kretprobe(struct kprobe *p, - struct pt_regs *regs) +static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs) { return 0; } +NOKPROBE_SYMBOL(pre_handler_kretprobe); #endif /* CONFIG_KRETPROBES */ @@ -2002,12 +2016,13 @@ int enable_kprobe(struct kprobe *kp) } EXPORT_SYMBOL_GPL(enable_kprobe); -void __kprobes dump_kprobe(struct kprobe *kp) +void dump_kprobe(struct kprobe *kp) { printk(KERN_WARNING "Dumping kprobe:\n"); printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n", kp->symbol_name, kp->addr, kp->offset); } +NOKPROBE_SYMBOL(dump_kprobe); /* * Lookup and populate the kprobe_blacklist. From 3da0f18007e5b87b573cf6ae8c445d59e757d274 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 17 Apr 2014 17:18:28 +0900 Subject: [PATCH 015/101] kprobes, ftrace: Use NOKPROBE_SYMBOL macro in ftrace Use NOKPROBE_SYMBOL macro to protect functions from kprobes instead of __kprobes annotation in ftrace. 
This applies nokprobe_inline annotation for some cases, because NOKPROBE_SYMBOL() will inhibit inlining by referring the symbol address. Signed-off-by: Masami Hiramatsu Cc: Frederic Weisbecker Cc: Steven Rostedt Link: http://lkml.kernel.org/r/20140417081828.26341.55152.stgit@ltc230.yrl.intra.hitachi.co.jp Signed-off-by: Ingo Molnar --- kernel/trace/trace_event_perf.c | 5 ++- kernel/trace/trace_kprobe.c | 66 +++++++++++++++++++-------------- kernel/trace/trace_probe.c | 61 ++++++++++++++++-------------- kernel/trace/trace_probe.h | 15 ++++---- 4 files changed, 82 insertions(+), 65 deletions(-) diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index c894614de14d8e..5d12bb407b4429 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -248,8 +248,8 @@ void perf_trace_del(struct perf_event *p_event, int flags) tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event); } -__kprobes void *perf_trace_buf_prepare(int size, unsigned short type, - struct pt_regs *regs, int *rctxp) +void *perf_trace_buf_prepare(int size, unsigned short type, + struct pt_regs *regs, int *rctxp) { struct trace_entry *entry; unsigned long flags; @@ -281,6 +281,7 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type, return raw_data; } EXPORT_SYMBOL_GPL(perf_trace_buf_prepare); +NOKPROBE_SYMBOL(perf_trace_buf_prepare); #ifdef CONFIG_FUNCTION_TRACER static void diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index aa5f0bfcdf7b13..242e4ec97d94b0 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -40,27 +40,27 @@ struct trace_kprobe { (sizeof(struct probe_arg) * (n))) -static __kprobes bool trace_kprobe_is_return(struct trace_kprobe *tk) +static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk) { return tk->rp.handler != NULL; } -static __kprobes const char *trace_kprobe_symbol(struct trace_kprobe *tk) +static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk) { return tk->symbol ? 
tk->symbol : "unknown"; } -static __kprobes unsigned long trace_kprobe_offset(struct trace_kprobe *tk) +static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk) { return tk->rp.kp.offset; } -static __kprobes bool trace_kprobe_has_gone(struct trace_kprobe *tk) +static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk) { return !!(kprobe_gone(&tk->rp.kp)); } -static __kprobes bool trace_kprobe_within_module(struct trace_kprobe *tk, +static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk, struct module *mod) { int len = strlen(mod->name); @@ -68,7 +68,7 @@ static __kprobes bool trace_kprobe_within_module(struct trace_kprobe *tk, return strncmp(mod->name, name, len) == 0 && name[len] == ':'; } -static __kprobes bool trace_kprobe_is_on_module(struct trace_kprobe *tk) +static nokprobe_inline bool trace_kprobe_is_on_module(struct trace_kprobe *tk) { return !!strchr(trace_kprobe_symbol(tk), ':'); } @@ -132,19 +132,21 @@ struct symbol_cache *alloc_symbol_cache(const char *sym, long offset) * Kprobes-specific fetch functions */ #define DEFINE_FETCH_stack(type) \ -static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\ +static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs, \ void *offset, void *dest) \ { \ *(type *)dest = (type)regs_get_kernel_stack_nth(regs, \ (unsigned int)((unsigned long)offset)); \ -} +} \ +NOKPROBE_SYMBOL(FETCH_FUNC_NAME(stack, type)); + DEFINE_BASIC_FETCH_FUNCS(stack) /* No string on the stack entry */ #define fetch_stack_string NULL #define fetch_stack_string_size NULL #define DEFINE_FETCH_memory(type) \ -static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\ +static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs, \ void *addr, void *dest) \ { \ type retval; \ @@ -152,14 +154,16 @@ static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\ *(type *)dest = 0; \ else \ *(type *)dest = retval; \ -} +} \ +NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, type)); + DEFINE_BASIC_FETCH_FUNCS(memory) /* * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max * length and relative data location. 
*/ -static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs, - void *addr, void *dest) +static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs, + void *addr, void *dest) { long ret; int maxlen = get_rloc_len(*(u32 *)dest); @@ -193,10 +197,11 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs, get_rloc_offs(*(u32 *)dest)); } } +NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string)); /* Return the length of string -- including null terminal byte */ -static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs, - void *addr, void *dest) +static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs, + void *addr, void *dest) { mm_segment_t old_fs; int ret, len = 0; @@ -219,17 +224,19 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs, else *(u32 *)dest = len; } +NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string_size)); #define DEFINE_FETCH_symbol(type) \ -__kprobes void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, \ - void *data, void *dest) \ +void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, void *data, void *dest)\ { \ struct symbol_cache *sc = data; \ if (sc->addr) \ fetch_memory_##type(regs, (void *)sc->addr, dest); \ else \ *(type *)dest = 0; \ -} +} \ +NOKPROBE_SYMBOL(FETCH_FUNC_NAME(symbol, type)); + DEFINE_BASIC_FETCH_FUNCS(symbol) DEFINE_FETCH_symbol(string) DEFINE_FETCH_symbol(string_size) @@ -907,7 +914,7 @@ static const struct file_operations kprobe_profile_ops = { }; /* Kprobe handler */ -static __kprobes void +static nokprobe_inline void __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs, struct ftrace_event_file *ftrace_file) { @@ -943,7 +950,7 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs, entry, irq_flags, pc, regs); } -static __kprobes void +static void kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs) { struct event_file_link *link; @@ -951,9 +958,10 @@ kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs) list_for_each_entry_rcu(link, &tk->tp.files, list) __kprobe_trace_func(tk, regs, link->file); } +NOKPROBE_SYMBOL(kprobe_trace_func); /* Kretprobe handler */ -static __kprobes void +static nokprobe_inline void __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, struct pt_regs *regs, struct ftrace_event_file *ftrace_file) @@ -991,7 +999,7 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, entry, irq_flags, pc, regs); } -static __kprobes void +static void kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, struct pt_regs *regs) { @@ -1000,6 +1008,7 @@ kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, list_for_each_entry_rcu(link, &tk->tp.files, list) __kretprobe_trace_func(tk, ri, regs, link->file); } +NOKPROBE_SYMBOL(kretprobe_trace_func); /* Event entry printers */ static enum print_line_t @@ -1131,7 +1140,7 @@ static int kretprobe_event_define_fields(struct ftrace_event_call *event_call) #ifdef CONFIG_PERF_EVENTS /* Kprobe profile handler */ -static __kprobes void +static void kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs) { struct ftrace_event_call *call = &tk->tp.call; @@ -1158,9 +1167,10 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs) store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL); } +NOKPROBE_SYMBOL(kprobe_perf_func); /* Kretprobe profile handler */ 
-static __kprobes void +static void kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, struct pt_regs *regs) { @@ -1188,6 +1198,7 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL); } +NOKPROBE_SYMBOL(kretprobe_perf_func); #endif /* CONFIG_PERF_EVENTS */ /* @@ -1223,8 +1234,7 @@ static int kprobe_register(struct ftrace_event_call *event, return 0; } -static __kprobes -int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs) +static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs) { struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp); @@ -1238,9 +1248,10 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs) #endif return 0; /* We don't tweek kernel, so just return 0 */ } +NOKPROBE_SYMBOL(kprobe_dispatcher); -static __kprobes -int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs) +static int +kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs) { struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp); @@ -1254,6 +1265,7 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs) #endif return 0; /* We don't tweek kernel, so just return 0 */ } +NOKPROBE_SYMBOL(kretprobe_dispatcher); static struct trace_event_functions kretprobe_funcs = { .trace = print_kretprobe_event diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index d3a91e40a6595e..d4b9fc22cd27fb 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -37,13 +37,13 @@ const char *reserved_field_names[] = { /* Printing in basic type function template */ #define DEFINE_BASIC_PRINT_TYPE_FUNC(type, fmt) \ -__kprobes int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, \ - const char *name, \ - void *data, void *ent) \ +int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, const char *name, \ + void *data, void *ent) \ { \ return trace_seq_printf(s, " %s=" fmt, name, *(type *)data); \ } \ -const char PRINT_TYPE_FMT_NAME(type)[] = fmt; +const char PRINT_TYPE_FMT_NAME(type)[] = fmt; \ +NOKPROBE_SYMBOL(PRINT_TYPE_FUNC_NAME(type)); DEFINE_BASIC_PRINT_TYPE_FUNC(u8 , "0x%x") DEFINE_BASIC_PRINT_TYPE_FUNC(u16, "0x%x") @@ -55,9 +55,8 @@ DEFINE_BASIC_PRINT_TYPE_FUNC(s32, "%d") DEFINE_BASIC_PRINT_TYPE_FUNC(s64, "%Ld") /* Print type function for string type */ -__kprobes int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, - const char *name, - void *data, void *ent) +int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, const char *name, + void *data, void *ent) { int len = *(u32 *)data >> 16; @@ -67,6 +66,7 @@ __kprobes int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, return trace_seq_printf(s, " %s=\"%s\"", name, (const char *)get_loc_data(data, ent)); } +NOKPROBE_SYMBOL(PRINT_TYPE_FUNC_NAME(string)); const char PRINT_TYPE_FMT_NAME(string)[] = "\\\"%s\\\""; @@ -81,23 +81,24 @@ const char PRINT_TYPE_FMT_NAME(string)[] = "\\\"%s\\\""; /* Data fetch function templates */ #define DEFINE_FETCH_reg(type) \ -__kprobes void FETCH_FUNC_NAME(reg, type)(struct pt_regs *regs, \ - void *offset, void *dest) \ +void FETCH_FUNC_NAME(reg, type)(struct pt_regs *regs, void *offset, void *dest) \ { \ *(type *)dest = (type)regs_get_register(regs, \ (unsigned int)((unsigned long)offset)); \ -} +} \ +NOKPROBE_SYMBOL(FETCH_FUNC_NAME(reg, type)); DEFINE_BASIC_FETCH_FUNCS(reg) /* No string on the register */ 
#define fetch_reg_string NULL #define fetch_reg_string_size NULL #define DEFINE_FETCH_retval(type) \ -__kprobes void FETCH_FUNC_NAME(retval, type)(struct pt_regs *regs, \ - void *dummy, void *dest) \ +void FETCH_FUNC_NAME(retval, type)(struct pt_regs *regs, \ + void *dummy, void *dest) \ { \ *(type *)dest = (type)regs_return_value(regs); \ -} +} \ +NOKPROBE_SYMBOL(FETCH_FUNC_NAME(retval, type)); DEFINE_BASIC_FETCH_FUNCS(retval) /* No string on the retval */ #define fetch_retval_string NULL @@ -112,8 +113,8 @@ struct deref_fetch_param { }; #define DEFINE_FETCH_deref(type) \ -__kprobes void FETCH_FUNC_NAME(deref, type)(struct pt_regs *regs, \ - void *data, void *dest) \ +void FETCH_FUNC_NAME(deref, type)(struct pt_regs *regs, \ + void *data, void *dest) \ { \ struct deref_fetch_param *dprm = data; \ unsigned long addr; \ @@ -123,12 +124,13 @@ __kprobes void FETCH_FUNC_NAME(deref, type)(struct pt_regs *regs, \ dprm->fetch(regs, (void *)addr, dest); \ } else \ *(type *)dest = 0; \ -} +} \ +NOKPROBE_SYMBOL(FETCH_FUNC_NAME(deref, type)); DEFINE_BASIC_FETCH_FUNCS(deref) DEFINE_FETCH_deref(string) -__kprobes void FETCH_FUNC_NAME(deref, string_size)(struct pt_regs *regs, - void *data, void *dest) +void FETCH_FUNC_NAME(deref, string_size)(struct pt_regs *regs, + void *data, void *dest) { struct deref_fetch_param *dprm = data; unsigned long addr; @@ -140,16 +142,18 @@ __kprobes void FETCH_FUNC_NAME(deref, string_size)(struct pt_regs *regs, } else *(string_size *)dest = 0; } +NOKPROBE_SYMBOL(FETCH_FUNC_NAME(deref, string_size)); -static __kprobes void update_deref_fetch_param(struct deref_fetch_param *data) +static void update_deref_fetch_param(struct deref_fetch_param *data) { if (CHECK_FETCH_FUNCS(deref, data->orig.fn)) update_deref_fetch_param(data->orig.data); else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn)) update_symbol_cache(data->orig.data); } +NOKPROBE_SYMBOL(update_deref_fetch_param); -static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data) +static void free_deref_fetch_param(struct deref_fetch_param *data) { if (CHECK_FETCH_FUNCS(deref, data->orig.fn)) free_deref_fetch_param(data->orig.data); @@ -157,6 +161,7 @@ static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data) free_symbol_cache(data->orig.data); kfree(data); } +NOKPROBE_SYMBOL(free_deref_fetch_param); /* Bitfield fetch function */ struct bitfield_fetch_param { @@ -166,8 +171,8 @@ struct bitfield_fetch_param { }; #define DEFINE_FETCH_bitfield(type) \ -__kprobes void FETCH_FUNC_NAME(bitfield, type)(struct pt_regs *regs, \ - void *data, void *dest) \ +void FETCH_FUNC_NAME(bitfield, type)(struct pt_regs *regs, \ + void *data, void *dest) \ { \ struct bitfield_fetch_param *bprm = data; \ type buf = 0; \ @@ -177,8 +182,8 @@ __kprobes void FETCH_FUNC_NAME(bitfield, type)(struct pt_regs *regs, \ buf >>= bprm->low_shift; \ } \ *(type *)dest = buf; \ -} - +} \ +NOKPROBE_SYMBOL(FETCH_FUNC_NAME(bitfield, type)); DEFINE_BASIC_FETCH_FUNCS(bitfield) #define fetch_bitfield_string NULL #define fetch_bitfield_string_size NULL @@ -255,17 +260,17 @@ static const struct fetch_type *find_fetch_type(const char *type, } /* Special function : only accept unsigned long */ -static __kprobes void fetch_kernel_stack_address(struct pt_regs *regs, - void *dummy, void *dest) +static void fetch_kernel_stack_address(struct pt_regs *regs, void *dummy, void *dest) { *(unsigned long *)dest = kernel_stack_pointer(regs); } +NOKPROBE_SYMBOL(fetch_kernel_stack_address); -static __kprobes void fetch_user_stack_address(struct 
pt_regs *regs, - void *dummy, void *dest) +static void fetch_user_stack_address(struct pt_regs *regs, void *dummy, void *dest) { *(unsigned long *)dest = user_stack_pointer(regs); } +NOKPROBE_SYMBOL(fetch_user_stack_address); static fetch_func_t get_fetch_size_function(const struct fetch_type *type, fetch_func_t orig_fn, diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index fb1ab5dfbd42f6..4f815fbce16d26 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h @@ -81,13 +81,13 @@ */ #define convert_rloc_to_loc(dl, offs) ((u32)(dl) + (offs)) -static inline void *get_rloc_data(u32 *dl) +static nokprobe_inline void *get_rloc_data(u32 *dl) { return (u8 *)dl + get_rloc_offs(*dl); } /* For data_loc conversion */ -static inline void *get_loc_data(u32 *dl, void *ent) +static nokprobe_inline void *get_loc_data(u32 *dl, void *ent) { return (u8 *)ent + get_rloc_offs(*dl); } @@ -136,9 +136,8 @@ typedef u32 string_size; /* Printing in basic type function template */ #define DECLARE_BASIC_PRINT_TYPE_FUNC(type) \ -__kprobes int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, \ - const char *name, \ - void *data, void *ent); \ +int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, const char *name, \ + void *data, void *ent); \ extern const char PRINT_TYPE_FMT_NAME(type)[] DECLARE_BASIC_PRINT_TYPE_FUNC(u8); @@ -303,7 +302,7 @@ static inline bool trace_probe_is_registered(struct trace_probe *tp) return !!(tp->flags & TP_FLAG_REGISTERED); } -static inline __kprobes void call_fetch(struct fetch_param *fprm, +static nokprobe_inline void call_fetch(struct fetch_param *fprm, struct pt_regs *regs, void *dest) { return fprm->fn(regs, fprm->data, dest); @@ -351,7 +350,7 @@ extern ssize_t traceprobe_probes_write(struct file *file, extern int traceprobe_command(const char *buf, int (*createfn)(int, char**)); /* Sum up total data length for dynamic arraies (strings) */ -static inline __kprobes int +static nokprobe_inline int __get_data_size(struct trace_probe *tp, struct pt_regs *regs) { int i, ret = 0; @@ -367,7 +366,7 @@ __get_data_size(struct trace_probe *tp, struct pt_regs *regs) } /* Store the value of each argument */ -static inline __kprobes void +static nokprobe_inline void store_trace_args(int ent_size, struct trace_probe *tp, struct pt_regs *regs, u8 *data, int maxlen) { From b40a2cb6e0c218c6d0f12c7f7e683e75972973c1 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 17 Apr 2014 17:18:35 +0900 Subject: [PATCH 016/101] kprobes, notifier: Use NOKPROBE_SYMBOL macro in notifier Use NOKPROBE_SYMBOL macro to protect functions from kprobes instead of __kprobes annotation in notifier. Signed-off-by: Masami Hiramatsu Reviewed-by: Steven Rostedt Reviewed-by: Josh Triplett Cc: Paul E. McKenney Link: http://lkml.kernel.org/r/20140417081835.26341.56128.stgit@ltc230.yrl.intra.hitachi.co.jp Signed-off-by: Ingo Molnar --- kernel/notifier.c | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/kernel/notifier.c b/kernel/notifier.c index db4c8b08a50cef..4803da6eab62f1 100644 --- a/kernel/notifier.c +++ b/kernel/notifier.c @@ -71,9 +71,9 @@ static int notifier_chain_unregister(struct notifier_block **nl, * @returns: notifier_call_chain returns the value returned by the * last notifier function called. 
*/ -static int __kprobes notifier_call_chain(struct notifier_block **nl, - unsigned long val, void *v, - int nr_to_call, int *nr_calls) +static int notifier_call_chain(struct notifier_block **nl, + unsigned long val, void *v, + int nr_to_call, int *nr_calls) { int ret = NOTIFY_DONE; struct notifier_block *nb, *next_nb; @@ -102,6 +102,7 @@ static int __kprobes notifier_call_chain(struct notifier_block **nl, } return ret; } +NOKPROBE_SYMBOL(notifier_call_chain); /* * Atomic notifier chain routines. Registration and unregistration @@ -172,9 +173,9 @@ EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister); * Otherwise the return value is the return value * of the last notifier function called. */ -int __kprobes __atomic_notifier_call_chain(struct atomic_notifier_head *nh, - unsigned long val, void *v, - int nr_to_call, int *nr_calls) +int __atomic_notifier_call_chain(struct atomic_notifier_head *nh, + unsigned long val, void *v, + int nr_to_call, int *nr_calls) { int ret; @@ -184,13 +185,15 @@ int __kprobes __atomic_notifier_call_chain(struct atomic_notifier_head *nh, return ret; } EXPORT_SYMBOL_GPL(__atomic_notifier_call_chain); +NOKPROBE_SYMBOL(__atomic_notifier_call_chain); -int __kprobes atomic_notifier_call_chain(struct atomic_notifier_head *nh, - unsigned long val, void *v) +int atomic_notifier_call_chain(struct atomic_notifier_head *nh, + unsigned long val, void *v) { return __atomic_notifier_call_chain(nh, val, v, -1, NULL); } EXPORT_SYMBOL_GPL(atomic_notifier_call_chain); +NOKPROBE_SYMBOL(atomic_notifier_call_chain); /* * Blocking notifier chain routines. All access to the chain is @@ -527,7 +530,7 @@ EXPORT_SYMBOL_GPL(srcu_init_notifier_head); static ATOMIC_NOTIFIER_HEAD(die_chain); -int notrace __kprobes notify_die(enum die_val val, const char *str, +int notrace notify_die(enum die_val val, const char *str, struct pt_regs *regs, long err, int trap, int sig) { struct die_args args = { @@ -540,6 +543,7 @@ int notrace __kprobes notify_die(enum die_val val, const char *str, }; return atomic_notifier_call_chain(&die_chain, val, &args); } +NOKPROBE_SYMBOL(notify_die); int register_die_notifier(struct notifier_block *nb) { From edafe3a56dbd42c499245b222e9f7e80099356e5 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 17 Apr 2014 17:18:42 +0900 Subject: [PATCH 017/101] kprobes, sched: Use NOKPROBE_SYMBOL macro in sched Use NOKPROBE_SYMBOL macro to protect functions from kprobes instead of __kprobes annotation in sched/core.c. 
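For reference, the conversion pattern looks like this; a minimal sketch
with hypothetical my_handler()/do_something(), not code from this patch
(the functions actually converted here are preempt_count_add() and
preempt_count_sub()):

	#include <linux/kprobes.h>

	/* Old style: __kprobes moves the function into .kprobes.text. */
	void __kprobes my_handler(int val)
	{
		do_something(val);
	}

	/* New style: the function stays in normal .text and the macro
	 * records the symbol in the _kprobe_blacklist section, so its
	 * address range is blacklisted when kprobes initializes. */
	void my_handler(int val)
	{
		do_something(val);
	}
	NOKPROBE_SYMBOL(my_handler);

NOKPROBE_SYMBOL() must follow the function definition at file scope,
which is why each converted function in the diff below gains its own
marker line.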
Signed-off-by: Masami Hiramatsu
Reviewed-by: Steven Rostedt
Cc: Peter Zijlstra
Signed-off-by: Ingo Molnar
Link: http://lkml.kernel.org/r/20140417081842.26341.83959.stgit@ltc230.yrl.intra.hitachi.co.jp
---
 kernel/sched/core.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6863631e8cd0e9..00781cc3804789 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2480,7 +2480,7 @@ notrace unsigned long get_parent_ip(unsigned long addr)
 #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
 				defined(CONFIG_PREEMPT_TRACER))

-void __kprobes preempt_count_add(int val)
+void preempt_count_add(int val)
 {
 #ifdef CONFIG_DEBUG_PREEMPT
 	/*
@@ -2506,8 +2506,9 @@ void __kprobes preempt_count_add(int val)
 	}
 }
 EXPORT_SYMBOL(preempt_count_add);
+NOKPROBE_SYMBOL(preempt_count_add);

-void __kprobes preempt_count_sub(int val)
+void preempt_count_sub(int val)
 {
 #ifdef CONFIG_DEBUG_PREEMPT
 	/*
@@ -2528,6 +2529,7 @@ void __kprobes preempt_count_sub(int val)
 	__preempt_count_sub(val);
 }
 EXPORT_SYMBOL(preempt_count_sub);
+NOKPROBE_SYMBOL(preempt_count_sub);
 #endif

From 637247403abff8c963bc7be8002b3f49ea604563 Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu
Date: Thu, 17 Apr 2014 17:18:49 +0900
Subject: [PATCH 018/101] kprobes: Show blacklist entries via debugfs

Show blacklist entries (function names with their address ranges) via
/sys/kernel/debug/kprobes/blacklist.

Note that at this point the blacklist covers only symbols in vmlinux,
not modules. So the list is fixed and not updated.

Signed-off-by: Masami Hiramatsu
Cc: Ananth N Mavinakayanahalli
Cc: Anil S Keshavamurthy
Cc: David S. Miller
Link: http://lkml.kernel.org/r/20140417081849.26341.11609.stgit@ltc230.yrl.intra.hitachi.co.jp
Signed-off-by: Ingo Molnar
---
 kernel/kprobes.c | 61 +++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 53 insertions(+), 8 deletions(-)

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index a21b4e67fd971f..3214289df5a7a8 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -2249,6 +2249,46 @@ static const struct file_operations debugfs_kprobes_operations = {
 	.release        = seq_release,
 };

+/* kprobes/blacklist -- shows which functions can not be probed */
+static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
+{
+	return seq_list_start(&kprobe_blacklist, *pos);
+}
+
+static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	return seq_list_next(v, &kprobe_blacklist, pos);
+}
+
+static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
+{
+	struct kprobe_blacklist_entry *ent =
+		list_entry(v, struct kprobe_blacklist_entry, list);
+
+	seq_printf(m, "0x%p-0x%p\t%ps\n", (void *)ent->start_addr,
+		   (void *)ent->end_addr, (void *)ent->start_addr);
+	return 0;
+}
+
+static const struct seq_operations kprobe_blacklist_seq_ops = {
+	.start = kprobe_blacklist_seq_start,
+	.next  = kprobe_blacklist_seq_next,
+	.stop  = kprobe_seq_stop,	/* Reuse void function */
+	.show  = kprobe_blacklist_seq_show,
+};
+
+static int kprobe_blacklist_open(struct inode *inode, struct file *filp)
+{
+	return seq_open(filp, &kprobe_blacklist_seq_ops);
+}
+
+static const struct file_operations debugfs_kprobe_blacklist_ops = {
+	.open           = kprobe_blacklist_open,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.release        = seq_release,
+};
+
 static void arm_all_kprobes(void)
 {
 	struct hlist_head *head;
@@ -2372,19 +2412,24 @@ static int __init debugfs_kprobe_init(void)

 	file = debugfs_create_file("list", 0444, dir, NULL,
&debugfs_kprobes_operations); - if (!file) { - debugfs_remove(dir); - return -ENOMEM; - } + if (!file) + goto error; file = debugfs_create_file("enabled", 0600, dir, &value, &fops_kp); - if (!file) { - debugfs_remove(dir); - return -ENOMEM; - } + if (!file) + goto error; + + file = debugfs_create_file("blacklist", 0444, dir, NULL, + &debugfs_kprobe_blacklist_ops); + if (!file) + goto error; return 0; + +error: + debugfs_remove(dir); + return -ENOMEM; } late_initcall(debugfs_kprobe_init); From 250bbd12c2fe1221ec96d8087d63e982d4f2180a Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Thu, 24 Apr 2014 19:08:24 +0200 Subject: [PATCH 019/101] uprobes/x86: Refuse to attach uprobe to "word-sized" branch insns All branch insns on x86 can be prefixed with the operand-size override prefix, 0x66. It was only ever useful for performing jumps to 32-bit offsets in 16-bit code segments. In 32-bit code, such instructions are useless since they cause IP truncation to 16 bits, and in case of call insns, they save only 16 bits of return address and misalign the stack pointer as a "bonus". In 64-bit code, such instructions are treated differently by Intel and AMD CPUs: Intel ignores the prefix altogether, AMD treats them the same as in 32-bit mode. Before this patch, the emulation code would execute the instructions as if they have no 0x66 prefix. With this patch, we refuse to attach uprobes to such insns. Signed-off-by: Denys Vlasenko Acked-by: Jim Keniston Acked-by: Masami Hiramatsu Signed-off-by: Oleg Nesterov --- arch/x86/kernel/uprobes.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index ace22916ade3ce..3cf24a21819663 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -583,6 +583,7 @@ static struct uprobe_xol_ops branch_xol_ops = { static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) { u8 opc1 = OPCODE1(insn); + int i; /* has the side-effect of processing the entire instruction */ insn_get_length(insn); @@ -612,6 +613,16 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) return -ENOSYS; } + /* + * 16-bit overrides such as CALLW (66 e8 nn nn) are not supported. + * Intel and AMD behavior differ in 64-bit mode: Intel ignores 66 prefix. + * No one uses these insns, reject any branch insns with such prefix. + */ + for (i = 0; i < insn->prefixes.nbytes; i++) { + if (insn->prefixes.bytes[i] == 0x66) + return -ENOTSUPP; + } + auprobe->branch.opc1 = opc1; auprobe->branch.ilen = insn->length; auprobe->branch.offs = insn->immediate.value; From 73175d0d19657ec132cc24e8cf0e341e73c54868 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sat, 19 Apr 2014 12:34:02 +0200 Subject: [PATCH 020/101] uprobes/x86: Add uprobe_init_insn(), kill validate_insn_{32,64}bits() validate_insn_32bits() and validate_insn_64bits() are very similar, turn them into the single uprobe_init_insn() which has the additional "bool x86_64" argument which can be passed to insn_init() and used to choose between good_insns_64/good_insns_32. Also kill UPROBE_FIX_NONE, it has no users. Note: the current code doesn't use ifdef's consistently, good_insns_64 depends on CONFIG_X86_64 but good_insns_32 is unconditional. This patch removes ifdef around good_insns_64, we will add it back later along with the similar one for good_insns_32. 
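The resulting call shape, sketched below; the caller still derives the
bitness from ->ia32_compat at this point (a later patch in this series
replaces that with is_64bit_mm()):

	struct insn insn;
	bool x86_64 = !mm->context.ia32_compat;
	int ret;

	/* One validator for both cases; the bool picks the table. */
	ret = uprobe_init_insn(auprobe, &insn, x86_64);
	if (ret)
		return ret;	/* -ENOTSUPP: insn can not be probed */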
Signed-off-by: Oleg Nesterov Reviewed-by: Jim Keniston Acked-by: Srikar Dronamraju --- arch/x86/kernel/uprobes.c | 45 +++++++++++---------------------------- 1 file changed, 13 insertions(+), 32 deletions(-) diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 3cf24a21819663..b4aff6a70f4d40 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -32,9 +32,6 @@ /* Post-execution fixups. */ -/* No fixup needed */ -#define UPROBE_FIX_NONE 0x0 - /* Adjust IP back to vicinity of actual insn */ #define UPROBE_FIX_IP 0x1 @@ -114,7 +111,6 @@ static volatile u32 good_2byte_insns[256 / 32] = { /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ }; -#ifdef CONFIG_X86_64 /* Good-instruction tables for 64-bit apps */ static volatile u32 good_insns_64[256 / 32] = { /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ @@ -138,7 +134,6 @@ static volatile u32 good_insns_64[256 / 32] = { /* ---------------------------------------------- */ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ }; -#endif #undef W /* @@ -209,16 +204,22 @@ static bool is_prefix_bad(struct insn *insn) return false; } -static int validate_insn_32bits(struct arch_uprobe *auprobe, struct insn *insn) +static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool x86_64) { - insn_init(insn, auprobe->insn, false); + u32 volatile *good_insns; + + insn_init(insn, auprobe->insn, x86_64); - /* Skip good instruction prefixes; reject "bad" ones. */ insn_get_opcode(insn); if (is_prefix_bad(insn)) return -ENOTSUPP; - if (test_bit(OPCODE1(insn), (unsigned long *)good_insns_32)) + if (x86_64) + good_insns = good_insns_64; + else + good_insns = good_insns_32; + + if (test_bit(OPCODE1(insn), (unsigned long *)good_insns)) return 0; if (insn->opcode.nbytes == 2) { @@ -355,30 +356,10 @@ handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long * } } -static int validate_insn_64bits(struct arch_uprobe *auprobe, struct insn *insn) -{ - insn_init(insn, auprobe->insn, true); - - /* Skip good instruction prefixes; reject "bad" ones. */ - insn_get_opcode(insn); - if (is_prefix_bad(insn)) - return -ENOTSUPP; - - if (test_bit(OPCODE1(insn), (unsigned long *)good_insns_64)) - return 0; - - if (insn->opcode.nbytes == 2) { - if (test_bit(OPCODE2(insn), (unsigned long *)good_2byte_insns)) - return 0; - } - return -ENOTSUPP; -} - static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm, struct insn *insn) { - if (mm->context.ia32_compat) - return validate_insn_32bits(auprobe, insn); - return validate_insn_64bits(auprobe, insn); + bool x86_64 = !mm->context.ia32_compat; + return uprobe_init_insn(auprobe, insn, x86_64); } #else /* 32-bit: */ /* @@ -398,7 +379,7 @@ static void handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs * static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm, struct insn *insn) { - return validate_insn_32bits(auprobe, insn); + return uprobe_init_insn(auprobe, insn, false); } #endif /* CONFIG_X86_64 */ From 2ae1f49ae1978fedb6ad607e1f8b084aa9752f95 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sat, 19 Apr 2014 14:03:05 +0200 Subject: [PATCH 021/101] uprobes/x86: Add is_64bit_mm(), kill validate_insn_bits() 1. Extract the ->ia32_compat check from 64bit validate_insn_bits() into the new helper, is_64bit_mm(), it will have more users. TODO: this checks is actually wrong if mm owner is X32 task, we need another fix which changes set_personality_ia32(). 
TODO: even worse, the whole 64-or-32-bit logic is very broken and the fix is not simple, we need the nontrivial changes in the core uprobes code. 2. Kill validate_insn_bits() and change its single caller to use uprobe_init_insn(is_64bit_mm(mm). Signed-off-by: Oleg Nesterov Reviewed-by: Jim Keniston Acked-by: Srikar Dronamraju --- arch/x86/kernel/uprobes.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index b4aff6a70f4d40..b3b25ddc04fbca 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -231,6 +231,11 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool } #ifdef CONFIG_X86_64 +static inline bool is_64bit_mm(struct mm_struct *mm) +{ + return !config_enabled(CONFIG_IA32_EMULATION) || + !mm->context.ia32_compat; +} /* * If arch_uprobe->insn doesn't use rip-relative addressing, return * immediately. Otherwise, rewrite the instruction so that it accesses @@ -355,13 +360,11 @@ handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long * *correction += 4; } } - -static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm, struct insn *insn) +#else /* 32-bit: */ +static inline bool is_64bit_mm(struct mm_struct *mm) { - bool x86_64 = !mm->context.ia32_compat; - return uprobe_init_insn(auprobe, insn, x86_64); + return false; } -#else /* 32-bit: */ /* * No RIP-relative addressing on 32-bit */ @@ -376,11 +379,6 @@ static void handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs * long *correction) { } - -static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm, struct insn *insn) -{ - return uprobe_init_insn(auprobe, insn, false); -} #endif /* CONFIG_X86_64 */ struct uprobe_xol_ops { @@ -625,7 +623,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, bool fix_ip = true, fix_call = false; int ret; - ret = validate_insn_bits(auprobe, mm, &insn); + ret = uprobe_init_insn(auprobe, &insn, is_64bit_mm(mm)); if (ret) return ret; From ff261964cfcfe49d73690ca29b0ba2853d9497e3 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sat, 19 Apr 2014 14:15:27 +0200 Subject: [PATCH 022/101] uprobes/x86: Shift "insn_complete" from branch_setup_xol_ops() to uprobe_init_insn() Change uprobe_init_insn() to make insn_complete() == T, this makes other insn_get_*() calls unnecessary. Signed-off-by: Oleg Nesterov Reviewed-by: Jim Keniston Acked-by: Srikar Dronamraju --- arch/x86/kernel/uprobes.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index b3b25ddc04fbca..98d7db50f425f6 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -209,8 +209,11 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool u32 volatile *good_insns; insn_init(insn, auprobe->insn, x86_64); + /* has the side-effect of processing the entire instruction */ + insn_get_length(insn); + if (WARN_ON_ONCE(!insn_complete(insn))) + return -ENOEXEC; - insn_get_opcode(insn); if (is_prefix_bad(insn)) return -ENOTSUPP; @@ -283,8 +286,6 @@ handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn) * is the immediate operand. */ cursor = auprobe->insn + insn_offset_modrm(insn); - insn_get_length(insn); - /* * Convert from rip-relative addressing to indirect addressing * via a scratch register. 
Change the r/m field from 0x5 (%rip) @@ -564,11 +565,6 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) u8 opc1 = OPCODE1(insn); int i; - /* has the side-effect of processing the entire instruction */ - insn_get_length(insn); - if (WARN_ON_ONCE(!insn_complete(insn))) - return -ENOEXEC; - switch (opc1) { case 0xeb: /* jmp 8 */ case 0xe9: /* jmp 32 */ @@ -654,7 +650,6 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, fix_ip = false; break; case 0xff: - insn_get_modrm(&insn); switch (MODRM_REG(&insn)) { case 2: case 3: /* call or lcall, indirect */ fix_call = true; From 8dbacad93a2a12adebcc717e6055b1bcc1739ab8 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sat, 19 Apr 2014 16:07:15 +0200 Subject: [PATCH 023/101] uprobes/x86: Make good_insns_* depend on CONFIG_X86_* Add the suitable ifdef's around good_insns_* arrays. We do not want to add the ugly ifdef's into their only user, uprobe_init_insn(), so the "#else" branch simply defines them as NULL. This doesn't generate the extra code, gcc is smart enough, although the code is fine even if it could not detect that (without CONFIG_IA32_EMULATION) is_64bit_mm() is __builtin_constant_p(). The patch looks more complicated because it also moves good_insns_64 up close to good_insns_32. Signed-off-by: Oleg Nesterov Reviewed-by: Jim Keniston Acked-by: Srikar Dronamraju --- arch/x86/kernel/uprobes.c | 56 ++++++++++++++++++++++----------------- 1 file changed, 32 insertions(+), 24 deletions(-) diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 98d7db50f425f6..892975b3c99c5c 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -64,6 +64,7 @@ * to keep gcc from statically optimizing it out, as variable_test_bit makes * some versions of gcc to think only *(unsigned long*) is used. 
*/ +#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) static volatile u32 good_insns_32[256 / 32] = { /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ /* ---------------------------------------------- */ @@ -86,32 +87,12 @@ static volatile u32 good_insns_32[256 / 32] = { /* ---------------------------------------------- */ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ }; - -/* Using this for both 64-bit and 32-bit apps */ -static volatile u32 good_2byte_insns[256 / 32] = { - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ - /* ---------------------------------------------- */ - W(0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1) | /* 00 */ - W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* 10 */ - W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */ - W(0x30, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */ - W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */ - W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */ - W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */ - W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */ - W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */ - W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */ - W(0xa0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */ - W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* b0 */ - W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */ - W(0xd0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */ - W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */ - W(0xf0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0) /* f0 */ - /* ---------------------------------------------- */ - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ -}; +#else +#define good_insns_32 NULL +#endif /* Good-instruction tables for 64-bit apps */ +#if defined(CONFIG_X86_64) static volatile u32 good_insns_64[256 / 32] = { /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ /* ---------------------------------------------- */ @@ -134,6 +115,33 @@ static volatile u32 good_insns_64[256 / 32] = { /* ---------------------------------------------- */ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ }; +#else +#define good_insns_64 NULL +#endif + +/* Using this for both 64-bit and 32-bit apps */ +static volatile u32 good_2byte_insns[256 / 32] = { + /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ + /* ---------------------------------------------- */ + W(0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1) | /* 00 */ + W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* 10 */ + W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */ + W(0x30, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */ + W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */ + W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */ + W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */ + W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */ + W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */ + W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */ + W(0xa0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */ + W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* b0 */ + W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */ + W(0xd0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */ + W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */ + W(0xf0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0) /* f0 */ + /* 
---------------------------------------------- */ + /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ +}; #undef W /* From b24dc8dace74708fd849312722090169c5da97d3 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sat, 19 Apr 2014 18:10:09 +0200 Subject: [PATCH 024/101] uprobes/x86: Fix is_64bit_mm() with CONFIG_X86_X32 is_64bit_mm() assumes that mm->context.ia32_compat means the 32-bit instruction set, this is not true if the task is TIF_X32. Change set_personality_ia32() to initialize mm->context.ia32_compat by TIF_X32 or TIF_IA32 instead of 1. This allows to fix is_64bit_mm() without affecting other users, they all treat ia32_compat as "bool". TIF_ in ->ia32_compat looks a bit strange, but this is grep-friendly and avoids the new define's. Signed-off-by: Oleg Nesterov Reviewed-by: Jim Keniston Acked-by: Srikar Dronamraju --- arch/x86/kernel/process_64.c | 7 ++++--- arch/x86/kernel/uprobes.c | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 9c0280f93d05db..9b53940981b7c9 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -413,12 +413,11 @@ void set_personality_ia32(bool x32) set_thread_flag(TIF_ADDR32); /* Mark the associated mm as containing 32-bit tasks. */ - if (current->mm) - current->mm->context.ia32_compat = 1; - if (x32) { clear_thread_flag(TIF_IA32); set_thread_flag(TIF_X32); + if (current->mm) + current->mm->context.ia32_compat = TIF_X32; current->personality &= ~READ_IMPLIES_EXEC; /* is_compat_task() uses the presence of the x32 syscall bit flag to determine compat status */ @@ -426,6 +425,8 @@ void set_personality_ia32(bool x32) } else { set_thread_flag(TIF_IA32); clear_thread_flag(TIF_X32); + if (current->mm) + current->mm->context.ia32_compat = TIF_IA32; current->personality |= force_personality32; /* Prepare the first "return" to user space */ current_thread_info()->status |= TS_COMPAT; diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 892975b3c99c5c..ecbffd16d09057 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -245,7 +245,7 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool static inline bool is_64bit_mm(struct mm_struct *mm) { return !config_enabled(CONFIG_IA32_EMULATION) || - !mm->context.ia32_compat; + !(mm->context.ia32_compat == TIF_IA32); } /* * If arch_uprobe->insn doesn't use rip-relative addressing, return From dd91016dfc9ba9236cb0149984da3f0434278b49 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 22 Apr 2014 15:20:07 +0200 Subject: [PATCH 025/101] uprobes/x86: Don't change the task's state if ->pre_xol() fails Currently this doesn't matter, the only ->pre_xol() hook can't fail, but we need to fix arch_uprobe_pre_xol() anyway. If ->pre_xol() fails we should not change regs->ip/flags, we should just return the error to make restart actually possible. 
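The shape of the fix, sketched (the exact code is in the diff below):
run the hook before any state is touched, so a failing ->pre_xol()
leaves regs intact and the probed insn can simply be restarted:

	if (auprobe->ops->pre_xol) {
		int err = auprobe->ops->pre_xol(auprobe, regs);
		if (err)
			return err;	/* nothing to undo */
	}

	/* only now commit: redirect to the XOL slot */
	regs->ip = utask->xol_vaddr;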
Signed-off-by: Oleg Nesterov Reviewed-by: Jim Keniston --- arch/x86/kernel/uprobes.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index ecbffd16d09057..f4464b1b943503 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -687,6 +687,12 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { struct uprobe_task *utask = current->utask; + if (auprobe->ops->pre_xol) { + int err = auprobe->ops->pre_xol(auprobe, regs); + if (err) + return err; + } + regs->ip = utask->xol_vaddr; utask->autask.saved_trap_nr = current->thread.trap_nr; current->thread.trap_nr = UPROBE_TRAP_NR; @@ -696,8 +702,6 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) if (test_tsk_thread_flag(current, TIF_BLOCKSTEP)) set_task_blockstep(current, false); - if (auprobe->ops->pre_xol) - return auprobe->ops->pre_xol(auprobe, regs); return 0; } From 588fbd613c3d8fa73e96720761d49f1d40d34d4c Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 21 Apr 2014 16:58:17 +0200 Subject: [PATCH 026/101] uprobes/x86: Introduce uprobe_xol_ops->abort() and default_abort_op() arch_uprobe_abort_xol() calls handle_riprel_post_xol() even if auprobe->ops != default_xol_ops. This is fine correctness wise, only default_pre_xol_op() can set UPROBE_FIX_RIP_AX|UPROBE_FIX_RIP_CX and otherwise handle_riprel_post_xol() is nop. But this doesn't look clean and this doesn't allow us to move ->fixups into the union in arch_uprobe. Move this handle_riprel_post_xol() call into the new default_abort_op() hook and change arch_uprobe_abort_xol() accordingly. Signed-off-by: Oleg Nesterov Reviewed-by: Jim Keniston --- arch/x86/kernel/uprobes.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index f4464b1b943503..b3c2a92cce6c8d 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -394,6 +394,7 @@ struct uprobe_xol_ops { bool (*emulate)(struct arch_uprobe *, struct pt_regs *); int (*pre_xol)(struct arch_uprobe *, struct pt_regs *); int (*post_xol)(struct arch_uprobe *, struct pt_regs *); + void (*abort)(struct arch_uprobe *, struct pt_regs *); }; static inline int sizeof_long(void) @@ -444,9 +445,15 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs return 0; } +static void default_abort_op(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + handle_riprel_post_xol(auprobe, regs, NULL); +} + static struct uprobe_xol_ops default_xol_ops = { .pre_xol = default_pre_xol_op, .post_xol = default_post_xol_op, + .abort = default_abort_op, }; static bool branch_is_call(struct arch_uprobe *auprobe) @@ -820,10 +827,11 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { struct uprobe_task *utask = current->utask; - current->thread.trap_nr = utask->autask.saved_trap_nr; - handle_riprel_post_xol(auprobe, regs, NULL); - instruction_pointer_set(regs, utask->vaddr); + if (auprobe->ops->abort) + auprobe->ops->abort(auprobe, regs); + current->thread.trap_nr = utask->autask.saved_trap_nr; + regs->ip = utask->vaddr; /* clear TF if it was set by us in arch_uprobe_pre_xol() */ if (!utask->autask.saved_tf) regs->flags &= ~X86_EFLAGS_TF; From 6ded5f3848bfd3227ee208aa38f8bf8d7209d4e3 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 21 Apr 2014 18:28:02 +0200 Subject: [PATCH 027/101] uprobes/x86: Don't use arch_uprobe_abort_xol() in arch_uprobe_post_xol() 
014940bad8e4 "uprobes/x86: Send SIGILL if arch_uprobe_post_xol() fails" changed arch_uprobe_post_xol() to use arch_uprobe_abort_xol() if ->post_xol fails. This was correct and helped to avoid the additional complications, we need to clear X86_EFLAGS_TF in this case. However, now that we have uprobe_xol_ops->abort() hook it would be better to avoid arch_uprobe_abort_xol() here. ->post_xol() should likely do what ->abort() does anyway, we should not do the same work twice. Currently only handle_riprel_post_xol() can be called twice, this is unnecessary but safe. Still this is not clean and can lead to the problems in future. Change arch_uprobe_post_xol() to clear X86_EFLAGS_TF and restore ->ip by hand and avoid arch_uprobe_abort_xol(). This temporary uglifies the usage of autask.saved_tf, we will cleanup this later. Signed-off-by: Oleg Nesterov Reviewed-by: Jim Keniston --- arch/x86/kernel/uprobes.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index b3c2a92cce6c8d..2efb93f9603094 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -759,22 +759,24 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) struct uprobe_task *utask = current->utask; WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR); + current->thread.trap_nr = utask->autask.saved_trap_nr; if (auprobe->ops->post_xol) { int err = auprobe->ops->post_xol(auprobe, regs); if (err) { - arch_uprobe_abort_xol(auprobe, regs); + if (!utask->autask.saved_tf) + regs->flags &= ~X86_EFLAGS_TF; /* - * Restart the probed insn. ->post_xol() must ensure - * this is really possible if it returns -ERESTART. + * Restore ->ip for restart or post mortem analysis. + * ->post_xol() must not return -ERESTART unless this + * is really possible. */ + regs->ip = utask->vaddr; if (err == -ERESTART) return 0; return err; } } - - current->thread.trap_nr = utask->autask.saved_trap_nr; /* * arch_uprobe_pre_xol() doesn't save the state of TIF_BLOCKSTEP * so we can get an extra SIGTRAP if we do not clear TF. We need @@ -819,9 +821,8 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, /* * This function gets called when XOL instruction either gets trapped or - * the thread has a fatal signal, or if arch_uprobe_post_xol() failed. - * Reset the instruction pointer to its probed address for the potential - * restart or for post mortem analysis. + * the thread has a fatal signal. Reset the instruction pointer to its + * probed address for the potential restart or for post mortem analysis. */ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { From 220ef8dc9a7a63fe202aacd3fc61e5104f6dd98c Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 21 Apr 2014 20:39:56 +0200 Subject: [PATCH 028/101] uprobes/x86: Move UPROBE_FIX_SETF logic from arch_uprobe_post_xol() to default_post_xol_op() UPROBE_FIX_SETF is only needed to handle "popf" correctly but it is processed by the generic arch_uprobe_post_xol() code. This doesn't allows us to make ->fixups private for default_xol_ops. 1 Change default_post_xol_op(UPROBE_FIX_SETF) to set ->saved_tf = T. "popf" always reads the flags from stack, it doesn't matter if TF was set or not before single-step. Ignoring the naming, this is even more logical, "saved_tf" means "owned by application" and we do not own this flag after "popf". 2. 
Change arch_uprobe_post_xol() to save ->saved_tf into the local "bool send_sigtrap" before ->post_xol(). 3. Change arch_uprobe_post_xol() to ignore UPROBE_FIX_SETF and just check ->saved_tf after ->post_xol(). With this patch ->fixups and ->rip_rela_target_address are only used by default_xol_ops hooks, we are ready to remove them from the common part of arch_uprobe. Signed-off-by: Oleg Nesterov Reviewed-by: Jim Keniston --- arch/x86/kernel/uprobes.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 2efb93f9603094..b2bca293fc5719 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -441,6 +441,9 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs return -ERESTART; } } + /* popf; tell the caller to not touch TF */ + if (auprobe->fixups & UPROBE_FIX_SETF) + utask->autask.saved_tf = true; return 0; } @@ -757,15 +760,15 @@ bool arch_uprobe_xol_was_trapped(struct task_struct *t) int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { struct uprobe_task *utask = current->utask; + bool send_sigtrap = utask->autask.saved_tf; + int err = 0; WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR); current->thread.trap_nr = utask->autask.saved_trap_nr; if (auprobe->ops->post_xol) { - int err = auprobe->ops->post_xol(auprobe, regs); + err = auprobe->ops->post_xol(auprobe, regs); if (err) { - if (!utask->autask.saved_tf) - regs->flags &= ~X86_EFLAGS_TF; /* * Restore ->ip for restart or post mortem analysis. * ->post_xol() must not return -ERESTART unless this @@ -773,8 +776,8 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) */ regs->ip = utask->vaddr; if (err == -ERESTART) - return 0; - return err; + err = 0; + send_sigtrap = false; } } /* @@ -782,12 +785,13 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) * so we can get an extra SIGTRAP if we do not clear TF. We need * to examine the opcode to make it right. */ - if (utask->autask.saved_tf) + if (send_sigtrap) send_sig(SIGTRAP, current, 0); - else if (!(auprobe->fixups & UPROBE_FIX_SETF)) + + if (!utask->autask.saved_tf) regs->flags &= ~X86_EFLAGS_TF; - return 0; + return err; } /* callback routine for handling exceptions. */ From 97aa5cddbe9e01521137f337624469374e3cbde5 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 22 Apr 2014 16:20:55 +0200 Subject: [PATCH 029/101] uprobes/x86: Move default_xol_ops's data into arch_uprobe->def Finally we can move arch_uprobe->fixups/rip_rela_target_address into the new "def" struct and place this struct in the union, they are only used by default_xol_ops paths. The patch also renames rip_rela_target_address to riprel_target just to make this name shorter. 
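The union is safe because auprobe->ops acts as the discriminant:
branch_xol_ops code only ever touches ->branch, and the default_xol_ops
paths only touch ->def. An illustrative helper (not part of this patch)
would spell the invariant out:

	/* ->ops tells us which union member is live. */
	static bool uses_def_state(const struct arch_uprobe *auprobe)
	{
		return auprobe->ops == &default_xol_ops;
	}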
Signed-off-by: Oleg Nesterov Reviewed-by: Jim Keniston --- arch/x86/include/asm/uprobes.h | 12 ++++++---- arch/x86/kernel/uprobes.c | 43 +++++++++++++++++----------------- 2 files changed, 28 insertions(+), 27 deletions(-) diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h index 93bee7b93854dc..72caff7afbde7e 100644 --- a/arch/x86/include/asm/uprobes.h +++ b/arch/x86/include/asm/uprobes.h @@ -41,18 +41,20 @@ struct arch_uprobe { u8 ixol[MAX_UINSN_BYTES]; }; - u16 fixups; const struct uprobe_xol_ops *ops; union { -#ifdef CONFIG_X86_64 - unsigned long rip_rela_target_address; -#endif struct { s32 offs; u8 ilen; u8 opc1; - } branch; + } branch; + struct { +#ifdef CONFIG_X86_64 + long riprel_target; +#endif + u16 fixups; + } def; }; }; diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index b2bca293fc5719..7824ce248f8f61 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -251,10 +251,9 @@ static inline bool is_64bit_mm(struct mm_struct *mm) * If arch_uprobe->insn doesn't use rip-relative addressing, return * immediately. Otherwise, rewrite the instruction so that it accesses * its memory operand indirectly through a scratch register. Set - * arch_uprobe->fixups and arch_uprobe->rip_rela_target_address - * accordingly. (The contents of the scratch register will be saved - * before we single-step the modified instruction, and restored - * afterward.) + * def->fixups and def->riprel_target accordingly. (The contents of the + * scratch register will be saved before we single-step the modified + * instruction, and restored afterward). * * We do this because a rip-relative instruction can access only a * relatively small area (+/- 2 GB from the instruction), and the XOL @@ -308,18 +307,18 @@ handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn) * is NOT the register operand, so we use %rcx (register * #1) for the scratch register. */ - auprobe->fixups = UPROBE_FIX_RIP_CX; + auprobe->def.fixups = UPROBE_FIX_RIP_CX; /* Change modrm from 00 000 101 to 00 000 001. */ *cursor = 0x1; } else { /* Use %rax (register #0) for the scratch register. */ - auprobe->fixups = UPROBE_FIX_RIP_AX; + auprobe->def.fixups = UPROBE_FIX_RIP_AX; /* Change modrm from 00 xxx 101 to 00 xxx 000 */ *cursor = (reg << 3); } /* Target address = address of next instruction + (signed) offset */ - auprobe->rip_rela_target_address = (long)insn->length + insn->displacement.value; + auprobe->def.riprel_target = (long)insn->length + insn->displacement.value; /* Displacement field is gone; slide immediate field (if any) over. 
*/ if (insn->immediate.nbytes) { @@ -336,25 +335,25 @@ static void pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs, struct arch_uprobe_task *autask) { - if (auprobe->fixups & UPROBE_FIX_RIP_AX) { + if (auprobe->def.fixups & UPROBE_FIX_RIP_AX) { autask->saved_scratch_register = regs->ax; regs->ax = current->utask->vaddr; - regs->ax += auprobe->rip_rela_target_address; - } else if (auprobe->fixups & UPROBE_FIX_RIP_CX) { + regs->ax += auprobe->def.riprel_target; + } else if (auprobe->def.fixups & UPROBE_FIX_RIP_CX) { autask->saved_scratch_register = regs->cx; regs->cx = current->utask->vaddr; - regs->cx += auprobe->rip_rela_target_address; + regs->cx += auprobe->def.riprel_target; } } static void handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long *correction) { - if (auprobe->fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) { + if (auprobe->def.fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) { struct arch_uprobe_task *autask; autask = ¤t->utask->autask; - if (auprobe->fixups & UPROBE_FIX_RIP_AX) + if (auprobe->def.fixups & UPROBE_FIX_RIP_AX) regs->ax = autask->saved_scratch_register; else regs->cx = autask->saved_scratch_register; @@ -432,17 +431,17 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs long correction = (long)(utask->vaddr - utask->xol_vaddr); handle_riprel_post_xol(auprobe, regs, &correction); - if (auprobe->fixups & UPROBE_FIX_IP) + if (auprobe->def.fixups & UPROBE_FIX_IP) regs->ip += correction; - if (auprobe->fixups & UPROBE_FIX_CALL) { + if (auprobe->def.fixups & UPROBE_FIX_CALL) { if (adjust_ret_addr(regs->sp, correction)) { regs->sp += sizeof_long(); return -ERESTART; } } /* popf; tell the caller to not touch TF */ - if (auprobe->fixups & UPROBE_FIX_SETF) + if (auprobe->def.fixups & UPROBE_FIX_SETF) utask->autask.saved_tf = true; return 0; @@ -646,13 +645,13 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, return ret; /* - * Figure out which fixups arch_uprobe_post_xol() will need to perform, - * and annotate arch_uprobe->fixups accordingly. To start with, ->fixups - * is either zero or it reflects rip-related fixups. + * Figure out which fixups default_post_xol_op() will need to perform, + * and annotate def->fixups accordingly. To start with, ->fixups is + * either zero or it reflects rip-related fixups. */ switch (OPCODE1(&insn)) { case 0x9d: /* popf */ - auprobe->fixups |= UPROBE_FIX_SETF; + auprobe->def.fixups |= UPROBE_FIX_SETF; break; case 0xc3: /* ret or lret -- ip is correct */ case 0xcb: @@ -680,9 +679,9 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, } if (fix_ip) - auprobe->fixups |= UPROBE_FIX_IP; + auprobe->def.fixups |= UPROBE_FIX_IP; if (fix_call) - auprobe->fixups |= UPROBE_FIX_CALL; + auprobe->def.fixups |= UPROBE_FIX_CALL; auprobe->ops = &default_xol_ops; return 0; From 78d9af4cd375880a574327210eb9dab572618364 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 24 Apr 2014 18:52:37 +0200 Subject: [PATCH 030/101] uprobes/x86: Cleanup the usage of arch_uprobe->def.fixups, make it u8 handle_riprel_insn() assumes that nobody else could modify ->fixups before. This is correct but fragile, change it to use "|=". Also make ->fixups u8, we are going to add the new members into the union. It is not clear why UPROBE_FIX_RIP_.X lived in the upper byte, redefine them so that they can fit into u8. 
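With the new values all five flags fit into the low five bits of one
byte; a build-time check along these lines (not part of this patch,
assuming U8_MAX from <linux/kernel.h>) would pin the invariant down:

	/* BUILD_BUG_ON() has to live inside a function: */
	BUILD_BUG_ON((UPROBE_FIX_IP | UPROBE_FIX_CALL | UPROBE_FIX_SETF |
		      UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX) > U8_MAX);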
Signed-off-by: Oleg Nesterov --- arch/x86/include/asm/uprobes.h | 2 +- arch/x86/kernel/uprobes.c | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h index 72caff7afbde7e..9ce25ce04fee5f 100644 --- a/arch/x86/include/asm/uprobes.h +++ b/arch/x86/include/asm/uprobes.h @@ -53,7 +53,7 @@ struct arch_uprobe { #ifdef CONFIG_X86_64 long riprel_target; #endif - u16 fixups; + u8 fixups; } def; }; }; diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 7824ce248f8f61..a8e1d7e470013c 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -33,16 +33,16 @@ /* Post-execution fixups. */ /* Adjust IP back to vicinity of actual insn */ -#define UPROBE_FIX_IP 0x1 +#define UPROBE_FIX_IP 0x01 /* Adjust the return address of a call insn */ -#define UPROBE_FIX_CALL 0x2 +#define UPROBE_FIX_CALL 0x02 /* Instruction will modify TF, don't change it */ -#define UPROBE_FIX_SETF 0x4 +#define UPROBE_FIX_SETF 0x04 -#define UPROBE_FIX_RIP_AX 0x8000 -#define UPROBE_FIX_RIP_CX 0x4000 +#define UPROBE_FIX_RIP_AX 0x08 +#define UPROBE_FIX_RIP_CX 0x10 #define UPROBE_TRAP_NR UINT_MAX @@ -307,12 +307,12 @@ handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn) * is NOT the register operand, so we use %rcx (register * #1) for the scratch register. */ - auprobe->def.fixups = UPROBE_FIX_RIP_CX; + auprobe->def.fixups |= UPROBE_FIX_RIP_CX; /* Change modrm from 00 000 101 to 00 000 001. */ *cursor = 0x1; } else { /* Use %rax (register #0) for the scratch register. */ - auprobe->def.fixups = UPROBE_FIX_RIP_AX; + auprobe->def.fixups |= UPROBE_FIX_RIP_AX; /* Change modrm from 00 xxx 101 to 00 xxx 000 */ *cursor = (reg << 3); } From 2b82cadffc4154a25c25d88a63c7fb3397cda9d6 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 24 Apr 2014 19:21:38 +0200 Subject: [PATCH 031/101] uprobes/x86: Introduce push_ret_address() Extract the "push return address" code from branch_emulate_op() into the new simple helper, push_ret_address(). It will have more users. Signed-off-by: Oleg Nesterov --- arch/x86/kernel/uprobes.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index a8e1d7e470013c..df75913acfc097 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -407,6 +407,17 @@ static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs) return 0; } +static int push_ret_address(struct pt_regs *regs, unsigned long ip) +{ + unsigned long new_sp = regs->sp - sizeof_long(); + + if (copy_to_user((void __user *)new_sp, &ip, sizeof_long())) + return -EFAULT; + + regs->sp = new_sp; + return 0; +} + /* * Adjust the return address pushed by a call insn executed out of line. */ @@ -517,7 +528,6 @@ static bool branch_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs) unsigned long offs = (long)auprobe->branch.offs; if (branch_is_call(auprobe)) { - unsigned long new_sp = regs->sp - sizeof_long(); /* * If it fails we execute this (mangled, see the comment in * branch_clear_offset) insn out-of-line. In the likely case @@ -527,9 +537,8 @@ static bool branch_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs) * * But there is corner case, see the comment in ->post_xol(). 
*/ - if (copy_to_user((void __user *)new_sp, &new_ip, sizeof_long())) + if (push_ret_address(regs, new_ip)) return false; - regs->sp = new_sp; } else if (!check_jmp_cond(auprobe, regs)) { offs = 0; } From 1dc76e6eacef271230d9ff6fd0f91824bda03f44 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 25 Apr 2014 18:06:19 +0200 Subject: [PATCH 032/101] uprobes/x86: Kill adjust_ret_addr(), simplify UPROBE_FIX_CALL logic The only insn which could have both UPROBE_FIX_IP and UPROBE_FIX_CALL was 0xe8 "call relative", and now it is handled by branch_xol_ops. So we can change default_post_xol_op(UPROBE_FIX_CALL) to simply push the address of next insn == utask->vaddr + insn.length, just we need to record insn.length into the new auprobe->def.ilen member. Note: if/when we teach branch_xol_ops to support jcxz/loopz we can remove the "correction" logic, UPROBE_FIX_IP can use the same address. Signed-off-by: Oleg Nesterov --- arch/x86/include/asm/uprobes.h | 1 + arch/x86/kernel/uprobes.c | 24 +++--------------------- 2 files changed, 4 insertions(+), 21 deletions(-) diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h index 9ce25ce04fee5f..a040d493a4f9ef 100644 --- a/arch/x86/include/asm/uprobes.h +++ b/arch/x86/include/asm/uprobes.h @@ -54,6 +54,7 @@ struct arch_uprobe { long riprel_target; #endif u8 fixups; + u8 ilen; } def; }; }; diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index df75913acfc097..5bcce852628a7f 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -418,24 +418,6 @@ static int push_ret_address(struct pt_regs *regs, unsigned long ip) return 0; } -/* - * Adjust the return address pushed by a call insn executed out of line. - */ -static int adjust_ret_addr(unsigned long sp, long correction) -{ - int rasize = sizeof_long(); - long ra; - - if (copy_from_user(&ra, (void __user *)sp, rasize)) - return -EFAULT; - - ra += correction; - if (copy_to_user((void __user *)sp, &ra, rasize)) - return -EFAULT; - - return 0; -} - static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs) { struct uprobe_task *utask = current->utask; @@ -446,10 +428,9 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs regs->ip += correction; if (auprobe->def.fixups & UPROBE_FIX_CALL) { - if (adjust_ret_addr(regs->sp, correction)) { - regs->sp += sizeof_long(); + regs->sp += sizeof_long(); + if (push_ret_address(regs, utask->vaddr + auprobe->def.ilen)) return -ERESTART; - } } /* popf; tell the caller to not touch TF */ if (auprobe->def.fixups & UPROBE_FIX_SETF) @@ -687,6 +668,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, handle_riprel_insn(auprobe, &insn); } + auprobe->def.ilen = insn.length; if (fix_ip) auprobe->def.fixups |= UPROBE_FIX_IP; if (fix_call) From 83cd591485e558ab70aed45ce7261ce3f5ee8746 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 25 Apr 2014 18:53:32 +0200 Subject: [PATCH 033/101] uprobes/x86: Cleanup the usage of UPROBE_FIX_IP/UPROBE_FIX_CALL Now that UPROBE_FIX_IP/UPROBE_FIX_CALL are mutually exclusive we can use a single "fix_ip_or_call" enum instead of 2 fix_* booleans. This way the logic looks more understandable and clean to me. While at it, join "case 0xea" with other "ip is correct" ret/lret cases. Also change default_post_xol_op() to use "else if" for the same reason. 
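Because at most one of the two bits can now be set, the post-XOL path
can use if/else-if without ever missing a fixup; reduced to its shape
(see the diff below for the real code):

	if (auprobe->def.fixups & UPROBE_FIX_IP) {
		regs->ip += correction;
	} else if (auprobe->def.fixups & UPROBE_FIX_CALL) {
		regs->sp += sizeof_long();
		if (push_ret_address(regs, utask->vaddr + auprobe->def.ilen))
			return -ERESTART;
	}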
Signed-off-by: Oleg Nesterov --- arch/x86/kernel/uprobes.c | 27 +++++++++++---------------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 5bcce852628a7f..d2792e884d543a 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -424,10 +424,9 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs long correction = (long)(utask->vaddr - utask->xol_vaddr); handle_riprel_post_xol(auprobe, regs, &correction); - if (auprobe->def.fixups & UPROBE_FIX_IP) + if (auprobe->def.fixups & UPROBE_FIX_IP) { regs->ip += correction; - - if (auprobe->def.fixups & UPROBE_FIX_CALL) { + } else if (auprobe->def.fixups & UPROBE_FIX_CALL) { regs->sp += sizeof_long(); if (push_ret_address(regs, utask->vaddr + auprobe->def.ilen)) return -ERESTART; @@ -623,7 +622,7 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long addr) { struct insn insn; - bool fix_ip = true, fix_call = false; + u8 fix_ip_or_call = UPROBE_FIX_IP; int ret; ret = uprobe_init_insn(auprobe, &insn, is_64bit_mm(mm)); @@ -647,21 +646,20 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, case 0xcb: case 0xc2: case 0xca: - fix_ip = false; + case 0xea: /* jmp absolute -- ip is correct */ + fix_ip_or_call = 0; break; case 0x9a: /* call absolute - Fix return addr, not ip */ - fix_call = true; - fix_ip = false; - break; - case 0xea: /* jmp absolute -- ip is correct */ - fix_ip = false; + fix_ip_or_call = UPROBE_FIX_CALL; break; case 0xff: switch (MODRM_REG(&insn)) { case 2: case 3: /* call or lcall, indirect */ - fix_call = true; + fix_ip_or_call = UPROBE_FIX_CALL; + break; case 4: case 5: /* jmp or ljmp, indirect */ - fix_ip = false; + fix_ip_or_call = 0; + break; } /* fall through */ default: @@ -669,10 +667,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, } auprobe->def.ilen = insn.length; - if (fix_ip) - auprobe->def.fixups |= UPROBE_FIX_IP; - if (fix_call) - auprobe->def.fixups |= UPROBE_FIX_CALL; + auprobe->def.fixups |= fix_ip_or_call; auprobe->ops = &default_xol_ops; return 0; From 1475ee7fadafc6d0c194f2f4cbdae10ed04b9580 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 27 Apr 2014 16:31:59 +0200 Subject: [PATCH 034/101] uprobes/x86: Rename *riprel* helpers to make the naming consistent handle_riprel_insn(), pre_xol_rip_insn() and handle_riprel_post_xol() look confusing and inconsistent. Rename them into riprel_analyze(), riprel_pre_xol(), and riprel_post_xol() respectively. No changes in compiled code. Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju --- arch/x86/kernel/uprobes.c | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index d2792e884d543a..187be0e15e1d76 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -268,8 +268,7 @@ static inline bool is_64bit_mm(struct mm_struct *mm) * - There's never a SIB byte. * - The displacement is always 4 bytes. 
*/ -static void -handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn) +static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn) { u8 *cursor; u8 reg; @@ -331,8 +330,7 @@ handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn) * If we're emulating a rip-relative instruction, save the contents * of the scratch register and store the target address in that register. */ -static void -pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs, +static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, struct arch_uprobe_task *autask) { if (auprobe->def.fixups & UPROBE_FIX_RIP_AX) { @@ -346,8 +344,8 @@ pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs, } } -static void -handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long *correction) +static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, + long *correction) { if (auprobe->def.fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) { struct arch_uprobe_task *autask; @@ -376,14 +374,14 @@ static inline bool is_64bit_mm(struct mm_struct *mm) /* * No RIP-relative addressing on 32-bit */ -static void handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn) +static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn) { } -static void pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs, +static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, struct arch_uprobe_task *autask) { } -static void handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, +static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long *correction) { } @@ -403,7 +401,7 @@ static inline int sizeof_long(void) static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs) { - pre_xol_rip_insn(auprobe, regs, &current->utask->autask); + riprel_pre_xol(auprobe, regs, &current->utask->autask); return 0; } @@ -423,7 +421,7 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs struct uprobe_task *utask = current->utask; long correction = (long)(utask->vaddr - utask->xol_vaddr); - handle_riprel_post_xol(auprobe, regs, &correction); + riprel_post_xol(auprobe, regs, &correction); if (auprobe->def.fixups & UPROBE_FIX_IP) { regs->ip += correction; } else if (auprobe->def.fixups & UPROBE_FIX_CALL) { @@ -440,7 +438,7 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs static void default_abort_op(struct arch_uprobe *auprobe, struct pt_regs *regs) { - handle_riprel_post_xol(auprobe, regs, NULL); + riprel_post_xol(auprobe, regs, NULL); } static struct uprobe_xol_ops default_xol_ops = { @@ -663,7 +661,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, } /* fall through */ default: - handle_riprel_insn(auprobe, &insn); + riprel_analyze(auprobe, &insn); } auprobe->def.ilen = insn.length; From 7f55e82bacaaa2c41b8e14d6bc78129b096b67b8 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 27 Apr 2014 17:00:46 +0200 Subject: [PATCH 035/101] uprobes/x86: Kill the "autask" arg of riprel_pre_xol() default_pre_xol_op() passes &current->utask->autask to riprel_pre_xol() and this is just ugly because it still needs to load current->utask to read ->vaddr. Remove this argument, change riprel_pre_xol() to use current->utask.
Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju --- arch/x86/kernel/uprobes.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 187be0e15e1d76..5df1bca7c2bc0f 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -330,16 +330,17 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn) * If we're emulating a rip-relative instruction, save the contents * of the scratch register and store the target address in that register. */ -static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, - struct arch_uprobe_task *autask) +static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { + struct uprobe_task *utask = current->utask; + if (auprobe->def.fixups & UPROBE_FIX_RIP_AX) { - autask->saved_scratch_register = regs->ax; - regs->ax = current->utask->vaddr; + utask->autask.saved_scratch_register = regs->ax; + regs->ax = utask->vaddr; regs->ax += auprobe->def.riprel_target; } else if (auprobe->def.fixups & UPROBE_FIX_RIP_CX) { - autask->saved_scratch_register = regs->cx; - regs->cx = current->utask->vaddr; + utask->autask.saved_scratch_register = regs->cx; + regs->cx = utask->vaddr; regs->cx += auprobe->def.riprel_target; } } @@ -377,8 +378,7 @@ static inline bool is_64bit_mm(struct mm_struct *mm) static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn) { } -static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, - struct arch_uprobe_task *autask) +static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { } static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long *correction) { } @@ -401,7 +401,7 @@ static inline int sizeof_long(void) static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs) { - riprel_pre_xol(auprobe, regs, &current->utask->autask); + riprel_pre_xol(auprobe, regs); return 0; } From c90a6950120a7e45f31a22653fe6543507ae64d0 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 27 Apr 2014 18:13:31 +0200 Subject: [PATCH 036/101] uprobes/x86: Simplify riprel_{pre,post}_xol() and make them similar Ignoring the "correction" logic, riprel_pre_xol() and riprel_post_xol() are very similar but look quite different. 1. Add the "UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX" check at the start of riprel_pre_xol(), like the same check in riprel_post_xol(). 2. Add the trivial scratch_reg() helper which returns the address of the scratch register pre_xol/post_xol need to change. 3. Change these functions to use the new helper and avoid copy-and-paste under if/else branches. Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju --- arch/x86/kernel/uprobes.c | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 5df1bca7c2bc0f..2ebadb252093c1 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -326,22 +326,24 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn) } } +static inline unsigned long * +scratch_reg(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + return (auprobe->def.fixups & UPROBE_FIX_RIP_AX) ? &regs->ax : &regs->cx; +} + /* * If we're emulating a rip-relative instruction, save the contents * of the scratch register and store the target address in that register.
*/ static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { - struct uprobe_task *utask = current->utask; + if (auprobe->def.fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) { + struct uprobe_task *utask = current->utask; + unsigned long *sr = scratch_reg(auprobe, regs); - if (auprobe->def.fixups & UPROBE_FIX_RIP_AX) { - utask->autask.saved_scratch_register = regs->ax; - regs->ax = utask->vaddr; - regs->ax += auprobe->def.riprel_target; - } else if (auprobe->def.fixups & UPROBE_FIX_RIP_CX) { - utask->autask.saved_scratch_register = regs->cx; - regs->cx = utask->vaddr; - regs->cx += auprobe->def.riprel_target; + utask->autask.saved_scratch_register = *sr; + *sr = utask->vaddr + auprobe->def.riprel_target; } } @@ -349,14 +351,10 @@ static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long *correction) { if (auprobe->def.fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) { - struct arch_uprobe_task *autask; - - autask = ¤t->utask->autask; - if (auprobe->def.fixups & UPROBE_FIX_RIP_AX) - regs->ax = autask->saved_scratch_register; - else - regs->cx = autask->saved_scratch_register; + struct uprobe_task *utask = current->utask; + unsigned long *sr = scratch_reg(auprobe, regs); + *sr = utask->autask.saved_scratch_register; /* * The original instruction includes a displacement, and so * is 4 bytes longer than what we've just single-stepped. From ce5f36a58fd1d92cb945cf2568751d40e8598508 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 24 Apr 2014 13:26:01 +0200 Subject: [PATCH 037/101] uprobes/tracing: Make uprobe_perf_close() visible to uprobe_perf_open() Preparation. Move uprobe_perf_close() up before uprobe_perf_open() to avoid the forward declaration in the next patch and make it readable. Signed-off-by: Oleg Nesterov Acked-by: Steven Rostedt Acked-by: Srikar Dronamraju --- kernel/trace/trace_uprobe.c | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index c082a74413455d..51bd071eaca04d 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -1009,54 +1009,54 @@ uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event) return __uprobe_perf_filter(&tu->filter, event->hw.tp_target->mm); } -static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event) +static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event) { bool done; write_lock(&tu->filter.rwlock); if (event->hw.tp_target) { - /* - * event->parent != NULL means copy_process(), we can avoid - * uprobe_apply(). current->mm must be probed and we can rely - * on dup_mmap() which preserves the already installed bp's. - * - * attr.enable_on_exec means that exec/mmap will install the - * breakpoints we need. 
- */ + list_del(&event->hw.tp_list); done = tu->filter.nr_systemwide || - event->parent || event->attr.enable_on_exec || + (event->hw.tp_target->flags & PF_EXITING) || uprobe_filter_event(tu, event); - list_add(&event->hw.tp_list, &tu->filter.perf_events); } else { + tu->filter.nr_systemwide--; done = tu->filter.nr_systemwide; - tu->filter.nr_systemwide++; } write_unlock(&tu->filter.rwlock); if (!done) - uprobe_apply(tu->inode, tu->offset, &tu->consumer, true); + uprobe_apply(tu->inode, tu->offset, &tu->consumer, false); return 0; } -static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event) +static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event) { bool done; write_lock(&tu->filter.rwlock); if (event->hw.tp_target) { - list_del(&event->hw.tp_list); + /* + * event->parent != NULL means copy_process(), we can avoid + * uprobe_apply(). current->mm must be probed and we can rely + * on dup_mmap() which preserves the already installed bp's. + * + * attr.enable_on_exec means that exec/mmap will install the + * breakpoints we need. + */ done = tu->filter.nr_systemwide || - (event->hw.tp_target->flags & PF_EXITING) || + event->parent || event->attr.enable_on_exec || uprobe_filter_event(tu, event); + list_add(&event->hw.tp_list, &tu->filter.perf_events); } else { - tu->filter.nr_systemwide--; done = tu->filter.nr_systemwide; + tu->filter.nr_systemwide++; } write_unlock(&tu->filter.rwlock); if (!done) - uprobe_apply(tu->inode, tu->offset, &tu->consumer, false); + uprobe_apply(tu->inode, tu->offset, &tu->consumer, true); return 0; } From 927d687480ab7e43d73a003bab58803fc67717d9 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 24 Apr 2014 13:33:31 +0200 Subject: [PATCH 038/101] uprobes/tracing: Fix uprobe_perf_open() on uprobe_apply() failure uprobe_perf_open()->uprobe_apply() can fail, but this error is wrongly ignored. Change uprobe_perf_open() to do uprobe_perf_close() and return the error code in this case. Change uprobe_perf_close() to propagate the error from uprobe_apply() as well, although it should not fail.
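The open-with-rollback pattern this patch introduces can be shown in isolation. Below is a minimal, compilable user-space sketch, assuming stub apply()/my_close() functions; it is not the trace_uprobe code itself, only the error-propagation shape:

#include <stdio.h>

static int apply_calls;

/* stand-in for uprobe_apply(); fails on the second call to demonstrate rollback */
static int apply(int add)
{
    (void)add;
    return ++apply_calls > 1 ? -1 : 0;
}

static void my_close(void)
{
    printf("rolled back\n");
}

/* mirrors the fixed uprobe_perf_open(): on apply failure,
 * undo the registration and return the error to the caller */
static int my_open(void)
{
    int err = apply(1);

    if (err)
        my_close();
    return err;
}

int main(void)
{
    printf("first open: %d\n", my_open());   /* 0 */
    printf("second open: %d\n", my_open());  /* -1, after rollback */
    return 0;
}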
Signed-off-by: Oleg Nesterov Acked-by: Steven Rostedt Acked-by: Srikar Dronamraju --- kernel/trace/trace_uprobe.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 51bd071eaca04d..5a7f1a6b3b8b66 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -1026,7 +1026,7 @@ static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event) write_unlock(&tu->filter.rwlock); if (!done) - uprobe_apply(tu->inode, tu->offset, &tu->consumer, false); + return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false); return 0; } @@ -1034,6 +1034,7 @@ static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event) static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event) { bool done; + int err; write_lock(&tu->filter.rwlock); if (event->hw.tp_target) { @@ -1055,10 +1056,13 @@ static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event) } write_unlock(&tu->filter.rwlock); - if (!done) - uprobe_apply(tu->inode, tu->offset, &tu->consumer, true); - - return 0; + err = 0; + if (!done) { + err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true); + if (err) + uprobe_perf_close(tu, event); + } + return err; } static bool uprobe_perf_filter(struct uprobe_consumer *uc, From 13f59c5e45be59665c11ddde19799b6295543b7d Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 28 Apr 2014 20:15:43 +0200 Subject: [PATCH 039/101] uprobes: Refuse to insert a probe into MAP_SHARED vma valid_vma() rejects the VM_SHARED vmas, but this still allows inserting a probe into a MAP_SHARED but not VM_MAYWRITE vma. Currently this is fine, such a mapping doesn't really differ from the private read-only mmap except mprotect(PROT_WRITE) won't work. However, get_user_pages(FOLL_WRITE | FOLL_FORCE) doesn't allow COW in this case, and it would be safer to follow the same conventions as mm even if currently this happens to work. After the recent cda540ace6a1 "mm: get_user_pages(write,force) refuse to COW in shared areas", only uprobes can insert an anon page into the shared file-backed area; let's stop this and change valid_vma() to check VM_MAYSHARE instead. Signed-off-by: Oleg Nesterov --- kernel/events/uprobes.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index d1edc5e6fd03e2..7716c40f2c5040 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -127,7 +127,7 @@ struct xol_area { */ static bool valid_vma(struct vm_area_struct *vma, bool is_register) { - vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_SHARED; + vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE; if (is_register) flags |= VM_WRITE; From 69902c718c0b476e94ed7fccd3cf29ca39fe433a Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Thu, 1 May 2014 10:56:44 +0530 Subject: [PATCH 040/101] kprobes: Ensure blacklist data is aligned ARC Linux (not supporting native unaligned access) was failing to boot because __start_kprobe_blacklist was not aligned. This was because, per the generated vmlinux.lds, it was emitted right next to .rodata with strings etc., hence could be randomly unaligned. Fix that by ensuring word alignment. While 4 would suffice for 32bit arches and the problem at hand, it is probably better to put 8.
| Path: (null) CPU: 0 PID: 1 Comm: swapper Not tainted | 3.15.0-rc3-next-20140430 #2 | task: 8f044000 ti: 8f01e000 task.ti: 8f01e000 | | [ECR ]: 0x00230400 => Misaligned r/w from 0x800fb0d3 | [EFA ]: 0x800fb0d3 | [BLINK ]: do_one_initcall+0x86/0x1bc | [ERET ]: init_kprobes+0x52/0x120 Signed-off-by: Vineet Gupta Cc: Cc: Cc: Cc: Cc: Cc: Cc: Cc: Cc: Cc: Cc: Cc: Cc: Cc: Anton Kolesov Link: http://lkml.kernel.org/r/5361DB14.7010406@synopsys.com Signed-off-by: Ingo Molnar --- include/asm-generic/vmlinux.lds.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 40ceb3ceba79e2..8e0204a68c742e 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -110,7 +110,8 @@ #endif #ifdef CONFIG_KPROBES -#define KPROBE_BLACKLIST() VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \ +#define KPROBE_BLACKLIST() . = ALIGN(8); \ + VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \ *(_kprobe_blacklist) \ VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .; #else From 29dedee0e693aa113164c820395ce51446a71ace Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 5 May 2014 16:38:18 +0200 Subject: [PATCH 041/101] uprobes: Add mem_cgroup_charge_anon() into uprobe_write_opcode() Hugh says: The one I noticed was that it forgets all about memcg (because it was copied from KSM, and there the replacement page has already been charged to a memcg). See how mm/memory.c do_anonymous_page() does a mem_cgroup_charge_anon(). Hopefully not a big problem, uprobes is a system-wide thing and only root can insert the probes. But I agree, should be fixed anyway. Add mem_cgroup_{un,}charge_anon() into uprobe_write_opcode(). To simplify the error handling (and avoid the new "uncharge" label) the patch also moves anon_vma_prepare() up before we alloc/charge the new page. While at it, fix the comment about ->mmap_sem, it is held for write. Suggested-by: Hugh Dickins Signed-off-by: Oleg Nesterov --- kernel/events/uprobes.c | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 7716c40f2c5040..a13251e8bfa43e 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -279,18 +279,13 @@ static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t * supported by that architecture then we need to modify is_trap_at_addr and * uprobe_write_opcode accordingly. This would never be a problem for archs * that have fixed length instructions. - */ - -/* + * * uprobe_write_opcode - write the opcode at a given virtual address. * @mm: the probed process address space. * @vaddr: the virtual address to store the opcode. * @opcode: opcode to be written at @vaddr. * - * Called with mm->mmap_sem held (for read and with a reference to - * mm). - * - * For mm @mm, write the opcode at @vaddr. + * Called with mm->mmap_sem held for write. * Return 0 (success) or a negative errno.
*/ int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, @@ -310,21 +305,25 @@ int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, if (ret <= 0) goto put_old; + ret = anon_vma_prepare(vma); + if (ret) + goto put_old; + ret = -ENOMEM; new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr); if (!new_page) goto put_old; - __SetPageUptodate(new_page); + if (mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL)) + goto put_new; + __SetPageUptodate(new_page); copy_highpage(new_page, old_page); copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE); - ret = anon_vma_prepare(vma); - if (ret) - goto put_new; - ret = __replace_page(vma, vaddr, old_page, new_page); + if (ret) + mem_cgroup_uncharge_page(new_page); put_new: page_cache_release(new_page); From 50204c6f6dd01b5bce1b53e0b003d01849455512 Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Thu, 1 May 2014 16:52:46 +0200 Subject: [PATCH 042/101] uprobes/x86: Simplify rip-relative handling It is possible to replace rip-relative addressing mode with addressing mode of the same length: (reg+disp32). This eliminates the need to fix up immediate and correct for changing instruction length. And we can kill arch_uprobe->def.riprel_target. Signed-off-by: Denys Vlasenko Reviewed-by: Jim Keniston Signed-off-by: Oleg Nesterov --- arch/x86/include/asm/uprobes.h | 3 -- arch/x86/kernel/uprobes.c | 71 ++++++++++++++-------------------- 2 files changed, 30 insertions(+), 44 deletions(-) diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h index a040d493a4f9ef..7be3c079e389d1 100644 --- a/arch/x86/include/asm/uprobes.h +++ b/arch/x86/include/asm/uprobes.h @@ -50,9 +50,6 @@ struct arch_uprobe { u8 opc1; } branch; struct { -#ifdef CONFIG_X86_64 - long riprel_target; -#endif u8 fixups; u8 ilen; } def; diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 2ebadb252093c1..31dcb4d5ea4628 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -251,9 +251,9 @@ static inline bool is_64bit_mm(struct mm_struct *mm) * If arch_uprobe->insn doesn't use rip-relative addressing, return * immediately. Otherwise, rewrite the instruction so that it accesses * its memory operand indirectly through a scratch register. Set - * def->fixups and def->riprel_target accordingly. (The contents of the - * scratch register will be saved before we single-step the modified - * instruction, and restored afterward). + * def->fixups accordingly. (The contents of the scratch register + * will be saved before we single-step the modified instruction, + * and restored afterward). * * We do this because a rip-relative instruction can access only a * relatively small area (+/- 2 GB from the instruction), and the XOL @@ -264,9 +264,12 @@ static inline bool is_64bit_mm(struct mm_struct *mm) * * Some useful facts about rip-relative instructions: * - * - There's always a modrm byte. + * - There's always a modrm byte with bit layout "00 reg 101". * - There's never a SIB byte. * - The displacement is always 4 bytes. + * - REX.B=1 bit in REX prefix, which normally extends r/m field, + * has no effect on rip-relative mode. It doesn't make modrm byte + * with r/m=101 refer to register 1101 = R13. */ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn) { @@ -293,9 +296,8 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn) */ cursor = auprobe->insn + insn_offset_modrm(insn); /* - * Convert from rip-relative addressing to indirect addressing - * via a scratch register. 
Change the r/m field from 0x5 (%rip) - * to 0x0 (%rax) or 0x1 (%rcx), and squeeze out the offset field. + * Convert from rip-relative addressing + * to register-relative addressing via a scratch register. */ reg = MODRM_REG(insn); if (reg == 0) { @@ -307,22 +309,21 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn) * #1) for the scratch register. */ auprobe->def.fixups |= UPROBE_FIX_RIP_CX; - /* Change modrm from 00 000 101 to 00 000 001. */ - *cursor = 0x1; + /* + * Change modrm from "00 000 101" to "10 000 001". Example: + * 89 05 disp32 mov %eax,disp32(%rip) becomes + * 89 81 disp32 mov %eax,disp32(%rcx) + */ + *cursor = 0x81; } else { /* Use %rax (register #0) for the scratch register. */ auprobe->def.fixups |= UPROBE_FIX_RIP_AX; - /* Change modrm from 00 xxx 101 to 00 xxx 000 */ - *cursor = (reg << 3); - } - - /* Target address = address of next instruction + (signed) offset */ - auprobe->def.riprel_target = (long)insn->length + insn->displacement.value; - - /* Displacement field is gone; slide immediate field (if any) over. */ - if (insn->immediate.nbytes) { - cursor++; - memmove(cursor, cursor + insn->displacement.nbytes, insn->immediate.nbytes); + /* + * Change modrm from "00 reg 101" to "10 reg 000". Example: + * 89 1d disp32 mov %edx,disp32(%rip) becomes + * 89 98 disp32 mov %edx,disp32(%rax) + */ + *cursor = (reg << 3) | 0x80; } } @@ -343,26 +344,17 @@ static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) unsigned long *sr = scratch_reg(auprobe, regs); utask->autask.saved_scratch_register = *sr; - *sr = utask->vaddr + auprobe->def.riprel_target; + *sr = utask->vaddr + auprobe->def.ilen; } } -static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, - long *correction) +static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { if (auprobe->def.fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) { struct uprobe_task *utask = current->utask; unsigned long *sr = scratch_reg(auprobe, regs); *sr = utask->autask.saved_scratch_register; - /* - * The original instruction includes a displacement, and so - * is 4 bytes longer than what we've just single-stepped. - * Caller may need to apply other fixups to handle stuff - * like "jmpq *...(%rip)" and "callq *...(%rip)". 
- */ - if (correction) - *correction += 4; } } #else /* 32-bit: */ @@ -379,8 +371,7 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn) static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { } -static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, - long *correction) +static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { } #endif /* CONFIG_X86_64 */ @@ -417,10 +408,10 @@ static int push_ret_address(struct pt_regs *regs, unsigned long ip) static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs) { struct uprobe_task *utask = current->utask; - long correction = (long)(utask->vaddr - utask->xol_vaddr); - riprel_post_xol(auprobe, regs, &correction); + riprel_post_xol(auprobe, regs); if (auprobe->def.fixups & UPROBE_FIX_IP) { + long correction = utask->vaddr - utask->xol_vaddr; regs->ip += correction; } else if (auprobe->def.fixups & UPROBE_FIX_CALL) { regs->sp += sizeof_long(); @@ -436,7 +427,7 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs static void default_abort_op(struct arch_uprobe *auprobe, struct pt_regs *regs) { - riprel_post_xol(auprobe, regs, NULL); + riprel_post_xol(auprobe, regs); } static struct uprobe_xol_ops default_xol_ops = { @@ -732,11 +723,9 @@ bool arch_uprobe_xol_was_trapped(struct task_struct *t) * * If the original instruction was a rip-relative instruction such as * "movl %edx,0xnnnn(%rip)", we have instead executed an equivalent - * instruction using a scratch register -- e.g., "movl %edx,(%rax)". - * We need to restore the contents of the scratch register and adjust - * the ip, keeping in mind that the instruction we executed is 4 bytes - * shorter than the original instruction (since we squeezed out the offset - * field). (FIX_RIP_AX or FIX_RIP_CX) + * instruction using a scratch register -- e.g., "movl %edx,0xnnnn(%rax)". + * We need to restore the contents of the scratch register + * (FIX_RIP_AX or FIX_RIP_CX). */ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { From 1ea30fb64598bd3a6ba43d874bb53c55878eaef5 Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Fri, 2 May 2014 17:04:00 +0200 Subject: [PATCH 043/101] uprobes/x86: Fix scratch register selection for rip-relative fixups Before this patch, instructions such as div, mul, shifts with count in CL, cmpxchg are mishandled. This patch adds vex prefix handling. In particular, it avoids colliding with register operand encoded in vex.vvvv field. Since we need to avoid two possible register operands, the selection of scratch register needs to be from at least three registers. After looking through a lot of CPU docs, it looks like the safest choice is SI,DI,BX. Selecting BX needs care to not collide with implicit use of BX by cmpxchg8b. Test-case: #include static const char *const pass[] = { "FAIL", "pass" }; long two = 2; void test1(void) { long ax = 0, dx = 0; asm volatile("\n" " xor %%edx,%%edx\n" " lea 2(%%edx),%%eax\n" // We divide 2 by 2. Result (in eax) should be 1: " probe1: .globl probe1\n" " divl two(%%rip)\n" // If we have a bug (eax mangled on entry) the result will be 2, // because eax gets restored by probe machinery. 
: "=a" (ax), "=d" (dx) /*out*/ : "0" (ax), "1" (dx) /*in*/ : "memory" /*clobber*/ ); dprintf(2, "%s: %s\n", __func__, pass[ax == 1] ); } long val2 = 0; void test2(void) { long old_val = val2; long ax = 0, dx = 0; asm volatile("\n" " mov val2,%%eax\n" // eax := val2 " lea 1(%%eax),%%edx\n" // edx := eax+1 // eax is equal to val2. cmpxchg should store edx to val2: " probe2: .globl probe2\n" " cmpxchg %%edx,val2(%%rip)\n" // If we have a bug (eax mangled on entry), val2 will stay unchanged : "=a" (ax), "=d" (dx) /*out*/ : "0" (ax), "1" (dx) /*in*/ : "memory" /*clobber*/ ); dprintf(2, "%s: %s\n", __func__, pass[val2 == old_val + 1] ); } long val3[2] = {0,0}; void test3(void) { long old_val = val3[0]; long ax = 0, dx = 0; asm volatile("\n" " mov val3,%%eax\n" // edx:eax := val3 " mov val3+4,%%edx\n" " mov %%eax,%%ebx\n" // ecx:ebx := edx:eax + 1 " mov %%edx,%%ecx\n" " add $1,%%ebx\n" " adc $0,%%ecx\n" // edx:eax is equal to val3. cmpxchg8b should store ecx:ebx to val3: " probe3: .globl probe3\n" " cmpxchg8b val3(%%rip)\n" // If we have a bug (edx:eax mangled on entry), val3 will stay unchanged. // If ecx:edx in mangled, val3 will get wrong value. : "=a" (ax), "=d" (dx) /*out*/ : "0" (ax), "1" (dx) /*in*/ : "cx", "bx", "memory" /*clobber*/ ); dprintf(2, "%s: %s\n", __func__, pass[val3[0] == old_val + 1 && val3[1] == 0] ); } int main(int argc, char **argv) { test1(); test2(); test3(); return 0; } Before this change all tests fail if probe{1,2,3} are probed. Signed-off-by: Denys Vlasenko Reviewed-by: Jim Keniston Signed-off-by: Oleg Nesterov --- arch/x86/kernel/uprobes.c | 176 +++++++++++++++++++++++++++----------- 1 file changed, 125 insertions(+), 51 deletions(-) diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 31dcb4d5ea4628..159ca520ef5b75 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -41,8 +41,11 @@ /* Instruction will modify TF, don't change it */ #define UPROBE_FIX_SETF 0x04 -#define UPROBE_FIX_RIP_AX 0x08 -#define UPROBE_FIX_RIP_CX 0x10 +#define UPROBE_FIX_RIP_SI 0x08 +#define UPROBE_FIX_RIP_DI 0x10 +#define UPROBE_FIX_RIP_BX 0x20 +#define UPROBE_FIX_RIP_MASK \ + (UPROBE_FIX_RIP_SI | UPROBE_FIX_RIP_DI | UPROBE_FIX_RIP_BX) #define UPROBE_TRAP_NR UINT_MAX @@ -275,20 +278,109 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn) { u8 *cursor; u8 reg; + u8 reg2; if (!insn_rip_relative(insn)) return; /* - * insn_rip_relative() would have decoded rex_prefix, modrm. + * insn_rip_relative() would have decoded rex_prefix, vex_prefix, modrm. * Clear REX.b bit (extension of MODRM.rm field): - * we want to encode rax/rcx, not r8/r9. + * we want to encode low numbered reg, not r8+. */ if (insn->rex_prefix.nbytes) { cursor = auprobe->insn + insn_offset_rex_prefix(insn); - *cursor &= 0xfe; /* Clearing REX.B bit */ + /* REX byte has 0100wrxb layout, clearing REX.b bit */ + *cursor &= 0xfe; } + /* + * Similar treatment for VEX3 prefix. + * TODO: add XOP/EVEX treatment when insn decoder supports them + */ + if (insn->vex_prefix.nbytes == 3) { + /* + * vex2: c5 rvvvvLpp (has no b bit) + * vex3/xop: c4/8f rxbmmmmm wvvvvLpp + * evex: 62 rxbR00mm wvvvv1pp zllBVaaa + * (evex will need setting of both b and x since + * in non-sib encoding evex.x is 4th bit of MODRM.rm) + * Setting VEX3.b (setting because it has inverted meaning): + */ + cursor = auprobe->insn + insn_offset_vex_prefix(insn) + 1; + *cursor |= 0x20; + } + + /* + * Convert from rip-relative addressing to register-relative addressing + * via a scratch register. 
+ * + * This is tricky since there are insns with modrm byte + * which also use registers not encoded in modrm byte: + * [i]div/[i]mul: implicitly use dx:ax + * shift ops: implicitly use cx + * cmpxchg: implicitly uses ax + * cmpxchg8/16b: implicitly uses dx:ax and bx:cx + * Encoding: 0f c7/1 modrm + * The code below thinks that reg=1 (cx), chooses si as scratch. + * mulx: implicitly uses dx: mulx r/m,r1,r2 does r1:r2 = dx * r/m. + * First appeared in Haswell (BMI2 insn). It is vex-encoded. + * Example where none of bx,cx,dx can be used as scratch reg: + * c4 e2 63 f6 0d disp32 mulx disp32(%rip),%ebx,%ecx + * [v]pcmpistri: implicitly uses cx, xmm0 + * [v]pcmpistrm: implicitly uses xmm0 + * [v]pcmpestri: implicitly uses ax, dx, cx, xmm0 + * [v]pcmpestrm: implicitly uses ax, dx, xmm0 + * Evil SSE4.2 string comparison ops from hell. + * maskmovq/[v]maskmovdqu: implicitly uses (ds:rdi) as destination. + * Encoding: 0f f7 modrm, 66 0f f7 modrm, vex-encoded: c5 f9 f7 modrm. + * Store op1, byte-masked by op2 msb's in each byte, to (ds:rdi). + * AMD says it has no 3-operand form (vex.vvvv must be 1111) + * and that it can have only register operands, not mem + * (its modrm byte must have mode=11). + * If these restrictions will ever be lifted, + * we'll need code to prevent selection of di as scratch reg! + * + * Summary: I don't know any insns with modrm byte which + * use SI register implicitly. DI register is used only + * by one insn (maskmovq) and BX register is used + * only by one too (cmpxchg8b). + * BP is stack-segment based (may be a problem?). + * AX, DX, CX are off-limits (many implicit users). + * SP is unusable (it's stack pointer - think about "pop mem"; + * also, rsp+disp32 needs sib encoding -> insn length change). + */ + reg = MODRM_REG(insn); /* Fetch modrm.reg */ + reg2 = 0xff; /* Fetch vex.vvvv */ + if (insn->vex_prefix.nbytes == 2) + reg2 = insn->vex_prefix.bytes[1]; + else if (insn->vex_prefix.nbytes == 3) + reg2 = insn->vex_prefix.bytes[2]; + /* + * TODO: add XOP, EXEV vvvv reading. + * + * vex.vvvv field is in bits 6-3, bits are inverted. + * But in 32-bit mode, high-order bit may be ignored. + * Therefore, let's consider only 3 low-order bits. + */ + reg2 = ((reg2 >> 3) & 0x7) ^ 0x7; + /* + * Register numbering is ax,cx,dx,bx, sp,bp,si,di, r8..r15. + * + * Choose scratch reg. Order is important: must not select bx + * if we can use si (cmpxchg8b case!) + */ + if (reg != 6 && reg2 != 6) { + reg2 = 6; + auprobe->def.fixups |= UPROBE_FIX_RIP_SI; + } else if (reg != 7 && reg2 != 7) { + reg2 = 7; + auprobe->def.fixups |= UPROBE_FIX_RIP_DI; + /* TODO (paranoia): force maskmovq to not use di */ + } else { + reg2 = 3; + auprobe->def.fixups |= UPROBE_FIX_RIP_BX; + } /* * Point cursor at the modrm byte. The next 4 bytes are the * displacement. Beyond the displacement, for some instructions, @@ -296,41 +388,21 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn) */ cursor = auprobe->insn + insn_offset_modrm(insn); /* - * Convert from rip-relative addressing - * to register-relative addressing via a scratch register. + * Change modrm from "00 reg 101" to "10 reg reg2". Example: + * 89 05 disp32 mov %eax,disp32(%rip) becomes + * 89 86 disp32 mov %eax,disp32(%rsi) */ - reg = MODRM_REG(insn); - if (reg == 0) { - /* - * The register operand (if any) is either the A register - * (%rax, %eax, etc.) or (if the 0x4 bit is set in the - * REX prefix) %r8. 
In any case, we know the C register - * is NOT the register operand, so we use %rcx (register - * #1) for the scratch register. - */ - auprobe->def.fixups |= UPROBE_FIX_RIP_CX; - /* - * Change modrm from "00 000 101" to "10 000 001". Example: - * 89 05 disp32 mov %eax,disp32(%rip) becomes - * 89 81 disp32 mov %eax,disp32(%rcx) - */ - *cursor = 0x81; - } else { - /* Use %rax (register #0) for the scratch register. */ - auprobe->def.fixups |= UPROBE_FIX_RIP_AX; - /* - * Change modrm from "00 reg 101" to "10 reg 000". Example: - * 89 1d disp32 mov %edx,disp32(%rip) becomes - * 89 98 disp32 mov %edx,disp32(%rax) - */ - *cursor = (reg << 3) | 0x80; - } + *cursor = 0x80 | (reg << 3) | reg2; } static inline unsigned long * scratch_reg(struct arch_uprobe *auprobe, struct pt_regs *regs) { - return (auprobe->def.fixups & UPROBE_FIX_RIP_AX) ? &regs->ax : &regs->cx; + if (auprobe->def.fixups & UPROBE_FIX_RIP_SI) + return &regs->si; + if (auprobe->def.fixups & UPROBE_FIX_RIP_DI) + return &regs->di; + return &regs->bx; } /* @@ -339,7 +411,7 @@ scratch_reg(struct arch_uprobe *auprobe, struct pt_regs *regs) */ static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { - if (auprobe->def.fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) { + if (auprobe->def.fixups & UPROBE_FIX_RIP_MASK) { struct uprobe_task *utask = current->utask; unsigned long *sr = scratch_reg(auprobe, regs); @@ -350,7 +422,7 @@ static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { - if (auprobe->def.fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) { + if (auprobe->def.fixups & UPROBE_FIX_RIP_MASK) { struct uprobe_task *utask = current->utask; unsigned long *sr = scratch_reg(auprobe, regs); @@ -405,6 +477,23 @@ static int push_ret_address(struct pt_regs *regs, unsigned long ip) return 0; } +/* + * We have to fix things up as follows: + * + * Typically, the new ip is relative to the copied instruction. We need + * to make it relative to the original instruction (FIX_IP). Exceptions + * are return instructions and absolute or indirect jump or call instructions. + * + * If the single-stepped instruction was a call, the return address that + * is atop the stack is the address following the copied instruction. We + * need to make it the address following the original instruction (FIX_CALL). + * + * If the original instruction was a rip-relative instruction such as + * "movl %edx,0xnnnn(%rip)", we have instead executed an equivalent + * instruction using a scratch register -- e.g., "movl %edx,0xnnnn(%rsi)". + * We need to restore the contents of the scratch register + * (FIX_RIP_reg). + */ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs) { struct uprobe_task *utask = current->utask; @@ -711,21 +800,6 @@ bool arch_uprobe_xol_was_trapped(struct task_struct *t) * single-step, we single-stepped a copy of the instruction. * * This function prepares to resume execution after the single-step. - * We have to fix things up as follows: - * - * Typically, the new ip is relative to the copied instruction. We need - * to make it relative to the original instruction (FIX_IP). Exceptions - * are return instructions and absolute or indirect jump or call instructions. - * - * If the single-stepped instruction was a call, the return address that - * is atop the stack is the address following the copied instruction. We - * need to make it the address following the original instruction (FIX_CALL).
- * - * If the original instruction was a rip-relative instruction such as - * "movl %edx,0xnnnn(%rip)", we have instead executed an equivalent - * instruction using a scratch register -- e.g., "movl %edx,0xnnnn(%rax)". - * We need to restore the contents of the scratch register - * (FIX_RIP_AX or FIX_RIP_CX). */ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { From 5e1b05beeca8139204324581a5b1ffb53d057f96 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 8 May 2014 20:34:00 +0200 Subject: [PATCH 044/101] x86/traps: Make math_error() static Trivial, make math_error() static. Signed-off-by: Oleg Nesterov --- arch/x86/include/asm/traps.h | 1 - arch/x86/kernel/traps.c | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index 58d66fe06b6170..a7b212db9e04c6 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h @@ -98,7 +98,6 @@ static inline int get_si_code(unsigned long condition) extern int panic_on_unrecovered_nmi; -void math_error(struct pt_regs *, int, int); void math_emulate(struct math_emu_info *); #ifndef CONFIG_X86_32 asmlinkage void smp_thermal_interrupt(void); diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 57409f6b8c623e..8eddd628326e17 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -488,7 +488,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) * the correct behaviour even in the presence of the asynchronous * IRQ13 behaviour */ -void math_error(struct pt_regs *regs, int error_code, int trapnr) +static void math_error(struct pt_regs *regs, int error_code, int trapnr) { struct task_struct *task = current; siginfo_t info; From 38cad57be9800e46c52a3612fb9d963eee4fd9c3 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 7 May 2014 16:47:09 +0200 Subject: [PATCH 045/101] x86/traps: Use SEND_SIG_PRIV instead of force_sig() force_sig() is just force_sig_info(SEND_SIG_PRIV). Imho it should die, we have too many ugly "send signal" helpers. And do_trap() looks just ugly because it uses force_sig_info() or force_sig() depending on info != NULL. Signed-off-by: Oleg Nesterov --- arch/x86/kernel/traps.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 8eddd628326e17..2cd429117a4110 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -168,10 +168,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, } #endif - if (info) - force_sig_info(signr, info, tsk); - else - force_sig(signr, tsk); + force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk); } #define DO_ERROR(trapnr, signr, str, name) \ @@ -305,7 +302,7 @@ do_general_protection(struct pt_regs *regs, long error_code) pr_cont("\n"); } - force_sig(SIGSEGV, tsk); + force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk); exit: exception_exit(prev_state); } @@ -645,7 +642,7 @@ void math_state_restore(void) */ if (unlikely(restore_fpu_checking(tsk))) { drop_init_fpu(tsk); - force_sig(SIGSEGV, tsk); + force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk); return; } From dff0796e53c29147c9bd1f5567a261dcf0e528bc Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 7 May 2014 17:21:34 +0200 Subject: [PATCH 046/101] x86/traps: Introduce do_error_trap() Move the common code from DO_ERROR() and DO_ERROR_INFO() into the new helper, do_error_trap(). This simplifies define's and shaves 527 bytes from traps.o. 
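The shape of this refactoring can be reproduced stand-alone. Below is a compilable user-space sketch, with made-up trap and signal numbers; it is not the kernel's traps.c, only the macro-plus-shared-helper pattern:

#include <stdio.h>

/* shared helper: the body that used to be duplicated inside each macro */
static void do_error_trap(long error_code, const char *str, int trapnr, int signr)
{
    printf("trap %d (%s): send signal %d, error_code=%ld\n",
           trapnr, str, signr, error_code);
}

/* each expansion now emits only a thin wrapper */
#define DO_ERROR(trapnr, signr, str, name)                  \
static void do_##name(long error_code)                      \
{                                                           \
    do_error_trap(error_code, str, trapnr, signr);          \
}

DO_ERROR(0, 8, "divide error", divide_error)    /* numbers are illustrative */
DO_ERROR(6, 4, "invalid opcode", invalid_op)

int main(void)
{
    do_divide_error(0);
    do_invalid_op(0);
    return 0;
}

The size win comes from every macro expansion shrinking to one call instead of carrying the notify_die()/conditional_sti()/do_trap() sequence inline.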
Signed-off-by: Oleg Nesterov --- arch/x86/kernel/traps.c | 38 +++++++++++++++++--------------------- 1 file changed, 17 insertions(+), 21 deletions(-) diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 2cd429117a4110..ab8dad719b9e8d 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -171,41 +171,37 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk); } +static void do_error_trap(struct pt_regs *regs, long error_code, char *str, + unsigned long trapnr, int signr, siginfo_t *info) +{ + enum ctx_state prev_state = exception_enter(); + + if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) != + NOTIFY_STOP) { + conditional_sti(regs); + do_trap(trapnr, signr, str, regs, error_code, info); + } + + exception_exit(prev_state); +} + #define DO_ERROR(trapnr, signr, str, name) \ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ { \ - enum ctx_state prev_state; \ - \ - prev_state = exception_enter(); \ - if (notify_die(DIE_TRAP, str, regs, error_code, \ - trapnr, signr) == NOTIFY_STOP) { \ - exception_exit(prev_state); \ - return; \ - } \ - conditional_sti(regs); \ - do_trap(trapnr, signr, str, regs, error_code, NULL); \ - exception_exit(prev_state); \ + do_error_trap(regs, error_code, str, trapnr, signr, NULL); \ } #define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ { \ siginfo_t info; \ - enum ctx_state prev_state; \ \ info.si_signo = signr; \ info.si_errno = 0; \ info.si_code = sicode; \ info.si_addr = (void __user *)siaddr; \ - prev_state = exception_enter(); \ - if (notify_die(DIE_TRAP, str, regs, error_code, \ - trapnr, signr) == NOTIFY_STOP) { \ - exception_exit(prev_state); \ - return; \ - } \ - conditional_sti(regs); \ - do_trap(trapnr, signr, str, regs, error_code, &info); \ - exception_exit(prev_state); \ + \ + do_error_trap(regs, error_code, str, trapnr, signr, &info); \ } DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip ) From 958d3d729802f7d741cbe8400e69b89baae580ee Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 7 May 2014 17:59:39 +0200 Subject: [PATCH 047/101] x86/traps: Introduce fill_trap_info(), simplify DO_ERROR_INFO() Extract the fill-siginfo code from DO_ERROR_INFO() into the new helper, fill_trap_info(). It can calculate si_code and si_addr looking at trapnr, so we can remove these arguments from DO_ERROR_INFO() and simplify the source code. The generated code is the same, __builtin_constant_p(trapnr) == T. 
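The "generated code is the same" claim rests on each do_##name() passing a literal trapnr, so the switch inside the inlined helper folds away at compile time. A hedged stand-alone illustration (the si_code values here are simplified placeholders, not the kernel's):

#include <stdio.h>

struct fake_siginfo { int si_signo, si_code; void *si_addr; };

/* when trapnr is a compile-time constant at the call site, the compiler
 * can reduce this switch to a single straight-line assignment */
static inline void fill_trap_info(unsigned long ip, int signr, int trapnr,
                                  struct fake_siginfo *info)
{
    int sicode = 0;
    unsigned long siaddr = 0;

    switch (trapnr) {
    case 0:         /* divide error: fault address is the insn itself */
        sicode = 1;
        siaddr = ip;
        break;
    case 17:        /* alignment check: no meaningful address */
        sicode = 2;
        siaddr = 0;
        break;
    }
    info->si_signo = signr;
    info->si_code = sicode;
    info->si_addr = (void *)siaddr;
}

int main(void)
{
    struct fake_siginfo info;

    fill_trap_info(0x400000, 8, 0, &info);  /* trapnr is the constant 0 here */
    printf("signo=%d code=%d addr=%p\n", info.si_signo, info.si_code, info.si_addr);
    return 0;
}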
Signed-off-by: Oleg Nesterov --- arch/x86/kernel/traps.c | 53 +++++++++++++++++++++++++++++------------ 1 file changed, 38 insertions(+), 15 deletions(-) diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index ab8dad719b9e8d..1cf8a4c409b60d 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -136,6 +136,33 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str, return -1; } +static void fill_trap_info(struct pt_regs *regs, int signr, int trapnr, + siginfo_t *info) +{ + unsigned long siaddr; + int sicode; + + switch (trapnr) { + case X86_TRAP_DE: + sicode = FPE_INTDIV; + siaddr = regs->ip; + break; + case X86_TRAP_UD: + sicode = ILL_ILLOPN; + siaddr = regs->ip; + break; + case X86_TRAP_AC: + sicode = BUS_ADRALN; + siaddr = 0; + break; + } + + info->si_signo = signr; + info->si_errno = 0; + info->si_code = sicode; + info->si_addr = (void __user *)siaddr; +} + static void __kprobes do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, long error_code, siginfo_t *info) @@ -191,30 +218,26 @@ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ do_error_trap(regs, error_code, str, trapnr, signr, NULL); \ } -#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ +#define DO_ERROR_INFO(trapnr, signr, str, name) \ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ { \ siginfo_t info; \ \ - info.si_signo = signr; \ - info.si_errno = 0; \ - info.si_code = sicode; \ - info.si_addr = (void __user *)siaddr; \ - \ + fill_trap_info(regs, signr, trapnr, &info); \ do_error_trap(regs, error_code, str, trapnr, signr, &info); \ } -DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip ) -DO_ERROR (X86_TRAP_OF, SIGSEGV, "overflow", overflow ) -DO_ERROR (X86_TRAP_BR, SIGSEGV, "bounds", bounds ) -DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip ) -DO_ERROR (X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun ) -DO_ERROR (X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS ) -DO_ERROR (X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present ) +DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error) +DO_ERROR (X86_TRAP_OF, SIGSEGV, "overflow", overflow) +DO_ERROR (X86_TRAP_BR, SIGSEGV, "bounds", bounds) +DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op) +DO_ERROR (X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun) +DO_ERROR (X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS) +DO_ERROR (X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present) #ifdef CONFIG_X86_32 -DO_ERROR (X86_TRAP_SS, SIGBUS, "stack segment", stack_segment ) +DO_ERROR (X86_TRAP_SS, SIGBUS, "stack segment", stack_segment) #endif -DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0 ) +DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check) #ifdef CONFIG_X86_64 /* Runs on IST stack */ From 1c326c4dfe182a1c4c1e39f2c00f04c380d11692 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 8 May 2014 20:04:11 +0200 Subject: [PATCH 048/101] x86/traps: Shift fill_trap_info() from DO_ERROR_INFO() to do_error_trap() Move the callsite of fill_trap_info() into do_error_trap() and remove the "siginfo_t *info" argument. This obviously breaks DO_ERROR() which passed info == NULL, we simply change fill_trap_info() to return "siginfo_t *" and add the "default" case which returns SEND_SIG_PRIV. 
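The fill-or-sentinel return convention can be sketched in isolation. In this hedged user-space toy, a static object stands in for the kernel's SEND_SIG_PRIV pointer and the trap numbers are illustrative:

#include <stdio.h>

struct fake_siginfo { int si_code; };

static struct fake_siginfo priv;        /* stand-in for SEND_SIG_PRIV */
#define SEND_SIG_PRIV (&priv)

/* returns the caller's buffer when there are per-trap details to report,
 * or the generic sentinel for the "default" traps */
static struct fake_siginfo *fill_trap_info(int trapnr, struct fake_siginfo *info)
{
    switch (trapnr) {
    default:
        return SEND_SIG_PRIV;
    case 0: case 6: case 17:
        info->si_code = trapnr;         /* illustrative detail */
        return info;
    }
}

int main(void)
{
    struct fake_siginfo info;

    printf("trap 4: %s\n",
           fill_trap_info(4, &info) == SEND_SIG_PRIV ? "generic" : "filled");
    printf("trap 6: %s\n",
           fill_trap_info(6, &info) == SEND_SIG_PRIV ? "generic" : "filled");
    return 0;
}

With this, DO_ERROR() and DO_ERROR_INFO() become textually identical, which is what the next patch exploits.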
Signed-off-by: Oleg Nesterov --- arch/x86/kernel/traps.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 1cf8a4c409b60d..c9dd74124038f5 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -136,13 +136,16 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str, return -1; } -static void fill_trap_info(struct pt_regs *regs, int signr, int trapnr, - siginfo_t *info) +static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr, + siginfo_t *info) { unsigned long siaddr; int sicode; switch (trapnr) { + default: + return SEND_SIG_PRIV; + case X86_TRAP_DE: sicode = FPE_INTDIV; siaddr = regs->ip; @@ -161,6 +164,7 @@ static void fill_trap_info(struct pt_regs *regs, int signr, int trapnr, info->si_errno = 0; info->si_code = sicode; info->si_addr = (void __user *)siaddr; + return info; } static void __kprobes @@ -199,14 +203,16 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, } static void do_error_trap(struct pt_regs *regs, long error_code, char *str, - unsigned long trapnr, int signr, siginfo_t *info) + unsigned long trapnr, int signr) { enum ctx_state prev_state = exception_enter(); + siginfo_t info; if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) != NOTIFY_STOP) { conditional_sti(regs); - do_trap(trapnr, signr, str, regs, error_code, info); + do_trap(trapnr, signr, str, regs, error_code, + fill_trap_info(regs, signr, trapnr, &info)); } exception_exit(prev_state); @@ -215,16 +221,13 @@ static void do_error_trap(struct pt_regs *regs, long error_code, char *str, #define DO_ERROR(trapnr, signr, str, name) \ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ { \ - do_error_trap(regs, error_code, str, trapnr, signr, NULL); \ + do_error_trap(regs, error_code, str, trapnr, signr); \ } #define DO_ERROR_INFO(trapnr, signr, str, name) \ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ { \ - siginfo_t info; \ - \ - fill_trap_info(regs, signr, trapnr, &info); \ - do_error_trap(regs, error_code, str, trapnr, signr, &info); \ + do_error_trap(regs, error_code, str, trapnr, signr); \ } DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error) From 0eb14833d5b1ea1accfeffb71be5de5929f85da9 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 8 May 2014 20:12:24 +0200 Subject: [PATCH 049/101] x86/traps: Kill DO_ERROR_INFO() Now that DO_ERROR_INFO() doesn't differ from DO_ERROR() we can remove it and use DO_ERROR() instead. 
Signed-off-by: Oleg Nesterov --- arch/x86/kernel/traps.c | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index c9dd74124038f5..73b3ea32245a3d 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -224,23 +224,17 @@ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ do_error_trap(regs, error_code, str, trapnr, signr); \ } -#define DO_ERROR_INFO(trapnr, signr, str, name) \ -dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ -{ \ - do_error_trap(regs, error_code, str, trapnr, signr); \ -} - -DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error) -DO_ERROR (X86_TRAP_OF, SIGSEGV, "overflow", overflow) -DO_ERROR (X86_TRAP_BR, SIGSEGV, "bounds", bounds) -DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op) -DO_ERROR (X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun) -DO_ERROR (X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS) -DO_ERROR (X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present) +DO_ERROR(X86_TRAP_DE, SIGFPE, "divide error", divide_error) +DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow) +DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds) +DO_ERROR(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op) +DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",coprocessor_segment_overrun) +DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS) +DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present) #ifdef CONFIG_X86_32 -DO_ERROR (X86_TRAP_SS, SIGBUS, "stack segment", stack_segment) +DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment) #endif -DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check) +DO_ERROR(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check) #ifdef CONFIG_X86_64 /* Runs on IST stack */ From b02ef20a9fba08948e643d3eec0efadf1da01a44 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 12 May 2014 18:24:45 +0200 Subject: [PATCH 050/101] uprobes/x86: Fix the wrong ->si_addr when xol triggers a trap If the probed insn triggers a trap, ->si_addr = regs->ip is technically correct, but this is not what the signal handler wants; we need to pass the address of the probed insn, not the address of xol slot. Add the new arch-agnostic helper, uprobe_get_trap_addr(), and change fill_trap_info() and math_error() to use it. !CONFIG_UPROBES case in uprobes.h uses a macro to avoid include hell and ensure that it can be compiled even if an architecture doesn't define instruction_pointer(). Test-case: #include #include #include extern void probe_div(void); void sigh(int sig, siginfo_t *info, void *c) { int passed = (info->si_addr == probe_div); printf(passed ? "PASS\n" : "FAIL\n"); _exit(!passed); } int main(void) { struct sigaction sa = { .sa_sigaction = sigh, .sa_flags = SA_SIGINFO, }; sigaction(SIGFPE, &sa, NULL); asm ( "xor %ecx,%ecx\n" ".globl probe_div; probe_div:\n" "idiv %ecx\n" ); return 0; } it fails if probe_div() is probed. Note: show_unhandled_signals users should probably use this helper too, but we need to cleanup them first. 
Signed-off-by: Oleg Nesterov Reviewed-by: Masami Hiramatsu --- arch/x86/kernel/traps.c | 7 ++++--- include/linux/uprobes.h | 4 ++++ kernel/events/uprobes.c | 10 ++++++++++ 3 files changed, 18 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 73b3ea32245a3d..3fdb20548c4b5e 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -148,11 +149,11 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr, case X86_TRAP_DE: sicode = FPE_INTDIV; - siaddr = regs->ip; + siaddr = uprobe_get_trap_addr(regs); break; case X86_TRAP_UD: sicode = ILL_ILLOPN; - siaddr = regs->ip; + siaddr = uprobe_get_trap_addr(regs); break; case X86_TRAP_AC: sicode = BUS_ADRALN; @@ -531,7 +532,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr) task->thread.error_code = error_code; info.si_signo = SIGFPE; info.si_errno = 0; - info.si_addr = (void __user *)regs->ip; + info.si_addr = (void __user *)uprobe_get_trap_addr(regs); if (trapnr == X86_TRAP_MF) { unsigned short cwd, swd; /* diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index edff2b97b86436..88c3b7e8b384b6 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -102,6 +102,7 @@ extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, u extern bool __weak is_swbp_insn(uprobe_opcode_t *insn); extern bool __weak is_trap_insn(uprobe_opcode_t *insn); extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs); +extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs); extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t); extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool); @@ -130,6 +131,9 @@ extern bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *r #else /* !CONFIG_UPROBES */ struct uprobes_state { }; + +#define uprobe_get_trap_addr(regs) instruction_pointer(regs) + static inline int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc) { diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index a13251e8bfa43e..3b02c72938a834 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1351,6 +1351,16 @@ unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs) return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE; } +unsigned long uprobe_get_trap_addr(struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + if (unlikely(utask && utask->active_uprobe)) + return utask->vaddr; + + return instruction_pointer(regs); +} + /* * Called with no locks held. * Called in context of a exiting or a exec-ing thread. From 1844dbcbe78503e0f4a8996d69da725d5e7a5177 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 28 May 2014 14:12:18 +0900 Subject: [PATCH 051/101] perf tools: Introduce hists__inc_nr_samples() There're some duplicate code for counting number of samples. Add hists__inc_nr_samples() and reuse it. 
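In isolation, the helper collapses the repeated two-line count-and-maybe-count-again idiom at each call site into one call. A minimal sketch with simplified types (not perf's real structs; the event id enum is a placeholder):

#include <stdbool.h>
#include <stdio.h>

enum { PERF_RECORD_SAMPLE, NR_TYPES };      /* simplified event ids */

struct events_stats {
    unsigned int nr_events[NR_TYPES];
    unsigned int nr_non_filtered_samples;
};

struct hists { struct events_stats stats; };

/* the new helper: one place for the sample-counting logic */
static void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
    hists->stats.nr_events[PERF_RECORD_SAMPLE]++;
    if (!filtered)
        hists->stats.nr_non_filtered_samples++;
}

int main(void)
{
    struct hists h = { { { 0 }, 0 } };

    hists__inc_nr_samples(&h, false);
    hists__inc_nr_samples(&h, true);
    printf("samples=%u non-filtered=%u\n",
           h.stats.nr_events[PERF_RECORD_SAMPLE],
           h.stats.nr_non_filtered_samples);
    return 0;
}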
Suggested-by: Jiri Olsa Signed-off-by: Namhyung Kim Link: http://lkml.kernel.org/r/1401335910-16832-2-git-send-email-namhyung@kernel.org Signed-off-by: Jiri Olsa --- tools/perf/builtin-annotate.c | 2 +- tools/perf/builtin-report.c | 4 +--- tools/perf/builtin-sched.c | 2 +- tools/perf/builtin-top.c | 5 +---- tools/perf/tests/hists_filter.c | 4 +--- tools/perf/util/hist.c | 7 +++++++ tools/perf/util/hist.h | 1 + 7 files changed, 13 insertions(+), 12 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index d30d2c2e2a7a30..bf52461a88bd2a 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -70,7 +70,7 @@ static int perf_evsel__add_sample(struct perf_evsel *evsel, return -ENOMEM; ret = hist_entry__inc_addr_samples(he, evsel->idx, al->addr); - hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE); + hists__inc_nr_samples(&evsel->hists, true); return ret; } diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index bc0eec1ce4beab..4a3b84dd4f41b7 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -92,9 +92,7 @@ static void report__inc_stats(struct report *rep, struct hist_entry *he) * counted in perf_session_deliver_event(). The dump_trace * requires this info is ready before going to the output tree. */ - hists__inc_nr_events(he->hists, PERF_RECORD_SAMPLE); - if (!he->filtered) - he->hists->stats.nr_non_filtered_samples++; + hists__inc_nr_samples(he->hists, he->filtered); } static int report__add_mem_hist_entry(struct report *rep, struct addr_location *al, diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index d7176830b9b2fb..c38d06c0477572 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -1428,7 +1428,7 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_ int err = 0; evsel->hists.stats.total_period += sample->period; - hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE); + hists__inc_nr_samples(&evsel->hists, true); if (evsel->handler != NULL) { tracepoint_handler f = evsel->handler; diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 5b389ce4cd15d0..51309264d2104a 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -252,10 +252,7 @@ static struct hist_entry *perf_evsel__add_hist_entry(struct perf_evsel *evsel, if (he == NULL) return NULL; - hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE); - if (!he->filtered) - evsel->hists.stats.nr_non_filtered_samples++; - + hists__inc_nr_samples(&evsel->hists, he->filtered); return he; } diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c index c5ba924a3581cb..0a71ef4b91583c 100644 --- a/tools/perf/tests/hists_filter.c +++ b/tools/perf/tests/hists_filter.c @@ -85,9 +85,7 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine) fake_samples[i].map = al.map; fake_samples[i].sym = al.sym; - hists__inc_nr_events(he->hists, PERF_RECORD_SAMPLE); - if (!he->filtered) - he->hists->stats.nr_non_filtered_samples++; + hists__inc_nr_samples(he->hists, he->filtered); } } diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index b262b44b7a656d..5943ba60f19335 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -800,6 +800,13 @@ void hists__inc_nr_events(struct hists *hists, u32 type) events_stats__inc(&hists->stats, type); } +void hists__inc_nr_samples(struct hists *hists, bool filtered) +{ + events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE); + if 
(!filtered) + hists->stats.nr_non_filtered_samples++; +} + static struct hist_entry *hists__add_dummy_entry(struct hists *hists, struct hist_entry *pair) { diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index a8418d19808dc2..03ae1dbb1b1574 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -119,6 +119,7 @@ u64 hists__total_period(struct hists *hists); void hists__reset_stats(struct hists *hists); void hists__inc_stats(struct hists *hists, struct hist_entry *h); void hists__inc_nr_events(struct hists *hists, u32 type); +void hists__inc_nr_samples(struct hists *hists, bool filtered); void events_stats__inc(struct events_stats *stats, u32 type); size_t events_stats__fprintf(struct events_stats *stats, FILE *fp); From 69bcb019fc809874f518559c8e5b0a90176f0532 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 30 Oct 2013 09:40:34 +0900 Subject: [PATCH 052/101] perf tools: Introduce struct hist_entry_iter There is some duplicated code when adding hist entries. The variants differ in whether they carry branch info or mem info, but they generally do the same thing. So introduce a new struct hist_entry_iter and add callbacks to customize each case in a general way. The new perf_evsel__add_entry() function will look like:

  iter->prepare_entry();
  iter->add_single_entry();

  while (iter->next_entry())
    iter->add_next_entry();

  iter->finish_entry();

This will help further work like the cumulative callchain patchset. Signed-off-by: Namhyung Kim Tested-by: Arun Sharma Tested-by: Rodrigo Campos Cc: David Ahern Cc: Frederic Weisbecker Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1401335910-16832-3-git-send-email-namhyung@kernel.org Signed-off-by: Jiri Olsa --- tools/perf/builtin-report.c | 192 +++----------------- tools/perf/tests/hists_filter.c | 16 +- tools/perf/tests/hists_output.c | 11 +- tools/perf/util/hist.c | 299 ++++++++++++++++++++++++++++++++ tools/perf/util/hist.h | 33 ++++ 5 files changed, 372 insertions(+), 179 deletions(-) diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 4a3b84dd4f41b7..3201bdfa8c3f2b 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -76,163 +76,16 @@ static int report__config(const char *var, const char *value, void *cb) return perf_default_config(var, value, cb); } -static void report__inc_stats(struct report *rep, struct hist_entry *he) +static void report__inc_stats(struct report *rep, + struct hist_entry *he __maybe_unused) { /* - * The @he is either of a newly created one or an existing one - * merging current sample. We only want to count a new one so - * checking ->nr_events being 1. + * We cannot access @he at this time. Just assume it's a new entry. + * It'll be fixed once we have a callback mechanism in hist_iter. */ - if (he->stat.nr_events == 1) - rep->nr_entries++; - - /* - * Only counts number of samples at this stage as it's more - * natural to do it here and non-sample events are also - * counted in perf_session_deliver_event(). The dump_trace - * requires this info is ready before going to the output tree.
- */ - hists__inc_nr_samples(he->hists, he->filtered); -} - -static int report__add_mem_hist_entry(struct report *rep, struct addr_location *al, - struct perf_sample *sample, struct perf_evsel *evsel) -{ - struct symbol *parent = NULL; - struct hist_entry *he; - struct mem_info *mi, *mx; - uint64_t cost; - int err = sample__resolve_callchain(sample, &parent, evsel, al, rep->max_stack); - - if (err) - return err; - - mi = sample__resolve_mem(sample, al); - if (!mi) - return -ENOMEM; - - if (rep->hide_unresolved && !al->sym) - return 0; - - cost = sample->weight; - if (!cost) - cost = 1; - - /* - * must pass period=weight in order to get the correct - * sorting from hists__collapse_resort() which is solely - * based on periods. We want sorting be done on nr_events * weight - * and this is indirectly achieved by passing period=weight here - * and the he_stat__add_period() function. - */ - he = __hists__add_entry(&evsel->hists, al, parent, NULL, mi, - cost, cost, 0); - if (!he) - return -ENOMEM; - - if (ui__has_annotation()) { - err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr); - if (err) - goto out; - - mx = he->mem_info; - err = addr_map_symbol__inc_samples(&mx->daddr, evsel->idx); - if (err) - goto out; - } - - report__inc_stats(rep, he); - - err = hist_entry__append_callchain(he, sample); -out: - return err; -} - -static int report__add_branch_hist_entry(struct report *rep, struct addr_location *al, - struct perf_sample *sample, struct perf_evsel *evsel) -{ - struct symbol *parent = NULL; - unsigned i; - struct hist_entry *he; - struct branch_info *bi, *bx; - int err = sample__resolve_callchain(sample, &parent, evsel, al, rep->max_stack); - - if (err) - return err; - - bi = sample__resolve_bstack(sample, al); - if (!bi) - return -ENOMEM; - - for (i = 0; i < sample->branch_stack->nr; i++) { - if (rep->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym)) - continue; - - err = -ENOMEM; - - /* overwrite the 'al' to branch-to info */ - al->map = bi[i].to.map; - al->sym = bi[i].to.sym; - al->addr = bi[i].to.addr; - /* - * The report shows the percentage of total branches captured - * and not events sampled. Thus we use a pseudo period of 1. 
- */ - he = __hists__add_entry(&evsel->hists, al, parent, &bi[i], NULL, - 1, 1, 0); - if (he) { - if (ui__has_annotation()) { - bx = he->branch_info; - err = addr_map_symbol__inc_samples(&bx->from, - evsel->idx); - if (err) - goto out; - - err = addr_map_symbol__inc_samples(&bx->to, - evsel->idx); - if (err) - goto out; - } - report__inc_stats(rep, he); - } else - goto out; - } - err = 0; -out: - free(bi); - return err; + rep->nr_entries++; } -static int report__add_hist_entry(struct report *rep, struct perf_evsel *evsel, - struct addr_location *al, struct perf_sample *sample) -{ - struct symbol *parent = NULL; - struct hist_entry *he; - int err = sample__resolve_callchain(sample, &parent, evsel, al, rep->max_stack); - - if (err) - return err; - - he = __hists__add_entry(&evsel->hists, al, parent, NULL, NULL, - sample->period, sample->weight, - sample->transaction); - if (he == NULL) - return -ENOMEM; - - err = hist_entry__append_callchain(he, sample); - if (err) - goto out; - - if (ui__has_annotation()) - err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr); - - report__inc_stats(rep, he); - -out: - return err; -} - - static int process_sample_event(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, @@ -241,6 +94,9 @@ static int process_sample_event(struct perf_tool *tool, { struct report *rep = container_of(tool, struct report, tool); struct addr_location al; + struct hist_entry_iter iter = { + .hide_unresolved = rep->hide_unresolved, + }; int ret; if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) { @@ -255,22 +111,22 @@ static int process_sample_event(struct perf_tool *tool, if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap)) return 0; - if (sort__mode == SORT_MODE__BRANCH) { - ret = report__add_branch_hist_entry(rep, &al, sample, evsel); - if (ret < 0) - pr_debug("problem adding lbr entry, skipping event\n"); - } else if (rep->mem_mode == 1) { - ret = report__add_mem_hist_entry(rep, &al, sample, evsel); - if (ret < 0) - pr_debug("problem adding mem entry, skipping event\n"); - } else { - if (al.map != NULL) - al.map->dso->hit = 1; - - ret = report__add_hist_entry(rep, evsel, &al, sample); - if (ret < 0) - pr_debug("problem incrementing symbol period, skipping event\n"); - } + if (sort__mode == SORT_MODE__BRANCH) + iter.ops = &hist_iter_branch; + else if (rep->mem_mode) + iter.ops = &hist_iter_mem; + else + iter.ops = &hist_iter_normal; + + if (al.map != NULL) + al.map->dso->hit = 1; + + report__inc_stats(rep, NULL); + + ret = hist_entry_iter__add(&iter, &al, evsel, sample, rep->max_stack); + if (ret < 0) + pr_debug("problem adding hist entry, skipping event\n"); + return ret; } diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c index 0a71ef4b91583c..76b02e1de701a5 100644 --- a/tools/perf/tests/hists_filter.c +++ b/tools/perf/tests/hists_filter.c @@ -42,11 +42,11 @@ static struct sample fake_samples[] = { { .pid = 300, .ip = 0xf0000 + 800, }, }; -static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine) +static int add_hist_entries(struct perf_evlist *evlist, + struct machine *machine __maybe_unused) { struct perf_evsel *evsel; struct addr_location al; - struct hist_entry *he; struct perf_sample sample = { .cpu = 0, }; size_t i; @@ -62,6 +62,10 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine) .misc = PERF_RECORD_MISC_USER, }, }; + struct hist_entry_iter iter = { + .ops = &hist_iter_normal, + .hide_unresolved = false, + }; /* make sure it has 
no filter at first */ evsel->hists.thread_filter = NULL; @@ -71,21 +75,19 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine) sample.pid = fake_samples[i].pid; sample.tid = fake_samples[i].pid; sample.ip = fake_samples[i].ip; + sample.period = 100; if (perf_event__preprocess_sample(&event, machine, &al, &sample) < 0) goto out; - he = __hists__add_entry(&evsel->hists, &al, NULL, - NULL, NULL, 100, 1, 0); - if (he == NULL) + if (hist_entry_iter__add(&iter, &al, evsel, &sample, + PERF_MAX_STACK_DEPTH) < 0) goto out; fake_samples[i].thread = al.thread; fake_samples[i].map = al.map; fake_samples[i].sym = al.sym; - - hists__inc_nr_samples(he->hists, he->filtered); } } diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c index a16850551797ea..1308f88a916948 100644 --- a/tools/perf/tests/hists_output.c +++ b/tools/perf/tests/hists_output.c @@ -46,7 +46,7 @@ static struct sample fake_samples[] = { static int add_hist_entries(struct hists *hists, struct machine *machine) { struct addr_location al; - struct hist_entry *he; + struct perf_evsel *evsel = hists_to_evsel(hists); struct perf_sample sample = { .period = 100, }; size_t i; @@ -56,6 +56,10 @@ static int add_hist_entries(struct hists *hists, struct machine *machine) .misc = PERF_RECORD_MISC_USER, }, }; + struct hist_entry_iter iter = { + .ops = &hist_iter_normal, + .hide_unresolved = false, + }; sample.cpu = fake_samples[i].cpu; sample.pid = fake_samples[i].pid; @@ -66,9 +70,8 @@ static int add_hist_entries(struct hists *hists, struct machine *machine) &sample) < 0) goto out; - he = __hists__add_entry(hists, &al, NULL, NULL, NULL, - sample.period, 1, 0); - if (he == NULL) + if (hist_entry_iter__add(&iter, &al, evsel, &sample, + PERF_MAX_STACK_DEPTH) < 0) goto out; fake_samples[i].thread = al.thread; diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 5943ba60f19335..d8662356de202e 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -4,6 +4,7 @@ #include "session.h" #include "sort.h" #include "evsel.h" +#include "annotate.h" #include static bool hists__filter_entry_by_dso(struct hists *hists, @@ -429,6 +430,304 @@ struct hist_entry *__hists__add_entry(struct hists *hists, return add_hist_entry(hists, &entry, al); } +static int +iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused, + struct addr_location *al __maybe_unused) +{ + return 0; +} + +static int +iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused, + struct addr_location *al __maybe_unused) +{ + return 0; +} + +static int +iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al) +{ + struct perf_sample *sample = iter->sample; + struct mem_info *mi; + + mi = sample__resolve_mem(sample, al); + if (mi == NULL) + return -ENOMEM; + + iter->priv = mi; + return 0; +} + +static int +iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al) +{ + u64 cost; + struct mem_info *mi = iter->priv; + struct hist_entry *he; + + if (mi == NULL) + return -EINVAL; + + cost = iter->sample->weight; + if (!cost) + cost = 1; + + /* + * must pass period=weight in order to get the correct + * sorting from hists__collapse_resort() which is solely + * based on periods. We want sorting be done on nr_events * weight + * and this is indirectly achieved by passing period=weight here + * and the he_stat__add_period() function. 
+ */ + he = __hists__add_entry(&iter->evsel->hists, al, iter->parent, NULL, mi, + cost, cost, 0); + if (!he) + return -ENOMEM; + + iter->he = he; + return 0; +} + +static int +iter_finish_mem_entry(struct hist_entry_iter *iter, struct addr_location *al) +{ + struct perf_evsel *evsel = iter->evsel; + struct hist_entry *he = iter->he; + struct mem_info *mx; + int err = -EINVAL; + + if (he == NULL) + goto out; + + if (ui__has_annotation()) { + err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr); + if (err) + goto out; + + mx = he->mem_info; + err = addr_map_symbol__inc_samples(&mx->daddr, evsel->idx); + if (err) + goto out; + } + + hists__inc_nr_samples(&evsel->hists, he->filtered); + + err = hist_entry__append_callchain(he, iter->sample); + +out: + /* + * We don't need to free iter->priv (mem_info) here since + * the mem info was either already freed in add_hist_entry() or + * passed to a new hist entry by hist_entry__new(). + */ + iter->priv = NULL; + + iter->he = NULL; + return err; +} + +static int +iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al) +{ + struct branch_info *bi; + struct perf_sample *sample = iter->sample; + + bi = sample__resolve_bstack(sample, al); + if (!bi) + return -ENOMEM; + + iter->curr = 0; + iter->total = sample->branch_stack->nr; + + iter->priv = bi; + return 0; +} + +static int +iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused, + struct addr_location *al __maybe_unused) +{ + return 0; +} + +static int +iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al) +{ + struct branch_info *bi = iter->priv; + int i = iter->curr; + + if (bi == NULL) + return 0; + + if (iter->curr >= iter->total) + return 0; + + al->map = bi[i].to.map; + al->sym = bi[i].to.sym; + al->addr = bi[i].to.addr; + return 1; +} + +static int +iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al) +{ + struct branch_info *bi, *bx; + struct perf_evsel *evsel = iter->evsel; + struct hist_entry *he = NULL; + int i = iter->curr; + int err = 0; + + bi = iter->priv; + + if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym)) + goto out; + + /* + * The report shows the percentage of total branches captured + * and not events sampled. Thus we use a pseudo period of 1. + */ + he = __hists__add_entry(&evsel->hists, al, iter->parent, &bi[i], NULL, + 1, 1, 0); + if (he == NULL) + return -ENOMEM; + + if (ui__has_annotation()) { + bx = he->branch_info; + err = addr_map_symbol__inc_samples(&bx->from, evsel->idx); + if (err) + goto out; + + err = addr_map_symbol__inc_samples(&bx->to, evsel->idx); + if (err) + goto out; + } + + hists__inc_nr_samples(&evsel->hists, he->filtered); + +out: + iter->he = he; + iter->curr++; + return err; +} + +static int +iter_finish_branch_entry(struct hist_entry_iter *iter, + struct addr_location *al __maybe_unused) +{ + zfree(&iter->priv); + iter->he = NULL; + + return iter->curr >= iter->total ? 
0 : -1; +} + +static int +iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused, + struct addr_location *al __maybe_unused) +{ + return 0; +} + +static int +iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al) +{ + struct perf_evsel *evsel = iter->evsel; + struct perf_sample *sample = iter->sample; + struct hist_entry *he; + + he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL, + sample->period, sample->weight, + sample->transaction); + if (he == NULL) + return -ENOMEM; + + iter->he = he; + return 0; +} + +static int +iter_finish_normal_entry(struct hist_entry_iter *iter, struct addr_location *al) +{ + int err; + struct hist_entry *he = iter->he; + struct perf_evsel *evsel = iter->evsel; + struct perf_sample *sample = iter->sample; + + if (he == NULL) + return 0; + + iter->he = NULL; + + if (ui__has_annotation()) { + err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr); + if (err) + return err; + } + + hists__inc_nr_samples(&evsel->hists, he->filtered); + + return hist_entry__append_callchain(he, sample); +} + +const struct hist_iter_ops hist_iter_mem = { + .prepare_entry = iter_prepare_mem_entry, + .add_single_entry = iter_add_single_mem_entry, + .next_entry = iter_next_nop_entry, + .add_next_entry = iter_add_next_nop_entry, + .finish_entry = iter_finish_mem_entry, +}; + +const struct hist_iter_ops hist_iter_branch = { + .prepare_entry = iter_prepare_branch_entry, + .add_single_entry = iter_add_single_branch_entry, + .next_entry = iter_next_branch_entry, + .add_next_entry = iter_add_next_branch_entry, + .finish_entry = iter_finish_branch_entry, +}; + +const struct hist_iter_ops hist_iter_normal = { + .prepare_entry = iter_prepare_normal_entry, + .add_single_entry = iter_add_single_normal_entry, + .next_entry = iter_next_nop_entry, + .add_next_entry = iter_add_next_nop_entry, + .finish_entry = iter_finish_normal_entry, +}; + +int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al, + struct perf_evsel *evsel, struct perf_sample *sample, + int max_stack_depth) +{ + int err, err2; + + err = sample__resolve_callchain(sample, &iter->parent, evsel, al, + max_stack_depth); + if (err) + return err; + + iter->evsel = evsel; + iter->sample = sample; + + err = iter->ops->prepare_entry(iter, al); + if (err) + goto out; + + err = iter->ops->add_single_entry(iter, al); + if (err) + goto out; + + while (iter->ops->next_entry(iter, al)) { + err = iter->ops->add_next_entry(iter, al); + if (err) + break; + } + +out: + err2 = iter->ops->finish_entry(iter, al); + if (!err) + err = err2; + + return err; +} + int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) { diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 03ae1dbb1b1574..8894f184357cd8 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -96,12 +96,45 @@ struct hists { u16 col_len[HISTC_NR_COLS]; }; +struct hist_entry_iter; + +struct hist_iter_ops { + int (*prepare_entry)(struct hist_entry_iter *, struct addr_location *); + int (*add_single_entry)(struct hist_entry_iter *, struct addr_location *); + int (*next_entry)(struct hist_entry_iter *, struct addr_location *); + int (*add_next_entry)(struct hist_entry_iter *, struct addr_location *); + int (*finish_entry)(struct hist_entry_iter *, struct addr_location *); +}; + +struct hist_entry_iter { + int total; + int curr; + + bool hide_unresolved; + + struct perf_evsel *evsel; + struct perf_sample *sample; + struct hist_entry *he; + struct symbol *parent; + 
void *priv; + + const struct hist_iter_ops *ops; +}; + +extern const struct hist_iter_ops hist_iter_normal; +extern const struct hist_iter_ops hist_iter_branch; +extern const struct hist_iter_ops hist_iter_mem; + struct hist_entry *__hists__add_entry(struct hists *hists, struct addr_location *al, struct symbol *parent, struct branch_info *bi, struct mem_info *mi, u64 period, u64 weight, u64 transaction); +int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al, + struct perf_evsel *evsel, struct perf_sample *sample, + int max_stack_depth); + int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right); int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right); int hist_entry__transaction_len(void); From f8be1c8c48c8469d1ce95ccdc77b1e2c6a29700e Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 11 Sep 2012 13:15:07 +0900 Subject: [PATCH 053/101] perf hists: Add support for accumulated stat of hist entry Maintain accumulated stat information in hist_entry->stat_acc if symbol_conf.cumulate_callchain is set. Fields in ->stat_acc have the same values initially, and will be updated as the callchain is processed later. Signed-off-by: Namhyung Kim Tested-by: Arun Sharma Tested-by: Rodrigo Campos Cc: Frederic Weisbecker Link: http://lkml.kernel.org/r/1401335910-16832-4-git-send-email-namhyung@kernel.org Signed-off-by: Jiri Olsa --- tools/perf/util/hist.c | 28 ++++++++++++++++++++++++++-- tools/perf/util/sort.h | 1 + tools/perf/util/symbol.h | 1 + 3 files changed, 28 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index d8662356de202e..dfff2ee8effb6c 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -232,6 +232,8 @@ static bool hists__decay_entry(struct hists *hists, struct hist_entry *he) return true; he_stat__decay(&he->stat); + if (symbol_conf.cumulate_callchain) + he_stat__decay(he->stat_acc); diff = prev_period - he->stat.period; @@ -279,12 +281,26 @@ void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel) static struct hist_entry *hist_entry__new(struct hist_entry *template) { - size_t callchain_size = symbol_conf.use_callchain ?
sizeof(struct callchain_root) : 0; - struct hist_entry *he = zalloc(sizeof(*he) + callchain_size); + size_t callchain_size = 0; + struct hist_entry *he; + + if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) + callchain_size = sizeof(struct callchain_root); + + he = zalloc(sizeof(*he) + callchain_size); if (he != NULL) { *he = *template; + if (symbol_conf.cumulate_callchain) { + he->stat_acc = malloc(sizeof(he->stat)); + if (he->stat_acc == NULL) { + free(he); + return NULL; + } + memcpy(he->stat_acc, &he->stat, sizeof(he->stat)); + } + if (he->ms.map) he->ms.map->referenced = true; @@ -296,6 +312,7 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template) */ he->branch_info = malloc(sizeof(*he->branch_info)); if (he->branch_info == NULL) { + free(he->stat_acc); free(he); return NULL; } @@ -359,6 +376,8 @@ static struct hist_entry *add_hist_entry(struct hists *hists, if (!cmp) { he_stat__add_period(&he->stat, period, weight); + if (symbol_conf.cumulate_callchain) + he_stat__add_period(he->stat_acc, period, weight); /* * This mem info was allocated from sample__resolve_mem @@ -394,6 +413,8 @@ static struct hist_entry *add_hist_entry(struct hists *hists, rb_insert_color(&he->rb_node_in, hists->entries_in); out: he_stat__add_cpumode_period(&he->stat, al->cpumode, period); + if (symbol_conf.cumulate_callchain) + he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period); return he; } @@ -768,6 +789,7 @@ void hist_entry__free(struct hist_entry *he) { zfree(&he->branch_info); zfree(&he->mem_info); + zfree(&he->stat_acc); free_srcline(he->srcline); free(he); } @@ -793,6 +815,8 @@ static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused, if (!cmp) { he_stat__add_stat(&iter->stat, &he->stat); + if (symbol_conf.cumulate_callchain) + he_stat__add_stat(iter->stat_acc, he->stat_acc); if (symbol_conf.use_callchain) { callchain_cursor_reset(&callchain_cursor); diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index 5f38d925e92f4f..c9ffa031becdf6 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -82,6 +82,7 @@ struct hist_entry { struct list_head head; } pairs; struct he_stat stat; + struct he_stat *stat_acc; struct map_symbol ms; struct thread *thread; struct comm *comm; diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 33ede53fa6b994..615c752dd76736 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -109,6 +109,7 @@ struct symbol_conf { show_nr_samples, show_total_period, use_callchain, + cumulate_callchain, exclude_other, show_cpu_utilization, initialized, From a0b51af367a6831330564c96dc4cc1ac63413701 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 11 Sep 2012 13:34:27 +0900 Subject: [PATCH 054/101] perf hists: Check if accumulated when adding a hist entry To support callchain accumulation, @entry should be recognized as accumulated or not when add_hist_entry() is called. The period of an accumulated entry should be added to ->stat_acc but not to ->stat. Add a @sample_self arg for that.
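A minimal sketch of the intended calling convention (illustrative only, arguments as in this patch):

    /* entry hit by the sample itself: update ->stat (and ->stat_acc) */
    he = __hists__add_entry(hists, al, parent, NULL, NULL,
                            sample->period, sample->weight,
                            sample->transaction, true);

    /* entry only accumulated from a callchain: update ->stat_acc only */
    he = __hists__add_entry(hists, al, parent, NULL, NULL,
                            sample->period, sample->weight,
                            sample->transaction, false);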
Signed-off-by: Namhyung Kim Tested-by: Arun Sharma Tested-by: Rodrigo Campos Cc: Frederic Weisbecker Link: http://lkml.kernel.org/r/1401335910-16832-5-git-send-email-namhyung@kernel.org Signed-off-by: Jiri Olsa --- tools/perf/builtin-annotate.c | 3 ++- tools/perf/builtin-diff.c | 2 +- tools/perf/builtin-top.c | 2 +- tools/perf/tests/hists_link.c | 4 ++-- tools/perf/util/hist.c | 29 ++++++++++++++++++----------- tools/perf/util/hist.h | 3 ++- 6 files changed, 26 insertions(+), 17 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index bf52461a88bd2a..1ec429fef2be93 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -65,7 +65,8 @@ static int perf_evsel__add_sample(struct perf_evsel *evsel, return 0; } - he = __hists__add_entry(&evsel->hists, al, NULL, NULL, NULL, 1, 1, 0); + he = __hists__add_entry(&evsel->hists, al, NULL, NULL, NULL, 1, 1, 0, + true); if (he == NULL) return -ENOMEM; diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index 8bff543acaab7a..9a5a035cb4262a 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c @@ -315,7 +315,7 @@ static int hists__add_entry(struct hists *hists, u64 weight, u64 transaction) { if (__hists__add_entry(hists, al, NULL, NULL, NULL, period, weight, - transaction) != NULL) + transaction, true) != NULL) return 0; return -ENOMEM; } diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 51309264d2104a..12e2e1227e474e 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -247,7 +247,7 @@ static struct hist_entry *perf_evsel__add_hist_entry(struct perf_evsel *evsel, pthread_mutex_lock(&evsel->hists.lock); he = __hists__add_entry(&evsel->hists, al, NULL, NULL, NULL, sample->period, sample->weight, - sample->transaction); + sample->transaction, true); pthread_mutex_unlock(&evsel->hists.lock); if (he == NULL) return NULL; diff --git a/tools/perf/tests/hists_link.c b/tools/perf/tests/hists_link.c index 5ffa2c3eb77d69..ca6693b37cd71d 100644 --- a/tools/perf/tests/hists_link.c +++ b/tools/perf/tests/hists_link.c @@ -88,7 +88,7 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine) goto out; he = __hists__add_entry(&evsel->hists, &al, NULL, - NULL, NULL, 1, 1, 0); + NULL, NULL, 1, 1, 0, true); if (he == NULL) goto out; @@ -112,7 +112,7 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine) goto out; he = __hists__add_entry(&evsel->hists, &al, NULL, - NULL, NULL, 1, 1, 0); + NULL, NULL, 1, 1, 0, true); if (he == NULL) goto out; diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index dfff2ee8effb6c..b9facf33b22448 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -279,7 +279,8 @@ void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel) * histogram, sorted on item, collects periods */ -static struct hist_entry *hist_entry__new(struct hist_entry *template) +static struct hist_entry *hist_entry__new(struct hist_entry *template, + bool sample_self) { size_t callchain_size = 0; struct hist_entry *he; @@ -299,6 +300,8 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template) return NULL; } memcpy(he->stat_acc, &he->stat, sizeof(he->stat)); + if (!sample_self) + memset(&he->stat, 0, sizeof(he->stat)); } if (he->ms.map) @@ -351,7 +354,8 @@ static u8 symbol__parent_filter(const struct symbol *parent) static struct hist_entry *add_hist_entry(struct hists *hists, struct hist_entry *entry, - struct addr_location *al) + 
struct addr_location *al, + bool sample_self) { struct rb_node **p; struct rb_node *parent = NULL; @@ -375,7 +379,8 @@ static struct hist_entry *add_hist_entry(struct hists *hists, cmp = hist_entry__cmp(he, entry); if (!cmp) { - he_stat__add_period(&he->stat, period, weight); + if (sample_self) + he_stat__add_period(&he->stat, period, weight); if (symbol_conf.cumulate_callchain) he_stat__add_period(he->stat_acc, period, weight); @@ -405,14 +410,15 @@ static struct hist_entry *add_hist_entry(struct hists *hists, p = &(*p)->rb_right; } - he = hist_entry__new(entry); + he = hist_entry__new(entry, sample_self); if (!he) return NULL; rb_link_node(&he->rb_node_in, parent, p); rb_insert_color(&he->rb_node_in, hists->entries_in); out: - he_stat__add_cpumode_period(&he->stat, al->cpumode, period); + if (sample_self) + he_stat__add_cpumode_period(&he->stat, al->cpumode, period); if (symbol_conf.cumulate_callchain) he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period); return he; @@ -423,7 +429,8 @@ struct hist_entry *__hists__add_entry(struct hists *hists, struct symbol *sym_parent, struct branch_info *bi, struct mem_info *mi, - u64 period, u64 weight, u64 transaction) + u64 period, u64 weight, u64 transaction, + bool sample_self) { struct hist_entry entry = { .thread = al->thread, @@ -448,7 +455,7 @@ struct hist_entry *__hists__add_entry(struct hists *hists, .transaction = transaction, }; - return add_hist_entry(hists, &entry, al); + return add_hist_entry(hists, &entry, al, sample_self); } static int @@ -501,7 +508,7 @@ iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al * and the he_stat__add_period() function. */ he = __hists__add_entry(&iter->evsel->hists, al, iter->parent, NULL, mi, - cost, cost, 0); + cost, cost, 0, true); if (!he) return -ENOMEM; @@ -608,7 +615,7 @@ iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *a * and not events sampled. Thus we use a pseudo period of 1. */ he = __hists__add_entry(&evsel->hists, al, iter->parent, &bi[i], NULL, - 1, 1, 0); + 1, 1, 0, true); if (he == NULL) return -ENOMEM; @@ -657,7 +664,7 @@ iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL, sample->period, sample->weight, - sample->transaction); + sample->transaction, true); if (he == NULL) return -ENOMEM; @@ -1161,7 +1168,7 @@ static struct hist_entry *hists__add_dummy_entry(struct hists *hists, p = &(*p)->rb_right; } - he = hist_entry__new(pair); + he = hist_entry__new(pair, true); if (he) { memset(&he->stat, 0, sizeof(he->stat)); he->hists = hists; diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 8894f184357cd8..bedb24d3643c5d 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -130,7 +130,8 @@ struct hist_entry *__hists__add_entry(struct hists *hists, struct symbol *parent, struct branch_info *bi, struct mem_info *mi, u64 period, - u64 weight, u64 transaction); + u64 weight, u64 transaction, + bool sample_self); int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al, struct perf_evsel *evsel, struct perf_sample *sample, int max_stack_depth); From 7a13aa28aa268359cee006059731f49bcd1f839e Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 11 Sep 2012 14:13:04 +0900 Subject: [PATCH 055/101] perf hists: Accumulate hist entry stat based on the callchain Call __hists__add_entry() for each callchain node to get an accumulated stat for an entry. 
Introduce new cumulative_iter ops to process them properly. Signed-off-by: Namhyung Kim Tested-by: Arun Sharma Tested-by: Rodrigo Campos Cc: Frederic Weisbecker Link: http://lkml.kernel.org/r/1401335910-16832-6-git-send-email-namhyung@kernel.org Signed-off-by: Jiri Olsa --- tools/perf/builtin-report.c | 2 + tools/perf/util/callchain.c | 3 +- tools/perf/util/hist.c | 96 +++++++++++++++++++++++++++++++++++++ tools/perf/util/hist.h | 1 + 4 files changed, 101 insertions(+), 1 deletion(-) diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 3201bdfa8c3f2b..e8fa9fea341f27 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -115,6 +115,8 @@ static int process_sample_event(struct perf_tool *tool, iter.ops = &hist_iter_branch; else if (rep->mem_mode) iter.ops = &hist_iter_mem; + else if (symbol_conf.cumulate_callchain) + iter.ops = &hist_iter_cumulative; else iter.ops = &hist_iter_normal; diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c index 9a42382b3921a7..2af69c47b72557 100644 --- a/tools/perf/util/callchain.c +++ b/tools/perf/util/callchain.c @@ -616,7 +616,8 @@ int sample__resolve_callchain(struct perf_sample *sample, struct symbol **parent if (sample->callchain == NULL) return 0; - if (symbol_conf.use_callchain || sort__has_parent) { + if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain || + sort__has_parent) { return machine__resolve_callchain(al->machine, evsel, al->thread, sample, parent, al, max_stack); } diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index b9facf33b22448..6079b5acfb6db7 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -696,6 +696,94 @@ iter_finish_normal_entry(struct hist_entry_iter *iter, struct addr_location *al) return hist_entry__append_callchain(he, sample); } +static int +iter_prepare_cumulative_entry(struct hist_entry_iter *iter __maybe_unused, + struct addr_location *al __maybe_unused) +{ + callchain_cursor_commit(&callchain_cursor); + return 0; +} + +static int +iter_add_single_cumulative_entry(struct hist_entry_iter *iter, + struct addr_location *al) +{ + struct perf_evsel *evsel = iter->evsel; + struct perf_sample *sample = iter->sample; + struct hist_entry *he; + int err = 0; + + he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL, + sample->period, sample->weight, + sample->transaction, true); + if (he == NULL) + return -ENOMEM; + + iter->he = he; + + /* + * The iter->he will be over-written after ->add_next_entry() + * called so inc stats for the original entry now. 
+ */ + if (ui__has_annotation()) + err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr); + + hists__inc_nr_samples(&evsel->hists, he->filtered); + + return err; +} + +static int +iter_next_cumulative_entry(struct hist_entry_iter *iter, + struct addr_location *al) +{ + struct callchain_cursor_node *node; + + node = callchain_cursor_current(&callchain_cursor); + if (node == NULL) + return 0; + + al->map = node->map; + al->sym = node->sym; + if (node->map) + al->addr = node->map->map_ip(node->map, node->ip); + else + al->addr = node->ip; + + if (iter->hide_unresolved && al->sym == NULL) + return 0; + + callchain_cursor_advance(&callchain_cursor); + return 1; +} + +static int +iter_add_next_cumulative_entry(struct hist_entry_iter *iter, + struct addr_location *al) +{ + struct perf_evsel *evsel = iter->evsel; + struct perf_sample *sample = iter->sample; + struct hist_entry *he; + + he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL, + sample->period, sample->weight, + sample->transaction, false); + if (he == NULL) + return -ENOMEM; + + iter->he = he; + + return 0; +} + +static int +iter_finish_cumulative_entry(struct hist_entry_iter *iter, + struct addr_location *al __maybe_unused) +{ + iter->he = NULL; + return 0; +} + const struct hist_iter_ops hist_iter_mem = { .prepare_entry = iter_prepare_mem_entry, .add_single_entry = iter_add_single_mem_entry, @@ -720,6 +808,14 @@ const struct hist_iter_ops hist_iter_normal = { .finish_entry = iter_finish_normal_entry, }; +const struct hist_iter_ops hist_iter_cumulative = { + .prepare_entry = iter_prepare_cumulative_entry, + .add_single_entry = iter_add_single_cumulative_entry, + .next_entry = iter_next_cumulative_entry, + .add_next_entry = iter_add_next_cumulative_entry, + .finish_entry = iter_finish_cumulative_entry, +}; + int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al, struct perf_evsel *evsel, struct perf_sample *sample, int max_stack_depth) diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index bedb24d3643c5d..78409f95d01240 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -124,6 +124,7 @@ struct hist_entry_iter { extern const struct hist_iter_ops hist_iter_normal; extern const struct hist_iter_ops hist_iter_branch; extern const struct hist_iter_ops hist_iter_mem; +extern const struct hist_iter_ops hist_iter_cumulative; struct hist_entry *__hists__add_entry(struct hists *hists, struct addr_location *al, From c7405d85d7a354b8ba49e2db7c4b027e6cb997c1 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 31 Oct 2013 13:58:30 +0900 Subject: [PATCH 056/101] perf tools: Update cpumode for each cumulative entry The cpumode and level in struct addr_location were set for a sample but not updated as cumulative callchain entries were added. This led to non-matching symbol and cpumode in the output. Update them based on whether the map is part of the kernel or not. This is the reverse of what thread__find_addr_map() does.
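A sketch of how a cumulative iterator can consume the new helper (this matches how a later patch in this series ends up using it):

    struct callchain_cursor_node *node;

    node = callchain_cursor_current(&callchain_cursor);
    if (node == NULL)
        return 0;

    /* fills al->map/sym/addr and sets al->cpumode and al->level
     * to match the node's map */
    return fill_callchain_info(al, node, iter->hide_unresolved);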
Signed-off-by: Namhyung Kim Tested-by: Arun Sharma Tested-by: Rodrigo Campos Cc: Frederic Weisbecker Link: http://lkml.kernel.org/r/1401335910-16832-7-git-send-email-namhyung@kernel.org Signed-off-by: Jiri Olsa --- tools/perf/util/callchain.c | 42 +++++++++++++++++++++++++++++++++++++ tools/perf/util/callchain.h | 2 ++ tools/perf/util/hist.c | 13 ++---------- 3 files changed, 46 insertions(+), 11 deletions(-) diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c index 2af69c47b72557..48b6d3f5001231 100644 --- a/tools/perf/util/callchain.c +++ b/tools/perf/util/callchain.c @@ -630,3 +630,45 @@ int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *samp return 0; return callchain_append(he->callchain, &callchain_cursor, sample->period); } + +int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node, + bool hide_unresolved) +{ + al->map = node->map; + al->sym = node->sym; + if (node->map) + al->addr = node->map->map_ip(node->map, node->ip); + else + al->addr = node->ip; + + if (al->sym == NULL) { + if (hide_unresolved) + return 0; + if (al->map == NULL) + goto out; + } + + if (al->map->groups == &al->machine->kmaps) { + if (machine__is_host(al->machine)) { + al->cpumode = PERF_RECORD_MISC_KERNEL; + al->level = 'k'; + } else { + al->cpumode = PERF_RECORD_MISC_GUEST_KERNEL; + al->level = 'g'; + } + } else { + if (machine__is_host(al->machine)) { + al->cpumode = PERF_RECORD_MISC_USER; + al->level = '.'; + } else if (perf_guest) { + al->cpumode = PERF_RECORD_MISC_GUEST_USER; + al->level = 'u'; + } else { + al->cpumode = PERF_RECORD_MISC_HYPERVISOR; + al->level = 'H'; + } + } + +out: + return 1; +} diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h index bde2b0cc24cf47..24a53d562d0a3c 100644 --- a/tools/perf/util/callchain.h +++ b/tools/perf/util/callchain.h @@ -162,6 +162,8 @@ int sample__resolve_callchain(struct perf_sample *sample, struct symbol **parent struct perf_evsel *evsel, struct addr_location *al, int max_stack); int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample); +int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node, + bool hide_unresolved); extern const char record_callchain_help[]; int parse_callchain_report_opt(const char *arg); diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 6079b5acfb6db7..37c28fc13dc3c2 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -743,18 +743,9 @@ iter_next_cumulative_entry(struct hist_entry_iter *iter, if (node == NULL) return 0; - al->map = node->map; - al->sym = node->sym; - if (node->map) - al->addr = node->map->map_ip(node->map, node->ip); - else - al->addr = node->ip; - - if (iter->hide_unresolved && al->sym == NULL) - return 0; - callchain_cursor_advance(&callchain_cursor); - return 1; + + return fill_callchain_info(al, node, iter->hide_unresolved); } static int From b4d3c8bd86c4eda08456691121f83b4e1db46866 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 31 Oct 2013 10:05:29 +0900 Subject: [PATCH 057/101] perf report: Cache cumulative callchains It is possible for a callchain to have cycles or recursive calls. In that case it'll end up with entries showing more than 100% overhead in the output. In order to prevent such entries, cache each callchain node and skip it if the same entry was already cumulated.
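For example, a sample whose (hypothetical) callchain is main -> f -> f -> f would otherwise accumulate the sample's period into the entry for f three times, giving it up to 300% of that sample. The cache check is a simple linear scan, roughly:

    /* skip this node if the same entry was already cumulated */
    for (i = 0; i < iter->curr; i++) {
        if (hist_entry__cmp(he_cache[i], &he_tmp) == 0)
            return 0;
    }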
Signed-off-by: Namhyung Kim Tested-by: Arun Sharma Tested-by: Rodrigo Campos Cc: Frederic Weisbecker Link: http://lkml.kernel.org/r/1401335910-16832-8-git-send-email-namhyung@kernel.org Signed-off-by: Jiri Olsa --- tools/perf/util/hist.c | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 37c28fc13dc3c2..bf03db528db6d0 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -700,7 +700,22 @@ static int iter_prepare_cumulative_entry(struct hist_entry_iter *iter __maybe_unused, struct addr_location *al __maybe_unused) { + struct hist_entry **he_cache; + callchain_cursor_commit(&callchain_cursor); + + /* + * This is for detecting cycles or recursions so that they're + * cumulated only one time to prevent entries more than 100% + * overhead. + */ + he_cache = malloc(sizeof(*he_cache) * (PERF_MAX_STACK_DEPTH + 1)); + if (he_cache == NULL) + return -ENOMEM; + + iter->priv = he_cache; + iter->curr = 0; + return 0; } @@ -710,6 +725,7 @@ iter_add_single_cumulative_entry(struct hist_entry_iter *iter, { struct perf_evsel *evsel = iter->evsel; struct perf_sample *sample = iter->sample; + struct hist_entry **he_cache = iter->priv; struct hist_entry *he; int err = 0; @@ -720,6 +736,7 @@ iter_add_single_cumulative_entry(struct hist_entry_iter *iter, return -ENOMEM; iter->he = he; + he_cache[iter->curr++] = he; /* * The iter->he will be over-written after ->add_next_entry() @@ -754,7 +771,29 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter, { struct perf_evsel *evsel = iter->evsel; struct perf_sample *sample = iter->sample; + struct hist_entry **he_cache = iter->priv; struct hist_entry *he; + struct hist_entry he_tmp = { + .cpu = al->cpu, + .thread = al->thread, + .comm = thread__comm(al->thread), + .ip = al->addr, + .ms = { + .map = al->map, + .sym = al->sym, + }, + .parent = iter->parent, + }; + int i; + + /* + * Check if there's duplicate entries in the callchain. + * It's possible that it has cycles or recursive calls. + */ + for (i = 0; i < iter->curr; i++) { + if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) + return 0; + } he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL, sample->period, sample->weight, @@ -763,6 +802,7 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter, return -ENOMEM; iter->he = he; + he_cache[iter->curr++] = he; return 0; } @@ -771,7 +811,9 @@ static int iter_finish_cumulative_entry(struct hist_entry_iter *iter, struct addr_location *al __maybe_unused) { + zfree(&iter->priv); iter->he = NULL; + return 0; } From be1f13e30862ab6b0fffaecd556856a965cefa0c Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Mon, 10 Sep 2012 13:38:00 +0900 Subject: [PATCH 058/101] perf callchain: Add callchain_cursor_snapshot() The callchain_cursor_snapshot() is for saving current status of the callchain. It'll be used to accumulate callchain information for each node. 
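A minimal usage sketch (matching how the next patch in this series uses it):

    struct callchain_cursor cursor;

    callchain_cursor_snapshot(&cursor, &callchain_cursor);
    callchain_cursor_advance(&callchain_cursor);

    /* 'cursor' still starts at the node just processed, so it can
     * later be appended to that entry's callchain to record the
     * rest of the chain */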
Signed-off-by: Namhyung Kim Tested-by: Arun Sharma Tested-by: Rodrigo Campos Cc: Frederic Weisbecker Link: http://lkml.kernel.org/r/1401335910-16832-9-git-send-email-namhyung@kernel.org Signed-off-by: Jiri Olsa --- tools/perf/util/callchain.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h index 24a53d562d0a3c..8f84423a75da74 100644 --- a/tools/perf/util/callchain.h +++ b/tools/perf/util/callchain.h @@ -167,4 +167,13 @@ int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node * extern const char record_callchain_help[]; int parse_callchain_report_opt(const char *arg); + +static inline void callchain_cursor_snapshot(struct callchain_cursor *dest, + struct callchain_cursor *src) +{ + *dest = *src; + + dest->first = src->curr; + dest->nr -= src->pos; +} #endif /* __PERF_CALLCHAIN_H */ From be7f855a3eebe07f797b9e4a43bf59bab8ca3dbe Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 26 Dec 2013 17:44:10 +0900 Subject: [PATCH 059/101] perf tools: Save callchain info for each cumulative entry When accumulating a callchain entry, also save the current snapshot of the chain so that the rest of the chain can be shown. Signed-off-by: Namhyung Kim Tested-by: Arun Sharma Tested-by: Rodrigo Campos Cc: Frederic Weisbecker Link: http://lkml.kernel.org/r/1401335910-16832-10-git-send-email-namhyung@kernel.org Signed-off-by: Jiri Olsa --- tools/perf/util/hist.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index bf03db528db6d0..c6f5f5251aadc4 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -738,6 +738,14 @@ iter_add_single_cumulative_entry(struct hist_entry_iter *iter, iter->he = he; he_cache[iter->curr++] = he; + callchain_append(he->callchain, &callchain_cursor, sample->period); + + /* + * We need to re-initialize the cursor since callchain_append() + * advanced the cursor to the end. + */ + callchain_cursor_commit(&callchain_cursor); + /* * The iter->he will be over-written after ->add_next_entry() * called so inc stats for the original entry now. @@ -760,8 +768,6 @@ iter_next_cumulative_entry(struct hist_entry_iter *iter, if (node == NULL) return 0; - callchain_cursor_advance(&callchain_cursor); - return fill_callchain_info(al, node, iter->hide_unresolved); } @@ -785,6 +791,11 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter, .parent = iter->parent, }; int i; + struct callchain_cursor cursor; + + callchain_cursor_snapshot(&cursor, &callchain_cursor); + + callchain_cursor_advance(&callchain_cursor); /* * Check if there's duplicate entries in the callchain. @@ -804,6 +815,7 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter, iter->he = he; he_cache[iter->curr++] = he; + callchain_append(he->callchain, &cursor, sample->period); return 0; } From 594dcbf3186e2e1e5c08fa21e8826b90d347f23f Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 30 Oct 2013 16:06:59 +0900 Subject: [PATCH 060/101] perf ui/hist: Add support to accumulated hist stat Print the accumulated stat of a hist entry if requested. To do that, add a new HPP_PERCENT_ACC_FNS macro and generate a perf_hpp_fmt using it. The __hpp__sort_acc() function sorts entries by accumulated period value. When the accumulated periods of two entries are the same (i.e. a single-path callchain), put the caller above the callee, since accumulation naturally tends to place callers higher. Also add an "overhead_children" output field that can be selected by the user.
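For example, with a single-path chain main -> foo -> bar, all three entries end up with the same accumulated period, and the max_depth tie-break then orders them main, foo, bar (callers first). Assuming the recently added --fields option, the new field could be selected with something like:

    perf report --fields overhead,overhead_children,comm,dso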
Signed-off-by: Namhyung Kim Tested-by: Arun Sharma Tested-by: Rodrigo Campos Cc: Frederic Weisbecker Link: http://lkml.kernel.org/r/1401335910-16832-11-git-send-email-namhyung@kernel.org Signed-off-by: Jiri Olsa --- tools/perf/ui/hist.c | 99 ++++++++++++++++++++++++++++++++++++++++++ tools/perf/util/hist.h | 4 ++ tools/perf/util/sort.c | 1 + 3 files changed, 104 insertions(+) diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c index 4484f5bd1b14a6..0ce3e79b2ca744 100644 --- a/tools/perf/ui/hist.c +++ b/tools/perf/ui/hist.c @@ -104,6 +104,18 @@ int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he, return ret; } +int __hpp__fmt_acc(struct perf_hpp *hpp, struct hist_entry *he, + hpp_field_fn get_field, const char *fmt, + hpp_snprint_fn print_fn, bool fmt_percent) +{ + if (!symbol_conf.cumulate_callchain) { + return snprintf(hpp->buf, hpp->size, "%*s", + fmt_percent ? 8 : 12, "N/A"); + } + + return __hpp__fmt(hpp, he, get_field, fmt, print_fn, fmt_percent); +} + static int field_cmp(u64 field_a, u64 field_b) { if (field_a > field_b) @@ -160,6 +172,24 @@ static int __hpp__sort(struct hist_entry *a, struct hist_entry *b, return ret; } +static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b, + hpp_field_fn get_field) +{ + s64 ret = 0; + + if (symbol_conf.cumulate_callchain) { + /* + * Put caller above callee when they have equal period. + */ + ret = field_cmp(get_field(a), get_field(b)); + if (ret) + return ret; + + ret = b->callchain->max_depth - a->callchain->max_depth; + } + return ret; +} + #define __HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \ static int hpp__header_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \ struct perf_hpp *hpp, \ @@ -242,6 +272,34 @@ static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \ return __hpp__sort(a, b, he_get_##_field); \ } +#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field) \ +static u64 he_get_acc_##_field(struct hist_entry *he) \ +{ \ + return he->stat_acc->_field; \ +} \ + \ +static int hpp__color_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \ + struct perf_hpp *hpp, struct hist_entry *he) \ +{ \ + return __hpp__fmt_acc(hpp, he, he_get_acc_##_field, " %6.2f%%", \ + hpp_color_scnprintf, true); \ +} + +#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field) \ +static int hpp__entry_##_type(struct perf_hpp_fmt *_fmt __maybe_unused, \ + struct perf_hpp *hpp, struct hist_entry *he) \ +{ \ + const char *fmt = symbol_conf.field_sep ? 
" %.2f" : " %6.2f%%"; \ + return __hpp__fmt_acc(hpp, he, he_get_acc_##_field, fmt, \ + hpp_entry_scnprintf, true); \ +} + +#define __HPP_SORT_ACC_FN(_type, _field) \ +static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \ +{ \ + return __hpp__sort_acc(a, b, he_get_acc_##_field); \ +} + #define __HPP_ENTRY_RAW_FN(_type, _field) \ static u64 he_get_raw_##_field(struct hist_entry *he) \ { \ @@ -270,18 +328,27 @@ __HPP_COLOR_PERCENT_FN(_type, _field) \ __HPP_ENTRY_PERCENT_FN(_type, _field) \ __HPP_SORT_FN(_type, _field) +#define HPP_PERCENT_ACC_FNS(_type, _str, _field, _min_width, _unit_width)\ +__HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \ +__HPP_WIDTH_FN(_type, _min_width, _unit_width) \ +__HPP_COLOR_ACC_PERCENT_FN(_type, _field) \ +__HPP_ENTRY_ACC_PERCENT_FN(_type, _field) \ +__HPP_SORT_ACC_FN(_type, _field) + #define HPP_RAW_FNS(_type, _str, _field, _min_width, _unit_width) \ __HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \ __HPP_WIDTH_FN(_type, _min_width, _unit_width) \ __HPP_ENTRY_RAW_FN(_type, _field) \ __HPP_SORT_RAW_FN(_type, _field) +__HPP_HEADER_FN(overhead_self, "Self", 8, 8) HPP_PERCENT_FNS(overhead, "Overhead", period, 8, 8) HPP_PERCENT_FNS(overhead_sys, "sys", period_sys, 8, 8) HPP_PERCENT_FNS(overhead_us, "usr", period_us, 8, 8) HPP_PERCENT_FNS(overhead_guest_sys, "guest sys", period_guest_sys, 9, 8) HPP_PERCENT_FNS(overhead_guest_us, "guest usr", period_guest_us, 9, 8) +HPP_PERCENT_ACC_FNS(overhead_acc, "Children", period, 8, 8) HPP_RAW_FNS(samples, "Samples", nr_events, 12, 12) HPP_RAW_FNS(period, "Period", period, 12, 12) @@ -303,6 +370,17 @@ static int64_t hpp__nop_cmp(struct hist_entry *a __maybe_unused, .sort = hpp__sort_ ## _name, \ } +#define HPP__COLOR_ACC_PRINT_FNS(_name) \ + { \ + .header = hpp__header_ ## _name, \ + .width = hpp__width_ ## _name, \ + .color = hpp__color_ ## _name, \ + .entry = hpp__entry_ ## _name, \ + .cmp = hpp__nop_cmp, \ + .collapse = hpp__nop_cmp, \ + .sort = hpp__sort_ ## _name, \ + } + #define HPP__PRINT_FNS(_name) \ { \ .header = hpp__header_ ## _name, \ @@ -319,6 +397,7 @@ struct perf_hpp_fmt perf_hpp__format[] = { HPP__COLOR_PRINT_FNS(overhead_us), HPP__COLOR_PRINT_FNS(overhead_guest_sys), HPP__COLOR_PRINT_FNS(overhead_guest_us), + HPP__COLOR_ACC_PRINT_FNS(overhead_acc), HPP__PRINT_FNS(samples), HPP__PRINT_FNS(period) }; @@ -328,16 +407,23 @@ LIST_HEAD(perf_hpp__sort_list); #undef HPP__COLOR_PRINT_FNS +#undef HPP__COLOR_ACC_PRINT_FNS #undef HPP__PRINT_FNS #undef HPP_PERCENT_FNS +#undef HPP_PERCENT_ACC_FNS #undef HPP_RAW_FNS #undef __HPP_HEADER_FN #undef __HPP_WIDTH_FN #undef __HPP_COLOR_PERCENT_FN #undef __HPP_ENTRY_PERCENT_FN +#undef __HPP_COLOR_ACC_PERCENT_FN +#undef __HPP_ENTRY_ACC_PERCENT_FN #undef __HPP_ENTRY_RAW_FN +#undef __HPP_SORT_FN +#undef __HPP_SORT_ACC_FN +#undef __HPP_SORT_RAW_FN void perf_hpp__init(void) @@ -361,6 +447,13 @@ void perf_hpp__init(void) if (field_order) return; + if (symbol_conf.cumulate_callchain) { + perf_hpp__column_enable(PERF_HPP__OVERHEAD_ACC); + + perf_hpp__format[PERF_HPP__OVERHEAD].header = + hpp__header_overhead_self; + } + perf_hpp__column_enable(PERF_HPP__OVERHEAD); if (symbol_conf.show_cpu_utilization) { @@ -383,6 +476,12 @@ void perf_hpp__init(void) list = &perf_hpp__format[PERF_HPP__OVERHEAD].sort_list; if (list_empty(list)) list_add(list, &perf_hpp__sort_list); + + if (symbol_conf.cumulate_callchain) { + list = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC].sort_list; + if (list_empty(list)) + list_add(list, &perf_hpp__sort_list); + } } void 
perf_hpp__column_register(struct perf_hpp_fmt *format) diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 78409f95d01240..efd73e48902794 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -228,6 +228,7 @@ enum { PERF_HPP__OVERHEAD_US, PERF_HPP__OVERHEAD_GUEST_SYS, PERF_HPP__OVERHEAD_GUEST_US, + PERF_HPP__OVERHEAD_ACC, PERF_HPP__SAMPLES, PERF_HPP__PERIOD, @@ -254,6 +255,9 @@ typedef int (*hpp_snprint_fn)(struct perf_hpp *hpp, const char *fmt, ...); int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he, hpp_field_fn get_field, const char *fmt, hpp_snprint_fn print_fn, bool fmt_percent); +int __hpp__fmt_acc(struct perf_hpp *hpp, struct hist_entry *he, + hpp_field_fn get_field, const char *fmt, + hpp_snprint_fn print_fn, bool fmt_percent); static inline void advance_hpp(struct perf_hpp *hpp, int inc) { diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 901b9bece2ee43..9da8931d23948c 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -1061,6 +1061,7 @@ static struct hpp_dimension hpp_sort_dimensions[] = { DIM(PERF_HPP__OVERHEAD_US, "overhead_us"), DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"), DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"), + DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"), DIM(PERF_HPP__SAMPLES, "sample"), DIM(PERF_HPP__PERIOD, "period"), }; From 0434ddd21466a61cfc539ffc3a4cb3bdc67d82ec Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 30 Oct 2013 16:12:59 +0900 Subject: [PATCH 061/101] perf ui/browser: Add support to accumulated hist stat Print accumulated stat of a hist entry if requested. Signed-off-by: Namhyung Kim Tested-by: Arun Sharma Tested-by: Rodrigo Campos Cc: Frederic Weisbecker Link: http://lkml.kernel.org/r/1401335910-16832-12-git-send-email-namhyung@kernel.org Signed-off-by: Jiri Olsa --- tools/perf/ui/browsers/hists.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index 1c331b934ffc50..2dcbe3d15a5f34 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c @@ -651,13 +651,36 @@ hist_browser__hpp_color_##_type(struct perf_hpp_fmt *fmt __maybe_unused,\ __hpp__slsmg_color_printf, true); \ } +#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field) \ +static u64 __hpp_get_acc_##_field(struct hist_entry *he) \ +{ \ + return he->stat_acc->_field; \ +} \ + \ +static int \ +hist_browser__hpp_color_##_type(struct perf_hpp_fmt *fmt __maybe_unused,\ + struct perf_hpp *hpp, \ + struct hist_entry *he) \ +{ \ + if (!symbol_conf.cumulate_callchain) { \ + int ret = scnprintf(hpp->buf, hpp->size, "%8s", "N/A"); \ + slsmg_printf("%s", hpp->buf); \ + \ + return ret; \ + } \ + return __hpp__fmt(hpp, he, __hpp_get_acc_##_field, " %6.2f%%", \ + __hpp__slsmg_color_printf, true); \ +} + __HPP_COLOR_PERCENT_FN(overhead, period) __HPP_COLOR_PERCENT_FN(overhead_sys, period_sys) __HPP_COLOR_PERCENT_FN(overhead_us, period_us) __HPP_COLOR_PERCENT_FN(overhead_guest_sys, period_guest_sys) __HPP_COLOR_PERCENT_FN(overhead_guest_us, period_guest_us) +__HPP_COLOR_ACC_PERCENT_FN(overhead_acc, period) #undef __HPP_COLOR_PERCENT_FN +#undef __HPP_COLOR_ACC_PERCENT_FN void hist_browser__init_hpp(void) { @@ -671,6 +694,8 @@ void hist_browser__init_hpp(void) hist_browser__hpp_color_overhead_guest_sys; perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].color = hist_browser__hpp_color_overhead_guest_us; + perf_hpp__format[PERF_HPP__OVERHEAD_ACC].color = + hist_browser__hpp_color_overhead_acc; } static int 
hist_browser__show_entry(struct hist_browser *browser, From b09955b2a3d5fd02ed31d279f8c0ac29b32abe83 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 30 Oct 2013 16:15:23 +0900 Subject: [PATCH 062/101] perf ui/gtk: Add support to accumulated hist stat Print the accumulated stat of a hist entry if requested. Signed-off-by: Namhyung Kim Tested-by: Arun Sharma Tested-by: Rodrigo Campos Cc: Frederic Weisbecker Link: http://lkml.kernel.org/r/1401335910-16832-13-git-send-email-namhyung@kernel.org Signed-off-by: Jiri Olsa --- tools/perf/ui/gtk/hists.c | 17 +++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c index 9d90683914d46c..7e5da4af98d862 100644 --- a/tools/perf/ui/gtk/hists.c +++ b/tools/perf/ui/gtk/hists.c @@ -47,11 +47,26 @@ static int perf_gtk__hpp_color_##_type(struct perf_hpp_fmt *fmt __maybe_unused, __percent_color_snprintf, true); \ } +#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field) \ +static u64 he_get_acc_##_field(struct hist_entry *he) \ +{ \ + return he->stat_acc->_field; \ +} \ + \ +static int perf_gtk__hpp_color_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \ + struct perf_hpp *hpp, \ + struct hist_entry *he) \ +{ \ + return __hpp__fmt_acc(hpp, he, he_get_acc_##_field, " %6.2f%%", \ + __percent_color_snprintf, true); \ +} + __HPP_COLOR_PERCENT_FN(overhead, period) __HPP_COLOR_PERCENT_FN(overhead_sys, period_sys) __HPP_COLOR_PERCENT_FN(overhead_us, period_us) __HPP_COLOR_PERCENT_FN(overhead_guest_sys, period_guest_sys) __HPP_COLOR_PERCENT_FN(overhead_guest_us, period_guest_us) +__HPP_COLOR_ACC_PERCENT_FN(overhead_acc, period) #undef __HPP_COLOR_PERCENT_FN @@ -68,6 +83,8 @@ void perf_gtk__init_hpp(void) perf_gtk__hpp_color_overhead_guest_sys; perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].color = perf_gtk__hpp_color_overhead_guest_us; + perf_hpp__format[PERF_HPP__OVERHEAD_ACC].color = + perf_gtk__hpp_color_overhead_acc; } static void callchain_list__sym_name(struct callchain_list *cl, From 14135663f1d770bb057f8bf345e5436c985eb29c Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 31 Oct 2013 10:17:39 +0900 Subject: [PATCH 063/101] perf tools: Apply percent-limit to cumulative percentage If the -g cumulative option is given, entries that have no self overhead also need to be shown. So apply the percent-limit to the accumulated overhead percentage in this case.
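Illustrative numbers: with cumulation enabled and a 1% percent limit, an entry with 0.2% self overhead but 5% accumulated (children) overhead is now kept, because the limit is checked against the accumulated period, as in the helper added below:

    /* from hist_entry__get_percent_limit() */
    if (symbol_conf.cumulate_callchain)
        period = he->stat_acc->period;

    return period * 100.0 / total_period;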
Signed-off-by: Namhyung Kim Tested-by: Arun Sharma Tested-by: Rodrigo Campos Cc: Frederic Weisbecker Link: http://lkml.kernel.org/r/1401335910-16832-14-git-send-email-namhyung@kernel.org Signed-off-by: Jiri Olsa --- tools/perf/ui/browsers/hists.c | 40 ++++++++++------------------------ tools/perf/ui/gtk/hists.c | 6 ++--- tools/perf/ui/stdio/hist.c | 4 ++-- tools/perf/util/sort.h | 17 ++++++++++++++- 4 files changed, 31 insertions(+), 36 deletions(-) diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index 2dcbe3d15a5f34..5905acde5f1da5 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c @@ -37,7 +37,6 @@ static int hists__browser_title(struct hists *hists, char *bf, size_t size, static void hist_browser__update_nr_entries(struct hist_browser *hb); static struct rb_node *hists__filter_entries(struct rb_node *nd, - struct hists *hists, float min_pcnt); static bool hist_browser__has_filter(struct hist_browser *hb) @@ -319,7 +318,7 @@ __hist_browser__set_folding(struct hist_browser *browser, bool unfold) struct hists *hists = browser->hists; for (nd = rb_first(&hists->entries); - (nd = hists__filter_entries(nd, hists, browser->min_pcnt)) != NULL; + (nd = hists__filter_entries(nd, browser->min_pcnt)) != NULL; nd = rb_next(nd)) { struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node); hist_entry__set_folding(he, unfold); @@ -808,15 +807,12 @@ static unsigned int hist_browser__refresh(struct ui_browser *browser) for (nd = browser->top; nd; nd = rb_next(nd)) { struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); - u64 total = hists__total_period(h->hists); - float percent = 0.0; + float percent; if (h->filtered) continue; - if (total) - percent = h->stat.period * 100.0 / total; - + percent = hist_entry__get_percent_limit(h); if (percent < hb->min_pcnt) continue; @@ -829,16 +825,11 @@ static unsigned int hist_browser__refresh(struct ui_browser *browser) } static struct rb_node *hists__filter_entries(struct rb_node *nd, - struct hists *hists, float min_pcnt) { while (nd != NULL) { struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); - u64 total = hists__total_period(hists); - float percent = 0.0; - - if (total) - percent = h->stat.period * 100.0 / total; + float percent = hist_entry__get_percent_limit(h); if (!h->filtered && percent >= min_pcnt) return nd; @@ -850,16 +841,11 @@ static struct rb_node *hists__filter_entries(struct rb_node *nd, } static struct rb_node *hists__filter_prev_entries(struct rb_node *nd, - struct hists *hists, float min_pcnt) { while (nd != NULL) { struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); - u64 total = hists__total_period(hists); - float percent = 0.0; - - if (total) - percent = h->stat.period * 100.0 / total; + float percent = hist_entry__get_percent_limit(h); if (!h->filtered && percent >= min_pcnt) return nd; @@ -888,14 +874,14 @@ static void ui_browser__hists_seek(struct ui_browser *browser, switch (whence) { case SEEK_SET: nd = hists__filter_entries(rb_first(browser->entries), - hb->hists, hb->min_pcnt); + hb->min_pcnt); break; case SEEK_CUR: nd = browser->top; goto do_offset; case SEEK_END: nd = hists__filter_prev_entries(rb_last(browser->entries), - hb->hists, hb->min_pcnt); + hb->min_pcnt); first = false; break; default: @@ -938,8 +924,7 @@ static void ui_browser__hists_seek(struct ui_browser *browser, break; } } - nd = hists__filter_entries(rb_next(nd), hb->hists, - hb->min_pcnt); + nd = hists__filter_entries(rb_next(nd), hb->min_pcnt); if (nd == NULL) 
break; --offset; @@ -972,7 +957,7 @@ static void ui_browser__hists_seek(struct ui_browser *browser, } } - nd = hists__filter_prev_entries(rb_prev(nd), hb->hists, + nd = hists__filter_prev_entries(rb_prev(nd), hb->min_pcnt); if (nd == NULL) break; @@ -1151,7 +1136,6 @@ static int hist_browser__fprintf_entry(struct hist_browser *browser, static int hist_browser__fprintf(struct hist_browser *browser, FILE *fp) { struct rb_node *nd = hists__filter_entries(rb_first(browser->b.entries), - browser->hists, browser->min_pcnt); int printed = 0; @@ -1159,8 +1143,7 @@ static int hist_browser__fprintf(struct hist_browser *browser, FILE *fp) struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); printed += hist_browser__fprintf_entry(browser, h, fp); - nd = hists__filter_entries(rb_next(nd), browser->hists, - browser->min_pcnt); + nd = hists__filter_entries(rb_next(nd), browser->min_pcnt); } return printed; @@ -1397,8 +1380,7 @@ static void hist_browser__update_nr_entries(struct hist_browser *hb) return; } - while ((nd = hists__filter_entries(nd, hb->hists, - hb->min_pcnt)) != NULL) { + while ((nd = hists__filter_entries(nd, hb->min_pcnt)) != NULL) { nr_entries++; nd = rb_next(nd); } diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c index 7e5da4af98d862..03d6812d25dd40 100644 --- a/tools/perf/ui/gtk/hists.c +++ b/tools/perf/ui/gtk/hists.c @@ -226,14 +226,12 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists, struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); GtkTreeIter iter; u64 total = hists__total_period(h->hists); - float percent = 0.0; + float percent; if (h->filtered) continue; - if (total) - percent = h->stat.period * 100.0 / total; - + percent = hist_entry__get_percent_limit(h); if (percent < min_pcnt) continue; diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c index 9f57991025a90f..475d2f5c7e16cb 100644 --- a/tools/perf/ui/stdio/hist.c +++ b/tools/perf/ui/stdio/hist.c @@ -461,12 +461,12 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows, for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); - float percent = h->stat.period * 100.0 / - hists->stats.total_period; + float percent; if (h->filtered) continue; + percent = hist_entry__get_percent_limit(h); if (percent < min_pcnt) continue; diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index c9ffa031becdf6..426b873e16ff9f 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -20,7 +20,7 @@ #include "parse-options.h" #include "parse-events.h" - +#include "hist.h" #include "thread.h" extern regex_t parent_regex; @@ -131,6 +131,21 @@ static inline void hist_entry__add_pair(struct hist_entry *pair, list_add_tail(&pair->pairs.node, &he->pairs.head); } +static inline float hist_entry__get_percent_limit(struct hist_entry *he) +{ + u64 period = he->stat.period; + u64 total_period = hists__total_period(he->hists); + + if (unlikely(total_period == 0)) + return 0; + + if (symbol_conf.cumulate_callchain) + period = he->stat_acc->period; + + return period * 100.0 / total_period; +} + + enum sort_mode { SORT_MODE__NORMAL, SORT_MODE__BRANCH, From 77284de326e6d8c3b8e866cda5b415c86b522e61 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Mon, 16 Dec 2013 16:55:13 +0900 Subject: [PATCH 064/101] perf tools: Add more hpp helper functions Sometimes it needs to disable some columns at runtime. Add help functions to support that. 
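As a usage sketch, a caller that finds out at runtime that cumulation is not possible can do:

    if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
            symbol_conf.cumulate_callchain = false;
            perf_hpp__cancel_cumulate();	/* drops PERF_HPP__OVERHEAD_ACC */
    }

which is how a later patch in this series wires it up in perf report.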
Signed-off-by: Namhyung Kim Tested-by: Arun Sharma Tested-by: Rodrigo Campos Cc: Frederic Weisbecker Link: http://lkml.kernel.org/r/1401335910-16832-15-git-send-email-namhyung@kernel.org Signed-off-by: Jiri Olsa --- tools/perf/ui/hist.c | 17 +++++++++++++++++ tools/perf/util/hist.h | 4 ++++ 2 files changed, 21 insertions(+) diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c index 0ce3e79b2ca744..8ca638754acc8e 100644 --- a/tools/perf/ui/hist.c +++ b/tools/perf/ui/hist.c @@ -489,6 +489,11 @@ void perf_hpp__column_register(struct perf_hpp_fmt *format) list_add_tail(&format->list, &perf_hpp__list); } +void perf_hpp__column_unregister(struct perf_hpp_fmt *format) +{ + list_del(&format->list); +} + void perf_hpp__register_sort_field(struct perf_hpp_fmt *format) { list_add_tail(&format->sort_list, &perf_hpp__sort_list); @@ -500,6 +505,18 @@ void perf_hpp__column_enable(unsigned col) perf_hpp__column_register(&perf_hpp__format[col]); } +void perf_hpp__column_disable(unsigned col) +{ + BUG_ON(col >= PERF_HPP__MAX_INDEX); + perf_hpp__column_unregister(&perf_hpp__format[col]); +} + +void perf_hpp__cancel_cumulate(void) +{ + perf_hpp__column_disable(PERF_HPP__OVERHEAD_ACC); + perf_hpp__format[PERF_HPP__OVERHEAD].header = hpp__header_overhead; +} + void perf_hpp__setup_output_field(void) { struct perf_hpp_fmt *fmt; diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index efd73e48902794..99ad3cb433fbde 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -237,7 +237,11 @@ enum { void perf_hpp__init(void); void perf_hpp__column_register(struct perf_hpp_fmt *format); +void perf_hpp__column_unregister(struct perf_hpp_fmt *format); void perf_hpp__column_enable(unsigned col); +void perf_hpp__column_disable(unsigned col); +void perf_hpp__cancel_cumulate(void); + void perf_hpp__register_sort_field(struct perf_hpp_fmt *format); void perf_hpp__setup_output_field(void); void perf_hpp__reset_output_field(void); From 793aaaabb79803a0154fc6a98c472a29bb6d5cc9 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 30 Oct 2013 17:05:55 +0900 Subject: [PATCH 065/101] perf report: Add --children option The --children option is for showing accumulated overhead (period) value as well as self overhead. Signed-off-by: Namhyung Kim Tested-by: Arun Sharma Tested-by: Rodrigo Campos Cc: Frederic Weisbecker Link: http://lkml.kernel.org/r/1401335910-16832-16-git-send-email-namhyung@kernel.org Signed-off-by: Jiri Olsa --- tools/perf/Documentation/perf-report.txt | 7 ++++++- tools/perf/builtin-report.c | 15 ++++++++++++++- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt index a1b5185402d5d2..cefdf430d1b406 100644 --- a/tools/perf/Documentation/perf-report.txt +++ b/tools/perf/Documentation/perf-report.txt @@ -111,7 +111,7 @@ OPTIONS --fields=:: Specify output field - multiple keys can be specified in CSV format. Following fields are available: - overhead, overhead_sys, overhead_us, sample and period. + overhead, overhead_sys, overhead_us, overhead_children, sample and period. Also it can contain any sort key(s). By default, every sort keys not specified in -F will be appended @@ -163,6 +163,11 @@ OPTIONS Default: fractal,0.5,callee,function. +--children:: + Accumulate callchain of children to parent entry so that they can + show up in the output. The output will have a new "Children" column + and will be sorted on the data. It requires that callchains are recorded.
+ --max-stack:: Set the stack depth limit when parsing the callchain, anything beyond the specified depth will be ignored. This is a trade-off diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index e8fa9fea341f27..f27a8aad6a3f8c 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -185,6 +185,14 @@ static int report__setup_sample_type(struct report *rep) } } + if (symbol_conf.cumulate_callchain) { + /* Silently ignore if callchain is missing */ + if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) { + symbol_conf.cumulate_callchain = false; + perf_hpp__cancel_cumulate(); + } + } + if (sort__mode == SORT_MODE__BRANCH) { if (!is_pipe && !(sample_type & PERF_SAMPLE_BRANCH_STACK)) { @@ -568,6 +576,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused) OPT_CALLBACK_DEFAULT('g', "call-graph", &report, "output_type,min_percent[,print_limit],call_order", "Display callchains using output_type (graph, flat, fractal, or none) , min percent threshold, optional print limit, callchain order, key (function or address). " "Default: fractal,0.5,callee,function", &report_parse_callchain_opt, callchain_default_opt), + OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain, + "Accumulate callchains of children and show total overhead as well"), OPT_INTEGER(0, "max-stack", &report.max_stack, "Set the maximum stack depth when parsing the callchain, " "anything beyond the specified depth will be ignored. " @@ -660,8 +670,10 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused) has_br_stack = perf_header__has_feat(&session->header, HEADER_BRANCH_STACK); - if (branch_mode == -1 && has_br_stack) + if (branch_mode == -1 && has_br_stack) { sort__mode = SORT_MODE__BRANCH; + symbol_conf.cumulate_callchain = false; + } if (report.mem_mode) { if (sort__mode == SORT_MODE__BRANCH) { @@ -669,6 +681,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused) goto error; } sort__mode = SORT_MODE__MEMORY; + symbol_conf.cumulate_callchain = false; } if (setup_sorting() < 0) { From 8d8e645ceafd726b8317949f899e4b3acfb20d29 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 22 Jan 2013 18:09:46 +0900 Subject: [PATCH 066/101] perf report: Add report.children config option Add report.children config option for setting default value of callchain accumulation. It affects the report output only if perf.data contains callchain info. 
A user can write a .perfconfig file like below to enable accumulation by default: $ cat ~/.perfconfig [report] children = true And it can be disabled on the command line: $ perf report --no-children Signed-off-by: Namhyung Kim Tested-by: Arun Sharma Tested-by: Rodrigo Campos Cc: Frederic Weisbecker Link: http://lkml.kernel.org/r/1401335910-16832-17-git-send-email-namhyung@kernel.org Signed-off-by: Jiri Olsa --- tools/perf/builtin-report.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index f27a8aad6a3f8c..6cac509212ee7e 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -72,6 +72,10 @@ static int report__config(const char *var, const char *value, void *cb) rep->min_percent = strtof(value, NULL); return 0; } + if (!strcmp(var, "report.children")) { + symbol_conf.cumulate_callchain = perf_config_bool(var, value); + return 0; + } return perf_default_config(var, value, cb); } From 2bf1a12340bda1bf621f27b9892094a51b1297fd Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 20 Mar 2014 09:10:29 +0900 Subject: [PATCH 067/101] perf tools: Do not auto-remove Children column if --fields given Depending on the configuration, perf inserts/removes the Children column in the output automatically. But that might not be what the user wants if the --fields option was given explicitly. Signed-off-by: Namhyung Kim Tested-by: Rodrigo Campos Cc: Arun Sharma Cc: Frederic Weisbecker Link: http://lkml.kernel.org/r/1401335910-16832-18-git-send-email-namhyung@kernel.org Signed-off-by: Jiri Olsa --- tools/perf/ui/hist.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c index 8ca638754acc8e..498adb23c02ef9 100644 --- a/tools/perf/ui/hist.c +++ b/tools/perf/ui/hist.c @@ -513,6 +513,9 @@ void perf_hpp__column_disable(unsigned col) void perf_hpp__cancel_cumulate(void) { + if (field_order) + return; + perf_hpp__column_disable(PERF_HPP__OVERHEAD_ACC); perf_hpp__format[PERF_HPP__OVERHEAD].header = hpp__header_overhead; } From 9d3c02d7188866299eebe3c4a652c08140a71f40 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 7 Jan 2014 17:02:25 +0900 Subject: [PATCH 068/101] perf tools: Add callback function to hist_entry_iter The new ->add_entry_cb() is called after an entry is added to the histogram. It's used for code sharing between perf report and perf top. Note that ops->add_*_entry() should set iter->he properly in order for ->add_entry_cb() to be called. Also pass @arg to the callback function. It'll be used by perf top later.
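In sketch form, a minimal callback looks like this (hist_iter__count_cb and the nr_entries counter are hypothetical names; @single is true for the first entry added for a sample and false for the following cumulated entries):

    static int hist_iter__count_cb(struct hist_entry_iter *iter,
                                   struct addr_location *al __maybe_unused,
                                   bool single, void *arg)
    {
            int *nr_entries = arg;	/* passed through hist_entry_iter__add() */

            if (single)
                    (*nr_entries)++;
            return 0;
    }

It is attached by setting iter.add_entry_cb before calling hist_entry_iter__add(), as the perf report conversion below does.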
Signed-off-by: Namhyung Kim Tested-by: Arun Sharma Tested-by: Rodrigo Campos Cc: Frederic Weisbecker Link: http://lkml.kernel.org/r/87k393g999.fsf@sejong.aot.lge.com Signed-off-by: Jiri Olsa --- tools/perf/builtin-report.c | 61 ++++++++++++++++++++++++++---- tools/perf/tests/hists_filter.c | 2 +- tools/perf/tests/hists_output.c | 2 +- tools/perf/util/hist.c | 67 ++++++++++++--------------------- tools/perf/util/hist.h | 5 ++- 5 files changed, 84 insertions(+), 53 deletions(-) diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 6cac509212ee7e..21d830bafff32a 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -80,14 +80,59 @@ static int report__config(const char *var, const char *value, void *cb) return perf_default_config(var, value, cb); } -static void report__inc_stats(struct report *rep, - struct hist_entry *he __maybe_unused) +static void report__inc_stats(struct report *rep, struct hist_entry *he) { /* - * We cannot access @he at this time. Just assume it's a new entry. - * It'll be fixed once we have a callback mechanism in hist_iter. + * The @he is either of a newly created one or an existing one + * merging current sample. We only want to count a new one so + * checking ->nr_events being 1. */ - rep->nr_entries++; + if (he->stat.nr_events == 1) + rep->nr_entries++; +} + +static int hist_iter__report_callback(struct hist_entry_iter *iter, + struct addr_location *al, bool single, + void *arg) +{ + int err = 0; + struct report *rep = arg; + struct hist_entry *he = iter->he; + struct perf_evsel *evsel = iter->evsel; + struct mem_info *mi; + struct branch_info *bi; + + report__inc_stats(rep, he); + + if (!ui__has_annotation()) + return 0; + + if (sort__mode == SORT_MODE__BRANCH) { + bi = he->branch_info; + err = addr_map_symbol__inc_samples(&bi->from, evsel->idx); + if (err) + goto out; + + err = addr_map_symbol__inc_samples(&bi->to, evsel->idx); + + } else if (rep->mem_mode) { + mi = he->mem_info; + err = addr_map_symbol__inc_samples(&mi->daddr, evsel->idx); + if (err) + goto out; + + err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr); + + } else if (symbol_conf.cumulate_callchain) { + if (single) + err = hist_entry__inc_addr_samples(he, evsel->idx, + al->addr); + } else { + err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr); + } + +out: + return err; } static int process_sample_event(struct perf_tool *tool, @@ -100,6 +145,7 @@ static int process_sample_event(struct perf_tool *tool, struct addr_location al; struct hist_entry_iter iter = { .hide_unresolved = rep->hide_unresolved, + .add_entry_cb = hist_iter__report_callback, }; int ret; @@ -127,9 +173,8 @@ static int process_sample_event(struct perf_tool *tool, if (al.map != NULL) al.map->dso->hit = 1; - report__inc_stats(rep, NULL); - - ret = hist_entry_iter__add(&iter, &al, evsel, sample, rep->max_stack); + ret = hist_entry_iter__add(&iter, &al, evsel, sample, rep->max_stack, + rep); if (ret < 0) pr_debug("problem adding hist entry, skipping event\n"); diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c index 76b02e1de701a5..3539403bbad42f 100644 --- a/tools/perf/tests/hists_filter.c +++ b/tools/perf/tests/hists_filter.c @@ -82,7 +82,7 @@ static int add_hist_entries(struct perf_evlist *evlist, goto out; if (hist_entry_iter__add(&iter, &al, evsel, &sample, - PERF_MAX_STACK_DEPTH) < 0) + PERF_MAX_STACK_DEPTH, NULL) < 0) goto out; fake_samples[i].thread = al.thread; diff --git a/tools/perf/tests/hists_output.c 
b/tools/perf/tests/hists_output.c index 1308f88a916948..d40461ecd21066 100644 --- a/tools/perf/tests/hists_output.c +++ b/tools/perf/tests/hists_output.c @@ -71,7 +71,7 @@ static int add_hist_entries(struct hists *hists, struct machine *machine) goto out; if (hist_entry_iter__add(&iter, &al, evsel, &sample, - PERF_MAX_STACK_DEPTH) < 0) + PERF_MAX_STACK_DEPTH, NULL) < 0) goto out; fake_samples[i].thread = al.thread; diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index c6f5f5251aadc4..5a0a4b2cadc457 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -517,27 +517,16 @@ iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al } static int -iter_finish_mem_entry(struct hist_entry_iter *iter, struct addr_location *al) +iter_finish_mem_entry(struct hist_entry_iter *iter, + struct addr_location *al __maybe_unused) { struct perf_evsel *evsel = iter->evsel; struct hist_entry *he = iter->he; - struct mem_info *mx; int err = -EINVAL; if (he == NULL) goto out; - if (ui__has_annotation()) { - err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr); - if (err) - goto out; - - mx = he->mem_info; - err = addr_map_symbol__inc_samples(&mx->daddr, evsel->idx); - if (err) - goto out; - } - hists__inc_nr_samples(&evsel->hists, he->filtered); err = hist_entry__append_callchain(he, iter->sample); @@ -575,6 +564,9 @@ static int iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused, struct addr_location *al __maybe_unused) { + /* to avoid calling callback function */ + iter->he = NULL; + return 0; } @@ -599,7 +591,7 @@ iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al) static int iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al) { - struct branch_info *bi, *bx; + struct branch_info *bi; struct perf_evsel *evsel = iter->evsel; struct hist_entry *he = NULL; int i = iter->curr; @@ -619,17 +611,6 @@ iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *a if (he == NULL) return -ENOMEM; - if (ui__has_annotation()) { - bx = he->branch_info; - err = addr_map_symbol__inc_samples(&bx->from, evsel->idx); - if (err) - goto out; - - err = addr_map_symbol__inc_samples(&bx->to, evsel->idx); - if (err) - goto out; - } - hists__inc_nr_samples(&evsel->hists, he->filtered); out: @@ -673,9 +654,9 @@ iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location } static int -iter_finish_normal_entry(struct hist_entry_iter *iter, struct addr_location *al) +iter_finish_normal_entry(struct hist_entry_iter *iter, + struct addr_location *al __maybe_unused) { - int err; struct hist_entry *he = iter->he; struct perf_evsel *evsel = iter->evsel; struct perf_sample *sample = iter->sample; @@ -685,12 +666,6 @@ iter_finish_normal_entry(struct hist_entry_iter *iter, struct addr_location *al) iter->he = NULL; - if (ui__has_annotation()) { - err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr); - if (err) - return err; - } - hists__inc_nr_samples(&evsel->hists, he->filtered); return hist_entry__append_callchain(he, sample); @@ -746,13 +721,6 @@ iter_add_single_cumulative_entry(struct hist_entry_iter *iter, */ callchain_cursor_commit(&callchain_cursor); - /* - * The iter->he will be over-written after ->add_next_entry() - * called so inc stats for the original entry now. 
- */ - if (ui__has_annotation()) - err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr); - hists__inc_nr_samples(&evsel->hists, he->filtered); return err; @@ -802,8 +770,11 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter, * It's possible that it has cycles or recursive calls. */ for (i = 0; i < iter->curr; i++) { - if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) + if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) { + /* to avoid calling callback function */ + iter->he = NULL; return 0; + } } he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL, @@ -863,7 +834,7 @@ const struct hist_iter_ops hist_iter_cumulative = { int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al, struct perf_evsel *evsel, struct perf_sample *sample, - int max_stack_depth) + int max_stack_depth, void *arg) { int err, err2; @@ -883,10 +854,22 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al, if (err) goto out; + if (iter->he && iter->add_entry_cb) { + err = iter->add_entry_cb(iter, al, true, arg); + if (err) + goto out; + } + while (iter->ops->next_entry(iter, al)) { err = iter->ops->add_next_entry(iter, al); if (err) break; + + if (iter->he && iter->add_entry_cb) { + err = iter->add_entry_cb(iter, al, false, arg); + if (err) + goto out; + } } out: diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 99ad3cb433fbde..82b28ff980626d 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -119,6 +119,9 @@ struct hist_entry_iter { void *priv; const struct hist_iter_ops *ops; + /* user-defined callback function (optional) */ + int (*add_entry_cb)(struct hist_entry_iter *iter, + struct addr_location *al, bool single, void *arg); }; extern const struct hist_iter_ops hist_iter_normal; @@ -135,7 +138,7 @@ struct hist_entry *__hists__add_entry(struct hists *hists, bool sample_self); int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al, struct perf_evsel *evsel, struct perf_sample *sample, - int max_stack_depth); + int max_stack_depth, void *arg); int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right); int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right); From 7c50391f536ea6ed1e75b0f4d90922a2606da3de Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 7 Jan 2014 17:41:03 +0900 Subject: [PATCH 069/101] perf top: Convert to hist_entry_iter Reuse hist_entry_iter__add() function to share the similar code with perf report. Note that it needs to be called with hists.lock so tweak some internal functions not to deadlock or hold the lock too long. Signed-off-by: Namhyung Kim Tested-by: Arun Sharma Tested-by: Rodrigo Campos Link: http://lkml.kernel.org/r/1401335910-16832-20-git-send-email-namhyung@kernel.org Signed-off-by: Jiri Olsa --- tools/perf/builtin-top.c | 76 ++++++++++++++++++++++------------------ 1 file changed, 41 insertions(+), 35 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 12e2e1227e474e..b1cb5f589ade4f 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -196,6 +196,12 @@ static void perf_top__record_precise_ip(struct perf_top *top, pthread_mutex_unlock(¬es->lock); + /* + * This function is now called with he->hists->lock held. + * Release it before going to sleep. 
+ */ + pthread_mutex_unlock(&he->hists->lock); + if (err == -ERANGE && !he->ms.map->erange_warned) ui__warn_map_erange(he->ms.map, sym, ip); else if (err == -ENOMEM) { @@ -203,6 +209,8 @@ static void perf_top__record_precise_ip(struct perf_top *top, sym->name); sleep(1); } + + pthread_mutex_lock(&he->hists->lock); } static void perf_top__show_details(struct perf_top *top) @@ -238,24 +246,6 @@ static void perf_top__show_details(struct perf_top *top) pthread_mutex_unlock(¬es->lock); } -static struct hist_entry *perf_evsel__add_hist_entry(struct perf_evsel *evsel, - struct addr_location *al, - struct perf_sample *sample) -{ - struct hist_entry *he; - - pthread_mutex_lock(&evsel->hists.lock); - he = __hists__add_entry(&evsel->hists, al, NULL, NULL, NULL, - sample->period, sample->weight, - sample->transaction, true); - pthread_mutex_unlock(&evsel->hists.lock); - if (he == NULL) - return NULL; - - hists__inc_nr_samples(&evsel->hists, he->filtered); - return he; -} - static void perf_top__print_sym_table(struct perf_top *top) { char bf[160]; @@ -659,6 +649,26 @@ static int symbol_filter(struct map *map __maybe_unused, struct symbol *sym) return 0; } +static int hist_iter__top_callback(struct hist_entry_iter *iter, + struct addr_location *al, bool single, + void *arg) +{ + struct perf_top *top = arg; + struct hist_entry *he = iter->he; + struct perf_evsel *evsel = iter->evsel; + + if (sort__has_sym && single) { + u64 ip = al->addr; + + if (al->map) + ip = al->map->unmap_ip(al->map, ip); + + perf_top__record_precise_ip(top, he, evsel->idx, ip); + } + + return 0; +} + static void perf_event__process_sample(struct perf_tool *tool, const union perf_event *event, struct perf_evsel *evsel, @@ -666,8 +676,6 @@ static void perf_event__process_sample(struct perf_tool *tool, struct machine *machine) { struct perf_top *top = container_of(tool, struct perf_top, tool); - struct symbol *parent = NULL; - u64 ip = sample->ip; struct addr_location al; int err; @@ -742,25 +750,23 @@ static void perf_event__process_sample(struct perf_tool *tool, } if (al.sym == NULL || !al.sym->ignore) { - struct hist_entry *he; + struct hist_entry_iter iter = { + .add_entry_cb = hist_iter__top_callback, + }; - err = sample__resolve_callchain(sample, &parent, evsel, &al, - top->max_stack); - if (err) - return; + if (symbol_conf.cumulate_callchain) + iter.ops = &hist_iter_cumulative; + else + iter.ops = &hist_iter_normal; - he = perf_evsel__add_hist_entry(evsel, &al, sample); - if (he == NULL) { - pr_err("Problem incrementing symbol period, skipping event\n"); - return; - } + pthread_mutex_lock(&evsel->hists.lock); - err = hist_entry__append_callchain(he, sample); - if (err) - return; + err = hist_entry_iter__add(&iter, &al, evsel, sample, + top->max_stack, top); + if (err < 0) + pr_err("Problem incrementing symbol period, skipping event\n"); - if (sort__has_sym) - perf_top__record_precise_ip(top, he, evsel->idx, ip); + pthread_mutex_unlock(&evsel->hists.lock); } return; From 1432ec342ece6a7ef78825ae3a9ba1c91686f71d Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 30 Oct 2013 17:05:55 +0900 Subject: [PATCH 070/101] perf top: Add --children option The --children option is for showing accumulated overhead (period) value as well as self overhead. It should be used with one of -g or --call-graph option. 
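Usage sketch:

    $ perf top -g --children
    $ perf top --call-graph fp --children

If callchain recording is not enabled there is nothing to accumulate, so the hunk below disables cumulation and removes the Children column again.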
Signed-off-by: Namhyung Kim Tested-by: Arun Sharma Tested-by: Rodrigo Campos Cc: Frederic Weisbecker Link: http://lkml.kernel.org/r/1401335910-16832-21-git-send-email-namhyung@kernel.org Signed-off-by: Jiri Olsa --- tools/perf/Documentation/perf-top.txt | 8 +++++++- tools/perf/builtin-top.c | 7 +++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt index dcfa54c851e9d6..180ae02137a519 100644 --- a/tools/perf/Documentation/perf-top.txt +++ b/tools/perf/Documentation/perf-top.txt @@ -119,7 +119,7 @@ Default is to monitor all CPUS. --fields=:: Specify output field - multiple keys can be specified in CSV format. Following fields are available: - overhead, overhead_sys, overhead_us, sample and period. + overhead, overhead_sys, overhead_us, overhead_children, sample and period. Also it can contain any sort key(s). By default, every sort keys not specified in --field will be appended @@ -161,6 +161,12 @@ Default is to monitor all CPUS. Setup and enable call-graph (stack chain/backtrace) recording, implies -g. +--children:: + Accumulate callchain of children to parent entry so that they can + show up in the output. The output will have a new "Children" column + and will be sorted on the data. It requires the -g/--call-graph option + to be enabled. + --max-stack:: Set the stack depth limit when parsing the callchain, anything beyond the specified depth will be ignored. This is a trade-off diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index b1cb5f589ade4f..fea55e3fc93167 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -1098,6 +1098,8 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) OPT_CALLBACK(0, "call-graph", &top.record_opts, "mode[,dump_size]", record_callchain_help, &parse_callchain_opt), + OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain, + "Accumulate callchains of children and show total overhead as well"), OPT_INTEGER(0, "max-stack", &top.max_stack, "Set the maximum stack depth when parsing the callchain. " "Default: " __stringify(PERF_MAX_STACK_DEPTH)), @@ -1203,6 +1205,11 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) top.sym_evsel = perf_evlist__first(top.evlist); + if (!symbol_conf.use_callchain) { + symbol_conf.cumulate_callchain = false; + perf_hpp__cancel_cumulate(); + } + symbol_conf.priv_size = sizeof(struct annotation); symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL); From 104ac991bd821773cba6f262f97a4a752ed76dd5 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 22 Jan 2013 18:09:46 +0900 Subject: [PATCH 071/101] perf top: Add top.children config option Add a top.children config option for setting the default value of callchain accumulation. It affects the output only if one of the -g or --call-graph options is given as well.
A user can write a .perfconfig file like below to enable accumulation by default: $ cat ~/.perfconfig [top] children = true And it can be disabled on the command line: $ perf top --no-children Signed-off-by: Namhyung Kim Tested-by: Arun Sharma Tested-by: Rodrigo Campos Cc: Frederic Weisbecker Link: http://lkml.kernel.org/r/1401335910-16832-22-git-send-email-namhyung@kernel.org Signed-off-by: Jiri Olsa --- tools/perf/builtin-top.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index fea55e3fc93167..377971dc89a3b2 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -1004,6 +1004,10 @@ static int perf_top_config(const char *var, const char *value, void *cb) if (!strcmp(var, "top.call-graph")) return record_parse_callchain(value, &top->record_opts); + if (!strcmp(var, "top.children")) { + symbol_conf.cumulate_callchain = perf_config_bool(var, value); + return 0; + } return perf_default_config(var, value, cb); } From e511db5e94f056083e821aa3ab74b03ad1216e14 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 24 Dec 2013 16:19:25 +0900 Subject: [PATCH 072/101] perf tools: Enable --children option by default Now perf top and perf report will show the Children column by default if the data has callchain information. Requested-by: Ingo Molnar Signed-off-by: Namhyung Kim Tested-by: Rodrigo Campos Tested-by: Arun Sharma Cc: Frederic Weisbecker Link: http://lkml.kernel.org/r/1401335910-16832-23-git-send-email-namhyung@kernel.org Signed-off-by: Jiri Olsa --- tools/perf/util/symbol.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 95e24977993121..7b9096f29cdbf7 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -29,11 +29,12 @@ int vmlinux_path__nr_entries; char **vmlinux_path; struct symbol_conf symbol_conf = { - .use_modules = true, - .try_vmlinux_path = true, - .annotate_src = true, - .demangle = true, - .symfs = "", + .use_modules = true, + .try_vmlinux_path = true, + .annotate_src = true, + .demangle = true, + .cumulate_callchain = true, + .symfs = "", }; static enum dso_binary_type binary_type_symtab[] = { From 56772ad4750e23460a4b80f7ece5377d8c922ee1 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Fri, 23 May 2014 18:31:52 +0900 Subject: [PATCH 073/101] perf ui/stdio: Fix invalid percentage value of cumulated hist entries On stdio, invalid percentage values are shown for callchains of cumulated hist entries, because only the self period is used as the base. With the --children behavior, callchain info is always added to the cumulated entries, so the accumulated period should be used in that case. Before: # Children Self Command Shared Object Symbol # ........ ........ ....... ................. ................ # 61.22% 0.32% swapper [kernel.kallsyms] [k] cpu_idle | --- cpu_idle | |--16530.76%-- start_secondary | |--2758.70%-- rest_init | start_kernel | x86_64_start_reservations | x86_64_start_kernel --6837850969203030.00%-- [...] After: # Children Self Command Shared Object Symbol # ........ ........ ....... ................. ................
# 61.22% 0.32% swapper [kernel.kallsyms] [k] cpu_idle | --- cpu_idle | |--85.70%-- start_secondary | --14.30%-- rest_init start_kernel x86_64_start_reservations x86_64_start_kernel Signed-off-by: Namhyung Kim Cc: Arun Sharma Cc: Frederic Weisbecker Link: http://lkml.kernel.org/r/1401335910-16832-24-git-send-email-namhyung@kernel.org Signed-off-by: Jiri Olsa --- tools/perf/ui/stdio/hist.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c index 475d2f5c7e16cb..90122abd372133 100644 --- a/tools/perf/ui/stdio/hist.c +++ b/tools/perf/ui/stdio/hist.c @@ -271,7 +271,9 @@ static size_t hist_entry_callchain__fprintf(struct hist_entry *he, { switch (callchain_param.mode) { case CHAIN_GRAPH_REL: - return callchain__fprintf_graph(fp, &he->sorted_chain, he->stat.period, + return callchain__fprintf_graph(fp, &he->sorted_chain, + symbol_conf.cumulate_callchain ? + he->stat_acc->period : he->stat.period, left_margin); break; case CHAIN_GRAPH_ABS: From e4cf6f886f3158061fb589df9ed452f9b30f67f1 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Fri, 23 May 2014 18:49:33 +0900 Subject: [PATCH 074/101] perf ui/gtk: Fix callchain display With the current output field change, the GTK browser cannot display callchain information correctly since it cannot determine where the symbol column is. For now, use the last column since that works for most cases. It also has the same percentage problem as the stdio code. Signed-off-by: Namhyung Kim Cc: Arun Sharma Cc: Frederic Weisbecker Link: http://lkml.kernel.org/r/1401335910-16832-25-git-send-email-namhyung@kernel.org Signed-off-by: Jiri Olsa --- tools/perf/ui/gtk/hists.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c index 03d6812d25dd40..6ca60e482cdc25 100644 --- a/tools/perf/ui/gtk/hists.c +++ b/tools/perf/ui/gtk/hists.c @@ -198,6 +198,13 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists, if (perf_hpp__should_skip(fmt)) continue; + /* + * XXX no way to determine where the symbol column is.. + * Just use last column for now. + */ + if (perf_hpp__is_sort_entry(fmt)) + sym_col = col_idx; + fmt->header(fmt, &hpp, hists_to_evsel(hists)); gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view), @@ -253,7 +260,8 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists, if (symbol_conf.use_callchain && sort__has_sym) { if (callchain_param.mode == CHAIN_GRAPH_REL) - total = h->stat.period; + total = symbol_conf.cumulate_callchain ? + h->stat_acc->period : h->stat.period; perf_gtk__add_callchain(&h->sorted_chain, store, &iter, sym_col, total); From d69b2962a0aebd431cdda939f4418dd606e2f77e Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Fri, 23 May 2014 10:59:01 +0900 Subject: [PATCH 075/101] perf tools: Reset output/sort order to default When reset_output_field() is called, also reset the field/sort order to NULL so that they get their default values back. This is needed for testing.
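A test can then set its own ordering and restore the defaults afterwards, following this pattern (sketch; the exact order string is hypothetical):

    field_order = "overhead,dso,sym";
    setup_sorting();
    /* ... add fake hist entries, resort, verify the output ... */
    reset_output_field();	/* field_order/sort_order are NULL again */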
Signed-off-by: Namhyung Kim Cc: Arun Sharma Cc: Frederic Weisbecker Link: http://lkml.kernel.org/r/1401335910-16832-26-git-send-email-namhyung@kernel.org Signed-off-by: Jiri Olsa --- tools/perf/util/sort.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 9da8931d23948c..254f583a52abff 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -1582,6 +1582,9 @@ void reset_output_field(void) sort__has_sym = 0; sort__has_dso = 0; + field_order = NULL; + sort_order = NULL; + reset_dimensions(); perf_hpp__reset_output_field(); } From a1891aa4805fa77d98db44ec6e1d93e2921828fb Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Fri, 23 May 2014 14:59:57 +0900 Subject: [PATCH 076/101] perf tests: Define and use symbolic names for fake symbols In various histogram test cases, fake symbols are used as raw numbers. Define macros for each pid, map and symbol to increase readability somewhat. Signed-off-by: Namhyung Kim Cc: Arun Sharma Cc: Frederic Weisbecker Link: http://lkml.kernel.org/r/1401335910-16832-27-git-send-email-namhyung@kernel.org Signed-off-by: Jiri Olsa --- tools/perf/tests/hists_common.c | 47 ++++++++++++++++++--------------- tools/perf/tests/hists_common.h | 32 ++++++++++++++++++++-- tools/perf/tests/hists_filter.c | 23 ++++++++-------- tools/perf/tests/hists_link.c | 32 +++++++++++----------- tools/perf/tests/hists_output.c | 20 +++++++------- 5 files changed, 92 insertions(+), 62 deletions(-) diff --git a/tools/perf/tests/hists_common.c b/tools/perf/tests/hists_common.c index e4e01aadc3be4c..e4e120d3c16f78 100644 --- a/tools/perf/tests/hists_common.c +++ b/tools/perf/tests/hists_common.c @@ -12,9 +12,9 @@ static struct { u32 pid; const char *comm; } fake_threads[] = { - { 100, "perf" }, - { 200, "perf" }, - { 300, "bash" }, + { FAKE_PID_PERF1, "perf" }, + { FAKE_PID_PERF2, "perf" }, + { FAKE_PID_BASH, "bash" }, }; static struct { @@ -22,15 +22,15 @@ static struct { u64 start; const char *filename; } fake_mmap_info[] = { - { 100, 0x40000, "perf" }, - { 100, 0x50000, "libc" }, - { 100, 0xf0000, "[kernel]" }, - { 200, 0x40000, "perf" }, - { 200, 0x50000, "libc" }, - { 200, 0xf0000, "[kernel]" }, - { 300, 0x40000, "bash" }, - { 300, 0x50000, "libc" }, - { 300, 0xf0000, "[kernel]" }, + { FAKE_PID_PERF1, FAKE_MAP_PERF, "perf" }, + { FAKE_PID_PERF1, FAKE_MAP_LIBC, "libc" }, + { FAKE_PID_PERF1, FAKE_MAP_KERNEL, "[kernel]" }, + { FAKE_PID_PERF2, FAKE_MAP_PERF, "perf" }, + { FAKE_PID_PERF2, FAKE_MAP_LIBC, "libc" }, + { FAKE_PID_PERF2, FAKE_MAP_KERNEL, "[kernel]" }, + { FAKE_PID_BASH, FAKE_MAP_BASH, "bash" }, + { FAKE_PID_BASH, FAKE_MAP_LIBC, "libc" }, + { FAKE_PID_BASH, FAKE_MAP_KERNEL, "[kernel]" }, }; struct fake_sym { @@ -40,27 +40,30 @@ struct fake_sym { }; static struct fake_sym perf_syms[] = { - { 700, 100, "main" }, - { 800, 100, "run_command" }, - { 900, 100, "cmd_record" }, + { FAKE_SYM_OFFSET1, FAKE_SYM_LENGTH, "main" }, + { FAKE_SYM_OFFSET2, FAKE_SYM_LENGTH, "run_command" }, + { FAKE_SYM_OFFSET3, FAKE_SYM_LENGTH, "cmd_record" }, }; static struct fake_sym bash_syms[] = { - { 700, 100, "main" }, - { 800, 100, "xmalloc" }, - { 900, 100, "xfree" }, + { FAKE_SYM_OFFSET1, FAKE_SYM_LENGTH, "main" }, + { FAKE_SYM_OFFSET2, FAKE_SYM_LENGTH, "xmalloc" }, + { FAKE_SYM_OFFSET3, FAKE_SYM_LENGTH, "xfree" }, }; static struct fake_sym libc_syms[] = { - { 700, 100, "malloc" }, - { 800, 100, "free" }, - { 900, 100, "realloc" }, + { FAKE_SYM_OFFSET1, FAKE_SYM_LENGTH, "malloc" }, + { FAKE_SYM_OFFSET2, FAKE_SYM_LENGTH, "free" }, + {
FAKE_SYM_OFFSET3, FAKE_SYM_LENGTH, "realloc" }, }; static struct fake_sym kernel_syms[] = { - { 700, 100, "schedule" }, - { 800, 100, "page_fault" }, - { 900, 100, "sys_perf_event_open" }, + { FAKE_SYM_OFFSET1, FAKE_SYM_LENGTH, "schedule" }, + { FAKE_SYM_OFFSET2, FAKE_SYM_LENGTH, "page_fault" }, + { FAKE_SYM_OFFSET3, FAKE_SYM_LENGTH, "sys_perf_event_open" }, }; static struct { @@ -102,7 +105,7 @@ struct machine *setup_fake_machine(struct machines *machines) .pid = fake_mmap_info[i].pid, .tid = fake_mmap_info[i].pid, .start = fake_mmap_info[i].start, - .len = 0x1000ULL, + .len = FAKE_MAP_LENGTH, .pgoff = 0ULL, }, }; diff --git a/tools/perf/tests/hists_common.h b/tools/perf/tests/hists_common.h index 1415ae69d7b6fa..888254e8665c41 100644 --- a/tools/perf/tests/hists_common.h +++ b/tools/perf/tests/hists_common.h @@ -4,6 +4,34 @@ struct machine; struct machines; +#define FAKE_PID_PERF1 100 +#define FAKE_PID_PERF2 200 +#define FAKE_PID_BASH 300 + +#define FAKE_MAP_PERF 0x400000 +#define FAKE_MAP_BASH 0x400000 +#define FAKE_MAP_LIBC 0x500000 +#define FAKE_MAP_KERNEL 0xf00000 +#define FAKE_MAP_LENGTH 0x100000 + +#define FAKE_SYM_OFFSET1 700 +#define FAKE_SYM_OFFSET2 800 +#define FAKE_SYM_OFFSET3 900 +#define FAKE_SYM_LENGTH 100 + +#define FAKE_IP_PERF_MAIN FAKE_MAP_PERF + FAKE_SYM_OFFSET1 +#define FAKE_IP_PERF_RUN_COMMAND FAKE_MAP_PERF + FAKE_SYM_OFFSET2 +#define FAKE_IP_PERF_CMD_RECORD FAKE_MAP_PERF + FAKE_SYM_OFFSET3 +#define FAKE_IP_BASH_MAIN FAKE_MAP_BASH + FAKE_SYM_OFFSET1 +#define FAKE_IP_BASH_XMALLOC FAKE_MAP_BASH + FAKE_SYM_OFFSET2 +#define FAKE_IP_BASH_XFREE FAKE_MAP_BASH + FAKE_SYM_OFFSET3 +#define FAKE_IP_LIBC_MALLOC FAKE_MAP_LIBC + FAKE_SYM_OFFSET1 +#define FAKE_IP_LIBC_FREE FAKE_MAP_LIBC + FAKE_SYM_OFFSET2 +#define FAKE_IP_LIBC_REALLOC FAKE_MAP_LIBC + FAKE_SYM_OFFSET3 +#define FAKE_IP_KERNEL_SCHEDULE FAKE_MAP_KERNEL + FAKE_SYM_OFFSET1 +#define FAKE_IP_KERNEL_PAGE_FAULT FAKE_MAP_KERNEL + FAKE_SYM_OFFSET2 +#define FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN FAKE_MAP_KERNEL + FAKE_SYM_OFFSET3 + /* * The setup_fake_machine() provides a test environment which consists * of 3 processes that have 3 mappings and in turn, have 3 symbols @@ -13,7 +41,7 @@ struct machines; * ............. ............. ................... 
* perf: 100 perf main * perf: 100 perf run_command - * perf: 100 perf comd_record + * perf: 100 perf cmd_record * perf: 100 libc malloc * perf: 100 libc free * perf: 100 libc realloc @@ -22,7 +50,7 @@ struct machines; * perf: 100 [kernel] sys_perf_event_open * perf: 200 perf main * perf: 200 perf run_command - * perf: 200 perf comd_record + * perf: 200 perf cmd_record * perf: 200 libc malloc * perf: 200 libc free * perf: 200 libc realloc diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c index 3539403bbad42f..821f581fd93036 100644 --- a/tools/perf/tests/hists_filter.c +++ b/tools/perf/tests/hists_filter.c @@ -21,25 +21,25 @@ struct sample { /* For the numbers, see hists_common.c */ static struct sample fake_samples[] = { /* perf [kernel] schedule() */ - { .pid = 100, .ip = 0xf0000 + 700, }, + { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, }, /* perf [perf] main() */ - { .pid = 100, .ip = 0x40000 + 700, }, + { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, }, /* perf [libc] malloc() */ - { .pid = 100, .ip = 0x50000 + 700, }, + { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, }, /* perf [perf] main() */ - { .pid = 200, .ip = 0x40000 + 700, }, /* will be merged */ + { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, }, /* will be merged */ /* perf [perf] cmd_record() */ - { .pid = 200, .ip = 0x40000 + 900, }, + { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_CMD_RECORD, }, /* perf [kernel] page_fault() */ - { .pid = 200, .ip = 0xf0000 + 800, }, + { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, }, /* bash [bash] main() */ - { .pid = 300, .ip = 0x40000 + 700, }, + { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_MAIN, }, /* bash [bash] xmalloc() */ - { .pid = 300, .ip = 0x40000 + 800, }, + { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, }, /* bash [libc] malloc() */ - { .pid = 300, .ip = 0x50000 + 700, }, + { .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_MALLOC, }, /* bash [kernel] page_fault() */ - { .pid = 300, .ip = 0xf0000 + 800, }, + { .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, }, }; static int add_hist_entries(struct perf_evlist *evlist, @@ -47,7 +47,7 @@ static int add_hist_entries(struct perf_evlist *evlist, { struct perf_evsel *evsel; struct addr_location al; - struct perf_sample sample = { .cpu = 0, }; + struct perf_sample sample = { .period = 100, }; size_t i; /* @@ -75,7 +75,6 @@ static int add_hist_entries(struct perf_evlist *evlist, sample.pid = fake_samples[i].pid; sample.tid = fake_samples[i].pid; sample.ip = fake_samples[i].ip; - sample.period = 100; if (perf_event__preprocess_sample(&event, machine, &al, &sample) < 0) diff --git a/tools/perf/tests/hists_link.c b/tools/perf/tests/hists_link.c index ca6693b37cd71d..d4b34b0f50a228 100644 --- a/tools/perf/tests/hists_link.c +++ b/tools/perf/tests/hists_link.c @@ -21,41 +21,41 @@ struct sample { /* For the numbers, see hists_common.c */ static struct sample fake_common_samples[] = { /* perf [kernel] schedule() */ - { .pid = 100, .ip = 0xf0000 + 700, }, + { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, }, /* perf [perf] main() */ - { .pid = 200, .ip = 0x40000 + 700, }, + { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, }, /* perf [perf] cmd_record() */ - { .pid = 200, .ip = 0x40000 + 900, }, + { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_CMD_RECORD, }, /* bash [bash] xmalloc() */ - { .pid = 300, .ip = 0x40000 + 800, }, + { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, }, /* bash [libc] malloc() */ - { .pid = 300, .ip = 0x50000 + 700, }, + { .pid = FAKE_PID_BASH, .ip = 
FAKE_IP_LIBC_MALLOC, }, }; static struct sample fake_samples[][5] = { { /* perf [perf] run_command() */ - { .pid = 100, .ip = 0x40000 + 800, }, + { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_RUN_COMMAND, }, /* perf [libc] malloc() */ - { .pid = 100, .ip = 0x50000 + 700, }, + { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, }, /* perf [kernel] page_fault() */ - { .pid = 100, .ip = 0xf0000 + 800, }, + { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_PAGE_FAULT, }, /* perf [kernel] sys_perf_event_open() */ - { .pid = 200, .ip = 0xf0000 + 900, }, + { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN, }, /* bash [libc] free() */ - { .pid = 300, .ip = 0x50000 + 800, }, + { .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_FREE, }, }, { /* perf [libc] free() */ - { .pid = 200, .ip = 0x50000 + 800, }, + { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_LIBC_FREE, }, /* bash [libc] malloc() */ - { .pid = 300, .ip = 0x50000 + 700, }, /* will be merged */ + { .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_MALLOC, }, /* will be merged */ /* bash [bash] xfee() */ - { .pid = 300, .ip = 0x40000 + 900, }, + { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XFREE, }, /* bash [libc] realloc() */ - { .pid = 300, .ip = 0x50000 + 900, }, + { .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_REALLOC, }, /* bash [kernel] page_fault() */ - { .pid = 300, .ip = 0xf0000 + 800, }, + { .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, }, }, }; @@ -64,7 +64,7 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine) struct perf_evsel *evsel; struct addr_location al; struct hist_entry *he; - struct perf_sample sample = { .cpu = 0, }; + struct perf_sample sample = { .period = 1, }; size_t i = 0, k; /* diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c index d40461ecd21066..e3bbd6c54c1b02 100644 --- a/tools/perf/tests/hists_output.c +++ b/tools/perf/tests/hists_output.c @@ -22,25 +22,25 @@ struct sample { /* For the numbers, see hists_common.c */ static struct sample fake_samples[] = { /* perf [kernel] schedule() */ - { .cpu = 0, .pid = 100, .ip = 0xf0000 + 700, }, + { .cpu = 0, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, }, /* perf [perf] main() */ - { .cpu = 1, .pid = 100, .ip = 0x40000 + 700, }, + { .cpu = 1, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, }, /* perf [perf] cmd_record() */ - { .cpu = 1, .pid = 100, .ip = 0x40000 + 900, }, + { .cpu = 1, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_CMD_RECORD, }, /* perf [libc] malloc() */ - { .cpu = 1, .pid = 100, .ip = 0x50000 + 700, }, + { .cpu = 1, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, }, /* perf [libc] free() */ - { .cpu = 2, .pid = 100, .ip = 0x50000 + 800, }, + { .cpu = 2, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_FREE, }, /* perf [perf] main() */ - { .cpu = 2, .pid = 200, .ip = 0x40000 + 700, }, + { .cpu = 2, .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, }, /* perf [kernel] page_fault() */ - { .cpu = 2, .pid = 200, .ip = 0xf0000 + 800, }, + { .cpu = 2, .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, }, /* bash [bash] main() */ - { .cpu = 3, .pid = 300, .ip = 0x40000 + 700, }, + { .cpu = 3, .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_MAIN, }, /* bash [bash] xmalloc() */ - { .cpu = 0, .pid = 300, .ip = 0x40000 + 800, }, + { .cpu = 0, .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, }, /* bash [kernel] page_fault() */ - { .cpu = 1, .pid = 300, .ip = 0xf0000 + 800, }, + { .cpu = 1, .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, }, }; static int add_hist_entries(struct hists *hists, struct machine 
*machine) From 0506aecce999d4370b979892f88cf1118cfe8dcb Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Fri, 23 May 2014 18:04:42 +0900 Subject: [PATCH 077/101] perf tests: Add a test case for cumulating callchains Add a new testcase to verify that the --children option works correctly. Signed-off-by: Namhyung Kim Cc: Arun Sharma Cc: Frederic Weisbecker Link: http://lkml.kernel.org/r/1401335910-16832-28-git-send-email-namhyung@kernel.org Signed-off-by: Jiri Olsa --- tools/perf/Makefile.perf | 1 + tools/perf/tests/builtin-test.c | 4 + tools/perf/tests/hists_common.c | 5 +- tools/perf/tests/hists_cumulate.c | 726 ++++++++++++++++++++++++++++++ tools/perf/tests/tests.h | 1 + 5 files changed, 735 insertions(+), 2 deletions(-) create mode 100644 tools/perf/tests/hists_cumulate.c diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 02f0a4dd1a80f3..67f7c0575b2614 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -400,6 +400,7 @@ LIB_OBJS += $(OUTPUT)tests/hists_common.o LIB_OBJS += $(OUTPUT)tests/hists_link.o LIB_OBJS += $(OUTPUT)tests/hists_filter.o LIB_OBJS += $(OUTPUT)tests/hists_output.o +LIB_OBJS += $(OUTPUT)tests/hists_cumulate.o LIB_OBJS += $(OUTPUT)tests/python-use.o LIB_OBJS += $(OUTPUT)tests/bp_signal.o LIB_OBJS += $(OUTPUT)tests/bp_signal_overflow.o diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index 831f52cae1972e..802e3cd50f6fb7 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c @@ -139,6 +139,10 @@ static struct test { .desc = "Test output sorting of hist entries", .func = test__hists_output, }, + { + .desc = "Test cumulation of child hist entries", + .func = test__hists_cumulate, + }, { .func = NULL, }, diff --git a/tools/perf/tests/hists_common.c b/tools/perf/tests/hists_common.c index e4e120d3c16f78..a62c091345163f 100644 --- a/tools/perf/tests/hists_common.c +++ b/tools/perf/tests/hists_common.c @@ -196,10 +196,11 @@ void print_hists_out(struct hists *hists) he = rb_entry(node, struct hist_entry, rb_node); if (!he->filtered) { - pr_info("%2d: entry: %8s:%5d [%-8s] %20s: period = %"PRIu64"\n", + pr_info("%2d: entry: %8s:%5d [%-8s] %20s: period = %"PRIu64"/%"PRIu64"\n", i, thread__comm_str(he->thread), he->thread->tid, he->ms.map->dso->short_name, - he->ms.sym->name, he->stat.period); + he->ms.sym->name, he->stat.period, + he->stat_acc ?
he->stat_acc->period : 0); } i++; diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c new file mode 100644 index 00000000000000..0ac240db2e24d9 --- /dev/null +++ b/tools/perf/tests/hists_cumulate.c @@ -0,0 +1,726 @@ +#include "perf.h" +#include "util/debug.h" +#include "util/symbol.h" +#include "util/sort.h" +#include "util/evsel.h" +#include "util/evlist.h" +#include "util/machine.h" +#include "util/thread.h" +#include "util/parse-events.h" +#include "tests/tests.h" +#include "tests/hists_common.h" + +struct sample { + u32 pid; + u64 ip; + struct thread *thread; + struct map *map; + struct symbol *sym; +}; + +/* For the numbers, see hists_common.c */ +static struct sample fake_samples[] = { + /* perf [kernel] schedule() */ + { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, }, + /* perf [perf] main() */ + { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, }, + /* perf [perf] cmd_record() */ + { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_CMD_RECORD, }, + /* perf [libc] malloc() */ + { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, }, + /* perf [libc] free() */ + { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_FREE, }, + /* perf [perf] main() */ + { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, }, + /* perf [kernel] page_fault() */ + { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, }, + /* bash [bash] main() */ + { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_MAIN, }, + /* bash [bash] xmalloc() */ + { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, }, + /* bash [kernel] page_fault() */ + { .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, }, +}; + +/* + * Will be casted to struct ip_callchain which has all 64 bit entries + * of nr and ips[]. + */ +static u64 fake_callchains[][10] = { + /* schedule => run_command => main */ + { 3, FAKE_IP_KERNEL_SCHEDULE, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, }, + /* main */ + { 1, FAKE_IP_PERF_MAIN, }, + /* cmd_record => run_command => main */ + { 3, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, }, + /* malloc => cmd_record => run_command => main */ + { 4, FAKE_IP_LIBC_MALLOC, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND, + FAKE_IP_PERF_MAIN, }, + /* free => cmd_record => run_command => main */ + { 4, FAKE_IP_LIBC_FREE, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND, + FAKE_IP_PERF_MAIN, }, + /* main */ + { 1, FAKE_IP_PERF_MAIN, }, + /* page_fault => sys_perf_event_open => run_command => main */ + { 4, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN, + FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, }, + /* main */ + { 1, FAKE_IP_BASH_MAIN, }, + /* xmalloc => malloc => xmalloc => malloc => xmalloc => main */ + { 6, FAKE_IP_BASH_XMALLOC, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC, + FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC, FAKE_IP_BASH_MAIN, }, + /* page_fault => malloc => main */ + { 3, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_MAIN, }, +}; + +static int add_hist_entries(struct hists *hists, struct machine *machine) +{ + struct addr_location al; + struct perf_evsel *evsel = hists_to_evsel(hists); + struct perf_sample sample = { .period = 1000, }; + size_t i; + + for (i = 0; i < ARRAY_SIZE(fake_samples); i++) { + const union perf_event event = { + .header = { + .misc = PERF_RECORD_MISC_USER, + }, + }; + struct hist_entry_iter iter = { + .hide_unresolved = false, + }; + + if (symbol_conf.cumulate_callchain) + iter.ops = &hist_iter_cumulative; + else + iter.ops = &hist_iter_normal; + + sample.pid = fake_samples[i].pid; + sample.tid = 
fake_samples[i].pid; + sample.ip = fake_samples[i].ip; + sample.callchain = (struct ip_callchain *)fake_callchains[i]; + + if (perf_event__preprocess_sample(&event, machine, &al, + &sample) < 0) + goto out; + + if (hist_entry_iter__add(&iter, &al, evsel, &sample, + PERF_MAX_STACK_DEPTH, NULL) < 0) + goto out; + + fake_samples[i].thread = al.thread; + fake_samples[i].map = al.map; + fake_samples[i].sym = al.sym; + } + + return TEST_OK; + +out: + pr_debug("Not enough memory for adding a hist entry\n"); + return TEST_FAIL; +} + +static void del_hist_entries(struct hists *hists) +{ + struct hist_entry *he; + struct rb_root *root_in; + struct rb_root *root_out; + struct rb_node *node; + + if (sort__need_collapse) + root_in = &hists->entries_collapsed; + else + root_in = hists->entries_in; + + root_out = &hists->entries; + + while (!RB_EMPTY_ROOT(root_out)) { + node = rb_first(root_out); + + he = rb_entry(node, struct hist_entry, rb_node); + rb_erase(node, root_out); + rb_erase(&he->rb_node_in, root_in); + hist_entry__free(he); + } +} + +typedef int (*test_fn_t)(struct perf_evsel *, struct machine *); + +#define COMM(he) (thread__comm_str(he->thread)) +#define DSO(he) (he->ms.map->dso->short_name) +#define SYM(he) (he->ms.sym->name) +#define CPU(he) (he->cpu) +#define PID(he) (he->thread->tid) +#define DEPTH(he) (he->callchain->max_depth) +#define CDSO(cl) (cl->ms.map->dso->short_name) +#define CSYM(cl) (cl->ms.sym->name) + +struct result { + u64 children; + u64 self; + const char *comm; + const char *dso; + const char *sym; +}; + +struct callchain_result { + u64 nr; + struct { + const char *dso; + const char *sym; + } node[10]; +}; + +static int do_test(struct hists *hists, struct result *expected, size_t nr_expected, + struct callchain_result *expected_callchain, size_t nr_callchain) +{ + char buf[32]; + size_t i, c; + struct hist_entry *he; + struct rb_root *root; + struct rb_node *node; + struct callchain_node *cnode; + struct callchain_list *clist; + + /* + * adding and deleting hist entries must be done outside of this + * function since TEST_ASSERT_VAL() returns in case of failure. 
+ */ + hists__collapse_resort(hists, NULL); + hists__output_resort(hists); + + if (verbose > 2) { + pr_info("use callchain: %d, cumulate callchain: %d\n", + symbol_conf.use_callchain, + symbol_conf.cumulate_callchain); + print_hists_out(hists); + } + + root = &hists->entries; + for (node = rb_first(root), i = 0; + node && (he = rb_entry(node, struct hist_entry, rb_node)); + node = rb_next(node), i++) { + scnprintf(buf, sizeof(buf), "Invalid hist entry #%zd", i); + + TEST_ASSERT_VAL("Incorrect number of hist entry", + i < nr_expected); + TEST_ASSERT_VAL(buf, he->stat.period == expected[i].self && + !strcmp(COMM(he), expected[i].comm) && + !strcmp(DSO(he), expected[i].dso) && + !strcmp(SYM(he), expected[i].sym)); + + if (symbol_conf.cumulate_callchain) + TEST_ASSERT_VAL(buf, he->stat_acc->period == expected[i].children); + + if (!symbol_conf.use_callchain) + continue; + + /* check callchain entries */ + root = &he->callchain->node.rb_root; + cnode = rb_entry(rb_first(root), struct callchain_node, rb_node); + + c = 0; + list_for_each_entry(clist, &cnode->val, list) { + scnprintf(buf, sizeof(buf), "Invalid callchain entry #%zd/%zd", i, c); + + TEST_ASSERT_VAL("Incorrect number of callchain entry", + c < expected_callchain[i].nr); + TEST_ASSERT_VAL(buf, + !strcmp(CDSO(clist), expected_callchain[i].node[c].dso) && + !strcmp(CSYM(clist), expected_callchain[i].node[c].sym)); + c++; + } + /* TODO: handle multiple child nodes properly */ + TEST_ASSERT_VAL("Incorrect number of callchain entry", + c <= expected_callchain[i].nr); + } + TEST_ASSERT_VAL("Incorrect number of hist entry", + i == nr_expected); + TEST_ASSERT_VAL("Incorrect number of callchain entry", + !symbol_conf.use_callchain || nr_expected == nr_callchain); + return 0; +} + +/* NO callchain + NO children */ +static int test1(struct perf_evsel *evsel, struct machine *machine) +{ + int err; + struct hists *hists = &evsel->hists; + /* + * expected output: + * + * Overhead Command Shared Object Symbol + * ======== ======= ============= ============== + * 20.00% perf perf [.] main + * 10.00% bash [kernel] [k] page_fault + * 10.00% bash bash [.] main + * 10.00% bash bash [.] xmalloc + * 10.00% perf [kernel] [k] page_fault + * 10.00% perf [kernel] [k] schedule + * 10.00% perf libc [.] free + * 10.00% perf libc [.] malloc + * 10.00% perf perf [.] cmd_record + */ + struct result expected[] = { + { 0, 2000, "perf", "perf", "main" }, + { 0, 1000, "bash", "[kernel]", "page_fault" }, + { 0, 1000, "bash", "bash", "main" }, + { 0, 1000, "bash", "bash", "xmalloc" }, + { 0, 1000, "perf", "[kernel]", "page_fault" }, + { 0, 1000, "perf", "[kernel]", "schedule" }, + { 0, 1000, "perf", "libc", "free" }, + { 0, 1000, "perf", "libc", "malloc" }, + { 0, 1000, "perf", "perf", "cmd_record" }, + }; + + symbol_conf.use_callchain = false; + symbol_conf.cumulate_callchain = false; + + setup_sorting(); + callchain_register_param(&callchain_param); + + err = add_hist_entries(hists, machine); + if (err < 0) + goto out; + + err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0); + +out: + del_hist_entries(hists); + reset_output_field(); + return err; +} + +/* callchain + NO children */ +static int test2(struct perf_evsel *evsel, struct machine *machine) +{ + int err; + struct hists *hists = &evsel->hists; + /* + * expected output: + * + * Overhead Command Shared Object Symbol + * ======== ======= ============= ============== + * 20.00% perf perf [.]
main + * | + * --- main + * + * 10.00% bash [kernel] [k] page_fault + * | + * --- page_fault + * malloc + * main + * + * 10.00% bash bash [.] main + * | + * --- main + * + * 10.00% bash bash [.] xmalloc + * | + * --- xmalloc + * malloc + * xmalloc <--- NOTE: there's a cycle + * malloc + * xmalloc + * main + * + * 10.00% perf [kernel] [k] page_fault + * | + * --- page_fault + * sys_perf_event_open + * run_command + * main + * + * 10.00% perf [kernel] [k] schedule + * | + * --- schedule + * run_command + * main + * + * 10.00% perf libc [.] free + * | + * --- free + * cmd_record + * run_command + * main + * + * 10.00% perf libc [.] malloc + * | + * --- malloc + * cmd_record + * run_command + * main + * + * 10.00% perf perf [.] cmd_record + * | + * --- cmd_record + * run_command + * main + * + */ + struct result expected[] = { + { 0, 2000, "perf", "perf", "main" }, + { 0, 1000, "bash", "[kernel]", "page_fault" }, + { 0, 1000, "bash", "bash", "main" }, + { 0, 1000, "bash", "bash", "xmalloc" }, + { 0, 1000, "perf", "[kernel]", "page_fault" }, + { 0, 1000, "perf", "[kernel]", "schedule" }, + { 0, 1000, "perf", "libc", "free" }, + { 0, 1000, "perf", "libc", "malloc" }, + { 0, 1000, "perf", "perf", "cmd_record" }, + }; + struct callchain_result expected_callchain[] = { + { + 1, { { "perf", "main" }, }, + }, + { + 3, { { "[kernel]", "page_fault" }, + { "libc", "malloc" }, + { "bash", "main" }, }, + }, + { + 1, { { "bash", "main" }, }, + }, + { + 6, { { "bash", "xmalloc" }, + { "libc", "malloc" }, + { "bash", "xmalloc" }, + { "libc", "malloc" }, + { "bash", "xmalloc" }, + { "bash", "main" }, }, + }, + { + 4, { { "[kernel]", "page_fault" }, + { "[kernel]", "sys_perf_event_open" }, + { "perf", "run_command" }, + { "perf", "main" }, }, + }, + { + 3, { { "[kernel]", "schedule" }, + { "perf", "run_command" }, + { "perf", "main" }, }, + }, + { + 4, { { "libc", "free" }, + { "perf", "cmd_record" }, + { "perf", "run_command" }, + { "perf", "main" }, }, + }, + { + 4, { { "libc", "malloc" }, + { "perf", "cmd_record" }, + { "perf", "run_command" }, + { "perf", "main" }, }, + }, + { + 3, { { "perf", "cmd_record" }, + { "perf", "run_command" }, + { "perf", "main" }, }, + }, + }; + + symbol_conf.use_callchain = true; + symbol_conf.cumulate_callchain = false; + + setup_sorting(); + callchain_register_param(&callchain_param); + + err = add_hist_entries(hists, machine); + if (err < 0) + goto out; + + err = do_test(hists, expected, ARRAY_SIZE(expected), + expected_callchain, ARRAY_SIZE(expected_callchain)); + +out: + del_hist_entries(hists); + reset_output_field(); + return err; +} + +/* NO callchain + children */ +static int test3(struct perf_evsel *evsel, struct machine *machine) +{ + int err; + struct hists *hists = &evsel->hists; + /* + * expected output: + * + * Children Self Command Shared Object Symbol + * ======== ======== ======= ============= ======================= + * 70.00% 20.00% perf perf [.] main + * 50.00% 0.00% perf perf [.] run_command + * 30.00% 10.00% bash bash [.] main + * 30.00% 10.00% perf perf [.] cmd_record + * 20.00% 0.00% bash libc [.] malloc + * 10.00% 10.00% bash [kernel] [k] page_fault + * 10.00% 10.00% perf [kernel] [k] schedule + * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open + * 10.00% 10.00% perf [kernel] [k] page_fault + * 10.00% 10.00% perf libc [.] free + * 10.00% 10.00% perf libc [.] malloc + * 10.00% 10.00% bash bash [.] 
xmalloc + */ + struct result expected[] = { + { 7000, 2000, "perf", "perf", "main" }, + { 5000, 0, "perf", "perf", "run_command" }, + { 3000, 1000, "bash", "bash", "main" }, + { 3000, 1000, "perf", "perf", "cmd_record" }, + { 2000, 0, "bash", "libc", "malloc" }, + { 1000, 1000, "bash", "[kernel]", "page_fault" }, + { 1000, 1000, "perf", "[kernel]", "schedule" }, + { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" }, + { 1000, 1000, "perf", "[kernel]", "page_fault" }, + { 1000, 1000, "perf", "libc", "free" }, + { 1000, 1000, "perf", "libc", "malloc" }, + { 1000, 1000, "bash", "bash", "xmalloc" }, + }; + + symbol_conf.use_callchain = false; + symbol_conf.cumulate_callchain = true; + + setup_sorting(); + callchain_register_param(&callchain_param); + + err = add_hist_entries(hists, machine); + if (err < 0) + goto out; + + err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0); + +out: + del_hist_entries(hists); + reset_output_field(); + return err; +} + +/* callchain + children */ +static int test4(struct perf_evsel *evsel, struct machine *machine) +{ + int err; + struct hists *hists = &evsel->hists; + /* + * expected output: + * + * Children Self Command Shared Object Symbol + * ======== ======== ======= ============= ======================= + * 70.00% 20.00% perf perf [.] main + * | + * --- main + * + * 50.00% 0.00% perf perf [.] run_command + * | + * --- run_command + * main + * + * 30.00% 10.00% bash bash [.] main + * | + * --- main + * + * 30.00% 10.00% perf perf [.] cmd_record + * | + * --- cmd_record + * run_command + * main + * + * 20.00% 0.00% bash libc [.] malloc + * | + * --- malloc + * | + * |--50.00%-- xmalloc + * | main + * --50.00%-- main + * + * 10.00% 10.00% bash [kernel] [k] page_fault + * | + * --- page_fault + * malloc + * main + * + * 10.00% 10.00% perf [kernel] [k] schedule + * | + * --- schedule + * run_command + * main + * + * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open + * | + * --- sys_perf_event_open + * run_command + * main + * + * 10.00% 10.00% perf [kernel] [k] page_fault + * | + * --- page_fault + * sys_perf_event_open + * run_command + * main + * + * 10.00% 10.00% perf libc [.] free + * | + * --- free + * cmd_record + * run_command + * main + * + * 10.00% 10.00% perf libc [.] malloc + * | + * --- malloc + * cmd_record + * run_command + * main + * + * 10.00% 10.00% bash bash [.] 
xmalloc + * | + * --- xmalloc + * malloc + * xmalloc <--- NOTE: there's a cycle + * malloc + * xmalloc + * main + * + */ + struct result expected[] = { + { 7000, 2000, "perf", "perf", "main" }, + { 5000, 0, "perf", "perf", "run_command" }, + { 3000, 1000, "bash", "bash", "main" }, + { 3000, 1000, "perf", "perf", "cmd_record" }, + { 2000, 0, "bash", "libc", "malloc" }, + { 1000, 1000, "bash", "[kernel]", "page_fault" }, + { 1000, 1000, "perf", "[kernel]", "schedule" }, + { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" }, + { 1000, 1000, "perf", "[kernel]", "page_fault" }, + { 1000, 1000, "perf", "libc", "free" }, + { 1000, 1000, "perf", "libc", "malloc" }, + { 1000, 1000, "bash", "bash", "xmalloc" }, + }; + struct callchain_result expected_callchain[] = { + { + 1, { { "perf", "main" }, }, + }, + { + 2, { { "perf", "run_command" }, + { "perf", "main" }, }, + }, + { + 1, { { "bash", "main" }, }, + }, + { + 3, { { "perf", "cmd_record" }, + { "perf", "run_command" }, + { "perf", "main" }, }, + }, + { + 4, { { "libc", "malloc" }, + { "bash", "xmalloc" }, + { "bash", "main" }, + { "bash", "main" }, }, + }, + { + 3, { { "[kernel]", "page_fault" }, + { "libc", "malloc" }, + { "bash", "main" }, }, + }, + { + 3, { { "[kernel]", "schedule" }, + { "perf", "run_command" }, + { "perf", "main" }, }, + }, + { + 3, { { "[kernel]", "sys_perf_event_open" }, + { "perf", "run_command" }, + { "perf", "main" }, }, + }, + { + 4, { { "[kernel]", "page_fault" }, + { "[kernel]", "sys_perf_event_open" }, + { "perf", "run_command" }, + { "perf", "main" }, }, + }, + { + 4, { { "libc", "free" }, + { "perf", "cmd_record" }, + { "perf", "run_command" }, + { "perf", "main" }, }, + }, + { + 4, { { "libc", "malloc" }, + { "perf", "cmd_record" }, + { "perf", "run_command" }, + { "perf", "main" }, }, + }, + { + 6, { { "bash", "xmalloc" }, + { "libc", "malloc" }, + { "bash", "xmalloc" }, + { "libc", "malloc" }, + { "bash", "xmalloc" }, + { "bash", "main" }, }, + }, + }; + + symbol_conf.use_callchain = true; + symbol_conf.cumulate_callchain = true; + + setup_sorting(); + callchain_register_param(&callchain_param); + + err = add_hist_entries(hists, machine); + if (err < 0) + goto out; + + err = do_test(hists, expected, ARRAY_SIZE(expected), + expected_callchain, ARRAY_SIZE(expected_callchain)); + +out: + del_hist_entries(hists); + reset_output_field(); + return err; +} + +int test__hists_cumulate(void) +{ + int err = TEST_FAIL; + struct machines machines; + struct machine *machine; + struct perf_evsel *evsel; + struct perf_evlist *evlist = perf_evlist__new(); + size_t i; + test_fn_t testcases[] = { + test1, + test2, + test3, + test4, + }; + + TEST_ASSERT_VAL("No memory", evlist); + + err = parse_events(evlist, "cpu-clock"); + if (err) + goto out; + + machines__init(&machines); + + /* setup threads/dso/map/symbols also */ + machine = setup_fake_machine(&machines); + if (!machine) + goto out; + + if (verbose > 1) + machine__fprintf(machine, stderr); + + evsel = perf_evlist__first(evlist); + + for (i = 0; i < ARRAY_SIZE(testcases); i++) { + err = testcases[i](evsel, machine); + if (err < 0) + break; + } + +out: + /* tear down everything */ + perf_evlist__delete(evlist); + machines__exit(&machines); + + return err; +} diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h index d76c0e2e663502..022bb68fd9c753 100644 --- a/tools/perf/tests/tests.h +++ b/tools/perf/tests/tests.h @@ -45,6 +45,7 @@ int test__hists_filter(void); int test__mmap_thread_lookup(void); int test__thread_mg_share(void); int test__hists_output(void); 
+int test__hists_cumulate(void); #if defined(__x86_64__) || defined(__i386__) || defined(__arm__) #ifdef HAVE_DWARF_UNWIND_SUPPORT From 7fa3134817d3cc813aeba9acc7e284f2f535f1f5 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 16 May 2014 16:23:18 +0200 Subject: [PATCH 078/101] uprobes: Shift ->readpage check from __copy_insn() to uprobe_register() copy_insn() fails with -EIO if ->readpage == NULL, but this error is not propagated unless uprobe_register() path finds ->mm which already mmaps this file. In this case (say) "perf record" does not actually install the probe, but the user can't know about this. Move this check into uprobe_register() so that this problem can be detected earlier and reported to user. Note: this is still not perfect, - copy_insn() and arch_uprobe_analyze_insn() should be called by uprobe_register() but this is not simple, we need vm_file for read_mapping_page() (although perhaps we can pass NULL), and we need ->mm for is_64bit_mm() (although this logic is broken anyway). - uprobe_register() should be called by create_trace_uprobe(), not by probe_event_enable(), so that an error can be detected at "perf probe -x" time. This also needs more changes in the core uprobe code, uprobe register/unregister interface was poorly designed from the very beginning. Reported-by: Denys Vlasenko Acked-by: Srikar Dronamraju Signed-off-by: Oleg Nesterov --- kernel/events/uprobes.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 3b02c72938a834..c56b13e3b5e16a 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -536,9 +536,6 @@ static int __copy_insn(struct address_space *mapping, struct file *filp, void *insn, int nbytes, loff_t offset) { struct page *page; - - if (!mapping->a_ops->readpage) - return -EIO; /* * Ensure that the page that has the original instruction is * populated and in page-cache. @@ -879,6 +876,9 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer * if (!uc->handler && !uc->ret_handler) return -EINVAL; + /* copy_insn()->read_mapping_page() needs ->readpage() */ + if (!inode->i_mapping->a_ops->readpage) + return -EIO; /* Racy, just to catch the obvious mistakes */ if (offset > i_size_read(inode)) return -EINVAL; From 45e0a79a82d31ddfa4c7c1dd9751ca48406f2fa4 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 19 May 2014 16:13:34 +0200 Subject: [PATCH 079/101] uprobes: Teach copy_insn() to support tmpfs tmpfs is widely used but as Denys reports shmem_aops doesn't have ->readpage() and thus you can't probe a binary on this filesystem. As Hugh suggested we can use shmem_read_mapping_page() in this case, just we need to check shmem_mapping() if ->readpage == NULL. Reported-by: Denys Vlasenko Suggested-by: Hugh Dickins Acked-by: Srikar Dronamraju Signed-off-by: Oleg Nesterov --- kernel/events/uprobes.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index c56b13e3b5e16a..6bfb6717f878e2 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -36,6 +36,7 @@ #include "../../mm/internal.h" /* munlock_vma_page */ #include #include +#include #include @@ -537,10 +538,14 @@ static int __copy_insn(struct address_space *mapping, struct file *filp, { struct page *page; /* - * Ensure that the page that has the original instruction is - * populated and in page-cache. 
+ * Ensure that the page that has the original instruction is populated + * and in page-cache. If ->readpage == NULL it must be shmem_mapping(), + * see uprobe_register(). */ - page = read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT, filp); + if (mapping->a_ops->readpage) + page = read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT, filp); + else + page = shmem_read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT); if (IS_ERR(page)) return PTR_ERR(page); @@ -876,8 +881,8 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer * if (!uc->handler && !uc->ret_handler) return -EINVAL; - /* copy_insn()->read_mapping_page() needs ->readpage() */ - if (!inode->i_mapping->a_ops->readpage) + /* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */ + if (!inode->i_mapping->a_ops->readpage && !shmem_mapping(inode->i_mapping)) return -EIO; /* Racy, just to catch the obvious mistakes */ if (offset > i_size_read(inode)) From 1b5726220fe16ac38eb2db43e7bc82e69f449fca Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 26 May 2014 16:02:29 -0300 Subject: [PATCH 080/101] perf trace: Warn the user when not available When the audit-libs devel package is not found at build time we disable the 'trace' command, as we are not able to map syscall numbers to strings, but then the message the user is presented is cryptic: [root@zoo linux]# trace ls perf: 'ls' is not a perf-command. See 'perf --help'. Fix it by presenting a more helpful message: [root@zoo linux]# trace l trace command not available: missing audit-libs devel package at build time. Signed-off-by: Arnaldo Carvalho de Melo Cc: Adrian Hunter Cc: David Ahern Cc: Don Zickus Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-uxeunqetd0sgxyibusapen9a@git.kernel.org Signed-off-by: Jiri Olsa --- tools/perf/perf.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tools/perf/perf.c b/tools/perf/perf.c index 431798a4110d6a..78f7b920e5483a 100644 --- a/tools/perf/perf.c +++ b/tools/perf/perf.c @@ -481,14 +481,18 @@ int main(int argc, const char **argv) fprintf(stderr, "cannot handle %s internally", cmd); goto out; } -#ifdef HAVE_LIBAUDIT_SUPPORT if (!prefixcmp(cmd, "trace")) { +#ifdef HAVE_LIBAUDIT_SUPPORT set_buildid_dir(); setup_path(); argv[0] = "trace"; return cmd_trace(argc, argv, NULL); - } +#else + fprintf(stderr, + "trace command not available: missing audit-libs devel package at build time.\n"); + goto out; #endif + } /* Look for flags.. */ argv++; argc--; From 6a2f2543a1f3aa0e7766e27c30b93d164771e892 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 26 May 2014 16:02:30 -0300 Subject: [PATCH 081/101] perf tools: Add warning when disabling perl scripting support due to missing devel files We were just showing "libperl: OFF", unlike other features where we present the user with a message helping have a feature built in. Fix it by adding the following message: config/Makefile:450: Missing perl devel files. 
Disabling perl scripting support, consider installing perl-ExtUtils-Embed Signed-off-by: Arnaldo Carvalho de Melo Cc: Adrian Hunter Cc: David Ahern Cc: Don Zickus Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-t7yeud34ehimlfi6pklb29p7@git.kernel.org Signed-off-by: Jiri Olsa --- tools/perf/config/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile index 729bbdf5cec7c6..319426f632fccf 100644 --- a/tools/perf/config/Makefile +++ b/tools/perf/config/Makefile @@ -447,6 +447,7 @@ else ifneq ($(feature-libperl), 1) CFLAGS += -DNO_LIBPERL NO_LIBPERL := 1 + msg := $(warning Missing perl devel files. Disabling perl scripting support, consider installing perl-ExtUtils-Embed); else LDFLAGS += $(PERL_EMBED_LDFLAGS) EXTLIBS += $(PERL_EMBED_LIBADD) From 16a6433615c14097fd8d406b10ce6393aebb7017 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 28 May 2014 10:19:18 +0200 Subject: [PATCH 082/101] perf tools: Consider header files outside perf directory in tags target This fixes lookups like "vi -t event_format" Signed-off-by: Sebastian Andrzej Siewior Link: http://lkml.kernel.org/r/20140528081918.GA28567@linutronix.de Signed-off-by: Jiri Olsa --- tools/perf/Makefile.perf | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 67f7c0575b2614..6286e13adc2ec0 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -815,17 +815,20 @@ INSTALL_DOC_TARGETS += quick-install-doc quick-install-man quick-install-html $(DOC_TARGETS): $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) $(@:doc=all) +TAG_FOLDERS= . ../lib/traceevent ../lib/api ../lib/symbol +TAG_FILES= ../../include/uapi/linux/perf_event.h + TAGS: $(RM) TAGS - $(FIND) . -name '*.[hcS]' -print | xargs etags -a + $(FIND) $(TAG_FOLDERS) -name '*.[hcS]' -print | xargs etags -a $(TAG_FILES) tags: $(RM) tags - $(FIND) . -name '*.[hcS]' -print | xargs ctags -a + $(FIND) $(TAG_FOLDERS) -name '*.[hcS]' -print | xargs ctags -a $(TAG_FILES) cscope: $(RM) cscope* - $(FIND) . -name '*.[hcS]' -print | xargs cscope -b + $(FIND) $(TAG_FOLDERS) -name '*.[hcS]' -print | xargs cscope -b $(TAG_FILES) ### Detect prefix changes TRACK_CFLAGS = $(subst ','\'',$(CFLAGS)):\ From f2d9627b2b31506204417bb6842a7ea88970b700 Mon Sep 17 00:00:00 2001 From: Cody P Schafer Date: Tue, 27 May 2014 17:21:56 -0700 Subject: [PATCH 083/101] perf tools: Allow overriding sysfs and proc finding with env var SYSFS_PATH and PROC_PATH environment variables now let the user override the detection of sysfs and proc locations for testing purposes. 
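For example, a test harness could point the library at a canned tree before running perf; the paths in this invocation are made up for illustration:

  $ SYSFS_PATH=/tmp/fake-sys PROC_PATH=/tmp/fake-proc perf stat -e cycles true

The variable name is the fs name uppercased plus "_PATH", following the SYSFS_PATH convention recommended in Documentation/sysfs-rules.txt (see the comment added in the diff below).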
Signed-off-by: Cody P Schafer Cc: Sukadev Bhattiprolu Link: http://lkml.kernel.org/r/1401236684-10579-2-git-send-email-dev@codyps.com Signed-off-by: Jiri Olsa --- tools/lib/api/fs/fs.c | 43 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 42 insertions(+), 1 deletion(-) diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c index 5b5eb788996e1d..c1b49c36a951d7 100644 --- a/tools/lib/api/fs/fs.c +++ b/tools/lib/api/fs/fs.c @@ -1,8 +1,10 @@ /* TODO merge/factor in debugfs.c here */ +#include #include #include #include +#include #include #include @@ -96,12 +98,51 @@ static bool fs__check_mounts(struct fs *fs) return false; } +static void mem_toupper(char *f, size_t len) +{ + while (len) { + *f = toupper(*f); + f++; + len--; + } +} + +/* + * Check for "NAME_PATH" environment variable to override fs location (for + * testing). This matches the recommendation in Documentation/sysfs-rules.txt + * for SYSFS_PATH. + */ +static bool fs__env_override(struct fs *fs) +{ + char *override_path; + size_t name_len = strlen(fs->name); + /* name + "_PATH" + '\0' */ + char upper_name[name_len + 5 + 1]; + memcpy(upper_name, fs->name, name_len); + mem_toupper(upper_name, name_len); + strcpy(&upper_name[name_len], "_PATH"); + + override_path = getenv(upper_name); + if (!override_path) + return false; + + fs->found = true; + strncpy(fs->path, override_path, sizeof(fs->path)); + return true; +} + static const char *fs__get_mountpoint(struct fs *fs) { + if (fs__env_override(fs)) + return fs->path; + if (fs__check_mounts(fs)) return fs->path; - return fs__read_mounts(fs) ? fs->path : NULL; + if (fs__read_mounts(fs)) + return fs->path; + + return NULL; } static const char *fs__mountpoint(int idx) From 7f3e508ee1e6cc1b5865edcbf04a14a76ff1534c Mon Sep 17 00:00:00 2001 From: zhangdianfang Date: Fri, 30 May 2014 08:53:58 +0800 Subject: [PATCH 084/101] perf tools: Fix "==" into "=" in ui_browser__warning assignment Convert "==" into "=" in ui_browser__warning assignment. Bug description: https://bugzilla.kernel.org/show_bug.cgi?id=76751 Reported-by: David Binderman Signed-off-by: Dianfang Zhang Acked-by: Arnaldo Carvalho de Melo Cc: Jean Delvare Link: http://lkml.kernel.org/r/20140530154709.GC1202@kernel.org [ changed the changelog a bit ] Signed-off-by: Jiri Olsa --- tools/perf/ui/browser.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c index d11541d4d7d7ff..3ccf6e14f89bfe 100644 --- a/tools/perf/ui/browser.c +++ b/tools/perf/ui/browser.c @@ -194,7 +194,7 @@ int ui_browser__warning(struct ui_browser *browser, int timeout, ui_helpline__vpush(format, args); va_end(args); } else { - while ((key == ui__question_window("Warning!", text, + while ((key = ui__question_window("Warning!", text, "Press any key...", timeout)) == K_RESIZE) ui_browser__handle_resize(browser); From 2ec85c628c4cecef0f82d177279c579aed0f9706 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Fri, 23 May 2014 17:15:46 +0200 Subject: [PATCH 085/101] perf tools: Remove elide setup for SORT_MODE__MEMORY mode There's no need to setup elide of sort_dso sort entry again with symbol_conf.dso_list list. The only difference were list names of memory mode data, which does not make much sense to me. 
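For background: a sort column is "elided", i.e. dropped from the output, when its filter list contains exactly one entry, since every row would repeat the same value; a single "# <column>: <value>" header line is printed instead (see __get_elide() in the next patch). An illustrative session, with a hypothetical dso name and the output abridged:

  $ perf report --dsos libc-2.19.so --stdio
  # dso: libc-2.19.so
  ...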
Acked-by: Namhyung Kim Cc: Arnaldo Carvalho de Melo Cc: Corey Ashford Cc: David Ahern Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1400858147-7155-2-git-send-email-jolsa@kernel.org Signed-off-by: Jiri Olsa --- tools/perf/util/sort.c | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 254f583a52abff..2aba620a86f639 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -1412,19 +1412,6 @@ void sort__setup_elide(FILE *output) sort_entry__setup_elide(&sort_sym_to, symbol_conf.sym_to_list, "sym_to", output); - } else if (sort__mode == SORT_MODE__MEMORY) { - sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, - "symbol_daddr", output); - sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, - "dso_daddr", output); - sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, - "mem", output); - sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, - "local_weight", output); - sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, - "tlb", output); - sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, - "snoop", output); } /* From f29984226978313039d7dfe9b45eaa55a3aad03d Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Fri, 23 May 2014 17:15:47 +0200 Subject: [PATCH 086/101] perf tools: Move elide bool into perf_hpp_fmt struct After output/sort fields refactoring, it's expensive to check the elide bool in its current location inside the 'struct sort_entry'. The perf_hpp__should_skip function gets highly noticable in workloads with high number of output/sort fields, like for: $ perf report -i perf-test.data -F overhead,sample,period,comm,pid,dso,symbol,cpu --stdio Performance report: 9.70% perf [.] perf_hpp__should_skip Moving the elide bool into the 'struct perf_hpp_fmt', which makes the perf_hpp__should_skip just single struct read. Got speedup of around 22% for my test perf.data workload. The change should not harm any other workload types. Performance counter stats for (10 runs): before: 358,319,732,626 cycles ( +- 0.55% ) 467,129,581,515 instructions # 1.30 insns per cycle ( +- 0.00% ) 150.943975206 seconds time elapsed ( +- 0.62% ) now: 278,785,972,990 cycles ( +- 0.12% ) 370,146,797,640 instructions # 1.33 insns per cycle ( +- 0.00% ) 116.416670507 seconds time elapsed ( +- 0.31% ) Acked-by: Namhyung Kim Cc: Arnaldo Carvalho de Melo Cc: Corey Ashford Cc: David Ahern Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/20140601142622.GA9131@krava.brq.redhat.com Signed-off-by: Jiri Olsa --- tools/perf/ui/browsers/hists.c | 8 +-- tools/perf/util/hist.h | 8 ++- tools/perf/util/sort.c | 90 +++++++++++++++++++++------------- tools/perf/util/sort.h | 2 +- 4 files changed, 68 insertions(+), 40 deletions(-) diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index 5905acde5f1da5..52c03fbbba1774 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c @@ -1706,14 +1706,14 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, zoom_out_dso: ui_helpline__pop(); browser->hists->dso_filter = NULL; - sort_dso.elide = false; + perf_hpp__set_elide(HISTC_DSO, false); } else { if (dso == NULL) continue; ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s DSO\"", dso->kernel ? 
"the Kernel" : dso->short_name); browser->hists->dso_filter = dso; - sort_dso.elide = true; + perf_hpp__set_elide(HISTC_DSO, true); pstack__push(fstack, &browser->hists->dso_filter); } hists__filter_by_dso(hists); @@ -1725,13 +1725,13 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, zoom_out_thread: ui_helpline__pop(); browser->hists->thread_filter = NULL; - sort_thread.elide = false; + perf_hpp__set_elide(HISTC_THREAD, false); } else { ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s(%d) thread\"", thread->comm_set ? thread__comm_str(thread) : "", thread->tid); browser->hists->thread_filter = thread; - sort_thread.elide = true; + perf_hpp__set_elide(HISTC_THREAD, false); pstack__push(fstack, &browser->hists->thread_filter); } hists__filter_by_thread(hists); diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 82b28ff980626d..d2bf03575d5f08 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -205,6 +205,7 @@ struct perf_hpp_fmt { struct list_head list; struct list_head sort_list; + bool elide; }; extern struct list_head perf_hpp__list; @@ -252,7 +253,12 @@ void perf_hpp__append_sort_keys(void); bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format); bool perf_hpp__same_sort_entry(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b); -bool perf_hpp__should_skip(struct perf_hpp_fmt *format); + +static inline bool perf_hpp__should_skip(struct perf_hpp_fmt *format) +{ + return format->elide; +} + void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists); typedef u64 (*hpp_field_fn)(struct hist_entry *he); diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 2aba620a86f639..45512baaab6727 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -1157,6 +1157,7 @@ __sort_dimension__alloc_hpp(struct sort_dimension *sd) INIT_LIST_HEAD(&hse->hpp.list); INIT_LIST_HEAD(&hse->hpp.sort_list); + hse->hpp.elide = false; return hse; } @@ -1364,27 +1365,64 @@ static int __setup_sorting(void) return ret; } -bool perf_hpp__should_skip(struct perf_hpp_fmt *format) +void perf_hpp__set_elide(int idx, bool elide) { - if (perf_hpp__is_sort_entry(format)) { - struct hpp_sort_entry *hse; + struct perf_hpp_fmt *fmt; + struct hpp_sort_entry *hse; - hse = container_of(format, struct hpp_sort_entry, hpp); - return hse->se->elide; + perf_hpp__for_each_format(fmt) { + if (!perf_hpp__is_sort_entry(fmt)) + continue; + + hse = container_of(fmt, struct hpp_sort_entry, hpp); + if (hse->se->se_width_idx == idx) { + fmt->elide = elide; + break; + } } - return false; } -static void sort_entry__setup_elide(struct sort_entry *se, - struct strlist *list, - const char *list_name, FILE *fp) +static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp) { if (list && strlist__nr_entries(list) == 1) { if (fp != NULL) fprintf(fp, "# %s: %s\n", list_name, strlist__entry(list, 0)->s); - se->elide = true; + return true; + } + return false; +} + +static bool get_elide(int idx, FILE *output) +{ + switch (idx) { + case HISTC_SYMBOL: + return __get_elide(symbol_conf.sym_list, "symbol", output); + case HISTC_DSO: + return __get_elide(symbol_conf.dso_list, "dso", output); + case HISTC_COMM: + return __get_elide(symbol_conf.comm_list, "comm", output); + default: + break; } + + if (sort__mode != SORT_MODE__BRANCH) + return false; + + switch (idx) { + case HISTC_SYMBOL_FROM: + return __get_elide(symbol_conf.sym_from_list, "sym_from", output); + case HISTC_SYMBOL_TO: + return __get_elide(symbol_conf.sym_to_list, 
"sym_to", output); + case HISTC_DSO_FROM: + return __get_elide(symbol_conf.dso_from_list, "dso_from", output); + case HISTC_DSO_TO: + return __get_elide(symbol_conf.dso_to_list, "dso_to", output); + default: + break; + } + + return false; } void sort__setup_elide(FILE *output) @@ -1392,26 +1430,12 @@ void sort__setup_elide(FILE *output) struct perf_hpp_fmt *fmt; struct hpp_sort_entry *hse; - sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, - "dso", output); - sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, - "comm", output); - sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, - "symbol", output); - - if (sort__mode == SORT_MODE__BRANCH) { - sort_entry__setup_elide(&sort_dso_from, - symbol_conf.dso_from_list, - "dso_from", output); - sort_entry__setup_elide(&sort_dso_to, - symbol_conf.dso_to_list, - "dso_to", output); - sort_entry__setup_elide(&sort_sym_from, - symbol_conf.sym_from_list, - "sym_from", output); - sort_entry__setup_elide(&sort_sym_to, - symbol_conf.sym_to_list, - "sym_to", output); + perf_hpp__for_each_format(fmt) { + if (!perf_hpp__is_sort_entry(fmt)) + continue; + + hse = container_of(fmt, struct hpp_sort_entry, hpp); + fmt->elide = get_elide(hse->se->se_width_idx, output); } /* @@ -1422,8 +1446,7 @@ void sort__setup_elide(FILE *output) if (!perf_hpp__is_sort_entry(fmt)) continue; - hse = container_of(fmt, struct hpp_sort_entry, hpp); - if (!hse->se->elide) + if (!fmt->elide) return; } @@ -1431,8 +1454,7 @@ void sort__setup_elide(FILE *output) if (!perf_hpp__is_sort_entry(fmt)) continue; - hse = container_of(fmt, struct hpp_sort_entry, hpp); - hse->se->elide = false; + fmt->elide = false; } } diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index 426b873e16ff9f..5bf0098d6b0689 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -202,7 +202,6 @@ struct sort_entry { int (*se_snprintf)(struct hist_entry *he, char *bf, size_t size, unsigned int width); u8 se_width_idx; - bool elide; }; extern struct sort_entry sort_thread; @@ -213,6 +212,7 @@ int setup_output_field(void); void reset_output_field(void); extern int sort_dimension__add(const char *); void sort__setup_elide(FILE *fp); +void perf_hpp__set_elide(int idx, bool elide); int report_parse_ignore_callees_opt(const struct option *opt, const char *arg, int unset); From a515114fa3cff8f1da10cd68914d55c10879c3e0 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Mon, 2 Jun 2014 13:44:23 -0400 Subject: [PATCH 087/101] perf record: Fix poll return value propagation If the perf record command is interrupted in record__mmap_read_all function, the 'done' is set and err has the latest poll return value, which is most likely positive number (= number of pollfds ready to read). This 'positive err' is then propagated to the exit code, resulting in not finishing the perf.data header properly, causing following error in report: # perf record -F 50000 -a --- make the system real busy, so there's more chance to interrupt perf in event writing code --- ^C[ perf record: Woken up 16 times to write data ] [ perf record: Captured and wrote 30.292 MB perf.data (~1323468 samples) ] # perf report --stdio > /dev/null WARNING: The perf.data file's data size field is 0 which is unexpected. Was the 'perf record' command properly terminated? Fixing this by checking for positive poll return value and setting err to 0. 
Acked-by: Arnaldo Carvalho de Melo Acked-by: Namhyung Kim Cc: Arnaldo Carvalho de Melo Cc: Corey Ashford Cc: David Ahern Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1401732126-19465-1-git-send-email-jolsa@kernel.org Signed-off-by: Jiri Olsa --- tools/perf/builtin-record.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index e4c85b8f46c29f..ce2cfec5c76470 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -454,7 +454,11 @@ static int __cmd_record(struct record *rec, int argc, const char **argv) if (done) break; err = poll(rec->evlist->pollfd, rec->evlist->nr_fds, -1); - if (err < 0 && errno == EINTR) + /* + * Propagate error, only if there's any. Ignore positive + * number of returned events and interrupt error. + */ + if (err > 0 || (err < 0 && errno == EINTR)) err = 0; waking++; } From fc9cabeabf42d76854059e7bce81a02645e7e5ca Mon Sep 17 00:00:00 2001 From: Jianyu Zhan Date: Tue, 3 Jun 2014 00:44:34 +0800 Subject: [PATCH 088/101] perf tools: Fix 'make help' message error Currently 'make help' message has such hint: use "make prefix= " to install to a particular path like make prefix=/usr/local install install-doc But this is misleading, when I specify "prefix=/usr/local", it has got no respect at all. This is because that, "DESTDIR" is considered first. In this case, "DESTDIR" has an empty value, so "prefix" is honored. However, "prefix" is unconditionally assigned to $HOME, regardless of what it is set to from command line. So our "prefix" setting got no respect and the actual destination falls back to $HOME. This patch fixes this issue and corrects the help message. Signed-off-by: Jianyu Zhan Acked-by: Namhyung Kim Link: http://lkml.kernel.org/r/1401727474-19370-1-git-send-email-nasa4836@gmail.com Signed-off-by: Jiri Olsa --- tools/perf/Makefile.perf | 4 ++-- tools/perf/config/Makefile | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 6286e13adc2ec0..ae20edfcc3f7e3 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -789,8 +789,8 @@ help: @echo '' @echo 'Perf install targets:' @echo ' NOTE: documentation build requires asciidoc, xmlto packages to be installed' - @echo ' HINT: use "make prefix= " to install to a particular' - @echo ' path like make prefix=/usr/local install install-doc' + @echo ' HINT: use "prefix" or "DESTDIR" to install to a particular' + @echo ' path like "make prefix=/usr/local install install-doc"' @echo ' install - install compiled binaries' @echo ' install-doc - install *all* documentation' @echo ' install-man - install manpage documentation' diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile index 319426f632fccf..4f100b54ba8bf0 100644 --- a/tools/perf/config/Makefile +++ b/tools/perf/config/Makefile @@ -600,7 +600,7 @@ endif # Make the path relative to DESTDIR, not to prefix ifndef DESTDIR -prefix = $(HOME) +prefix ?= $(HOME) endif bindir_relative = bin bindir = $(prefix)/$(bindir_relative) From ebf905fc7a6e7c99c53b5afc888d8f950da90aff Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 29 May 2014 19:00:24 +0200 Subject: [PATCH 089/101] perf: Fix use after free in perf_remove_from_context() While that mutex should guard the elements, it doesn't guard against the use-after-free that's from list_for_each_entry_rcu(). 
__perf_event_exit_task() can actually free the event. And because list addition/deletion is guarded by both ctx->mutex and ctx->lock, holding ctx->mutex is sufficient for reading the list, so we don't actually need the rcu list iteration. Fixes: 3a497f48637e ("perf: Simplify perf_event_exit_task_context()") Reported-by: Sasha Levin Tested-by: Sasha Levin Signed-off-by: Peter Zijlstra Cc: Dave Jones Cc: acme@ghostprotocols.net Cc: Arnaldo Carvalho de Melo Cc: Linus Torvalds Link: http://lkml.kernel.org/r/20140529170024.GA2315@laptop.programming.kicks-ass.net Signed-off-by: Ingo Molnar --- kernel/events/core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/events/core.c b/kernel/events/core.c index ed50b0943213fc..a62d142ad49817 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -7431,7 +7431,7 @@ __perf_event_exit_task(struct perf_event *child_event, static void perf_event_exit_task_context(struct task_struct *child, int ctxn) { - struct perf_event *child_event; + struct perf_event *child_event, *next; struct perf_event_context *child_ctx; unsigned long flags; @@ -7485,7 +7485,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn) */ mutex_lock(&child_ctx->mutex); - list_for_each_entry_rcu(child_event, &child_ctx->event_list, event_entry) + list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry) __perf_event_exit_task(child_event, child_ctx, child); mutex_unlock(&child_ctx->mutex); From 53b25335dd60981ad608da7890420898a34469a6 Mon Sep 17 00:00:00 2001 From: Vince Weaver Date: Fri, 16 May 2014 17:12:12 -0400 Subject: [PATCH 090/101] perf: Disable sampled events if no PMU interrupt Add common code to generate -ENOTSUPP at event creation time if an architecture attempts to create a sampled event and PERF_PMU_NO_INTERRUPT is set. This adds a new pmu->capabilities flag. Initially we only support PERF_PMU_NO_INTERRUPT (to indicate a PMU has no support for generating hardware interrupts) but there are other capabilities that can be added later. 
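With this in place, a PMU driver lacking a usable overflow interrupt advertises the fact once at init time, roughly like this (a sketch following the ARM and x86 patches later in this series; the surrounding driver code is assumed):

  /* no usable overflow interrupt: refuse sampling, keep counting */
  if (!platform_get_irq(pmu_device, 0))
          cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

perf_event_open() then rejects sampling events on such a PMU with -ENOTSUPP, while plain counting events continue to work.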
Signed-off-by: Vince Weaver Acked-by: Will Deacon [peterz: rename to PERF_PMU_CAP_* and moved the pmu::capabilities word into a hole] Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Linus Torvalds Link: http://lkml.kernel.org/r/alpine.DEB.2.10.1405161708060.11099@vincent-weaver-1.umelst.maine.edu Signed-off-by: Ingo Molnar Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 10 ++++++++++ kernel/events/core.c | 7 +++++++ 2 files changed, 17 insertions(+) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index af6dcf1d9e476c..267c8f37012cf5 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -166,6 +166,11 @@ struct perf_event; */ #define PERF_EVENT_TXN 0x1 +/** + * pmu::capabilities flags + */ +#define PERF_PMU_CAP_NO_INTERRUPT 0x01 + /** * struct pmu - generic performance monitoring unit */ @@ -178,6 +183,11 @@ struct pmu { const char *name; int type; + /* + * various common per-pmu feature flags + */ + int capabilities; + int * __percpu pmu_disable_count; struct perf_cpu_context * __percpu pmu_cpu_context; int task_ctx_nr; diff --git a/kernel/events/core.c b/kernel/events/core.c index a62d142ad49817..e9ef0c6646afa1 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -7120,6 +7120,13 @@ SYSCALL_DEFINE5(perf_event_open, } } + if (is_sampling_event(event)) { + if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { + err = -ENOTSUPP; + goto err_alloc; + } + } + account_event(event); /* From edcb4d3c36a6429caa03ddfeab4cbb153c7002b2 Mon Sep 17 00:00:00 2001 From: Vince Weaver Date: Fri, 16 May 2014 17:15:49 -0400 Subject: [PATCH 091/101] perf/ARM: Use common PMU interrupt disabled code Make the ARM perf code use the new common PMU interrupt disabled code. This allows perf to work on ARM machines without a working PMU interrupt (for example, raspberry pi). Acked-by: Will Deacon Signed-off-by: Vince Weaver [peterz: applied changes suggested by Will] Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Grant Likely Cc: Linus Torvalds Cc: Paul Mackerras Cc: Rob Herring Cc: Russell King Cc: Will Deacon Cc: devicetree@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Link: http://lkml.kernel.org/r/alpine.DEB.2.10.1405161712190.11099@vincent-weaver-1.umelst.maine.edu [ Small readability tweaks to the code. ] Signed-off-by: Ingo Molnar Signed-off-by: Ingo Molnar --- arch/arm/kernel/perf_event.c | 2 +- arch/arm/kernel/perf_event_cpu.c | 8 ++++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index a6bc431cde7010..4238bcba9d60fc 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -410,7 +410,7 @@ __hw_perf_event_init(struct perf_event *event) */ hwc->config_base |= (unsigned long)mapping; - if (!hwc->sample_period) { + if (!is_sampling_event(event)) { /* * For non-sampling runs, limit the sample_period to half * of the counter width. 
That way, the new counter value diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index 51798d7854aca9..bbdbffd06cf77a 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c @@ -126,8 +126,8 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) irqs = min(pmu_device->num_resources, num_possible_cpus()); if (irqs < 1) { - pr_err("no irqs for PMUs defined\n"); - return -ENODEV; + printk_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n"); + return 0; } irq = platform_get_irq(pmu_device, 0); @@ -191,6 +191,10 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu) /* Ensure the PMU has sane values out of reset. */ if (cpu_pmu->reset) on_each_cpu(cpu_pmu->reset, cpu_pmu, 1); + + /* If no interrupts available, set the corresponding capability flag */ + if (!platform_get_irq(cpu_pmu->plat_device, 0)) + cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; } /* From c184c980de30dc5f6fec4b281928aa6743708da9 Mon Sep 17 00:00:00 2001 From: Vince Weaver Date: Fri, 16 May 2014 17:18:07 -0400 Subject: [PATCH 092/101] perf/x86: Use common PMU interrupt disabled code Make the x86 perf code use the new common PMU interrupt disabled code. Typically most x86 machines have working PMU interrupts, although some older p6-class machines had this problem. Signed-off-by: Vince Weaver Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Linus Torvalds Link: http://lkml.kernel.org/r/alpine.DEB.2.10.1405161715560.11099@vincent-weaver-1.umelst.maine.edu Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 32029e35f2b926..2bdfbff8a4f616 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -303,15 +303,6 @@ int x86_setup_perfctr(struct perf_event *event) hwc->sample_period = x86_pmu.max_period; hwc->last_period = hwc->sample_period; local64_set(&hwc->period_left, hwc->sample_period); - } else { - /* - * If we have a PMU initialized but no APIC - * interrupts, we cannot sample hardware - * events (user-space has to fall back and - * sample via a hrtimer based software event): - */ - if (!x86_pmu.apic) - return -EOPNOTSUPP; } if (attr->type == PERF_TYPE_RAW) @@ -1367,6 +1358,15 @@ static void __init pmu_check_apic(void) x86_pmu.apic = 0; pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n"); pr_info("no hardware sampling interrupt available.\n"); + + /* + * If we have a PMU initialized but no APIC + * interrupts, we cannot sample hardware + * events (user-space has to fall back and + * sample via a hrtimer based software event): + */ + pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; + } static struct attribute_group x86_pmu_format_group = { From 41ccba029e9492dd0fc1bf5c23b72c6322a6dfe9 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 19 May 2014 20:40:54 +0200 Subject: [PATCH 093/101] uprobes: Shift ->readpage check from __copy_insn() to uprobe_register() copy_insn() fails with -EIO if ->readpage == NULL, but this error is not propagated unless uprobe_register() path finds ->mm which already mmaps this file. In this case (say) "perf record" does not actually install the probe, but the user can't know about this. Move this check into uprobe_register() so that this problem can be detected earlier and reported to user. 
Note: this is still not perfect, - copy_insn() and arch_uprobe_analyze_insn() should be called by uprobe_register() but this is not simple, we need vm_file for read_mapping_page() (although perhaps we can pass NULL), and we need ->mm for is_64bit_mm() (although this logic is broken anyway). - uprobe_register() should be called by create_trace_uprobe(), not by probe_event_enable(), so that an error can be detected at "perf probe -x" time. This also needs more changes in the core uprobe code, uprobe register/unregister interface was poorly designed from the very beginning. Reported-by: Denys Vlasenko Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju Signed-off-by: Peter Zijlstra Cc: Hugh Dickins Cc: Arnaldo Carvalho de Melo Cc: Linus Torvalds Link: http://lkml.kernel.org/r/20140519184054.GA6750@redhat.com Signed-off-by: Ingo Molnar --- kernel/events/uprobes.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 3b02c72938a834..c56b13e3b5e16a 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -536,9 +536,6 @@ static int __copy_insn(struct address_space *mapping, struct file *filp, void *insn, int nbytes, loff_t offset) { struct page *page; - - if (!mapping->a_ops->readpage) - return -EIO; /* * Ensure that the page that has the original instruction is * populated and in page-cache. @@ -879,6 +876,9 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer * if (!uc->handler && !uc->ret_handler) return -EINVAL; + /* copy_insn()->read_mapping_page() needs ->readpage() */ + if (!inode->i_mapping->a_ops->readpage) + return -EIO; /* Racy, just to catch the obvious mistakes */ if (offset > i_size_read(inode)) return -EINVAL; From 40814f6805a524130a5ec313871147f7aa9b5318 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 19 May 2014 20:41:36 +0200 Subject: [PATCH 094/101] uprobes: Teach copy_insn() to support tmpfs tmpfs is widely used but as Denys reports shmem_aops doesn't have ->readpage() and thus you can't probe a binary on this filesystem. As Hugh suggested we can use shmem_read_mapping_page() in this case, just we need to check shmem_mapping() if ->readpage == NULL. Reported-by: Denys Vlasenko Suggested-by: Hugh Dickins Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Linus Torvalds Link: http://lkml.kernel.org/r/20140519184136.GB6750@redhat.com Signed-off-by: Ingo Molnar --- kernel/events/uprobes.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index c56b13e3b5e16a..6bfb6717f878e2 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -36,6 +36,7 @@ #include "../../mm/internal.h" /* munlock_vma_page */ #include #include +#include #include @@ -537,10 +538,14 @@ static int __copy_insn(struct address_space *mapping, struct file *filp, { struct page *page; /* - * Ensure that the page that has the original instruction is - * populated and in page-cache. + * Ensure that the page that has the original instruction is populated + * and in page-cache. If ->readpage == NULL it must be shmem_mapping(), + * see uprobe_register(). 
*/ - page = read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT, filp); + if (mapping->a_ops->readpage) + page = read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT, filp); + else + page = shmem_read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT); if (IS_ERR(page)) return PTR_ERR(page); @@ -876,8 +881,8 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer * if (!uc->handler && !uc->ret_handler) return -EINVAL; - /* copy_insn()->read_mapping_page() needs ->readpage() */ - if (!inode->i_mapping->a_ops->readpage) + /* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */ + if (!inode->i_mapping->a_ops->readpage && !shmem_mapping(inode->i_mapping)) return -EIO; /* Racy, just to catch the obvious mistakes */ if (offset > i_size_read(inode)) From bac52139f0b7ab31330e98fd87fc5a2664951050 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 22 May 2014 12:50:07 +0530 Subject: [PATCH 095/101] perf: Add new conditional branch filter 'PERF_SAMPLE_BRANCH_COND' This patch introduces new branch filter PERF_SAMPLE_BRANCH_COND which will extend the existing perf ABI. This will filter branches which are conditional. Various architectures can provide this functionality either with HW filtering support (if present) or with SW filtering of captured branch instructions. Signed-off-by: Anshuman Khandual Reviewed-by: Stephane Eranian Reviewed-by: Andi Kleen Signed-off-by: Peter Zijlstra Cc: mpe@ellerman.id.au Cc: benh@kernel.crashing.org Cc: Arnaldo Carvalho de Melo Cc: Linus Torvalds Link: http://lkml.kernel.org/r/1400743210-32289-1-git-send-email-khandual@linux.vnet.ibm.com Signed-off-by: Ingo Molnar --- include/uapi/linux/perf_event.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index e3fc8f09d110ce..d9cd853818ad57 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -163,8 +163,9 @@ enum perf_branch_sample_type { PERF_SAMPLE_BRANCH_ABORT_TX = 1U << 7, /* transaction aborts */ PERF_SAMPLE_BRANCH_IN_TX = 1U << 8, /* in transaction */ PERF_SAMPLE_BRANCH_NO_TX = 1U << 9, /* not in transaction */ + PERF_SAMPLE_BRANCH_COND = 1U << 10, /* conditional branches */ - PERF_SAMPLE_BRANCH_MAX = 1U << 10, /* non-ABI */ + PERF_SAMPLE_BRANCH_MAX = 1U << 11, /* non-ABI */ }; #define PERF_SAMPLE_BRANCH_PLM_ALL \ From 0fffa5df4cf3e22be4f40f6698ab9e49f3ffd6fa Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 22 May 2014 12:50:08 +0530 Subject: [PATCH 096/101] perf/tool: Add conditional branch filter 'cond' to perf record Adding perf record support for new branch stack filter criteria PERF_SAMPLE_BRANCH_COND. 
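An illustrative command line (the "cond" filter name comes from the branch_modes table in the diff below; -j/--branch-filter is perf record's existing branch stack option, and "u" restricts sampling to the user privilege level):

  $ perf record -j cond,u -- ./workload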
Signed-off-by: Anshuman Khandual Reviewed-by: Stephane Eranian Reviewed-by: Andi Kleen Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1400743210-32289-2-git-send-email-khandual@linux.vnet.ibm.com Cc: mpe@ellerman.id.au Cc: benh@kernel.crashing.org Cc: Arnaldo Carvalho de Melo Cc: Linus Torvalds Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index ce2cfec5c76470..378b85b731a72f 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -548,6 +548,7 @@ static const struct branch_mode branch_modes[] = { BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX), BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX), BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX), + BRANCH_OPT("cond", PERF_SAMPLE_BRANCH_COND), BRANCH_END }; From 37548914fbfcd56e1955a9b7e55dc3b84a3e9e25 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 22 May 2014 12:50:09 +0530 Subject: [PATCH 097/101] perf/x86: Add conditional branch filtering support This patch adds conditional branch filtering support, enabling it for PERF_SAMPLE_BRANCH_COND in perf branch stack sampling framework by utilizing an available software filter X86_BR_JCC. Signed-off-by: Anshuman Khandual Reviewed-by: Stephane Eranian Reviewed-by: Andi Kleen Signed-off-by: Peter Zijlstra Cc: mpe@ellerman.id.au Cc: benh@kernel.crashing.org Cc: Arnaldo Carvalho de Melo Cc: Linus Torvalds Link: http://lkml.kernel.org/r/1400743210-32289-3-git-send-email-khandual@linux.vnet.ibm.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_intel_lbr.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c index d82d155aca8c7c..9dd2459a4c738d 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c +++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c @@ -384,6 +384,9 @@ static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event) if (br_type & PERF_SAMPLE_BRANCH_NO_TX) mask |= X86_BR_NO_TX; + if (br_type & PERF_SAMPLE_BRANCH_COND) + mask |= X86_BR_JCC; + /* * stash actual user request into reg, it may * be used by fixup code for some CPU @@ -678,6 +681,7 @@ static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = { * NHM/WSM erratum: must include IND_JMP to capture IND_CALL */ [PERF_SAMPLE_BRANCH_IND_CALL] = LBR_IND_CALL | LBR_IND_JMP, + [PERF_SAMPLE_BRANCH_COND] = LBR_JCC, }; static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = { @@ -689,6 +693,7 @@ static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = { [PERF_SAMPLE_BRANCH_ANY_CALL] = LBR_REL_CALL | LBR_IND_CALL | LBR_FAR, [PERF_SAMPLE_BRANCH_IND_CALL] = LBR_IND_CALL, + [PERF_SAMPLE_BRANCH_COND] = LBR_JCC, }; /* core */ From 3e39db4ae2a92ae9e338e8066411b694b0edcb31 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 22 May 2014 12:50:10 +0530 Subject: [PATCH 098/101] perf/documentation: Add description for conditional branch filter Signed-off-by: Anshuman Khandual Reviewed-by: Stephane Eranian Reviewed-by: Andi Kleen Signed-off-by: Peter Zijlstra Cc: mpe@ellerman.id.au Cc: benh@kernel.crashing.org Cc: Adrian Hunter Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Link: http://lkml.kernel.org/r/1400743210-32289-4-git-send-email-khandual@linux.vnet.ibm.com Signed-off-by: Ingo Molnar --- tools/perf/Documentation/perf-record.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/perf/Documentation/perf-record.txt 
b/tools/perf/Documentation/perf-record.txt index c71b0f36d9e8d4..d460049cae8e7c 100644 --- a/tools/perf/Documentation/perf-record.txt +++ b/tools/perf/Documentation/perf-record.txt @@ -184,9 +184,10 @@ following filters are defined: - in_tx: only when the target is in a hardware transaction - no_tx: only when the target is not in a hardware transaction - abort_tx: only when the target is a hardware transaction abort + - cond: conditional branches + -The option requires at least one branch type among any, any_call, any_ret, ind_call. +The option requires at least one branch type among any, any_call, any_ret, ind_call, cond. The privilege levels may be omitted, in which case, the privilege levels of the associated event are applied to the branch filter. Both kernel (k) and hypervisor (hv) privilege levels are subject to permissions. When sampling on multiple events, branch stack sampling From 5cdb76d6f0b657c1140de74ed5af7cc8c5ed5faf Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 1 Jun 2014 21:13:46 +0200 Subject: [PATCH 099/101] uprobes/x86: Rename arch_uprobe->def to ->defparam, minor comment updates Purely cosmetic, no changes in .o: 1. As Jim pointed out, arch_uprobe->def looks ambiguous; rename it to ->defparam. 2. Add a comment to default_post_xol_op() to explain the "regs->sp +=" adjustment. 3. Remove the stale part of the comment in arch_uprobe_analyze_insn(). Suggested-by: Jim Keniston Reviewed-by: Masami Hiramatsu Acked-by: Srikar Dronamraju Signed-off-by: Oleg Nesterov --- arch/x86/include/asm/uprobes.h | 2 +- arch/x86/kernel/uprobes.c | 37 +++++++++++++++++----------------- 2 files changed, 19 insertions(+), 20 deletions(-) diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h index 7be3c079e389d1..74f4c2ff642729 100644 --- a/arch/x86/include/asm/uprobes.h +++ b/arch/x86/include/asm/uprobes.h @@ -52,7 +52,7 @@ struct arch_uprobe { struct { u8 fixups; u8 ilen; - } def; + } defparam; }; }; diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 159ca520ef5b75..5d1cbfe4ae58ee 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -254,7 +254,7 @@ static inline bool is_64bit_mm(struct mm_struct *mm) * If arch_uprobe->insn doesn't use rip-relative addressing, return * immediately. Otherwise, rewrite the instruction so that it accesses * its memory operand indirectly through a scratch register. Set - * def->fixups accordingly. (The contents of the scratch register + * defparam->fixups accordingly. (The contents of the scratch register * will be saved before we single-step the modified instruction, * and restored afterward). * @@ -372,14 +372,14 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn) */ if (reg != 6 && reg2 != 6) { reg2 = 6; - auprobe->def.fixups |= UPROBE_FIX_RIP_SI; + auprobe->defparam.fixups |= UPROBE_FIX_RIP_SI; } else if (reg != 7 && reg2 != 7) { reg2 = 7; - auprobe->def.fixups |= UPROBE_FIX_RIP_DI; + auprobe->defparam.fixups |= UPROBE_FIX_RIP_DI; /* TODO (paranoia): force maskmovq to not use di */ } else { reg2 = 3; - auprobe->def.fixups |= UPROBE_FIX_RIP_BX; + auprobe->defparam.fixups |= UPROBE_FIX_RIP_BX; } /* * Point cursor at the modrm byte.
The next 4 bytes are the @@ -398,9 +398,9 @@ static inline unsigned long * scratch_reg(struct arch_uprobe *auprobe, struct pt_regs *regs) { - if (auprobe->def.fixups & UPROBE_FIX_RIP_SI) + if (auprobe->defparam.fixups & UPROBE_FIX_RIP_SI) return &regs->si; - if (auprobe->def.fixups & UPROBE_FIX_RIP_DI) + if (auprobe->defparam.fixups & UPROBE_FIX_RIP_DI) return &regs->di; return &regs->bx; } @@ -411,18 +411,18 @@ scratch_reg(struct arch_uprobe *auprobe, struct pt_regs *regs) */ static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { - if (auprobe->def.fixups & UPROBE_FIX_RIP_MASK) { + if (auprobe->defparam.fixups & UPROBE_FIX_RIP_MASK) { struct uprobe_task *utask = current->utask; unsigned long *sr = scratch_reg(auprobe, regs); utask->autask.saved_scratch_register = *sr; - *sr = utask->vaddr + auprobe->def.ilen; + *sr = utask->vaddr + auprobe->defparam.ilen; } } static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { - if (auprobe->def.fixups & UPROBE_FIX_RIP_MASK) { + if (auprobe->defparam.fixups & UPROBE_FIX_RIP_MASK) { struct uprobe_task *utask = current->utask; unsigned long *sr = scratch_reg(auprobe, regs); @@ -499,16 +499,16 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs struct uprobe_task *utask = current->utask; riprel_post_xol(auprobe, regs); - if (auprobe->def.fixups & UPROBE_FIX_IP) { + if (auprobe->defparam.fixups & UPROBE_FIX_IP) { long correction = utask->vaddr - utask->xol_vaddr; regs->ip += correction; - } else if (auprobe->def.fixups & UPROBE_FIX_CALL) { - regs->sp += sizeof_long(); - if (push_ret_address(regs, utask->vaddr + auprobe->def.ilen)) + } else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) { + regs->sp += sizeof_long(); /* Pop incorrect return address */ + if (push_ret_address(regs, utask->vaddr + auprobe->defparam.ilen)) return -ERESTART; } /* popf; tell the caller to not touch TF */ - if (auprobe->def.fixups & UPROBE_FIX_SETF) + if (auprobe->defparam.fixups & UPROBE_FIX_SETF) utask->autask.saved_tf = true; return 0; @@ -711,12 +711,11 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, /* * Figure out which fixups default_post_xol_op() will need to perform, - * and annotate def->fixups accordingly. To start with, ->fixups is - * either zero or it reflects rip-related fixups. + * and annotate defparam->fixups accordingly. */ switch (OPCODE1(&insn)) { case 0x9d: /* popf */ - auprobe->def.fixups |= UPROBE_FIX_SETF; + auprobe->defparam.fixups |= UPROBE_FIX_SETF; break; case 0xc3: /* ret or lret -- ip is correct */ case 0xcb: @@ -742,8 +741,8 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, riprel_analyze(auprobe, &insn); } - auprobe->def.ilen = insn.length; - auprobe->def.fixups |= fix_ip_or_call; + auprobe->defparam.ilen = insn.length; + auprobe->defparam.fixups |= fix_ip_or_call; auprobe->ops = &default_xol_ops; return 0; From e041e328c4b41e1db79bfe5ba9992c2ed771ad19 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 21 May 2014 17:32:19 +0200 Subject: [PATCH 100/101] perf: Fix perf_event_comm() vs. exec() assumption perf_event_comm() assumes that set_task_comm() is only called on exec(), and in particular that it is only called on current. Neither is true, as Dave reported a WARN triggered by set_task_comm() being called on !current. Separate the exec() hook from the comm hook.
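One such non-exec() path into set_task_comm() is prctl(PR_SET_NAME), which renames the calling thread without any exec(). A minimal user-space trigger for that path, included purely as an illustration of what the changelog describes:

#include <sys/prctl.h>
#include <stdio.h>

int main(void)
{
	char name[16] = "";

	/* Renames the caller: set_task_comm() runs and a comm event
	 * fires, but no exec() happened, so no enable-on-exec logic
	 * should run here. */
	prctl(PR_SET_NAME, "renamed");
	prctl(PR_GET_NAME, name);
	printf("comm is now: %s\n", name);
	return 0;
}

And, as the changelog notes, set_task_comm() can also run on a task other than current, so after this change the enable-on-exec pass hangs off the new perf_event_exec() hook in the exec() path only.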
Reported-by: Dave Jones Signed-off-by: Peter Zijlstra Cc: Alexander Viro Cc: Arnaldo Carvalho de Melo Cc: Linus Torvalds Cc: linux-fsdevel@vger.kernel.org Cc: linux-kernel@vger.kernel.org Link: http://lkml.kernel.org/r/20140521153219.GH5226@laptop.programming.kicks-ass.net [ Build fix. ] Signed-off-by: Ingo Molnar --- fs/exec.c | 1 + include/linux/perf_event.h | 4 +++- kernel/events/core.c | 28 ++++++++++++++++------------ 3 files changed, 20 insertions(+), 13 deletions(-) diff --git a/fs/exec.c b/fs/exec.c index 238b7aa26f68ab..a038a41a3677d9 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1110,6 +1110,7 @@ void setup_new_exec(struct linux_binprm * bprm) else set_dumpable(current->mm, suid_dumpable); + perf_event_exec(); set_task_comm(current, kbasename(bprm->filename)); /* Set the new mm task size. We have to do that late because it may diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 3ef6ea12806a29..9b5cd1992a88cc 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -695,6 +695,7 @@ extern struct perf_guest_info_callbacks *perf_guest_cbs; extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); +extern void perf_event_exec(void); extern void perf_event_comm(struct task_struct *tsk); extern void perf_event_fork(struct task_struct *tsk); @@ -772,7 +773,7 @@ extern void perf_event_enable(struct perf_event *event); extern void perf_event_disable(struct perf_event *event); extern int __perf_event_disable(void *info); extern void perf_event_task_tick(void); -#else +#else /* !CONFIG_PERF_EVENTS: */ static inline void perf_event_task_sched_in(struct task_struct *prev, struct task_struct *task) { } @@ -802,6 +803,7 @@ static inline int perf_unregister_guest_info_callbacks (struct perf_guest_info_callbacks *callbacks) { return 0; } static inline void perf_event_mmap(struct vm_area_struct *vma) { } +static inline void perf_event_exec(void) { } static inline void perf_event_comm(struct task_struct *tsk) { } static inline void perf_event_fork(struct task_struct *tsk) { } static inline void perf_event_init(void) { } diff --git a/kernel/events/core.c b/kernel/events/core.c index 440eefc67397e4..647698f919885d 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -2970,6 +2970,22 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx) local_irq_restore(flags); } +void perf_event_exec(void) +{ + struct perf_event_context *ctx; + int ctxn; + + rcu_read_lock(); + for_each_task_context_nr(ctxn) { + ctx = current->perf_event_ctxp[ctxn]; + if (!ctx) + continue; + + perf_event_enable_on_exec(ctx); + } + rcu_read_unlock(); +} + /* * Cross CPU call to read the hardware event */ @@ -5057,18 +5073,6 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event) void perf_event_comm(struct task_struct *task) { struct perf_comm_event comm_event; - struct perf_event_context *ctx; - int ctxn; - - rcu_read_lock(); - for_each_task_context_nr(ctxn) { - ctx = task->perf_event_ctxp[ctxn]; - if (!ctx) - continue; - - perf_event_enable_on_exec(ctx); - } - rcu_read_unlock(); if (!atomic_read(&nr_comm_events)) return; From 82b897782d10fcc4930c9d4a15b175348fdd2871 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 28 May 2014 11:45:04 +0300 Subject: [PATCH 101/101] perf: Differentiate exec() and non-exec() comm events perf tools like 'perf report' can aggregate samples by comm strings, which generally works. 
However, there are other potential use-cases. For example, to pair up 'calls' with 'returns' accurately (from branch events like Intel BTS) it is necessary to identify whether the process has exec'd. Although a comm event is generated when an 'exec' happens, it is also generated whenever the comm string is changed on a whim (e.g. by prctl PR_SET_NAME). This patch adds a flag to the comm event to differentiate one case from the other. In order to determine whether the kernel supports the new flag, a selection bit named 'comm_exec' is added to struct perf_event_attr. The bit does nothing but will cause perf_event_open() to fail if the bit is set on kernels that do not have it defined. Signed-off-by: Adrian Hunter Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/537D9EBE.7030806@intel.com Cc: Paul Mackerras Cc: Dave Jones Cc: Arnaldo Carvalho de Melo Cc: David Ahern Cc: Jiri Olsa Cc: Alexander Viro Cc: Linus Torvalds Cc: linux-fsdevel@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- fs/exec.c | 6 +++--- include/linux/perf_event.h | 4 ++-- include/linux/sched.h | 6 +++++- include/uapi/linux/perf_event.h | 9 +++++++-- kernel/events/core.c | 4 ++-- 5 files changed, 19 insertions(+), 10 deletions(-) diff --git a/fs/exec.c b/fs/exec.c index a038a41a3677d9..a3d33fe592d6d9 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1046,13 +1046,13 @@ EXPORT_SYMBOL_GPL(get_task_comm); * so that a new one can be started */ -void set_task_comm(struct task_struct *tsk, const char *buf) +void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec) { task_lock(tsk); trace_task_rename(tsk, buf); strlcpy(tsk->comm, buf, sizeof(tsk->comm)); task_unlock(tsk); - perf_event_comm(tsk); + perf_event_comm(tsk, exec); } int flush_old_exec(struct linux_binprm * bprm) @@ -1111,7 +1111,7 @@ void setup_new_exec(struct linux_binprm * bprm) set_dumpable(current->mm, suid_dumpable); perf_event_exec(); - set_task_comm(current, kbasename(bprm->filename)); + __set_task_comm(current, kbasename(bprm->filename), true); /* Set the new mm task size.
We have to do that late because it may * depend on TIF_32BIT which is only updated in flush_thread() on diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index b4c1d4685bf050..707617a8c0f6c6 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -707,7 +707,7 @@ extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks * extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); extern void perf_event_exec(void); -extern void perf_event_comm(struct task_struct *tsk); +extern void perf_event_comm(struct task_struct *tsk, bool exec); extern void perf_event_fork(struct task_struct *tsk); /* Callchains */ @@ -815,7 +815,7 @@ static inline int perf_unregister_guest_info_callbacks static inline void perf_event_mmap(struct vm_area_struct *vma) { } static inline void perf_event_exec(void) { } -static inline void perf_event_comm(struct task_struct *tsk) { } +static inline void perf_event_comm(struct task_struct *tsk, bool exec) { } static inline void perf_event_fork(struct task_struct *tsk) { } static inline void perf_event_init(void) { } static inline int perf_swevent_get_recursion_context(void) { return -1; } diff --git a/include/linux/sched.h b/include/linux/sched.h index 221b2bde372363..ad86e1d7dbc27f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2379,7 +2379,11 @@ extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, i struct task_struct *fork_idle(int); extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); -extern void set_task_comm(struct task_struct *tsk, const char *from); +extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec); +static inline void set_task_comm(struct task_struct *tsk, const char *from) +{ + __set_task_comm(tsk, from, false); +} extern char *get_task_comm(char *to, struct task_struct *tsk); #ifdef CONFIG_SMP diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index d9cd853818ad57..5312fae472187c 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -302,8 +302,8 @@ struct perf_event_attr { exclude_callchain_kernel : 1, /* exclude kernel callchains */ exclude_callchain_user : 1, /* exclude user callchains */ mmap2 : 1, /* include mmap with inode data */ - - __reserved_1 : 40; + comm_exec : 1, /* flag comm events that are due to an exec */ + __reserved_1 : 39; union { __u32 wakeup_events; /* wakeup every n events */ @@ -502,7 +502,12 @@ struct perf_event_mmap_page { #define PERF_RECORD_MISC_GUEST_KERNEL (4 << 0) #define PERF_RECORD_MISC_GUEST_USER (5 << 0) +/* + * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on + * different events so can reuse the same bit position. + */ #define PERF_RECORD_MISC_MMAP_DATA (1 << 13) +#define PERF_RECORD_MISC_COMM_EXEC (1 << 13) /* * Indicates that the content of PERF_SAMPLE_IP points to * the actual instruction that triggered the event. 
See also diff --git a/kernel/events/core.c b/kernel/events/core.c index 8fac2056d51e0c..7da5e561e89a89 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -5090,7 +5090,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event) NULL); } -void perf_event_comm(struct task_struct *task) +void perf_event_comm(struct task_struct *task, bool exec) { struct perf_comm_event comm_event; @@ -5104,7 +5104,7 @@ void perf_event_comm(struct task_struct *task) .event_id = { .header = { .type = PERF_RECORD_COMM, - .misc = 0, + .misc = exec ? PERF_RECORD_MISC_COMM_EXEC : 0, /* .size */ }, /* .pid */
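On the consumer side, a ring-buffer reader can now tell the two comm flavours apart from the event header's misc field, provided it set attr.comm_exec so that perf_event_open() confirms kernel support. A rough sketch; the fixed-size record layout below is an assumption for illustration only (a real PERF_RECORD_COMM record carries a variable-length comm string plus an optional sample-id trailer):

#include <linux/perf_event.h>
#include <stdbool.h>
#include <stdio.h>

/* Abbreviated view of a PERF_RECORD_COMM record; illustrative only. */
struct comm_record {
	struct perf_event_header header;
	__u32 pid, tid;
	char comm[16];
};

static bool comm_is_exec(const struct comm_record *rec)
{
	/* The type check matters: PERF_RECORD_MISC_MMAP_DATA reuses
	 * the same misc bit on a different record type. */
	return rec->header.type == PERF_RECORD_COMM &&
	       (rec->header.misc & PERF_RECORD_MISC_COMM_EXEC);
}

int main(void)
{
	struct comm_record rec = {
		.header = {
			.type = PERF_RECORD_COMM,
			.misc = PERF_RECORD_MISC_COMM_EXEC,
		},
	};

	printf("comm due to exec? %s\n", comm_is_exec(&rec) ? "yes" : "no");
	return 0;
}

Since PERF_RECORD_MISC_COMM_EXEC shares bit 13 with PERF_RECORD_MISC_MMAP_DATA, checking header.type before header.misc is what keeps the two apart.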