Commit

Merge branch 'tip/tracing/urgent-1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/urgent
Ingo Molnar committed Jun 20, 2009
2 parents 3daeb4d + 71e308a commit d4c4038
Showing 9 changed files with 54 additions and 9 deletions.
2 changes: 1 addition & 1 deletion arch/powerpc/kernel/ftrace.c
@@ -605,7 +605,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
return;
}

- if (ftrace_push_return_trace(old, self_addr, &trace.depth) == -EBUSY) {
+ if (ftrace_push_return_trace(old, self_addr, &trace.depth, 0) == -EBUSY) {
*parent = old;
return;
}
2 changes: 1 addition & 1 deletion arch/s390/kernel/ftrace.c
@@ -190,7 +190,7 @@ unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
goto out;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
goto out;
- if (ftrace_push_return_trace(parent, ip, &trace.depth) == -EBUSY)
+ if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
goto out;
trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN;
/* Only trace if the calling function expects to. */
1 change: 1 addition & 0 deletions arch/x86/Kconfig
@@ -33,6 +33,7 @@ config X86
select HAVE_DYNAMIC_FTRACE
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_FUNCTION_GRAPH_FP_TEST
select HAVE_FUNCTION_TRACE_MCOUNT_TEST
select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE
select HAVE_FTRACE_SYSCALLS
2 changes: 2 additions & 0 deletions arch/x86/kernel/entry_32.S
@@ -1154,6 +1154,7 @@ ENTRY(ftrace_graph_caller)
pushl %edx
movl 0xc(%esp), %edx
lea 0x4(%ebp), %eax
+ movl (%ebp), %ecx
subl $MCOUNT_INSN_SIZE, %edx
call prepare_ftrace_return
popl %edx
@@ -1168,6 +1169,7 @@ return_to_handler:
pushl %eax
pushl %ecx
pushl %edx
+ movl %ebp, %eax
call ftrace_return_to_handler
movl %eax, 0xc(%esp)
popl %edx
2 changes: 2 additions & 0 deletions arch/x86/kernel/entry_64.S
@@ -135,6 +135,7 @@ ENTRY(ftrace_graph_caller)

leaq 8(%rbp), %rdi
movq 0x38(%rsp), %rsi
+ movq (%rbp), %rdx
subq $MCOUNT_INSN_SIZE, %rsi

call prepare_ftrace_return
@@ -150,6 +151,7 @@ GLOBAL(return_to_handler)
/* Save the return values */
movq %rax, (%rsp)
movq %rdx, 8(%rsp)
+ movq %rbp, %rdi

call ftrace_return_to_handler

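On both the 32-bit and 64-bit side the added instructions play the same role: ftrace_graph_caller passes the frame-pointer value saved in the traced function's frame ((%ebp) / (%rbp)) into prepare_ftrace_return(), and return_to_handler passes the frame-pointer register itself into ftrace_return_to_handler(); if gcc kept the standard frame layout, the two values are identical, which is exactly what the new FP test verifies. The movl/movq instructions simply put that value into the register carrying the corresponding C argument (the 32-bit kernel uses regparm(3), so %eax/%edx/%ecx hold the first three arguments; the 64-bit side follows the usual %rdi/%rsi/%rdx order). For reference, the two C entry points they feed, with the signatures this commit gives them in the hunks that follow:

void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
                           unsigned long frame_pointer);

unsigned long ftrace_return_to_handler(unsigned long frame_pointer);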
6 changes: 4 additions & 2 deletions arch/x86/kernel/ftrace.c
@@ -408,7 +408,8 @@ int ftrace_disable_ftrace_graph_caller(void)
* Hook the return address and push it in the stack of return addrs
* in current thread info.
*/
- void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+ unsigned long frame_pointer)
{
unsigned long old;
int faulted;
@@ -453,7 +454,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
return;
}

- if (ftrace_push_return_trace(old, self_addr, &trace.depth) == -EBUSY) {
+ if (ftrace_push_return_trace(old, self_addr, &trace.depth,
+ frame_pointer) == -EBUSY) {
*parent = old;
return;
}
4 changes: 3 additions & 1 deletion include/linux/ftrace.h
@@ -362,6 +362,7 @@ struct ftrace_ret_stack {
unsigned long func;
unsigned long long calltime;
unsigned long long subtime;
+ unsigned long fp;
};

/*
@@ -372,7 +373,8 @@ struct ftrace_ret_stack {
extern void return_to_handler(void);

extern int
- ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth);
+ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
+ unsigned long frame_pointer);

/*
* Sometimes we don't want to trace a function with the function
8 changes: 8 additions & 0 deletions kernel/trace/Kconfig
@@ -18,6 +18,13 @@ config HAVE_FUNCTION_TRACER
config HAVE_FUNCTION_GRAPH_TRACER
bool

+ config HAVE_FUNCTION_GRAPH_FP_TEST
+ bool
+ help
+ An arch may pass in a unique value (frame pointer) to both the
+ entering and exiting of a function. On exit, the value is compared
+ and if it does not match, then it will panic the kernel.
+
config HAVE_FUNCTION_TRACE_MCOUNT_TEST
bool
help
@@ -121,6 +128,7 @@ config FUNCTION_GRAPH_TRACER
bool "Kernel Function Graph Tracer"
depends on HAVE_FUNCTION_GRAPH_TRACER
depends on FUNCTION_TRACER
+ depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
default y
help
Enable the kernel to trace a function at both its return
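The help text above is the whole idea: record an opaque token (the frame pointer) next to the saved return address when a function is entered, and require the same token back at exit before trusting the popped address. Below is a minimal stand-alone sketch of that idea in plain C, with hypothetical toy_* names and none of the kernel's per-task ret_stack bookkeeping; the real implementation follows in kernel/trace/trace_functions_graph.c.

/* Toy model of the entry/exit frame-pointer check; not kernel code. */
#include <stdio.h>
#include <stdlib.h>

#define TOY_STACK_MAX 64

struct toy_ret_entry {
	unsigned long ret;	/* real return address saved at entry */
	unsigned long fp;	/* frame pointer recorded at entry */
};

static struct toy_ret_entry toy_stack[TOY_STACK_MAX];
static int toy_top = -1;

/* Entry hook: remember the return address together with the frame pointer. */
static int toy_push_return(unsigned long ret, unsigned long fp)
{
	if (toy_top + 1 >= TOY_STACK_MAX)
		return -1;		/* too deep, refuse to trace */
	toy_top++;
	toy_stack[toy_top].ret = ret;
	toy_stack[toy_top].fp = fp;
	return 0;
}

/* Exit hook: the same frame pointer must come back before ret is trusted. */
static unsigned long toy_pop_return(unsigned long fp)
{
	struct toy_ret_entry *e = &toy_stack[toy_top--];

	if (e->fp != fp) {
		fprintf(stderr, "bad frame pointer: expected %lx, got %lx\n",
			e->fp, fp);
		abort();	/* the kernel stops the tracer and panics */
	}
	return e->ret;
}

int main(void)
{
	/* Matching entry/exit token: the original return address comes back. */
	toy_push_return(0x1234UL, 0xbeefUL);
	printf("ret = %lx\n", toy_pop_return(0xbeefUL));

	/*
	 * A mismatched token (e.g. the compiler copied the return address
	 * instead of leaving it behind the frame pointer) would trip the
	 * check: toy_pop_return(0xdeadUL);
	 */
	return 0;
}

The real code stores the token in the new fp member of struct ftrace_ret_stack and does the comparison in ftrace_pop_return_trace() under CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST.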
36 changes: 32 additions & 4 deletions kernel/trace/trace_functions_graph.c
@@ -57,7 +57,8 @@ static struct tracer_flags tracer_flags = {

/* Add a function return address to the trace stack on thread info.*/
int
- ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
+ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
+ unsigned long frame_pointer)
{
unsigned long long calltime;
int index;
@@ -85,14 +86,16 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
current->ret_stack[index].func = func;
current->ret_stack[index].calltime = calltime;
current->ret_stack[index].subtime = 0;
+ current->ret_stack[index].fp = frame_pointer;
*depth = index;

return 0;
}

/* Retrieve a function return address to the trace stack on thread info.*/
static void
- ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
+ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
+ unsigned long frame_pointer)
{
int index;

@@ -106,6 +109,31 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
return;
}

+ #ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
+ /*
+ * The arch may choose to record the frame pointer used
+ * and check it here to make sure that it is what we expect it
+ * to be. If gcc does not set the place holder of the return
+ * address in the frame pointer, and does a copy instead, then
+ * the function graph trace will fail. This test detects this
+ * case.
+ *
+ * Currently, x86_32 with optimize for size (-Os) makes the latest
+ * gcc do the above.
+ */
+ if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
+ ftrace_graph_stop();
+ WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
+ " from func %pF return to %lx\n",
+ current->ret_stack[index].fp,
+ frame_pointer,
+ (void *)current->ret_stack[index].func,
+ current->ret_stack[index].ret);
+ *ret = (unsigned long)panic;
+ return;
+ }
+ #endif
+
*ret = current->ret_stack[index].ret;
trace->func = current->ret_stack[index].func;
trace->calltime = current->ret_stack[index].calltime;
@@ -117,12 +145,12 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
* Send the trace to the ring-buffer.
* @return the original return address.
*/
- unsigned long ftrace_return_to_handler(void)
+ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
struct ftrace_graph_ret trace;
unsigned long ret;

- ftrace_pop_return_trace(&trace, &ret);
+ ftrace_pop_return_trace(&trace, &ret, frame_pointer);
trace.rettime = trace_clock_local();
ftrace_graph_return(&trace);
barrier();
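One note on the failure path in ftrace_pop_return_trace() above: when the recorded and received frame pointers disagree, the tracer is shut down with ftrace_graph_stop(), the WARN reports both values along with the function and its original return address, and *ret is pointed at panic() rather than at a saved return address that can no longer be trusted to pair with this return; this is the deliberate panic the new Kconfig help text promises.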
