
Commit

Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/core
Ingo Molnar committed Feb 17, 2012
2 parents d1e169d + 47b0edc commit 09bda44
Showing 15 changed files with 118 additions and 44 deletions.
7 changes: 7 additions & 0 deletions Documentation/trace/ftrace.txt
@@ -226,6 +226,13 @@ Here is the list of current tracers that may be configured.
Traces and records the max latency that it takes for
the highest priority task to get scheduled after
it has been woken up.
Traces all tasks as an average developer would expect.

"wakeup_rt"

Traces and records the max latency that it takes for just
RT tasks (as the current "wakeup" does). This is useful
for those interested in wake up timings of RT tasks.

"hw-branch-tracer"

24 changes: 12 additions & 12 deletions arch/x86/kernel/process.c
@@ -377,8 +377,8 @@ static inline int hlt_use_halt(void)
void default_idle(void)
{
if (hlt_use_halt()) {
-trace_power_start(POWER_CSTATE, 1, smp_processor_id());
-trace_cpu_idle(1, smp_processor_id());
+trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
+trace_cpu_idle_rcuidle(1, smp_processor_id());
current_thread_info()->status &= ~TS_POLLING;
/*
* TS_POLLING-cleared state must be visible before we
@@ -391,8 +391,8 @@ void default_idle(void)
else
local_irq_enable();
current_thread_info()->status |= TS_POLLING;
-trace_power_end(smp_processor_id());
-trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
+trace_power_end_rcuidle(smp_processor_id());
+trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
} else {
local_irq_enable();
/* loop is done by the caller */
@@ -450,8 +450,8 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait);
static void mwait_idle(void)
{
if (!need_resched()) {
-trace_power_start(POWER_CSTATE, 1, smp_processor_id());
-trace_cpu_idle(1, smp_processor_id());
+trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
+trace_cpu_idle_rcuidle(1, smp_processor_id());
if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
clflush((void *)&current_thread_info()->flags);

@@ -461,8 +461,8 @@ static void mwait_idle(void)
__sti_mwait(0, 0);
else
local_irq_enable();
-trace_power_end(smp_processor_id());
-trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
+trace_power_end_rcuidle(smp_processor_id());
+trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
} else
local_irq_enable();
}
@@ -474,13 +474,13 @@ static void mwait_idle(void)
*/
static void poll_idle(void)
{
-trace_power_start(POWER_CSTATE, 0, smp_processor_id());
-trace_cpu_idle(0, smp_processor_id());
+trace_power_start_rcuidle(POWER_CSTATE, 0, smp_processor_id());
+trace_cpu_idle_rcuidle(0, smp_processor_id());
local_irq_enable();
while (!need_resched())
cpu_relax();
-trace_power_end(smp_processor_id());
-trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
+trace_power_end_rcuidle(smp_processor_id());
+trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}

/*
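For context: by the time these idle routines run, the CPU has already entered RCU's extended quiescent state, and a plain tracepoint takes rcu_read_lock_sched_notrace(), which RCU will not honor there. The new _rcuidle variants are safe in that window. A minimal sketch of the resulting pattern, where arch_halt() is a hypothetical stand-in for the architecture's halt or mwait primitive:

#include <linux/smp.h>
#include <trace/events/power.h>

/* Sketch only; arch_halt() is hypothetical, the tracepoint calls mirror the hunks above. */
static void example_idle(void)
{
	/* RCU treats this CPU as idle here, so the _rcuidle variants are required. */
	trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
	trace_cpu_idle_rcuidle(1, smp_processor_id());

	arch_halt();		/* hypothetical: sleep until the next interrupt */

	trace_power_end_rcuidle(smp_processor_id());
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}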
8 changes: 4 additions & 4 deletions drivers/cpuidle/cpuidle.c
@@ -94,13 +94,13 @@ int cpuidle_idle_call(void)

target_state = &drv->states[next_state];

-trace_power_start(POWER_CSTATE, next_state, dev->cpu);
-trace_cpu_idle(next_state, dev->cpu);
+trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
+trace_cpu_idle_rcuidle(next_state, dev->cpu);

entered_state = target_state->enter(dev, drv, next_state);

-trace_power_end(dev->cpu);
-trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
+trace_power_end_rcuidle(dev->cpu);
+trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

if (entered_state >= 0) {
/* Update cpuidle counters */
4 changes: 2 additions & 2 deletions include/linux/ftrace.h
@@ -178,9 +178,9 @@ struct dyn_ftrace {
};

int ftrace_force_update(void);
-void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
+int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
int len, int reset);
-void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
+int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
7 changes: 1 addition & 6 deletions include/linux/interrupt.h
@@ -20,7 +20,6 @@
#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>
-#include <trace/events/irq.h>

/*
* These correspond to the IORESOURCE_IRQ_* defines in
@@ -456,11 +455,7 @@ asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
-static inline void __raise_softirq_irqoff(unsigned int nr)
-{
-trace_softirq_raise(nr);
-or_softirq_pending(1UL << nr);
-}
+extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
22 changes: 18 additions & 4 deletions include/linux/tracepoint.h
@@ -114,14 +114,15 @@ static inline void tracepoint_synchronize_unregister(void)
* as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just
* "void *data", where as the DECLARE_TRACE() will pass in "void *data, proto".
*/
-#define __DO_TRACE(tp, proto, args, cond) \
+#define __DO_TRACE(tp, proto, args, cond, prercu, postrcu) \
do { \
struct tracepoint_func *it_func_ptr; \
void *it_func; \
void *__data; \
\
if (!(cond)) \
return; \
+prercu; \
rcu_read_lock_sched_notrace(); \
it_func_ptr = rcu_dereference_sched((tp)->funcs); \
if (it_func_ptr) { \
@@ -132,22 +133,33 @@ static inline void tracepoint_synchronize_unregister(void)
} while ((++it_func_ptr)->func); \
} \
rcu_read_unlock_sched_notrace(); \
+postrcu; \
} while (0)

/*
* Make sure the alignment of the structure in the __tracepoints section will
* not add unwanted padding between the beginning of the section and the
* structure. Force alignment to the same alignment as the section start.
*/
-#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
+#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
extern struct tracepoint __tracepoint_##name; \
static inline void trace_##name(proto) \
{ \
if (static_branch(&__tracepoint_##name.key)) \
__DO_TRACE(&__tracepoint_##name, \
TP_PROTO(data_proto), \
TP_ARGS(data_args), \
-TP_CONDITION(cond)); \
+TP_CONDITION(cond),,); \
} \
+static inline void trace_##name##_rcuidle(proto) \
+{ \
+if (static_branch(&__tracepoint_##name.key)) \
+__DO_TRACE(&__tracepoint_##name, \
+TP_PROTO(data_proto), \
+TP_ARGS(data_args), \
+TP_CONDITION(cond), \
+rcu_idle_exit(), \
+rcu_idle_enter()); \
+} \
static inline int \
register_trace_##name(void (*probe)(data_proto), void *data) \
@@ -190,9 +202,11 @@ static inline void tracepoint_synchronize_unregister(void)
EXPORT_SYMBOL(__tracepoint_##name)

#else /* !CONFIG_TRACEPOINTS */
-#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
+#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
static inline void trace_##name(proto) \
{ } \
+static inline void trace_##name##_rcuidle(proto) \
+{ } \
static inline int \
register_trace_##name(void (*probe)(data_proto), \
void *data) \
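Hand-expanding the new macro for a single event makes the ordering of the RCU calls easier to see. A simplified sketch, assuming the usual cpu_idle prototype and eliding the probe-iteration loop that __DO_TRACE() generates:

static inline void trace_cpu_idle_rcuidle(unsigned int state, unsigned int cpu_id)
{
	if (static_branch(&__tracepoint_cpu_idle.key)) {
		rcu_idle_exit();		/* prercu: make RCU watch this CPU again */
		rcu_read_lock_sched_notrace();
		/* ... walk __tracepoint_cpu_idle.funcs and call each registered probe ... */
		rcu_read_unlock_sched_notrace();
		rcu_idle_enter();		/* postrcu: drop back into RCU idle */
	}
}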
2 changes: 2 additions & 0 deletions include/trace/events/power.h
@@ -151,6 +151,8 @@ enum {
events get removed */
static inline void trace_power_start(u64 type, u64 state, u64 cpuid) {};
static inline void trace_power_end(u64 cpuid) {};
+static inline void trace_power_start_rcuidle(u64 type, u64 state, u64 cpuid) {};
+static inline void trace_power_end_rcuidle(u64 cpuid) {};
static inline void trace_power_frequency(u64 type, u64 state, u64 cpuid) {};
#endif /* _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED */

41 changes: 41 additions & 0 deletions include/trace/events/printk.h
@@ -0,0 +1,41 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM printk

#if !defined(_TRACE_PRINTK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_PRINTK_H

#include <linux/tracepoint.h>

TRACE_EVENT_CONDITION(console,
TP_PROTO(const char *log_buf, unsigned start, unsigned end,
unsigned log_buf_len),

TP_ARGS(log_buf, start, end, log_buf_len),

TP_CONDITION(start != end),

TP_STRUCT__entry(
__dynamic_array(char, msg, end - start + 1)
),

TP_fast_assign(
if ((start & (log_buf_len - 1)) > (end & (log_buf_len - 1))) {
memcpy(__get_dynamic_array(msg),
log_buf + (start & (log_buf_len - 1)),
log_buf_len - (start & (log_buf_len - 1)));
memcpy((char *)__get_dynamic_array(msg) +
log_buf_len - (start & (log_buf_len - 1)),
log_buf, end & (log_buf_len - 1));
} else
memcpy(__get_dynamic_array(msg),
log_buf + (start & (log_buf_len - 1)),
end - start);
((char *)__get_dynamic_array(msg))[end - start] = 0;
),

TP_printk("%s", __get_str(msg))
);
#endif /* _TRACE_PRINTK_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
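The TP_fast_assign() above has to linearize a record that may wrap around the end of the log ring buffer. The same copy, written as an ordinary function for clarity (a sketch that assumes log_buf_len is a power of two, as it is in kernel/printk.c):

#include <linux/string.h>

static void copy_console_record(char *msg, const char *log_buf,
				unsigned start, unsigned end,
				unsigned log_buf_len)
{
	unsigned s = start & (log_buf_len - 1);
	unsigned e = end & (log_buf_len - 1);

	if (s > e) {
		/* The record wraps: copy from s to the end, then from 0 to e. */
		memcpy(msg, log_buf + s, log_buf_len - s);
		memcpy(msg + log_buf_len - s, log_buf, e);
	} else {
		memcpy(msg, log_buf + s, end - start);
	}
	msg[end - start] = '\0';	/* msg must hold end - start + 1 bytes */
}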
2 changes: 2 additions & 0 deletions kernel/irq/chip.c
@@ -16,6 +16,8 @@
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include <trace/events/irq.h>

#include "internals.h"

/**
5 changes: 5 additions & 0 deletions kernel/printk.c
@@ -44,6 +44,9 @@

#include <asm/uaccess.h>

#define CREATE_TRACE_POINTS
#include <trace/events/printk.h>

/*
* Architectures can override it:
*/
@@ -542,6 +545,8 @@ MODULE_PARM_DESC(ignore_loglevel, "ignore loglevel setting, to"
static void _call_console_drivers(unsigned start,
unsigned end, int msg_log_level)
{
trace_console(&LOG_BUF(0), start, end, log_buf_len);

if ((msg_log_level < console_loglevel || ignore_loglevel) &&
console_drivers && start != end) {
if ((start & LOG_BUF_MASK) > (end & LOG_BUF_MASK)) {
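With CREATE_TRACE_POINTS compiled into kernel/printk.c, other kernel code can attach a probe through the register_trace_console() helper that __DECLARE_TRACE() generates. A hedged sketch (the probe and init names are illustrative; this commit does not export the tracepoint, so module use is not assumed):

#include <linux/kernel.h>
#include <trace/events/printk.h>

static void console_probe(void *data, const char *log_buf, unsigned start,
			  unsigned end, unsigned log_buf_len)
{
	/* Invoked with the same arguments passed to trace_console() above. */
	pr_debug("console event: %u byte(s)\n", end - start);
}

static int __init console_probe_init(void)
{
	return register_trace_console(console_probe, NULL);
}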
6 changes: 6 additions & 0 deletions kernel/softirq.c
@@ -385,6 +385,12 @@ void raise_softirq(unsigned int nr)
local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
trace_softirq_raise(nr);
or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
softirq_vec[nr].action = action;
17 changes: 10 additions & 7 deletions kernel/trace/ftrace.c
@@ -1129,7 +1129,7 @@ static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
return NULL;

size = 1 << size_bits;
-hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL);
+hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

if (!hash->buckets) {
kfree(hash);
@@ -3146,8 +3146,10 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
mutex_lock(&ftrace_regex_lock);
if (reset)
ftrace_filter_reset(hash);
-if (buf)
-ftrace_match_records(hash, buf, len);
+if (buf && !ftrace_match_records(hash, buf, len)) {
+ret = -EINVAL;
+goto out_regex_unlock;
+}

mutex_lock(&ftrace_lock);
ret = ftrace_hash_move(ops, enable, orig_hash, hash);
@@ -3157,6 +3159,7 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,

mutex_unlock(&ftrace_lock);

+out_regex_unlock:
mutex_unlock(&ftrace_regex_lock);

free_ftrace_hash(hash);
@@ -3173,10 +3176,10 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
* Filters denote which functions should be enabled when tracing is enabled.
* If @buf is NULL and reset is set, all functions will be enabled for tracing.
*/
-void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
+int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
int len, int reset)
{
-ftrace_set_regex(ops, buf, len, reset, 1);
+return ftrace_set_regex(ops, buf, len, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter);

@@ -3191,10 +3194,10 @@ EXPORT_SYMBOL_GPL(ftrace_set_filter);
* is enabled. If @buf is NULL and reset is set, all functions will be enabled
* for tracing.
*/
-void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
+int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
int len, int reset)
{
-ftrace_set_regex(ops, buf, len, reset, 0);
+return ftrace_set_regex(ops, buf, len, reset, 0);
}
EXPORT_SYMBOL_GPL(ftrace_set_notrace);
/**
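Since ftrace_set_filter() and ftrace_set_notrace() now report failure, a caller can refuse to register an ftrace_ops whose filter matched nothing. A hedged sketch of such a caller (my_ops and the callback are illustrative, using the two-argument callback signature in use at the time of this commit):

#include <linux/ftrace.h>
#include <linux/string.h>

static void my_trace_callback(unsigned long ip, unsigned long parent_ip)
{
	/* Called on every hit of a filtered function. */
}

static struct ftrace_ops my_ops = {
	.func	= my_trace_callback,
};

static int __init my_tracer_init(void)
{
	char *pattern = "kmalloc*";
	int ret;

	ret = ftrace_set_filter(&my_ops, pattern, strlen(pattern), 1);
	if (ret)
		return ret;	/* e.g. -EINVAL when the pattern matched nothing */

	return register_ftrace_function(&my_ops);
}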
6 changes: 3 additions & 3 deletions kernel/trace/trace.c
@@ -2764,12 +2764,12 @@ static const char readme_msg[] =
"tracing mini-HOWTO:\n\n"
"# mount -t debugfs nodev /sys/kernel/debug\n\n"
"# cat /sys/kernel/debug/tracing/available_tracers\n"
"wakeup preemptirqsoff preemptoff irqsoff function sched_switch nop\n\n"
"wakeup wakeup_rt preemptirqsoff preemptoff irqsoff function nop\n\n"
"# cat /sys/kernel/debug/tracing/current_tracer\n"
"nop\n"
"# echo sched_switch > /sys/kernel/debug/tracing/current_tracer\n"
"# echo wakeup > /sys/kernel/debug/tracing/current_tracer\n"
"# cat /sys/kernel/debug/tracing/current_tracer\n"
"sched_switch\n"
"wakeup\n"
"# cat /sys/kernel/debug/tracing/trace_options\n"
"noprint-parent nosym-offset nosym-addr noverbose\n"
"# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
7 changes: 3 additions & 4 deletions kernel/trace/trace_events_filter.c
@@ -685,7 +685,7 @@ find_event_field(struct ftrace_event_call *call, char *name)

static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
{
-stack->preds = kzalloc(sizeof(*stack->preds)*(n_preds + 1), GFP_KERNEL);
+stack->preds = kcalloc(n_preds + 1, sizeof(*stack->preds), GFP_KERNEL);
if (!stack->preds)
return -ENOMEM;
stack->index = n_preds;
@@ -826,8 +826,7 @@ static int __alloc_preds(struct event_filter *filter, int n_preds)
if (filter->preds)
__free_preds(filter);

-filter->preds =
-kzalloc(sizeof(*filter->preds) * n_preds, GFP_KERNEL);
+filter->preds = kcalloc(n_preds, sizeof(*filter->preds), GFP_KERNEL);

if (!filter->preds)
return -ENOMEM;
@@ -1486,7 +1485,7 @@ static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
children = count_leafs(preds, &preds[root->left]);
children += count_leafs(preds, &preds[root->right]);

-root->ops = kzalloc(sizeof(*root->ops) * children, GFP_KERNEL);
+root->ops = kcalloc(children, sizeof(*root->ops), GFP_KERNEL);
if (!root->ops)
return -ENOMEM;

4 changes: 2 additions & 2 deletions kernel/trace/trace_syscalls.c
@@ -468,8 +468,8 @@ int __init init_ftrace_syscalls(void)
unsigned long addr;
int i;

-syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
-NR_syscalls, GFP_KERNEL);
+syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata),
+GFP_KERNEL);
if (!syscalls_metadata) {
WARN_ON(1);
return -ENOMEM;
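The kzalloc(n * size) to kcalloc(n, size) conversions above (in ftrace.c, trace_events_filter.c and trace_syscalls.c) are the usual hardening for array allocations: kcalloc() returns NULL when n * size would overflow, where an open-coded multiplication can silently wrap to a small allocation. An illustrative sketch (struct example_entry is made up for the example):

#include <linux/slab.h>
#include <linux/types.h>

struct example_entry {
	unsigned long	addr;
	u64		count;
};

static struct example_entry *alloc_entries(size_t n)
{
	/* Before: kzalloc(sizeof(struct example_entry) * n, GFP_KERNEL) */
	return kcalloc(n, sizeof(struct example_entry), GFP_KERNEL);
}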
