Commit b3a3a9c

Merge tag 'trace-3.11-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing fixes and cleanups from Steven Rostedt:
 "This contains fixes, optimizations and some clean ups

  Some of the fixes need to go back to 3.10.  They are minor, and deal
  mostly with incorrect ref counting in accessing event files.

  There were a couple of optimizations that should help perf perform a
  bit better when accessing trace events.

  And various clean ups.  Some of the clean ups are necessary to help
  fix a theoretical race between opening an event file and deleting
  that event"

* tag 'trace-3.11-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Kill the unbalanced tr->ref++ in tracing_buffers_open()
  tracing: Kill trace_array->waiter
  tracing: Do not (ab)use trace_seq in event_id_read()
  tracing: Simplify the iteration logic in f_start/f_next
  tracing: Add ref_data to function and fgraph tracer structs
  tracing: Miscellaneous fixes for trace_array ref counting
  tracing: Fix error handling to ensure instances can always be removed
  tracing/kprobe: Wait for disabling all running kprobe handlers
  tracing/perf: Move the PERF_MAX_TRACE_SIZE check into perf_trace_buf_prepare()
  tracing/syscall: Avoid perf_trace_buf_*() if sys_data->perf_events is empty
  tracing/function: Avoid perf_trace_buf_*() if event_function.perf_events is empty
  tracing: Typo fix on ring buffer comments
  tracing: Use trace_seq_puts()/trace_seq_putc() where possible
  tracing: Use correct config guard CONFIG_STACK_TRACER
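
Most of the ref-counting fixes above follow one pattern: take a reference on the trace_array when a file exposing it is opened, and drop that reference on release and on every failed open path. A condensed sketch of the pattern, based on the tracing_trace_options_open() hunk further down this page (the function name here is illustrative):

static int example_options_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	/* Refuse to pin a trace_array that is being deleted. */
	if (trace_array_get(tr) < 0)
		return -ENODEV;

	ret = single_open(file, tracing_trace_options_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);	/* failed open: drop the reference */

	return ret;
}

The matching trace_array_put() for a successful open happens in the file's release handler, which is what keeps instance removal safe while the file is held open.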
torvalds committed Jul 23, 2013
2 parents a582e5f + e70e78e commit b3a3a9c
Showing 14 changed files with 166 additions and 155 deletions.
4 changes: 0 additions & 4 deletions include/trace/ftrace.h
@@ -670,10 +670,6 @@ perf_trace_##call(void *__data, proto) \
 			     sizeof(u64)); \
 	__entry_size -= sizeof(u32); \
 	 \
-	if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \
-		      "profile buffer not large enough")) \
-		return; \
-	 \
 	entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \
 		__entry_size, event_call->event.type, &__regs, &rctx); \
 	if (!entry) \
26 changes: 14 additions & 12 deletions kernel/trace/ring_buffer.c
@@ -36,11 +36,11 @@ int ring_buffer_print_entry_header(struct trace_seq *s)
 {
 	int ret;
 
-	ret = trace_seq_printf(s, "# compressed entry header\n");
-	ret = trace_seq_printf(s, "\ttype_len    :    5 bits\n");
-	ret = trace_seq_printf(s, "\ttime_delta  :   27 bits\n");
-	ret = trace_seq_printf(s, "\tarray       :   32 bits\n");
-	ret = trace_seq_printf(s, "\n");
+	ret = trace_seq_puts(s, "# compressed entry header\n");
+	ret = trace_seq_puts(s, "\ttype_len    :    5 bits\n");
+	ret = trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
+	ret = trace_seq_puts(s, "\tarray       :   32 bits\n");
+	ret = trace_seq_putc(s, '\n');
 	ret = trace_seq_printf(s, "\tpadding     : type == %d\n",
 			       RINGBUF_TYPE_PADDING);
 	ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
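
Worth noting about this substitution: trace_seq_puts() and trace_seq_putc() copy the string or character straight into the seq buffer with no printf-style format parsing, so they are cheaper for constant output and immune to a stray '%' in the text. trace_seq_printf() is kept only where a real conversion is needed, as in the padding and time_extend lines above.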
@@ -1066,7 +1066,7 @@ static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
 }
 
 /**
- * check_pages - integrity check of buffer pages
+ * rb_check_pages - integrity check of buffer pages
  * @cpu_buffer: CPU buffer with pages to test
  *
  * As a safety measure we check to make sure the data pages have not
@@ -1258,7 +1258,7 @@ static int rb_cpu_notify(struct notifier_block *self,
 #endif
 
 /**
- * ring_buffer_alloc - allocate a new ring_buffer
+ * __ring_buffer_alloc - allocate a new ring_buffer
  * @size: the size in bytes per cpu that is needed.
  * @flags: attributes to set for the ring buffer.
  *
@@ -1607,6 +1607,7 @@ static void update_pages_handler(struct work_struct *work)
  * ring_buffer_resize - resize the ring buffer
  * @buffer: the buffer to resize.
  * @size: the new size.
+ * @cpu_id: the cpu buffer to resize
  *
  * Minimum size is 2 * BUF_PAGE_SIZE.
  *
@@ -3956,11 +3957,11 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
  * expected.
  *
  * After a sequence of ring_buffer_read_prepare calls, the user is
- * expected to make at least one call to ring_buffer_prepare_sync.
+ * expected to make at least one call to ring_buffer_read_prepare_sync.
  * Afterwards, ring_buffer_read_start is invoked to get things going
  * for real.
  *
- * This overall must be paired with ring_buffer_finish.
+ * This overall must be paired with ring_buffer_read_finish.
  */
 struct ring_buffer_iter *
 ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
@@ -4009,7 +4010,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
  * an intervening ring_buffer_read_prepare_sync must have been
  * performed.
  *
- * Must be paired with ring_buffer_finish.
+ * Must be paired with ring_buffer_read_finish.
  */
 void
 ring_buffer_read_start(struct ring_buffer_iter *iter)
@@ -4031,7 +4032,7 @@ ring_buffer_read_start(struct ring_buffer_iter *iter)
 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
 
 /**
- * ring_buffer_finish - finish reading the iterator of the buffer
+ * ring_buffer_read_finish - finish reading the iterator of the buffer
  * @iter: The iterator retrieved by ring_buffer_start
  *
  * This re-enables the recording to the buffer, and frees the
@@ -4346,6 +4347,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
 /**
  * ring_buffer_alloc_read_page - allocate a page to read from buffer
  * @buffer: the buffer to allocate for.
+ * @cpu: the cpu buffer to allocate.
  *
  * This function is used in conjunction with ring_buffer_read_page.
  * When reading a full page from the ring buffer, these functions
@@ -4403,7 +4405,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
  * to swap with a page in the ring buffer.
  *
  * for example:
- *	rpage = ring_buffer_alloc_read_page(buffer);
+ *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
  *	if (!rpage)
  *		return error;
  *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
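
With the comments corrected, the iterator API pairs up as the docs now describe: one or more ring_buffer_read_prepare() calls, at least one ring_buffer_read_prepare_sync(), then ring_buffer_read_start(), the reads, and finally ring_buffer_read_finish(). A minimal sketch of that sequence against the 3.11-era API; error handling is elided and consume_event() is a hypothetical consumer:

	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_prepare(buffer, cpu);	/* disables recording on cpu */
	ring_buffer_read_prepare_sync();		/* required before read_start */
	ring_buffer_read_start(iter);

	while ((event = ring_buffer_read(iter, &ts)))
		consume_event(event, ts);		/* hypothetical */

	ring_buffer_read_finish(iter);	/* re-enables recording, frees the iterator */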
34 changes: 23 additions & 11 deletions kernel/trace/trace.c
@@ -3008,7 +3008,6 @@ static int tracing_release(struct inode *inode, struct file *file)
 
 	iter = m->private;
 	tr = iter->tr;
-	trace_array_put(tr);
 
 	mutex_lock(&trace_types_lock);
 
@@ -3023,6 +3022,9 @@ static int tracing_release(struct inode *inode, struct file *file)
 	if (!iter->snapshot)
 		/* reenable tracing if it was previously enabled */
 		tracing_start_tr(tr);
+
+	__trace_array_put(tr);
+
 	mutex_unlock(&trace_types_lock);
 
 	mutex_destroy(&iter->mutex);
@@ -3447,14 +3449,19 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 static int tracing_trace_options_open(struct inode *inode, struct file *file)
 {
 	struct trace_array *tr = inode->i_private;
+	int ret;
 
 	if (tracing_disabled)
 		return -ENODEV;
 
-	return single_open(file, tracing_trace_options_show, inode->i_private);
+	if (trace_array_get(tr) < 0)
+		return -ENODEV;
+
+	ret = single_open(file, tracing_trace_options_show, inode->i_private);
+	if (ret < 0)
+		trace_array_put(tr);
+
+	return ret;
 }
 
 static const struct file_operations tracing_iter_fops = {
@@ -3537,14 +3544,14 @@ static const char readme_msg[] =
 "\n  snapshot\t\t- Like 'trace' but shows the content of the static snapshot buffer\n"
 "\t\t\t  Read the contents for more information\n"
 #endif
-#ifdef CONFIG_STACKTRACE
+#ifdef CONFIG_STACK_TRACER
 "  stack_trace\t\t- Shows the max stack trace when active\n"
 "  stack_max_size\t- Shows current max stack size that was traced\n"
 "\t\t\t  Write into this file to reset the max size (trigger a new trace)\n"
 #ifdef CONFIG_DYNAMIC_FTRACE
 "  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace traces\n"
 #endif
-#endif /* CONFIG_STACKTRACE */
+#endif /* CONFIG_STACK_TRACER */
 ;
 
 static ssize_t
@@ -3958,6 +3965,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
 	if (!iter) {
 		ret = -ENOMEM;
+		__trace_array_put(tr);
 		goto out;
 	}
 
@@ -4704,21 +4712,24 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file)
 		ret = PTR_ERR(iter);
 	} else {
 		/* Writes still need the seq_file to hold the private data */
+		ret = -ENOMEM;
 		m = kzalloc(sizeof(*m), GFP_KERNEL);
 		if (!m)
-			return -ENOMEM;
+			goto out;
 		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
 		if (!iter) {
 			kfree(m);
-			return -ENOMEM;
+			goto out;
 		}
+		ret = 0;
+
 		iter->tr = tr;
 		iter->trace_buffer = &tc->tr->max_buffer;
 		iter->cpu_file = tc->cpu;
 		m->private = iter;
 		file->private_data = m;
 	}
 
+out:
 	if (ret < 0)
 		trace_array_put(tr);
 

Expand Down Expand Up @@ -4948,8 +4959,6 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)

mutex_lock(&trace_types_lock);

tr->ref++;

info->iter.tr = tr;
info->iter.cpu_file = tc->cpu;
info->iter.trace = tr->current_trace;
@@ -5328,9 +5337,10 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 }
 
 static const struct file_operations tracing_stats_fops = {
-	.open		= tracing_open_generic,
+	.open		= tracing_open_generic_tc,
 	.read		= tracing_stats_read,
 	.llseek		= generic_file_llseek,
+	.release	= tracing_release_generic_tc,
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -5973,8 +5983,10 @@ static int new_instance_create(const char *name)
 		goto out_free_tr;
 
 	ret = event_trace_add_tracer(tr->dir, tr);
-	if (ret)
+	if (ret) {
+		debugfs_remove_recursive(tr->dir);
 		goto out_free_tr;
+	}
 
 	init_tracer_debugfs(tr, tr->dir);
 
10 changes: 9 additions & 1 deletion kernel/trace/trace.h
@@ -214,7 +214,6 @@ struct trace_array {
 	struct dentry		*event_dir;
 	struct list_head	systems;
 	struct list_head	events;
-	struct task_struct	*waiter;
 	int			ref;
 };
 
@@ -680,6 +679,15 @@ extern int trace_selftest_startup_sched_switch(struct tracer *trace,
 					       struct trace_array *tr);
 extern int trace_selftest_startup_branch(struct tracer *trace,
 					 struct trace_array *tr);
+/*
+ * Tracer data references selftest functions that only occur
+ * on boot up. These can be __init functions. Thus, when selftests
+ * are enabled, then the tracers need to reference __init functions.
+ */
+#define __tracer_data __refdata
+#else
+/* Tracers are seldom changed. Optimize when selftests are disabled. */
+#define __tracer_data __read_mostly
 #endif /* CONFIG_FTRACE_STARTUP_TEST */
 
 extern void *head_page(struct trace_array_cpu *data);
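
For context on how the new annotation is meant to be used: a tracer definition whose .selftest points at an __init function must be __refdata when selftests are built in, and can be __read_mostly otherwise; __tracer_data selects between the two. A sketch of the intended use (this tracer is illustrative, not part of the diff; example_trace_init is hypothetical, while trace_selftest_startup_function is one of the selftests declared in this header):

	static struct tracer example_tracer __tracer_data = {
		.name		= "example",
		.init		= example_trace_init,	/* hypothetical */
	#ifdef CONFIG_FTRACE_STARTUP_TEST
		.selftest	= trace_selftest_startup_function,
	#endif
	};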
10 changes: 8 additions & 2 deletions kernel/trace/trace_event_perf.c
@@ -236,6 +236,10 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
 
 	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
 
+	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
+		      "perf buffer not large enough"))
+		return NULL;
+
 	pc = preempt_count();
 
 	*rctxp = perf_swevent_get_recursion_context();
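
This is the check that disappeared from include/trace/ftrace.h above: doing it once inside perf_trace_buf_prepare() gives every caller the size check for free, and a caller only needs to handle the NULL return. A sketch of the resulting calling pattern (ENTRY_SIZE, event_type and head stand in for a real event's values):

	struct ftrace_entry *entry;
	struct pt_regs regs;
	int rctx;

	entry = perf_trace_buf_prepare(ENTRY_SIZE, event_type, &regs, &rctx);
	if (!entry)	/* oversized request or recursion: drop the event */
		return;

	/* ... fill in the entry fields ... */
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0, 1, &regs, head, NULL);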
@@ -266,6 +270,10 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
 	struct pt_regs regs;
 	int rctx;
 
+	head = this_cpu_ptr(event_function.perf_events);
+	if (hlist_empty(head))
+		return;
+
 #define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
 		    sizeof(u64)) - sizeof(u32))
 
@@ -279,8 +287,6 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
 
 	entry->ip = ip;
 	entry->parent_ip = parent_ip;
-
-	head = this_cpu_ptr(event_function.perf_events);
 	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
 			      1, &regs, head, NULL);
 
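
Taken together, these last two hunks hoist the per-CPU perf_events lookup to the top of perf_ftrace_function_call(), so when no perf event is attached on the current CPU the callback returns before perf_trace_buf_prepare() and perf_trace_buf_submit() do any work. Since this callback can run on every traced function call, making the empty-list case the cheap early exit is the whole point of the optimization.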
