ftrace: Use the rcu _notrace variants for rcu_dereference_raw() and friends

As rcu_dereference_raw() can add quite a few checks under the RCU debug
config options, and tracing uses rcu_dereference_raw(), those checks run
inside the function tracer. The function tracer also ends up tracing the
debug checks themselves, and that added overhead can livelock the system.

Have the function tracer use the new RCU _notrace equivalents, which skip
the RCU debug checks.

Link: http://lkml.kernel.org/r/[email protected]

Acked-by: Paul E. McKenney <[email protected]>
Signed-off-by: Steven Rostedt <[email protected]>
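
The failure mode described above is easiest to see in miniature. The sketch
below is plain userspace C, not kernel code, and every name in it (tracer,
CALL_TRACED, checked_deref, deref_notrace, slot) is a hypothetical stand-in:
checked_deref plays the role of rcu_dereference_raw() with RCU debugging
enabled, a helper that is itself traced, while deref_notrace plays the role
of rcu_dereference_raw_notrace(). When the tracer callback reaches a pointer
through the traced helper it re-enters itself; through the notrace helper it
does not.

/* Hedged userspace sketch of the recursion hazard; all names are made up. */
#include <stdio.h>

static int recursion;                    /* counts tracer self-entries */

static void tracer(const char *callee);  /* runs on entry to every "traced" function */

/* Pretend every call to a traced function first invokes the tracer. */
#define CALL_TRACED(fn, arg) (tracer(#fn), (fn)(arg))

static void *slot;                       /* stands in for an RCU-protected pointer */

static void *checked_deref(void **p) { return *p; }  /* "debug-checked", itself traced */
static void *deref_notrace(void **p) { return *p; }  /* "_notrace" variant, never traced */

static void tracer(const char *callee)
{
        static int in_tracer;

        (void)callee;
        if (in_tracer) {                 /* the tracer just traced itself */
                recursion++;             /* in the kernel this extra work, repeated */
                return;                  /* on every traced call, can livelock */
        }
        in_tracer = 1;

        /* Buggy pattern: going through the traced, debug-checked helper
         * re-enters the tracer on every invocation. */
        CALL_TRACED(checked_deref, &slot);

        /* Fixed pattern: the notrace helper never re-enters the tracer. */
        deref_notrace(&slot);

        in_tracer = 0;
}

int main(void)
{
        CALL_TRACED(checked_deref, &slot);
        printf("tracer re-entered %d time(s)\n", recursion);  /* prints 1 */
        return 0;
}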
rostedt committed May 29, 2013
1 parent 12bcbe6 commit 1bb539c
Showing 1 changed file with 9 additions and 9 deletions.
kernel/trace/ftrace.c (18 changes: 9 additions, 9 deletions)

@@ -120,22 +120,22 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
 
 /*
  * Traverse the ftrace_global_list, invoking all entries. The reason that we
- * can use rcu_dereference_raw() is that elements removed from this list
+ * can use rcu_dereference_raw_notrace() is that elements removed from this list
  * are simply leaked, so there is no need to interact with a grace-period
- * mechanism. The rcu_dereference_raw() calls are needed to handle
+ * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
  * concurrent insertions into the ftrace_global_list.
  *
  * Silly Alpha and silly pointer-speculation compiler optimizations!
  */
 #define do_for_each_ftrace_op(op, list)                        \
-        op = rcu_dereference_raw(list);                        \
+        op = rcu_dereference_raw_notrace(list);                \
         do
 
 /*
  * Optimized for just a single item in the list (as that is the normal case).
  */
 #define while_for_each_ftrace_op(op)                                   \
-        while (likely(op = rcu_dereference_raw((op)->next)) &&         \
+        while (likely(op = rcu_dereference_raw_notrace((op)->next)) && \
                unlikely((op) != &ftrace_list_end))
 
 static inline void ftrace_ops_init(struct ftrace_ops *ops)
@@ -779,7 +779,7 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
         if (hlist_empty(hhd))
                 return NULL;
 
-        hlist_for_each_entry_rcu(rec, hhd, node) {
+        hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
                 if (rec->ip == ip)
                         return rec;
         }
@@ -1165,7 +1165,7 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
 
         hhd = &hash->buckets[key];
 
-        hlist_for_each_entry_rcu(entry, hhd, hlist) {
+        hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
                 if (entry->ip == ip)
                         return entry;
         }
@@ -1422,8 +1422,8 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
         struct ftrace_hash *notrace_hash;
         int ret;
 
-        filter_hash = rcu_dereference_raw(ops->filter_hash);
-        notrace_hash = rcu_dereference_raw(ops->notrace_hash);
+        filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
+        notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
 
         if ((ftrace_hash_empty(filter_hash) ||
              ftrace_lookup_ip(filter_hash, ip)) &&
@@ -2920,7 +2920,7 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
          * on the hash. rcu_read_lock is too dangerous here.
          */
         preempt_disable_notrace();
-        hlist_for_each_entry_rcu(entry, hhd, node) {
+        hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
                 if (entry->ip == ip)
                         entry->ops->func(ip, parent_ip, &entry->data);
         }
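For readers skimming the macro changes: do_for_each_ftrace_op() and
while_for_each_ftrace_op() are meant to bracket one loop body, as in a call
site of the form do_for_each_ftrace_op(op, list) { ... }
while_for_each_ftrace_op(op);. Whitespace aside, that pair roughly expands to
the sketch below (a reconstruction from the definitions in the first hunk
above, not the preprocessor's verbatim output), so after this commit every
link in the list is loaded through rcu_dereference_raw_notrace() instead of
the debug-checked rcu_dereference_raw().

/* Rough expansion of the macro pair; the loop body is whatever the
 * call site places between the two macros. */
op = rcu_dereference_raw_notrace(list);
do {
        /* ... loop body ... */
} while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&
         unlikely((op) != &ftrace_list_end));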
