Skip to content

Commit

Permalink
perf/core: Collapse more IPI loops
Browse files Browse the repository at this point in the history
This patch collapses the two 'hard' cases, which are
perf_event_{dis,en}able().

I cannot seem to convince myself the current code is correct.

So starting with perf_event_disable(); we don't strictly need to test
for event->state == ACTIVE, ctx->is_active is enough. If the event is
not scheduled while the ctx is, __perf_event_disable() still does the
right thing.  It's a little less efficient to IPI in that case, but
over-all simpler.

For perf_event_enable(); the same goes, but I think that's actually
broken in its current form. The current condition is: ctx->is_active
&& event->state == OFF, that means it doesn't do anything when
!ctx->is_active && event->state == OFF. This is wrong, it should still
mark the event INACTIVE in that case, otherwise we'll still not try
and schedule the event once the context becomes active again.

This patch implements the two functions using the new
event_function_call() helper and does away with the tricky
event->state tests.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Alexander Shishkin <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Stephane Eranian <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Vince Weaver <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
  • Loading branch information
Peter Zijlstra authored and Ingo Molnar committed Jan 6, 2016
1 parent 9cc96b0 commit 7b64801
Showing 1 changed file with 33 additions and 73 deletions.
106 changes: 33 additions & 73 deletions kernel/events/core.c
Original file line number Diff line number Diff line change
Expand Up @@ -1766,6 +1766,20 @@ int __perf_event_disable(void *info)
return 0;
}

void ___perf_event_disable(void *info)
{
	struct perf_event *event = info;

	/*
	 * Called with ctx->lock held, so this context cannot be
	 * scheduled in concurrently; the state change is safe.
	 * Only an INACTIVE event needs to transition to OFF.
	 */
	if (event->state != PERF_EVENT_STATE_INACTIVE)
		return;

	update_group_times(event);
	event->state = PERF_EVENT_STATE_OFF;
}

/*
* Disable a event.
*
Expand All @@ -1782,43 +1796,16 @@ int __perf_event_disable(void *info)
static void _perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	/*
	 * Already OFF (or in ERROR state): nothing to do. Holding
	 * ctx->lock pins the state, since the context cannot be
	 * scheduled in while we hold it.
	 */
	raw_spin_lock_irq(&ctx->lock);
	if (event->state <= PERF_EVENT_STATE_OFF) {
		raw_spin_unlock_irq(&ctx->lock);
		return;
	}
	raw_spin_unlock_irq(&ctx->lock);

	/*
	 * event_function_call() invokes __perf_event_disable() via
	 * cross-call when the context is active, and falls back to
	 * ___perf_event_disable() under ctx->lock otherwise; this
	 * replaces the old hand-rolled retry/IPI loop and its
	 * tricky event->state tests.
	 */
	event_function_call(event, __perf_event_disable,
			    ___perf_event_disable, event);
}

/*
Expand Down Expand Up @@ -2269,6 +2256,11 @@ static int __perf_event_enable(void *info)
return 0;
}

void ___perf_event_enable(void *info)
{
	struct perf_event *event = info;

	/* Lock-held fallback: just mark the event enabled. */
	__perf_event_mark_enabled(event);
}

/*
* Enable a event.
*
Expand All @@ -2281,58 +2273,26 @@ static int __perf_event_enable(void *info)
static void _perf_event_enable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	/* Already INACTIVE or better: nothing to do. */
	raw_spin_lock_irq(&ctx->lock);
	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
		raw_spin_unlock_irq(&ctx->lock);
		return;
	}

	/*
	 * If the event is in error state, clear that first.
	 *
	 * That way, if we see the event in error state below, we know that it
	 * has gone back into error state, as distinct from the task having
	 * been scheduled away before the cross-call arrived.
	 */
	if (event->state == PERF_EVENT_STATE_ERROR)
		event->state = PERF_EVENT_STATE_OFF;
	raw_spin_unlock_irq(&ctx->lock);

	/*
	 * event_function_call() runs __perf_event_enable() via
	 * cross-call when the context is active, and
	 * ___perf_event_enable() under ctx->lock otherwise — so the
	 * event is marked INACTIVE even when !ctx->is_active, fixing
	 * the case the old retry loop got wrong.
	 */
	event_function_call(event, __perf_event_enable,
			    ___perf_event_enable, event);
}

/*
Expand Down

0 comments on commit 7b64801

Please sign in to comment.