Skip to content

Commit

Permalink
sched: Micro-optimize by dropping unnecessary task_rq() calls
Browse files Browse the repository at this point in the history
We always know the rq used, let's just pass it around.
This seems to cut the size of scheduler core down a tiny bit:

Before:

  [linux]$ size kernel/sched/core.o.orig
     text    data     bss     dec     hex filename
    62760   16130    3876   82766   1434e kernel/sched/core.o.orig

After:

  [linux]$ size kernel/sched/core.o.patched
     text    data     bss     dec     hex filename
    62566   16130    3876   82572   1428c kernel/sched/core.o.patched

Probably speeds it up as well.

Signed-off-by: Michael S. Tsirkin <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
  • Loading branch information
mstsirkin authored and Ingo Molnar committed Sep 25, 2013
1 parent f48627e commit 4314895
Show file tree
Hide file tree
Showing 2 changed files with 27 additions and 25 deletions.
6 changes: 3 additions & 3 deletions kernel/sched/core.c
Original file line number Diff line number Diff line change
Expand Up @@ -767,14 +767,14 @@ static void set_load_weight(struct task_struct *p)
/*
 * Add @p to @rq's runqueue via its sched_class hook, refreshing the rq
 * clock first so enqueue-time accounting sees a current timestamp.
 * The caller already holds the rq lock and knows the rq, so it is passed
 * straight to sched_info_queued() instead of re-deriving it via task_rq().
 */
static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	sched_info_queued(rq, p);
	p->sched_class->enqueue_task(rq, p, flags);
}

/*
 * Remove @p from @rq's runqueue via its sched_class hook, refreshing the
 * rq clock first so dequeue-time accounting sees a current timestamp.
 * The rq is passed to sched_info_dequeued() directly, mirroring
 * enqueue_task(), to avoid an unnecessary task_rq() lookup.
 */
static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	sched_info_dequeued(rq, p);
	p->sched_class->dequeue_task(rq, p, flags);
}

Expand Down Expand Up @@ -1839,7 +1839,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
struct task_struct *next)
{
trace_sched_switch(prev, next);
sched_info_switch(prev, next);
sched_info_switch(rq, prev, next);
perf_event_task_sched_out(prev, next);
fire_sched_out_preempt_notifiers(prev, next);
prepare_lock_switch(rq, next);
Expand Down
46 changes: 24 additions & 22 deletions kernel/sched/stats.h
Original file line number Diff line number Diff line change
Expand Up @@ -59,27 +59,27 @@ static inline void sched_info_reset_dequeued(struct task_struct *t)
* from dequeue_task() to account for possible rq->clock skew across cpus. The
* delta taken on each cpu would annul the skew.
*/
static inline void sched_info_dequeued(struct task_struct *t)
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
unsigned long long now = rq_clock(task_rq(t)), delta = 0;
unsigned long long now = rq_clock(rq), delta = 0;

if (unlikely(sched_info_on()))
if (t->sched_info.last_queued)
delta = now - t->sched_info.last_queued;
sched_info_reset_dequeued(t);
t->sched_info.run_delay += delta;

rq_sched_info_dequeued(task_rq(t), delta);
rq_sched_info_dequeued(rq, delta);
}

/*
* Called when a task finally hits the cpu. We can now calculate how
* long it was waiting to run. We also note when it began so that we
* can keep stats on how long its timeslice is.
*/
static void sched_info_arrive(struct task_struct *t)
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
unsigned long long now = rq_clock(task_rq(t)), delta = 0;
unsigned long long now = rq_clock(rq), delta = 0;

if (t->sched_info.last_queued)
delta = now - t->sched_info.last_queued;
Expand All @@ -88,19 +88,19 @@ static void sched_info_arrive(struct task_struct *t)
t->sched_info.last_arrival = now;
t->sched_info.pcount++;

rq_sched_info_arrive(task_rq(t), delta);
rq_sched_info_arrive(rq, delta);
}

/*
* This function is only called from enqueue_task(), but also only updates
* the timestamp if it is already not set. It's assumed that
* sched_info_dequeued() will clear that stamp when appropriate.
*/
static inline void sched_info_queued(struct task_struct *t)
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
if (unlikely(sched_info_on()))
if (!t->sched_info.last_queued)
t->sched_info.last_queued = rq_clock(task_rq(t));
t->sched_info.last_queued = rq_clock(rq);
}

/*
Expand All @@ -111,15 +111,15 @@ static inline void sched_info_queued(struct task_struct *t)
* sched_info_queued() to mark that it has now again started waiting on
* the runqueue.
*/
static inline void sched_info_depart(struct task_struct *t)
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
unsigned long long delta = rq_clock(task_rq(t)) -
unsigned long long delta = rq_clock(rq) -
t->sched_info.last_arrival;

rq_sched_info_depart(task_rq(t), delta);
rq_sched_info_depart(rq, delta);

if (t->state == TASK_RUNNING)
sched_info_queued(t);
sched_info_queued(rq, t);
}

/*
Expand All @@ -128,32 +128,34 @@ static inline void sched_info_depart(struct task_struct *t)
* the idle task.) We are only called when prev != next.
*/
static inline void
__sched_info_switch(struct task_struct *prev, struct task_struct *next)
__sched_info_switch(struct rq *rq,
struct task_struct *prev, struct task_struct *next)
{
struct rq *rq = task_rq(prev);

/*
* prev now departs the cpu. It's not interesting to record
* stats about how efficient we were at scheduling the idle
* process, however.
*/
if (prev != rq->idle)
sched_info_depart(prev);
sched_info_depart(rq, prev);

if (next != rq->idle)
sched_info_arrive(next);
sched_info_arrive(rq, next);
}
/*
 * Record a context switch for schedstats accounting; compiles to a no-op
 * check when schedstats/delay accounting is not enabled at runtime.
 */
static inline void
sched_info_switch(struct rq *rq,
		  struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(rq, prev, next);
}
#else
/*
 * Stub out all schedstats hooks when neither CONFIG_SCHEDSTATS nor
 * CONFIG_TASK_DELAY_ACCT is enabled. Argument lists must mirror the
 * real (rq-taking) definitions above so call sites compile unchanged.
 */
#define sched_info_queued(rq, t)		do { } while (0)
#define sched_info_reset_dequeued(t)		do { } while (0)
#define sched_info_dequeued(rq, t)		do { } while (0)
#define sched_info_depart(rq, t)		do { } while (0)
#define sched_info_arrive(rq, next)		do { } while (0)
#define sched_info_switch(rq, t, next)		do { } while (0)
#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */

/*
Expand Down

0 comments on commit 4314895

Please sign in to comment.