Skip to content

Commit

Permalink
sched: Make sched_class::set_cpus_allowed() unconditional
Browse files Browse the repository at this point in the history
Give every class a set_cpus_allowed() method; this enables a small
optimization in the RT and DL implementations by avoiding a double
cpumask_weight() call.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
  • Loading branch information
Peter Zijlstra authored and Ingo Molnar committed Aug 12, 2015
1 parent 25834c7 commit c5b2803
Show file tree
Hide file tree
Showing 7 changed files with 36 additions and 18 deletions.
17 changes: 11 additions & 6 deletions kernel/sched/core.c
Original file line number Diff line number Diff line change
Expand Up @@ -1151,17 +1151,22 @@ static int migration_cpu_stop(void *data)
return 0;
}

void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
/*
 * Common mask bookkeeping that every sched_class::set_cpus_allowed
 * implementation must perform, but is not required to do by calling
 * this function — RT/DL may open-code it to reuse the weight they
 * already computed.
 */
void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
{
	lockdep_assert_held(&p->pi_lock);

	/*
	 * Do NOT dispatch through p->sched_class->set_cpus_allowed() here:
	 * classes whose ->set_cpus_allowed hook is this very function
	 * (fair, idle, stop) would recurse forever.  The per-class hook is
	 * invoked by do_set_cpus_allowed(); this helper only updates the
	 * task's allowed mask and its cached weight.
	 */
	cpumask_copy(&p->cpus_allowed, new_mask);
	p->nr_cpus_allowed = cpumask_weight(new_mask);
}

void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
lockdep_assert_held(&p->pi_lock);
p->sched_class->set_cpus_allowed(p, new_mask);
}

/*
* Change a given task's CPU affinity. Migrate the thread to a
* proper CPU and schedule it away if the CPU it's executing on
Expand Down
20 changes: 12 additions & 8 deletions kernel/sched/deadline.c
Original file line number Diff line number Diff line change
Expand Up @@ -1696,21 +1696,21 @@ static void set_cpus_allowed_dl(struct task_struct *p,
raw_spin_unlock(&src_dl_b->lock);
}

/*
* Update only if the task is actually running (i.e.,
* it is on the rq AND it is not throttled).
*/
if (!on_dl_rq(&p->dl))
return;

weight = cpumask_weight(new_mask);

/*
* Only update if the process changes its state from whether it
* can migrate or not.
*/
if ((p->nr_cpus_allowed > 1) == (weight > 1))
return;
goto done;

/*
* Update only if the task is actually running (i.e.,
* it is on the rq AND it is not throttled).
*/
if (!on_dl_rq(&p->dl))
goto done;

/*
* The process used to be able to migrate OR it can now migrate
Expand All @@ -1727,6 +1727,10 @@ static void set_cpus_allowed_dl(struct task_struct *p,
}

update_dl_migration(&rq->dl);

done:
cpumask_copy(&p->cpus_allowed, new_mask);
p->nr_cpus_allowed = weight;
}

/* Assumes rq->lock is held */
Expand Down
1 change: 1 addition & 0 deletions kernel/sched/fair.c
Original file line number Diff line number Diff line change
Expand Up @@ -8252,6 +8252,7 @@ const struct sched_class fair_sched_class = {

.task_waking = task_waking_fair,
.task_dead = task_dead_fair,
.set_cpus_allowed = set_cpus_allowed_common,
#endif

.set_curr_task = set_curr_task_fair,
Expand Down
1 change: 1 addition & 0 deletions kernel/sched/idle_task.c
Original file line number Diff line number Diff line change
Expand Up @@ -96,6 +96,7 @@ const struct sched_class idle_sched_class = {

#ifdef CONFIG_SMP
.select_task_rq = select_task_rq_idle,
.set_cpus_allowed = set_cpus_allowed_common,
#endif

.set_curr_task = set_curr_task_idle,
Expand Down
12 changes: 8 additions & 4 deletions kernel/sched/rt.c
Original file line number Diff line number Diff line change
Expand Up @@ -2084,17 +2084,17 @@ static void set_cpus_allowed_rt(struct task_struct *p,

BUG_ON(!rt_task(p));

if (!task_on_rq_queued(p))
return;

weight = cpumask_weight(new_mask);

/*
* Only update if the process changes its state from whether it
* can migrate or not.
*/
if ((p->nr_cpus_allowed > 1) == (weight > 1))
return;
goto done;

if (!task_on_rq_queued(p))
goto done;

rq = task_rq(p);

Expand All @@ -2113,6 +2113,10 @@ static void set_cpus_allowed_rt(struct task_struct *p,
}

update_rt_migration(&rq->rt);

done:
cpumask_copy(&p->cpus_allowed, new_mask);
p->nr_cpus_allowed = weight;
}

/* Assumes rq->lock is held */
Expand Down
2 changes: 2 additions & 0 deletions kernel/sched/sched.h
Original file line number Diff line number Diff line change
Expand Up @@ -1255,6 +1255,8 @@ extern void trigger_load_balance(struct rq *rq);
extern void idle_enter_fair(struct rq *this_rq);
extern void idle_exit_fair(struct rq *this_rq);

extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);

#else

static inline void idle_enter_fair(struct rq *rq) { }
Expand Down
1 change: 1 addition & 0 deletions kernel/sched/stop_task.c
Original file line number Diff line number Diff line change
Expand Up @@ -123,6 +123,7 @@ const struct sched_class stop_sched_class = {

#ifdef CONFIG_SMP
.select_task_rq = select_task_rq_stop,
.set_cpus_allowed = set_cpus_allowed_common,
#endif

.set_curr_task = set_curr_task_stop,
Expand Down

0 comments on commit c5b2803

Please sign in to comment.