cpuset: Honour task_cpu_possible_mask() in guarantee_online_cpus()
Asymmetric systems may not offer the same level of userspace ISA support
across all CPUs, meaning that some applications cannot be executed by
some CPUs. As a concrete example, upcoming arm64 big.LITTLE designs do
not feature support for 32-bit applications on both clusters.

Modify guarantee_online_cpus() to take task_cpu_possible_mask() into
account when trying to find a suitable set of online CPUs for a given
task. This will avoid passing an invalid mask to set_cpus_allowed_ptr()
during ->attach() and will subsequently allow the cpuset hierarchy to be
taken into account when forcefully overriding the affinity mask for a
task which requires migration to a compatible CPU.

Signed-off-by: Will Deacon <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Valentin Schneider <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
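
For readers coming to this commit without the rest of the series: task_cpu_possible_mask(p) is expected to return the set of CPUs physically capable of running task p, with a generic fallback of cpu_possible_mask so that symmetric architectures see no behavioural change. The sketch below is illustrative only: online_cpus_for_task() is a hypothetical name, the fallback macro mirrors (to the best of my reading) the generic helper introduced earlier in this series, and the real guarantee_online_cpus() in the diff additionally takes rcu_read_lock() and walks up the cpuset hierarchy when the intersection comes up empty.

#include <linux/cpumask.h>
#include <linux/sched.h>

/* Generic fallback: on symmetric systems every CPU can run every task. */
#ifndef task_cpu_possible_mask
# define task_cpu_possible_mask(p)	cpu_possible_mask
# define task_cpu_possible(cpu, p)	true
#endif

/*
 * Simplified shape of the new behaviour: restrict first to the CPUs that
 * can run @tsk at all, then to the online ones, and only then intersect
 * with the cpuset's effective_cpus.  Note task_cs() is internal to
 * kernel/cgroup/cpuset.c and must be called under rcu_read_lock() there;
 * this sketch elides that locking.
 */
static void online_cpus_for_task(struct task_struct *tsk, struct cpumask *pmask)
{
	cpumask_and(pmask, task_cpu_possible_mask(tsk), cpu_online_mask);
	cpumask_and(pmask, pmask, task_cs(tsk)->effective_cpus);
}
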
willdeacon authored and Peter Zijlstra committed Aug 20, 2021
1 parent d4b96fb commit 431c69f
Showing 2 changed files with 27 additions and 18 deletions.
2 changes: 1 addition & 1 deletion include/linux/cpuset.h
@@ -185,7 +185,7 @@ static inline void cpuset_read_unlock(void) { }
 static inline void cpuset_cpus_allowed(struct task_struct *p,
 				       struct cpumask *mask)
 {
-	cpumask_copy(mask, cpu_possible_mask);
+	cpumask_copy(mask, task_cpu_possible_mask(p));
 }
 
 static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
43 changes: 26 additions & 17 deletions kernel/cgroup/cpuset.c
@@ -372,18 +372,29 @@ static inline bool is_in_v2_mode(void)
 }
 
 /*
- * Return in pmask the portion of a cpusets's cpus_allowed that
- * are online.  If none are online, walk up the cpuset hierarchy
- * until we find one that does have some online cpus.
+ * Return in pmask the portion of a task's cpusets's cpus_allowed that
+ * are online and are capable of running the task.  If none are found,
+ * walk up the cpuset hierarchy until we find one that does have some
+ * appropriate cpus.
  *
  * One way or another, we guarantee to return some non-empty subset
  * of cpu_online_mask.
  *
  * Call with callback_lock or cpuset_mutex held.
  */
-static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
+static void guarantee_online_cpus(struct task_struct *tsk,
+				  struct cpumask *pmask)
 {
-	while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) {
+	const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
+	struct cpuset *cs;
+
+	if (WARN_ON(!cpumask_and(pmask, possible_mask, cpu_online_mask)))
+		cpumask_copy(pmask, cpu_online_mask);
+
+	rcu_read_lock();
+	cs = task_cs(tsk);
+
+	while (!cpumask_intersects(cs->effective_cpus, pmask)) {
 		cs = parent_cs(cs);
 		if (unlikely(!cs)) {
 			/*
@@ -393,11 +404,13 @@ static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
 			 * cpuset's effective_cpus is on its way to be
 			 * identical to cpu_online_mask.
 			 */
-			cpumask_copy(pmask, cpu_online_mask);
-			return;
+			goto out_unlock;
 		}
 	}
-	cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);
+	cpumask_and(pmask, pmask, cs->effective_cpus);
+
+out_unlock:
+	rcu_read_unlock();
 }
 
 /*
@@ -2199,15 +2212,13 @@ static void cpuset_attach(struct cgroup_taskset *tset)
 
 	percpu_down_write(&cpuset_rwsem);
 
-	/* prepare for attach */
-	if (cs == &top_cpuset)
-		cpumask_copy(cpus_attach, cpu_possible_mask);
-	else
-		guarantee_online_cpus(cs, cpus_attach);
-
 	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
 
 	cgroup_taskset_for_each(task, css, tset) {
+		if (cs != &top_cpuset)
+			guarantee_online_cpus(task, cpus_attach);
+		else
+			cpumask_copy(cpus_attach, task_cpu_possible_mask(task));
 		/*
 		 * can_attach beforehand should guarantee that this doesn't
 		 * fail.  TODO: have a better way to handle failure here
@@ -3302,9 +3313,7 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
 	unsigned long flags;
 
 	spin_lock_irqsave(&callback_lock, flags);
-	rcu_read_lock();
-	guarantee_online_cpus(task_cs(tsk), pmask);
-	rcu_read_unlock();
+	guarantee_online_cpus(tsk, pmask);
 	spin_unlock_irqrestore(&callback_lock, flags);
 }
 
