xen: sched: improve checking soft-affinity
The function has_soft_affinity() determines whether the soft-affinity
of a vcpu will have any effect -- that is, whether it makes any
difference, scheduling-wise, compared to an empty soft-affinity mask.

The function takes a custom cpumask as its second parameter, for
flexibility; but that mask differs from the vCPU's hard-affinity in
only one case. Getting rid of that parameter not only simplifies the
function, but also enables optimizing the soft-affinity check.

The change is mostly mechanical, with the exception of
sched_credit.c:_csched_cpu_pick(), which was the one case where we
passed in something other than the existing hard-affinity.

Signed-off-by: Dario Faggioli <[email protected]>
Reviewed-by: George Dunlap <[email protected]>
dfaggioli authored and George Dunlap committed Mar 21, 2018
Commit 5e9e3f8 (parent b37b61d)
Showing 4 changed files with 42 additions and 58 deletions.
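To make the commit message concrete, here is a small standalone model of the simplified check, using plain C bitmasks in place of Xen's cpumask_t. The helper mirrors the new has_soft_affinity() from sched-if.h (shown at the end of the diff); the cpu sets and helper names are made up purely for illustration, not taken from the patch.

    /* Standalone model of the new has_soft_affinity() semantics.
     * Plain bitmasks stand in for cpumask_t; values are illustrative. */
    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned long mask_t;

    static bool subset(mask_t a, mask_t b)     { return (a & ~b) == 0; }
    static bool intersects(mask_t a, mask_t b) { return (a & b) != 0; }

    /* Same three conditions as the new helper in xen/include/xen/sched-if.h. */
    static bool model_has_soft_affinity(mask_t pool, mask_t hard, mask_t soft)
    {
        return !subset(pool, soft) && !subset(hard, soft) &&
               intersects(soft, hard);
    }

    int main(void)
    {
        mask_t pool = 0xff;   /* cpupool cpus:  {0-7} */
        mask_t hard = 0x0f;   /* hard affinity: {0-3} */

        /* Soft affinity {0,1} is a strict subset of the hard affinity:
         * it can influence scheduling, so the predicate returns true. */
        printf("%d\n", model_has_soft_affinity(pool, hard, 0x03)); /* 1 */

        /* Default soft affinity (all cpus) makes no difference at all,
         * so the soft-affinity balance step can be skipped. */
        printf("%d\n", model_has_soft_affinity(pool, hard, 0xff)); /* 0 */

        return 0;
    }
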
xen/common/sched_credit.c (32 additions, 42 deletions)

@@ -410,8 +410,7 @@ static inline void __runq_tickle(struct csched_vcpu *new)
         int new_idlers_empty;
 
         if ( balance_step == BALANCE_SOFT_AFFINITY
-             && !has_soft_affinity(new->vcpu,
-                                   new->vcpu->cpu_hard_affinity) )
+             && !has_soft_affinity(new->vcpu) )
             continue;
 
         /* Are there idlers suitable for new (for this balance step)? */
@@ -733,50 +732,42 @@ __csched_vcpu_is_migrateable(const struct csched_private *prv, struct vcpu *vc,
 static int
 _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
 {
-    cpumask_t cpus;
+    /* We must always use vc->processor's scratch space */
+    cpumask_t *cpus = cpumask_scratch_cpu(vc->processor);
     cpumask_t idlers;
-    cpumask_t *online;
+    cpumask_t *online = cpupool_domain_cpumask(vc->domain);
     struct csched_pcpu *spc = NULL;
     int cpu = vc->processor;
     int balance_step;
 
-    /* Store in cpus the mask of online cpus on which the domain can run */
-    online = cpupool_domain_cpumask(vc->domain);
-    cpumask_and(&cpus, vc->cpu_hard_affinity, online);
-
     for_each_affinity_balance_step( balance_step )
     {
+        affinity_balance_cpumask(vc, balance_step, cpus);
+        cpumask_and(cpus, online, cpus);
         /*
          * We want to pick up a pcpu among the ones that are online and
-         * can accommodate vc, which is basically what we computed above
-         * and stored in cpus. As far as hard affinity is concerned,
-         * there always will be at least one of these pcpus, hence cpus
-         * is never empty and the calls to cpumask_cycle() and
-         * cpumask_test_cpu() below are ok.
+         * can accommodate vc. As far as hard affinity is concerned, there
+         * always will be at least one of these pcpus in the scratch cpumask,
+         * hence, the calls to cpumask_cycle() and cpumask_test_cpu() below
+         * are ok.
          *
-         * On the other hand, when considering soft affinity too, it
-         * is possible for the mask to become empty (for instance, if the
-         * domain has been put in a cpupool that does not contain any of the
-         * pcpus in its soft affinity), which would result in the ASSERT()-s
-         * inside cpumask_*() operations triggering (in debug builds).
+         * On the other hand, when considering soft affinity, it is possible
+         * that the mask is empty (for instance, if the domain has been put
+         * in a cpupool that does not contain any of the pcpus in its soft
+         * affinity), which would result in the ASSERT()-s inside cpumask_*()
+         * operations triggering (in debug builds).
          *
-         * Therefore, in this case, we filter the soft affinity mask against
-         * cpus and, if the result is empty, we just skip the soft affinity
+         * Therefore, if that is the case, we just skip the soft affinity
          * balancing step all together.
         */
-        if ( balance_step == BALANCE_SOFT_AFFINITY
-             && !has_soft_affinity(vc, &cpus) )
+        if ( balance_step == BALANCE_SOFT_AFFINITY &&
+             (!has_soft_affinity(vc) || cpumask_empty(cpus)) )
            continue;
 
-        /* Pick an online CPU from the proper affinity mask */
-        affinity_balance_cpumask(vc, balance_step, &cpus);
-        cpumask_and(&cpus, &cpus, online);
-
        /* If present, prefer vc's current processor */
-        cpu = cpumask_test_cpu(vc->processor, &cpus)
-                ? vc->processor
-                : cpumask_cycle(vc->processor, &cpus);
-        ASSERT(cpumask_test_cpu(cpu, &cpus));
+        cpu = cpumask_test_cpu(vc->processor, cpus)
+                ? vc->processor : cpumask_cycle(vc->processor, cpus);
+        ASSERT(cpumask_test_cpu(cpu, cpus));
 
         /*
          * Try to find an idle processor within the above constraints.
@@ -797,7 +788,7 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
         cpumask_and(&idlers, &cpu_online_map, CSCHED_PRIV(ops)->idlers);
         if ( vc->processor == cpu && is_runq_idle(cpu) )
             __cpumask_set_cpu(cpu, &idlers);
-        cpumask_and(&cpus, &cpus, &idlers);
+        cpumask_and(cpus, &idlers, cpus);
 
         /*
          * It is important that cpu points to an idle processor, if a suitable
@@ -811,18 +802,18 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
          * Notice that cpumask_test_cpu() is quicker than cpumask_empty(), so
          * we check for it first.
          */
-        if ( !cpumask_test_cpu(cpu, &cpus) && !cpumask_empty(&cpus) )
-            cpu = cpumask_cycle(cpu, &cpus);
-        __cpumask_clear_cpu(cpu, &cpus);
+        if ( !cpumask_test_cpu(cpu, cpus) && !cpumask_empty(cpus) )
+            cpu = cpumask_cycle(cpu, cpus);
+        __cpumask_clear_cpu(cpu, cpus);
 
-        while ( !cpumask_empty(&cpus) )
+        while ( !cpumask_empty(cpus) )
         {
             cpumask_t cpu_idlers;
             cpumask_t nxt_idlers;
             int nxt, weight_cpu, weight_nxt;
             int migrate_factor;
 
-            nxt = cpumask_cycle(cpu, &cpus);
+            nxt = cpumask_cycle(cpu, cpus);
 
             if ( cpumask_test_cpu(cpu, per_cpu(cpu_core_mask, nxt)) )
             {
@@ -852,14 +843,14 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
                  weight_cpu > weight_nxt :
                  weight_cpu * migrate_factor < weight_nxt )
             {
-                cpumask_and(&nxt_idlers, &cpus, &nxt_idlers);
+                cpumask_and(&nxt_idlers, &nxt_idlers, cpus);
                 spc = CSCHED_PCPU(nxt);
                 cpu = cpumask_cycle(spc->idle_bias, &nxt_idlers);
-                cpumask_andnot(&cpus, &cpus, per_cpu(cpu_sibling_mask, cpu));
+                cpumask_andnot(cpus, cpus, per_cpu(cpu_sibling_mask, cpu));
             }
             else
             {
-                cpumask_andnot(&cpus, &cpus, &nxt_idlers);
+                cpumask_andnot(cpus, cpus, &nxt_idlers);
             }
         }
 
@@ -1660,9 +1651,8 @@ csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
          * vCPUs with useful soft affinities in some sort of bitmap
          * or counter.
          */
-        if ( vc->is_running ||
-             (balance_step == BALANCE_SOFT_AFFINITY
-              && !has_soft_affinity(vc, vc->cpu_hard_affinity)) )
+        if ( vc->is_running || (balance_step == BALANCE_SOFT_AFFINITY &&
+                                !has_soft_affinity(vc)) )
             continue;
 
         affinity_balance_cpumask(vc, balance_step, cpumask_scratch);

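One subtlety in the _csched_cpu_pick() hunk above: the simplified has_soft_affinity() no longer sees the online/cpupool-filtered mask, so it can report a meaningful soft affinity even when none of the soft cpus is actually usable; that is the case the added cpumask_empty(cpus) test catches. A toy illustration follows, with plain bitmasks and made-up cpu sets standing in for the Xen data structures.

    /* Toy illustration of the case guarded by the new cpumask_empty(cpus)
     * check in _csched_cpu_pick() (plain bitmasks, made-up values). */
    #include <stdio.h>

    int main(void)
    {
        unsigned long pool = 0xf0; /* cpupool online cpus: {4-7}           */
        unsigned long hard = 0xff; /* hard affinity: all cpus              */
        unsigned long soft = 0x03; /* soft affinity: {0,1}, none in pool   */

        /* The new predicate only looks at pool, hard and soft, and all
         * three of its conditions hold here, so it reports "true": */
        printf("%d\n", (pool & ~soft) != 0 &&   /* pool not subset of soft */
                       (hard & ~soft) != 0 &&   /* hard not subset of soft */
                       (soft & hard) != 0);     /* soft overlaps hard      */

        /* Yet the mask the soft-affinity balance step would actually use
         * (soft affinity restricted to the pool's cpus) is empty, which
         * is exactly what the extra cpumask_empty(cpus) check skips: */
        printf("%#lx\n", soft & pool);          /* prints 0 */

        return 0;
    }
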
xen/common/sched_credit2.c (4 additions, 6 deletions)

@@ -700,8 +700,7 @@ static int get_fallback_cpu(struct csched2_vcpu *svc)
     {
         int cpu = v->processor;
 
-        if ( bs == BALANCE_SOFT_AFFINITY &&
-             !has_soft_affinity(v, v->cpu_hard_affinity) )
+        if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(v) )
             continue;
 
         affinity_balance_cpumask(v, bs, cpumask_scratch_cpu(cpu));
@@ -1484,8 +1483,7 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
     for_each_affinity_balance_step( bs )
     {
         /* Just skip first step, if we don't have a soft affinity */
-        if ( bs == BALANCE_SOFT_AFFINITY &&
-             !has_soft_affinity(new->vcpu, new->vcpu->cpu_hard_affinity) )
+        if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(new->vcpu) )
             continue;
 
         affinity_balance_cpumask(new->vcpu, bs, cpumask_scratch_cpu(cpu));
@@ -2285,7 +2283,7 @@ csched2_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
      *
      * Find both runqueues in one pass.
      */
-    has_soft = has_soft_affinity(vc, vc->cpu_hard_affinity);
+    has_soft = has_soft_affinity(vc);
     for_each_cpu(i, &prv->active_queues)
     {
         struct csched2_runqueue_data *rqd;
@@ -3307,7 +3305,7 @@ runq_candidate(struct csched2_runqueue_data *rqd,
     }
 
     /* If scurr has a soft-affinity, let's check whether cpu is part of it */
-    if ( has_soft_affinity(scurr->vcpu, scurr->vcpu->cpu_hard_affinity) )
+    if ( has_soft_affinity(scurr->vcpu) )
     {
         affinity_balance_cpumask(scurr->vcpu, BALANCE_SOFT_AFFINITY,
                                  cpumask_scratch);

xen/common/sched_null.c (3 additions, 5 deletions)

@@ -278,8 +278,7 @@ static unsigned int pick_cpu(struct null_private *prv, struct vcpu *v)
 
     for_each_affinity_balance_step( bs )
     {
-        if ( bs == BALANCE_SOFT_AFFINITY &&
-             !has_soft_affinity(v, v->cpu_hard_affinity) )
+        if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(v) )
             continue;
 
         affinity_balance_cpumask(v, bs, cpumask_scratch_cpu(cpu));
@@ -491,8 +490,7 @@ static void _vcpu_remove(struct null_private *prv, struct vcpu *v)
     {
         list_for_each_entry( wvc, &prv->waitq, waitq_elem )
         {
-            if ( bs == BALANCE_SOFT_AFFINITY &&
-                 !has_soft_affinity(wvc->vcpu, wvc->vcpu->cpu_hard_affinity) )
+            if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(wvc->vcpu) )
                 continue;
 
             if ( vcpu_check_affinity(wvc->vcpu, cpu, bs) )
@@ -761,7 +759,7 @@ static struct task_slice null_schedule(const struct scheduler *ops,
             list_for_each_entry( wvc, &prv->waitq, waitq_elem )
             {
                 if ( bs == BALANCE_SOFT_AFFINITY &&
-                     !has_soft_affinity(wvc->vcpu, wvc->vcpu->cpu_hard_affinity) )
+                     !has_soft_affinity(wvc->vcpu) )
                     continue;
 
                 if ( vcpu_check_affinity(wvc->vcpu, cpu, bs) )

xen/include/xen/sched-if.h (3 additions, 5 deletions)

@@ -266,16 +266,14 @@ static inline cpumask_t* cpupool_domain_cpumask(struct domain *d)
  * Soft affinity only needs to be considered if:
  * * The cpus in the cpupool are not a subset of soft affinity
  * * The hard affinity is not a subset of soft affinity
- * * There is an overlap between the soft affinity and the mask which is
- *   currently being considered.
+ * * There is an overlap between the soft and hard affinity masks
  */
-static inline int has_soft_affinity(const struct vcpu *v,
-                                    const cpumask_t *mask)
+static inline int has_soft_affinity(const struct vcpu *v)
 {
     return !cpumask_subset(cpupool_domain_cpumask(v->domain),
                            v->cpu_soft_affinity) &&
            !cpumask_subset(v->cpu_hard_affinity, v->cpu_soft_affinity) &&
-           cpumask_intersects(v->cpu_soft_affinity, mask);
+           cpumask_intersects(v->cpu_soft_affinity, v->cpu_hard_affinity);
 }
 
 /*
