sched/pelt: Relax the sync of runnable_sum with runnable_avg
Similarly to util_avg and util_sum, don't sync runnable_sum with the low
bound of runnable_avg but only ensure that runnable_sum stays in the
correct range.

Signed-off-by: Vincent Guittot <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Dietmar Eggemann <[email protected]>
Tested-by: Sachin Sant <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
vingu-linaro authored and Peter Zijlstra committed Jan 18, 2022
1 parent 7ceb771 commit 95246d1
Showing 1 changed file with 19 additions and 14 deletions: kernel/sched/fair.c
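Every hunk below applies the same pattern: keep *_sum exact by adding or subtracting the delta directly, instead of overwriting it with the low bound *_avg * divider, and afterwards only clamp it so that *_sum never falls below *_avg * PELT_MIN_DIVIDER. A minimal user-space sketch of that invariant follows; the toy_avg struct, the helper names, and the PELT_MIN_DIVIDER value are simplified stand-ins for illustration, not the kernel's definitions (the real constant lives in kernel/sched/pelt.h).

#include <stdint.h>
#include <stdio.h>

/* Stand-in value; the kernel derives PELT_MIN_DIVIDER from LOAD_AVG_MAX. */
#define PELT_MIN_DIVIDER 46718

/* Hypothetical cut-down version of the runnable fields of struct sched_avg. */
struct toy_avg {
        uint32_t      runnable_sum;
        unsigned long runnable_avg;
};

/* Mirrors the kernel's sub_positive(): subtract without underflowing. */
static void sub_pos_ulong(unsigned long *v, unsigned long d)
{
        *v = (*v > d) ? *v - d : 0;
}

static void sub_pos_u32(uint32_t *v, uint32_t d)
{
        *v = (*v > d) ? *v - d : 0;
}

/*
 * Remove a contribution r: decrement avg and sum independently, then
 * only enforce the lower bound sum >= avg * PELT_MIN_DIVIDER rather
 * than overwriting sum with avg * divider as the old code did.
 */
static void remove_runnable(struct toy_avg *sa, unsigned long r, uint32_t divider)
{
        sub_pos_ulong(&sa->runnable_avg, r);
        sub_pos_u32(&sa->runnable_sum, (uint32_t)(r * divider));
        if (sa->runnable_sum < sa->runnable_avg * PELT_MIN_DIVIDER)
                sa->runnable_sum = (uint32_t)(sa->runnable_avg * PELT_MIN_DIVIDER);
}

int main(void)
{
        struct toy_avg sa = { .runnable_sum = 100000, .runnable_avg = 2 };

        remove_runnable(&sa, 1, 47742);
        printf("avg=%lu sum=%u\n", sa.runnable_avg, sa.runnable_sum);
        return 0;
}

The design point is that runnable_avg is a truncated (integer-divided) view of runnable_sum, so the sum carries more precision than the average; clamping the sum keeps the pair mutually consistent without discarding that precision.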
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3483,11 +3483,11 @@ update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
 static inline void
 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 {
-        long delta = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
-        u32 divider;
+        long delta_sum, delta_avg = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
+        u32 new_sum, divider;
 
         /* Nothing to update */
-        if (!delta)
+        if (!delta_avg)
                 return;
 
         /*
@@ -3498,11 +3498,16 @@ update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cf
 
         /* Set new sched_entity's runnable */
         se->avg.runnable_avg = gcfs_rq->avg.runnable_avg;
-        se->avg.runnable_sum = se->avg.runnable_avg * divider;
+        new_sum = se->avg.runnable_avg * divider;
+        delta_sum = (long)new_sum - (long)se->avg.runnable_sum;
+        se->avg.runnable_sum = new_sum;
 
         /* Update parent cfs_rq runnable */
-        add_positive(&cfs_rq->avg.runnable_avg, delta);
-        cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
+        add_positive(&cfs_rq->avg.runnable_avg, delta_avg);
+        add_positive(&cfs_rq->avg.runnable_sum, delta_sum);
+        /* See update_cfs_rq_load_avg() */
+        cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
+                                         cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);
 }
 
 static inline void
@@ -3702,7 +3707,10 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 
                 r = removed_runnable;
                 sub_positive(&sa->runnable_avg, r);
-                sa->runnable_sum = sa->runnable_avg * divider;
+                sub_positive(&sa->runnable_sum, r * divider);
+                /* See sa->util_sum above */
+                sa->runnable_sum = max_t(u32, sa->runnable_sum,
+                                         sa->runnable_avg * PELT_MIN_DIVIDER);
 
                 /*
                  * removed_runnable is the unweighted version of removed_load so we
@@ -3789,12 +3797,6 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
  */
 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-        /*
-         * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
-         * See ___update_load_avg() for details.
-         */
-        u32 divider = get_pelt_divider(&cfs_rq->avg);
-
         dequeue_load_avg(cfs_rq, se);
         sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
         sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
@@ -3803,7 +3805,10 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
                               cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);
 
         sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
-        cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
+        sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
+        /* See update_cfs_rq_load_avg() */
+        cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
+                                         cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);
 
         add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
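For intuition about why the old sync was lossy (numbers chosen for illustration, not taken from the patch): PELT derives the average from the sum by integer division, avg = sum / divider, so rewriting sum = avg * divider on every removal rounds the sum down by up to divider - 1. With sum = 100000 and a divider of 47742:

    avg           = 100000 / 47742 = 2
    avg * divider = 2 * 47742      = 95484

A single sync silently drops 4516 from the sum, and the loss compounds across repeated updates. After this patch the sum is decremented exactly, and avg * PELT_MIN_DIVIDER serves only as a floor keeping the two fields in range of each other.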
