sched/fair: Init cfs_rq's sched_entity load average
The runnable load and utilization averages of a cfs_rq's sched_entity
were never initialized. As is already done for a new task, give a new
cfs_rq's sched_entity start values that weight its load heavily during
its infancy.

Signed-off-by: Yuyang Du <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
ydu19 authored and Ingo Molnar committed Aug 3, 2015
Parent: 6c1d47c · Commit: 540247f

Showing 3 changed files with 8 additions and 7 deletions.
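
A note on the chosen start values (an editor's reading of the diff below, not text from the commit): PELT maintains load_avg ≈ load_sum / LOAD_AVG_MAX, so seeding load_avg = scale_load_down(weight) together with load_sum = load_avg * LOAD_AVG_MAX places a new entity exactly at the fixed point of an entity that has been runnable for all of its history; likewise util_avg = SCHED_LOAD_SCALE with util_sum = LOAD_AVG_MAX marks it as fully utilized. As real history accumulates, the averages decay toward the entity's true behavior instead of ramping up from zero.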
kernel/sched/core.c: 2 changes (1 addition, 1 deletion)

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2304,7 +2304,7 @@ void wake_up_new_task(struct task_struct *p)
 #endif
 
 	/* Initialize new task's runnable average */
-	init_task_runnable_average(p);
+	init_entity_runnable_average(&p->se);
 	rq = __task_rq_lock(p);
 	activate_task(rq, p, 0);
 	p->on_rq = TASK_ON_RQ_QUEUED;
kernel/sched/fair.c: 11 changes (6 additions, 5 deletions)

--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -667,10 +667,10 @@ static unsigned long task_h_load(struct task_struct *p);
 #define LOAD_AVG_MAX 47742	/* maximum possible load avg */
 #define LOAD_AVG_MAX_N 345	/* number of full periods to produce LOAD_MAX_AVG */
 
-/* Give new task start runnable values to heavy its load in infant time */
-void init_task_runnable_average(struct task_struct *p)
+/* Give new sched_entity start runnable values to heavy its load in infant time */
+void init_entity_runnable_average(struct sched_entity *se)
 {
-	struct sched_avg *sa = &p->se.avg;
+	struct sched_avg *sa = &se->avg;
 
 	sa->last_update_time = 0;
 	/*
@@ -679,14 +679,14 @@ void init_task_runnable_average(struct task_struct *p)
	 * will definitely be update (after enqueue).
	 */
 	sa->period_contrib = 1023;
-	sa->load_avg = scale_load_down(p->se.load.weight);
+	sa->load_avg = scale_load_down(se->load.weight);
 	sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
 	sa->util_avg = scale_load_down(SCHED_LOAD_SCALE);
 	sa->util_sum = LOAD_AVG_MAX;
 	/* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
 }
 #else
-void init_task_runnable_average(struct task_struct *p)
+void init_entity_runnable_average(struct sched_entity *se)
 {
 }
 #endif
@@ -8029,6 +8029,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 
 		init_cfs_rq(cfs_rq);
 		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
+		init_entity_runnable_average(se);
 	}
 
 	return 1;
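
The ceiling LOAD_AVG_MAX == 47742 used in the hunks above falls out of the PELT geometry: history decays by a factor y per 1024us period, with y chosen so that y^32 == 1/2, and a continuously runnable entity contributes 1024 per period, so load_sum converges toward the geometric sum of 1024 * y^n. A minimal userspace sketch (illustrative only, not kernel code) that approximates the number:

#include <math.h>
#include <stdio.h>

int main(void)
{
	double y = pow(0.5, 1.0 / 32.0);	/* per-period decay: y^32 == 1/2 */
	double sum = 0.0;
	int n;

	for (n = 0; n < 345; n++)		/* LOAD_AVG_MAX_N full periods */
		sum += 1024.0 * pow(y, n);

	/*
	 * Prints roughly 47760; the kernel's fixed-point decay truncates
	 * at each step, which is why it saturates at exactly 47742.
	 */
	printf("load_sum ceiling after %d periods: %.0f\n", n, sum);
	return 0;
}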
kernel/sched/sched.h: 2 changes (1 addition, 1 deletion)

--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1307,7 +1307,7 @@ extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
 
 unsigned long to_ratio(u64 period, u64 runtime);
 
-extern void init_task_runnable_average(struct task_struct *p);
+extern void init_entity_runnable_average(struct sched_entity *se);
 
 static inline void add_nr_running(struct rq *rq, unsigned count)
 {
