Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Thomas Gleixner:
 "A few scheduler fixes:

   - Prevent a bogus warning vs. runqueue clock update flags in
     do_sched_rt_period_timer()

   - Simplify the helper functions which handle requests for skipping
     the runqueue clock update.

   - Do not unlock the tunables mutex in the error path of the cpufreq
     schedutil governor. It's not held.

   - Enforce proper alignment for 'struct util_est' in sched_avg to
     prevent a misalignment fault on IA64"
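
A minimal standalone C sketch of the alignment point in the last item; the *_like types and the aligned(64) stand-in are illustrative, not the kernel definitions. Forcing u64 alignment on the nested struct keeps a combined 64-bit access to its two 32-bit fields aligned, which is what an unaligned access would fault on under IA64:

/*
 * Illustrative sketch only (hypothetical util_est_like/sched_avg_like
 * types): force u64 alignment on the nested struct so a single 64-bit
 * read/write covering both 32-bit fields is always aligned.
 */
#include <stdint.h>
#include <stdio.h>

struct util_est_like {
	unsigned int enqueued;
	unsigned int ewma;
} __attribute__((__aligned__(sizeof(uint64_t))));

struct sched_avg_like {
	unsigned long util_avg;
	struct util_est_like util_est;
} __attribute__((aligned(64)));		/* stand-in for ____cacheline_aligned */

int main(void)
{
	_Static_assert(_Alignof(struct util_est_like) == sizeof(uint64_t),
		       "nested struct must be u64-aligned");
	printf("alignof(struct util_est_like) = %zu\n",
	       _Alignof(struct util_est_like));
	printf("alignof(struct sched_avg_like) = %zu\n",
	       _Alignof(struct sched_avg_like));
	return 0;
}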

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/core: Force proper alignment of 'struct util_est'
  sched/core: Simplify helpers for rq clock update skip requests
  sched/rt: Fix rq->clock_update_flags < RQCF_ACT_SKIP warning
  sched/cpufreq/schedutil: Fix error path mutex unlock
torvalds committed Apr 15, 2018
2 parents 174e719 + 317d359 commit 71b8ebb
Showing 7 changed files with 22 additions and 14 deletions.
6 changes: 3 additions & 3 deletions include/linux/sched.h
@@ -300,7 +300,7 @@ struct util_est {
 	unsigned int enqueued;
 	unsigned int ewma;
 #define UTIL_EST_WEIGHT_SHIFT 2
-};
+} __attribute__((__aligned__(sizeof(u64))));
 
 /*
  * The load_avg/util_avg accumulates an infinite geometric series
@@ -364,7 +364,7 @@ struct sched_avg {
 	unsigned long runnable_load_avg;
 	unsigned long util_avg;
 	struct util_est util_est;
-};
+} ____cacheline_aligned;
 
 struct sched_statistics {
 #ifdef CONFIG_SCHEDSTATS
@@ -435,7 +435,7 @@ struct sched_entity {
 	 * Put into separate cache line so it does not
 	 * collide with read-mostly values above.
 	 */
-	struct sched_avg avg ____cacheline_aligned_in_smp;
+	struct sched_avg avg;
 #endif
 };

2 changes: 1 addition & 1 deletion kernel/sched/core.c
@@ -874,7 +874,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 	 * this case, we can save a useless back to back clock update.
 	 */
 	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
-		rq_clock_skip_update(rq, true);
+		rq_clock_skip_update(rq);
 }
 
 #ifdef CONFIG_SMP

3 changes: 1 addition & 2 deletions kernel/sched/cpufreq_schedutil.c
@@ -631,10 +631,9 @@ static int sugov_init(struct cpufreq_policy *policy)
 
 stop_kthread:
 	sugov_kthread_stop(sg_policy);
-
-free_sg_policy:
 	mutex_unlock(&global_tunables_lock);
 
+free_sg_policy:
 	sugov_policy_free(sg_policy);
 
 disable_fast_switch:

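
A standalone sketch of the error-path rule this hunk restores, using hypothetical names (init_policy, global_lock, the step flags), not sugov_init itself: an unlock label must only be reachable from failure points where the lock is actually held, so failures that occur after the lock is dropped jump past it.

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;

static int init_policy(bool step1_ok, bool step2_ok)
{
	char *policy = malloc(32);

	if (!policy)
		return -1;

	pthread_mutex_lock(&global_lock);
	if (!step1_ok)
		goto unlock;		/* lock held here: unlock, then free */
	pthread_mutex_unlock(&global_lock);

	if (!step2_ok)
		goto free_policy;	/* lock already dropped: must not unlock again */

	free(policy);			/* success path, simplified */
	return 0;

unlock:
	pthread_mutex_unlock(&global_lock);
free_policy:
	free(policy);
	return -1;
}

int main(void)
{
	/* Exercise both failure paths; neither unlocks an unheld mutex. */
	return (init_policy(false, true) == -1 &&
		init_policy(true, false) == -1) ? 0 : 1;
}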
2 changes: 1 addition & 1 deletion kernel/sched/deadline.c
@@ -1560,7 +1560,7 @@ static void yield_task_dl(struct rq *rq)
 	 * so we don't do microscopic update in schedule()
 	 * and double the fastpath cost.
 	 */
-	rq_clock_skip_update(rq, true);
+	rq_clock_skip_update(rq);
 }
 
 #ifdef CONFIG_SMP

2 changes: 1 addition & 1 deletion kernel/sched/fair.c
@@ -7089,7 +7089,7 @@ static void yield_task_fair(struct rq *rq)
 		 * so we don't do microscopic update in schedule()
 		 * and double the fastpath cost.
 		 */
-		rq_clock_skip_update(rq, true);
+		rq_clock_skip_update(rq);
 	}
 
 	set_skip_buddy(se);

4 changes: 3 additions & 1 deletion kernel/sched/rt.c
@@ -839,6 +839,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			continue;
 
 		raw_spin_lock(&rq->lock);
+		update_rq_clock(rq);
+
 		if (rt_rq->rt_time) {
 			u64 runtime;
 
@@ -859,7 +861,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			 * 'runtime'.
 			 */
 			if (rt_rq->rt_nr_running && rq->curr == rq->idle)
-				rq_clock_skip_update(rq, false);
+				rq_clock_cancel_skipupdate(rq);
 		}
 		if (rt_rq->rt_time || rt_rq->rt_nr_running)
 			idle = 0;

17 changes: 12 additions & 5 deletions kernel/sched/sched.h
@@ -976,13 +976,20 @@ static inline u64 rq_clock_task(struct rq *rq)
 	return rq->clock_task;
 }
 
-static inline void rq_clock_skip_update(struct rq *rq, bool skip)
+static inline void rq_clock_skip_update(struct rq *rq)
 {
 	lockdep_assert_held(&rq->lock);
-	if (skip)
-		rq->clock_update_flags |= RQCF_REQ_SKIP;
-	else
-		rq->clock_update_flags &= ~RQCF_REQ_SKIP;
+	rq->clock_update_flags |= RQCF_REQ_SKIP;
+}
+
+/*
+ * See rt task throttling, which is the only time a skip
+ * request is cancelled.
+ */
+static inline void rq_clock_cancel_skipupdate(struct rq *rq)
+{
+	lockdep_assert_held(&rq->lock);
+	rq->clock_update_flags &= ~RQCF_REQ_SKIP;
+}
 
 struct rq_flags {

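
For context, a simplified standalone sketch of the two-stage skip flags these helpers manipulate; the flag names mirror the diff, but struct rq_sketch, the cut-down update_rq_clock(), and the explicit promotion step in main() are illustrative assumptions, not the kernel implementation. A caller requests a skip under the rq lock, the scheduler core promotes a pending request to an active skip for one update cycle, and the rt unthrottle path is the one place a pending request gets cancelled.

#include <assert.h>
#include <stdio.h>

#define RQCF_REQ_SKIP	0x01	/* skip requested for the next update */
#define RQCF_ACT_SKIP	0x02	/* skip in effect for this update     */
#define RQCF_UPDATED	0x04	/* clock refreshed during this cycle  */

struct rq_sketch {
	unsigned int clock_update_flags;
	unsigned long long clock;
};

static void rq_clock_skip_update(struct rq_sketch *rq)
{
	rq->clock_update_flags |= RQCF_REQ_SKIP;
}

static void rq_clock_cancel_skipupdate(struct rq_sketch *rq)
{
	rq->clock_update_flags &= ~RQCF_REQ_SKIP;
}

static void update_rq_clock(struct rq_sketch *rq, unsigned long long now)
{
	if (rq->clock_update_flags & RQCF_ACT_SKIP)
		return;				/* honour an active skip */
	rq->clock_update_flags |= RQCF_UPDATED;
	rq->clock = now;
}

int main(void)
{
	struct rq_sketch rq = { 0, 0 };

	rq_clock_skip_update(&rq);	/* request a skip ...           */
	rq.clock_update_flags <<= 1;	/* ... promote REQ to ACT       */
	update_rq_clock(&rq, 100);	/* skipped: clock stays at 0    */
	assert(rq.clock == 0);

	rq.clock_update_flags = 0;
	rq_clock_skip_update(&rq);
	rq_clock_cancel_skipupdate(&rq);/* e.g. the rt unthrottle case  */
	update_rq_clock(&rq, 200);	/* not skipped                  */
	assert(rq.clock == 200);

	printf("final clock = %llu\n", rq.clock);
	return 0;
}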
