Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Two fixes: a guest-cputime accounting fix, and a cgroup bandwidth
  quota precision fix"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/vtime: Fix guest/system mis-accounting on task switch
  sched/fair: Scale bandwidth quota and period without losing quota/period ratio precision
torvalds committed Oct 12, 2019
2 parents 465a7e2 + 68e7a4d commit 328fefa
Showing 2 changed files with 25 additions and 17 deletions.
6 changes: 3 additions & 3 deletions kernel/sched/cputime.c
@@ -740,7 +740,7 @@ void vtime_account_system(struct task_struct *tsk)
 
 	write_seqcount_begin(&vtime->seqcount);
 	/* We might have scheduled out from guest path */
-	if (current->flags & PF_VCPU)
+	if (tsk->flags & PF_VCPU)
 		vtime_account_guest(tsk, vtime);
 	else
 		__vtime_account_system(tsk, vtime);
@@ -783,7 +783,7 @@ void vtime_guest_enter(struct task_struct *tsk)
 	 */
 	write_seqcount_begin(&vtime->seqcount);
 	__vtime_account_system(tsk, vtime);
-	current->flags |= PF_VCPU;
+	tsk->flags |= PF_VCPU;
 	write_seqcount_end(&vtime->seqcount);
 }
 EXPORT_SYMBOL_GPL(vtime_guest_enter);
@@ -794,7 +794,7 @@ void vtime_guest_exit(struct task_struct *tsk)
 
 	write_seqcount_begin(&vtime->seqcount);
 	vtime_account_guest(tsk, vtime);
-	current->flags &= ~PF_VCPU;
+	tsk->flags &= ~PF_VCPU;
 	write_seqcount_end(&vtime->seqcount);
 }
 EXPORT_SYMBOL_GPL(vtime_guest_exit);
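The change above matters on the task-switch path: vtime for the task being scheduled out is flushed at a point where current already refers to the incoming task, so testing current->flags could classify the outgoing vCPU thread's guest time as system time. Below is a minimal userspace sketch of that effect; the struct, helper, and PF_VCPU value are illustrative stand-ins, not the kernel's actual code:

#include <stdio.h>

#define PF_VCPU 0x1	/* stand-in for the kernel's PF_VCPU flag */

struct task {
	const char *comm;
	unsigned int flags;
	unsigned long long guest_ns;
	unsigned long long sys_ns;
};

/* The task on the CPU; during a switch it already names the incoming task. */
static struct task *current_task;

/* Flush pending time for @tsk; buggy == 1 mimics the old current-based check. */
static void account_system(struct task *tsk, unsigned long long delta, int buggy)
{
	unsigned int flags = buggy ? current_task->flags : tsk->flags;

	if (flags & PF_VCPU)
		tsk->guest_ns += delta;	/* time spent running a guest vCPU */
	else
		tsk->sys_ns += delta;	/* ordinary system time */
}

int main(void)
{
	struct task vcpu  = { "kvm-vcpu", PF_VCPU, 0, 0 };
	struct task other = { "plain-task", 0, 0, 0 };

	current_task = &other;		/* vcpu is being scheduled out */

	account_system(&vcpu, 1000, 1);	/* old check: the delta lands in sys_ns */
	account_system(&vcpu, 1000, 0);	/* fixed check: the delta lands in guest_ns */

	printf("%s: guest=%llu ns, sys=%llu ns\n",
	       vcpu.comm, vcpu.guest_ns, vcpu.sys_ns);
	return 0;
}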
36 changes: 22 additions & 14 deletions kernel/sched/fair.c
@@ -4926,20 +4926,28 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
 		if (++count > 3) {
 			u64 new, old = ktime_to_ns(cfs_b->period);
 
-			new = (old * 147) / 128; /* ~115% */
-			new = min(new, max_cfs_quota_period);
-
-			cfs_b->period = ns_to_ktime(new);
-
-			/* since max is 1s, this is limited to 1e9^2, which fits in u64 */
-			cfs_b->quota *= new;
-			cfs_b->quota = div64_u64(cfs_b->quota, old);
-
-			pr_warn_ratelimited(
-	"cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us %lld, cfs_quota_us = %lld)\n",
-				smp_processor_id(),
-				div_u64(new, NSEC_PER_USEC),
-				div_u64(cfs_b->quota, NSEC_PER_USEC));
+			/*
+			 * Grow period by a factor of 2 to avoid losing precision.
+			 * Precision loss in the quota/period ratio can cause __cfs_schedulable
+			 * to fail.
+			 */
+			new = old * 2;
+			if (new < max_cfs_quota_period) {
+				cfs_b->period = ns_to_ktime(new);
+				cfs_b->quota *= 2;
+
+				pr_warn_ratelimited(
+	"cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n",
+					smp_processor_id(),
+					div_u64(new, NSEC_PER_USEC),
+					div_u64(cfs_b->quota, NSEC_PER_USEC));
+			} else {
+				pr_warn_ratelimited(
+	"cfs_period_timer[cpu%d]: period too short, but cannot scale up without losing precision (cfs_period_us = %lld, cfs_quota_us = %lld)\n",
+					smp_processor_id(),
+					div_u64(old, NSEC_PER_USEC),
+					div_u64(cfs_b->quota, NSEC_PER_USEC));
+			}
 
 			/* reset count so we don't come right back in here */
 			count = 0;
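To see the precision problem the new comment describes, here is a minimal userspace sketch (assumed example values; plain integer division stands in for div64_u64). The old ~115% scaling truncates twice, so the quota/period ratio can only drift downward across repeated scalings, whereas doubling both values keeps the ratio exact:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* example cgroup: 1 ms period, quota of one third of the period */
	uint64_t period = 1000000;	/* ns */
	uint64_t quota  = 333333;	/* ns */

	/* old behaviour: scale by ~115% with truncating integer division */
	uint64_t new_period = (period * 147) / 128;
	uint64_t new_quota  = (quota * new_period) / period;

	/* new behaviour: double both, ratio preserved exactly */
	uint64_t dbl_period = period * 2;
	uint64_t dbl_quota  = quota * 2;

	printf("original ratio: %.9f\n", (double)quota / period);
	printf("147/128 ratio:  %.9f\n", (double)new_quota / new_period);
	printf("x2 ratio:       %.9f\n", (double)dbl_quota / dbl_period);
	return 0;
}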
