blk-throttle: ignore idle cgroup limit
The last patch introduced a way to detect an idle cgroup. Use it to make
the upgrade/downgrade decision. The new algorithm also detects a completely
idle cgroup, so the corresponding code can be deleted.

Signed-off-by: Shaohua Li <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
shligit authored and axboe committed Mar 28, 2017
1 parent ada75b6 commit fa6fb5a
Showing 1 changed file with 26 additions and 14 deletions.
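
In outline, the change drops the per-direction last_dispatch_time[] bookkeeping and instead lets a group stop blocking the LOW-to-MAX upgrade once it has stayed under its low limit for a full throttle slice and the new idle heuristic reports it idle. Below is a minimal user-space sketch of that branch of the condition; the struct, field names, and the plain comparison are simplified stand-ins, not the kernel's jiffies-based code.

/* Sketch (not kernel code): the idle branch of the new upgrade test in
 * throtl_tg_can_upgrade(). Names and the non-wrapping comparison are
 * simplified stand-ins for the kernel's jiffies bookkeeping.
 */
#include <stdbool.h>
#include <stdio.h>

struct group_state {
	unsigned long last_low_overflow;	/* last time the group exceeded its LOW limit */
	unsigned long throtl_slice;		/* length of one throttle slice */
	bool idle;				/* verdict of the idle-detection heuristic */
};

/* A group no longer holds back an upgrade if it has stayed under its LOW
 * limit for at least one full slice and the heuristic considers it idle. */
static bool group_allows_upgrade(const struct group_state *g, unsigned long now)
{
	return now >= g->last_low_overflow + g->throtl_slice && g->idle;
}

int main(void)
{
	struct group_state g = { .last_low_overflow = 1000, .throtl_slice = 100, .idle = true };

	printf("%d\n", group_allows_upgrade(&g, 1050));	/* 0: overflowed within the last slice */
	printf("%d\n", group_allows_upgrade(&g, 1100));	/* 1: a full slice below the limit, and idle */
	return 0;
}
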
40 changes: 26 additions & 14 deletions block/blk-throttle.c
@@ -152,8 +152,6 @@ struct throtl_grp {
 
 	unsigned long last_check_time;
 
-	unsigned long last_dispatch_time[2];
-
 	/* When did we start a new slice */
 	unsigned long slice_start[2];
 	unsigned long slice_end[2];
@@ -508,8 +506,6 @@ static void throtl_pd_online(struct blkg_policy_data *pd)
 	 * Update has_rules[] after a new group is brought online.
 	 */
 	tg_update_has_rules(tg);
-	tg->last_dispatch_time[READ] = jiffies;
-	tg->last_dispatch_time[WRITE] = jiffies;
 }
 
 static void blk_throtl_update_limit_valid(struct throtl_data *td)
@@ -1708,9 +1704,8 @@ static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
 		return true;
 
 	if (time_after_eq(jiffies,
-	    tg->last_dispatch_time[READ] + tg->td->throtl_slice) &&
-	    time_after_eq(jiffies,
-	    tg->last_dispatch_time[WRITE] + tg->td->throtl_slice))
+		tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
+	    throtl_tg_is_idle(tg))
 		return true;
 	return false;
 }
@@ -1756,6 +1751,26 @@ static bool throtl_can_upgrade(struct throtl_data *td,
 	return true;
 }
 
+static void throtl_upgrade_check(struct throtl_grp *tg)
+{
+	unsigned long now = jiffies;
+
+	if (tg->td->limit_index != LIMIT_LOW)
+		return;
+
+	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
+		return;
+
+	tg->last_check_time = now;
+
+	if (!time_after_eq(now,
+	      __tg_last_low_overflow_time(tg) + tg->td->throtl_slice))
+		return;
+
+	if (throtl_can_upgrade(tg->td, NULL))
+		throtl_upgrade_state(tg->td);
+}
+
 static void throtl_upgrade_state(struct throtl_data *td)
 {
 	struct cgroup_subsys_state *pos_css;
@@ -1797,18 +1812,15 @@ static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
 	struct throtl_data *td = tg->td;
 	unsigned long now = jiffies;
 
-	if (time_after_eq(now, tg->last_dispatch_time[READ] +
-				td->throtl_slice) &&
-	    time_after_eq(now, tg->last_dispatch_time[WRITE] +
-				td->throtl_slice))
-		return false;
 	/*
 	 * If cgroup is below low limit, consider downgrade and throttle other
 	 * cgroups
 	 */
 	if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) &&
 	    time_after_eq(now, tg_last_low_overflow_time(tg) +
-					td->throtl_slice))
+					td->throtl_slice) &&
+	    (!throtl_tg_is_idle(tg) ||
+	     !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
 		return true;
 	return false;
 }
@@ -1931,10 +1943,10 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
 
 again:
 	while (true) {
-		tg->last_dispatch_time[rw] = jiffies;
 		if (tg->last_low_overflow_time[rw] == 0)
 			tg->last_low_overflow_time[rw] = jiffies;
 		throtl_downgrade_check(tg);
+		throtl_upgrade_check(tg);
 		/* throtl is FIFO - if bios are already queued, should queue */
 		if (sq->nr_queued[rw])
 			break;
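
As an aside, the new throtl_upgrade_check() rate-limits itself: it returns early unless at least one throtl_slice has passed since the last check, using the kernel's wrap-safe jiffies comparison. A small user-space sketch of that pattern follows, with time_after() modeled on the kernel macro and purely illustrative tick values.

/* Sketch (not kernel code): the wrap-safe "check at most once per slice"
 * pattern used by throtl_upgrade_check(). The struct and tick values are
 * hypothetical; time_after() mirrors the kernel macro's definition.
 */
#include <stdbool.h>
#include <stdio.h>

/* Wrap-safe comparison: true if a is after b, even across counter overflow. */
static bool time_after(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;
}

struct group {
	unsigned long last_check_time;	/* when we last ran the upgrade check */
	unsigned long slice;		/* minimum interval between checks */
};

/* Returns true if a full slice has elapsed, so another check may run. */
static bool should_check(struct group *g, unsigned long now)
{
	if (time_after(g->last_check_time + g->slice, now))
		return false;	/* checked too recently, skip */
	g->last_check_time = now;
	return true;
}

int main(void)
{
	struct group g = { .last_check_time = 0, .slice = 100 };

	printf("%d\n", should_check(&g, 50));	/* 0: still within the slice */
	printf("%d\n", should_check(&g, 150));	/* 1: a full slice has passed */
	return 0;
}
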
