blk-rq-qos: refactor out common elements of blk-wbt
blkcg-qos is going to do essentially what wbt does, only on a cgroup
basis. Break out the common code that will be shared between blkcg-qos
and wbt into blk-rq-qos.* so they can both utilize the same
infrastructure.

Signed-off-by: Josef Bacik <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
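
The new file added below is blk-rq-qos.c; the shared types it relies on live in blk-rq-qos.h, which is not part of this excerpt. Judging from how the code walks q->rq_qos and dispatches through rqos->ops, those types have at least roughly the following shape. This is a sketch reconstructed from usage, not the header's verbatim definition:

struct rq_qos;

struct rq_qos_ops {
        enum wbt_flags (*throttle)(struct rq_qos *, struct bio *,
                                   spinlock_t *);
        void (*issue)(struct rq_qos *, struct request *);
        void (*requeue)(struct rq_qos *, struct request *);
        void (*done)(struct rq_qos *, struct request *);
        void (*cleanup)(struct rq_qos *, enum wbt_flags);
        void (*exit)(struct rq_qos *);
};

struct rq_qos {
        struct rq_qos_ops *ops;  /* per-policy callbacks */
        struct rq_qos *next;     /* singly linked chain off q->rq_qos */
        /* the real header likely carries more per-policy state */
};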
Showing 10 changed files with 478 additions and 251 deletions.
block/blk-rq-qos.c
@@ -0,0 +1,178 @@
#include "blk-rq-qos.h" | ||
|
||
#include "blk-wbt.h" | ||
|
||
/* | ||
* Increment 'v', if 'v' is below 'below'. Returns true if we succeeded, | ||
* false if 'v' + 1 would be bigger than 'below'. | ||
*/ | ||
static bool atomic_inc_below(atomic_t *v, int below) | ||
{ | ||
int cur = atomic_read(v); | ||
|
||
for (;;) { | ||
int old; | ||
|
||
if (cur >= below) | ||
return false; | ||
old = atomic_cmpxchg(v, cur, cur + 1); | ||
if (old == cur) | ||
break; | ||
cur = old; | ||
} | ||
|
||
return true; | ||
} | ||
|
||
bool rq_wait_inc_below(struct rq_wait *rq_wait, int limit) | ||
{ | ||
return atomic_inc_below(&rq_wait->inflight, limit); | ||
} | ||
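
/*
 * Worked example (illustrative, not in the diff): with limit = 2 and
 * ->inflight at 1, the cmpxchg loop advances the counter to 2 and
 * returns true; with ->inflight already at 2, the caller sees
 * cur >= below and gets false with the counter untouched. On false,
 * a caller is expected to block and retry, as wbt's wait loop does.
 */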

void rq_qos_cleanup(struct request_queue *q, enum wbt_flags wb_acct)
{
        struct rq_qos *rqos;

        for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
                if (rqos->ops->cleanup)
                        rqos->ops->cleanup(rqos, wb_acct);
        }
}

void rq_qos_done(struct request_queue *q, struct request *rq)
{
        struct rq_qos *rqos;

        for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
                if (rqos->ops->done)
                        rqos->ops->done(rqos, rq);
        }
}

void rq_qos_issue(struct request_queue *q, struct request *rq)
{
        struct rq_qos *rqos;

        for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
                if (rqos->ops->issue)
                        rqos->ops->issue(rqos, rq);
        }
}

void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
        struct rq_qos *rqos;

        for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
                if (rqos->ops->requeue)
                        rqos->ops->requeue(rqos, rq);
        }
}

enum wbt_flags rq_qos_throttle(struct request_queue *q, struct bio *bio,
                               spinlock_t *lock)
{
        struct rq_qos *rqos;
        enum wbt_flags flags = 0;

        for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
                if (rqos->ops->throttle)
                        flags |= rqos->ops->throttle(rqos, bio, lock);
        }
        return flags;
}
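
/*
 * Note: each policy contributes bits to the returned wbt_flags, and
 * rq_qos_cleanup() hands the same mask back, so a policy can recognize
 * bios it throttled. (Inferred from the signatures above.)
 */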

/*
 * Return true if we can't increase the depth further by scaling.
 */
bool rq_depth_calc_max_depth(struct rq_depth *rqd)
{
        unsigned int depth;
        bool ret = false;

        /*
         * For QD=1 devices, this is a special case. It's important for those
         * to have one request ready when one completes, so force a depth of
         * 2 for those devices. On the backend, it'll be a depth of 1 anyway,
         * since the device can't have more than that in flight. If we're
         * scaling down, then keep a setting of 1/1/1.
         */
        if (rqd->queue_depth == 1) {
                if (rqd->scale_step > 0)
                        rqd->max_depth = 1;
                else {
                        rqd->max_depth = 2;
                        ret = true;
                }
        } else {
                /*
                 * scale_step == 0 is our default state. If we have suffered
                 * latency spikes, step will be > 0, and we shrink the
                 * allowed write depths. If step is < 0, we're only doing
                 * writes, and we allow a temporarily higher depth to
                 * increase performance.
                 */
                depth = min_t(unsigned int, rqd->default_depth,
                              rqd->queue_depth);
                if (rqd->scale_step > 0)
                        depth = 1 + ((depth - 1) >> min(31, rqd->scale_step));
                else if (rqd->scale_step < 0) {
                        unsigned int maxd = 3 * rqd->queue_depth / 4;

                        depth = 1 + ((depth - 1) << -rqd->scale_step);
                        if (depth > maxd) {
                                depth = maxd;
                                ret = true;
                        }
                }

                rqd->max_depth = depth;
        }

        return ret;
}
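
/*
 * Worked example (illustrative values): with default_depth = 64 and
 * queue_depth = 128, depth starts at min(64, 128) = 64. Then:
 *
 *   scale_step = 1:   depth = 1 + (63 >> 1) = 32
 *   scale_step = 2:   depth = 1 + (63 >> 2) = 16
 *   scale_step = -1:  depth = 1 + (63 << 1) = 127, clamped to
 *                     maxd = 3 * 128 / 4 = 96, and the function
 *                     returns true (can't scale up any further).
 */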

void rq_depth_scale_up(struct rq_depth *rqd)
{
        /*
         * Hit max in previous round, stop here.
         */
        if (rqd->scaled_max)
                return;

        rqd->scale_step--;

        rqd->scaled_max = rq_depth_calc_max_depth(rqd);
}

/*
 * Scale rwb down. If 'hard_throttle' is set, do it quicker, since we
 * had a latency violation.
 */
void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
{
        /*
         * Stop scaling down when we've hit the limit. This also prevents
         * ->scale_step from going to crazy values, if the device can't
         * keep up.
         */
        if (rqd->max_depth == 1)
                return;

        if (rqd->scale_step < 0 && hard_throttle)
                rqd->scale_step = 0;
        else
                rqd->scale_step++;

        rqd->scaled_max = false;
        rq_depth_calc_max_depth(rqd);
}
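
/*
 * The two helpers above form a feedback loop: latency violations call
 * rq_depth_scale_down() (scale_step++, shallower depth), while clean
 * windows call rq_depth_scale_up() (scale_step--, deeper depth) until
 * rq_depth_calc_max_depth() signals the ceiling through ->scaled_max.
 */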

void rq_qos_exit(struct request_queue *q)
{
        while (q->rq_qos) {
                struct rq_qos *rqos = q->rq_qos;
                q->rq_qos = rqos->next;
                rqos->ops->exit(rqos);
        }
}
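
Attachment is left to the individual policies: rq_qos_exit() pops entries off q->rq_qos until the chain is empty, which implies a policy prepends itself on init. A minimal sketch of what that could look like, assuming the struct shape inferred earlier; the example_* names are hypothetical, not part of this commit:

static enum wbt_flags example_throttle(struct rq_qos *rqos, struct bio *bio,
                                       spinlock_t *lock);
static void example_done(struct rq_qos *rqos, struct request *rq);
static void example_exit(struct rq_qos *rqos);

static struct rq_qos_ops example_qos_ops = {
        .throttle = example_throttle,
        .done     = example_done,
        .exit     = example_exit,
};

/* Hypothetical attach: prepend this policy onto the queue's chain. */
static void example_qos_attach(struct request_queue *q, struct rq_qos *rqos)
{
        rqos->ops = &example_qos_ops;
        rqos->next = q->rq_qos;
        q->rq_qos = rqos;
}

Hooks other than ->exit may be left NULL, since every dispatch loop above checks the pointer before calling; ->exit must be set because rq_qos_exit() invokes it without a NULL check.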