blk-stat: use READ and WRITE instead of BLK_STAT_{READ,WRITE}
The stats buckets will become generic soon, so make the existing users
use the common READ and WRITE definitions instead of one internal to
blk-stat.

Signed-off-by: Omar Sandoval <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
osandov authored and axboe committed Mar 21, 2017
1 parent 0315b15 commit fa2e39c
Showing 6 changed files with 59 additions and 66 deletions.
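For reference, the "common READ and WRITE definitions" the commit message refers to are the long-standing kernel-wide constants. A minimal sketch of the relevant lines, assuming their v4.11-era placement in include/linux/kernel.h (the header has moved over time, but the values are fixed by the stat[2] array layout used throughout this diff):

#define READ			0
#define WRITE			1

Because the private BLK_STAT_READ/BLK_STAT_WRITE enum removed below used the same values, every hunk in this diff is a mechanical rename with no behavior change.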
12 changes: 6 additions & 6 deletions block/blk-mq-debugfs.c
@@ -333,17 +333,17 @@ static int hctx_stats_show(struct seq_file *m, void *v)
 	struct blk_mq_hw_ctx *hctx = m->private;
 	struct blk_rq_stat stat[2];
 
-	blk_stat_init(&stat[BLK_STAT_READ]);
-	blk_stat_init(&stat[BLK_STAT_WRITE]);
+	blk_stat_init(&stat[READ]);
+	blk_stat_init(&stat[WRITE]);
 
 	blk_hctx_stat_get(hctx, stat);
 
 	seq_puts(m, "read: ");
-	print_stat(m, &stat[BLK_STAT_READ]);
+	print_stat(m, &stat[READ]);
 	seq_puts(m, "\n");
 
 	seq_puts(m, "write: ");
-	print_stat(m, &stat[BLK_STAT_WRITE]);
+	print_stat(m, &stat[WRITE]);
 	seq_puts(m, "\n");
 	return 0;
 }
@@ -362,8 +362,8 @@ static ssize_t hctx_stats_write(struct file *file, const char __user *buf,
 	int i;
 
 	hctx_for_each_ctx(hctx, ctx, i) {
-		blk_stat_init(&ctx->stat[BLK_STAT_READ]);
-		blk_stat_init(&ctx->stat[BLK_STAT_WRITE]);
+		blk_stat_init(&ctx->stat[READ]);
+		blk_stat_init(&ctx->stat[WRITE]);
 	}
 	return count;
 }
12 changes: 6 additions & 6 deletions block/blk-mq.c
@@ -2040,8 +2040,8 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
 		spin_lock_init(&__ctx->lock);
 		INIT_LIST_HEAD(&__ctx->rq_list);
 		__ctx->queue = q;
-		blk_stat_init(&__ctx->stat[BLK_STAT_READ]);
-		blk_stat_init(&__ctx->stat[BLK_STAT_WRITE]);
+		blk_stat_init(&__ctx->stat[READ]);
+		blk_stat_init(&__ctx->stat[WRITE]);
 
 		/* If the cpu isn't online, the cpu is mapped to first hctx */
 		if (!cpu_online(i))
@@ -2769,10 +2769,10 @@ static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
 	 * important on devices where the completion latencies are longer
 	 * than ~10 usec.
 	 */
-	if (req_op(rq) == REQ_OP_READ && stat[BLK_STAT_READ].nr_samples)
-		ret = (stat[BLK_STAT_READ].mean + 1) / 2;
-	else if (req_op(rq) == REQ_OP_WRITE && stat[BLK_STAT_WRITE].nr_samples)
-		ret = (stat[BLK_STAT_WRITE].mean + 1) / 2;
+	if (req_op(rq) == REQ_OP_READ && stat[READ].nr_samples)
+		ret = (stat[READ].mean + 1) / 2;
+	else if (req_op(rq) == REQ_OP_WRITE && stat[WRITE].nr_samples)
+		ret = (stat[WRITE].mean + 1) / 2;
 
 	return ret;
 }
80 changes: 39 additions & 41 deletions block/blk-stat.c
@@ -55,25 +55,25 @@ static void blk_mq_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
 	uint64_t latest = 0;
 	int i, j, nr;
 
-	blk_stat_init(&dst[BLK_STAT_READ]);
-	blk_stat_init(&dst[BLK_STAT_WRITE]);
+	blk_stat_init(&dst[READ]);
+	blk_stat_init(&dst[WRITE]);
 
 	nr = 0;
 	do {
 		uint64_t newest = 0;
 
 		queue_for_each_hw_ctx(q, hctx, i) {
 			hctx_for_each_ctx(hctx, ctx, j) {
-				blk_stat_flush_batch(&ctx->stat[BLK_STAT_READ]);
-				blk_stat_flush_batch(&ctx->stat[BLK_STAT_WRITE]);
+				blk_stat_flush_batch(&ctx->stat[READ]);
+				blk_stat_flush_batch(&ctx->stat[WRITE]);
 
-				if (!ctx->stat[BLK_STAT_READ].nr_samples &&
-				    !ctx->stat[BLK_STAT_WRITE].nr_samples)
+				if (!ctx->stat[READ].nr_samples &&
+				    !ctx->stat[WRITE].nr_samples)
 					continue;
-				if (ctx->stat[BLK_STAT_READ].time > newest)
-					newest = ctx->stat[BLK_STAT_READ].time;
-				if (ctx->stat[BLK_STAT_WRITE].time > newest)
-					newest = ctx->stat[BLK_STAT_WRITE].time;
+				if (ctx->stat[READ].time > newest)
+					newest = ctx->stat[READ].time;
+				if (ctx->stat[WRITE].time > newest)
+					newest = ctx->stat[WRITE].time;
 			}
 		}
 
@@ -88,14 +88,14 @@ static void blk_mq_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
 
 		queue_for_each_hw_ctx(q, hctx, i) {
 			hctx_for_each_ctx(hctx, ctx, j) {
-				if (ctx->stat[BLK_STAT_READ].time == newest) {
-					blk_stat_sum(&dst[BLK_STAT_READ],
-						     &ctx->stat[BLK_STAT_READ]);
+				if (ctx->stat[READ].time == newest) {
+					blk_stat_sum(&dst[READ],
+						     &ctx->stat[READ]);
 					nr++;
 				}
-				if (ctx->stat[BLK_STAT_WRITE].time == newest) {
-					blk_stat_sum(&dst[BLK_STAT_WRITE],
-						     &ctx->stat[BLK_STAT_WRITE]);
+				if (ctx->stat[WRITE].time == newest) {
+					blk_stat_sum(&dst[WRITE],
+						     &ctx->stat[WRITE]);
 					nr++;
 				}
 			}
@@ -106,20 +106,20 @@ static void blk_mq_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
 		 */
 	} while (!nr);
 
-	dst[BLK_STAT_READ].time = dst[BLK_STAT_WRITE].time = latest;
+	dst[READ].time = dst[WRITE].time = latest;
 }
 
 void blk_queue_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
 {
 	if (q->mq_ops)
 		blk_mq_stat_get(q, dst);
 	else {
-		blk_stat_flush_batch(&q->rq_stats[BLK_STAT_READ]);
-		blk_stat_flush_batch(&q->rq_stats[BLK_STAT_WRITE]);
-		memcpy(&dst[BLK_STAT_READ], &q->rq_stats[BLK_STAT_READ],
-		       sizeof(struct blk_rq_stat));
-		memcpy(&dst[BLK_STAT_WRITE], &q->rq_stats[BLK_STAT_WRITE],
-		       sizeof(struct blk_rq_stat));
+		blk_stat_flush_batch(&q->rq_stats[READ]);
+		blk_stat_flush_batch(&q->rq_stats[WRITE]);
+		memcpy(&dst[READ], &q->rq_stats[READ],
+		       sizeof(struct blk_rq_stat));
+		memcpy(&dst[WRITE], &q->rq_stats[WRITE],
+		       sizeof(struct blk_rq_stat));
 	}
 }
 
@@ -133,31 +133,29 @@ void blk_hctx_stat_get(struct blk_mq_hw_ctx *hctx, struct blk_rq_stat *dst)
 		uint64_t newest = 0;
 
 		hctx_for_each_ctx(hctx, ctx, i) {
-			blk_stat_flush_batch(&ctx->stat[BLK_STAT_READ]);
-			blk_stat_flush_batch(&ctx->stat[BLK_STAT_WRITE]);
+			blk_stat_flush_batch(&ctx->stat[READ]);
+			blk_stat_flush_batch(&ctx->stat[WRITE]);
 
-			if (!ctx->stat[BLK_STAT_READ].nr_samples &&
-			    !ctx->stat[BLK_STAT_WRITE].nr_samples)
+			if (!ctx->stat[READ].nr_samples &&
+			    !ctx->stat[WRITE].nr_samples)
 				continue;
 
-			if (ctx->stat[BLK_STAT_READ].time > newest)
-				newest = ctx->stat[BLK_STAT_READ].time;
-			if (ctx->stat[BLK_STAT_WRITE].time > newest)
-				newest = ctx->stat[BLK_STAT_WRITE].time;
+			if (ctx->stat[READ].time > newest)
+				newest = ctx->stat[READ].time;
+			if (ctx->stat[WRITE].time > newest)
+				newest = ctx->stat[WRITE].time;
 		}
 
 		if (!newest)
 			break;
 
 		hctx_for_each_ctx(hctx, ctx, i) {
-			if (ctx->stat[BLK_STAT_READ].time == newest) {
-				blk_stat_sum(&dst[BLK_STAT_READ],
-					     &ctx->stat[BLK_STAT_READ]);
+			if (ctx->stat[READ].time == newest) {
+				blk_stat_sum(&dst[READ], &ctx->stat[READ]);
 				nr++;
 			}
-			if (ctx->stat[BLK_STAT_WRITE].time == newest) {
-				blk_stat_sum(&dst[BLK_STAT_WRITE],
-					     &ctx->stat[BLK_STAT_WRITE]);
+			if (ctx->stat[WRITE].time == newest) {
+				blk_stat_sum(&dst[WRITE], &ctx->stat[WRITE]);
 				nr++;
 			}
 		}
@@ -226,13 +224,13 @@ void blk_stat_clear(struct request_queue *q)
 
 		queue_for_each_hw_ctx(q, hctx, i) {
 			hctx_for_each_ctx(hctx, ctx, j) {
-				blk_stat_init(&ctx->stat[BLK_STAT_READ]);
-				blk_stat_init(&ctx->stat[BLK_STAT_WRITE]);
+				blk_stat_init(&ctx->stat[READ]);
+				blk_stat_init(&ctx->stat[WRITE]);
 			}
 		}
 	} else {
-		blk_stat_init(&q->rq_stats[BLK_STAT_READ]);
-		blk_stat_init(&q->rq_stats[BLK_STAT_WRITE]);
+		blk_stat_init(&q->rq_stats[READ]);
+		blk_stat_init(&q->rq_stats[WRITE]);
 	}
 }
5 changes: 0 additions & 5 deletions block/blk-stat.h
@@ -15,11 +15,6 @@
 #define BLK_STAT_TIME_MASK	((1ULL << BLK_STAT_SHIFT) - 1)
 #define BLK_STAT_MASK		~BLK_STAT_TIME_MASK
 
-enum {
-	BLK_STAT_READ	= 0,
-	BLK_STAT_WRITE,
-};
-
 void blk_stat_add(struct blk_rq_stat *, struct request *);
 void blk_hctx_stat_get(struct blk_mq_hw_ctx *, struct blk_rq_stat *);
 void blk_queue_stat_get(struct request_queue *, struct blk_rq_stat *);
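The hunk above is the point of the whole commit: the deleted enum pinned BLK_STAT_READ to 0, with BLK_STAT_WRITE following as 1, exactly matching the kernel-wide READ and WRITE. A small standalone sketch (hypothetical userspace code, not part of the commit) demonstrating that both naming schemes index a two-entry stats array identically:

/* Hypothetical check: BLK_STAT_* and READ/WRITE agree, so the
 * stat[2] arrays used throughout this diff keep the same layout
 * after the rename. */
#include <assert.h>

#define READ	0	/* kernel-wide values */
#define WRITE	1

enum {			/* the enum this commit deletes */
	BLK_STAT_READ = 0,
	BLK_STAT_WRITE,
};

int main(void)
{
	int stat[2] = { 100, 200 };

	assert(stat[BLK_STAT_READ] == stat[READ]);
	assert(stat[BLK_STAT_WRITE] == stat[WRITE]);
	return 0;
}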
4 changes: 2 additions & 2 deletions block/blk-sysfs.c
@@ -518,8 +518,8 @@ static ssize_t queue_stats_show(struct request_queue *q, char *page)
 
 	blk_queue_stat_get(q, stat);
 
-	ret = print_stat(page, &stat[BLK_STAT_READ], "read :");
-	ret += print_stat(page + ret, &stat[BLK_STAT_WRITE], "write:");
+	ret = print_stat(page, &stat[READ], "read :");
+	ret += print_stat(page + ret, &stat[WRITE], "write:");
 	return ret;
 }
 
12 changes: 6 additions & 6 deletions block/blk-wbt.c
@@ -255,8 +255,8 @@ static inline bool stat_sample_valid(struct blk_rq_stat *stat)
 	 * that it's writes impacting us, and not just some sole read on
 	 * a device that is in a lower power state.
 	 */
-	return stat[BLK_STAT_READ].nr_samples >= 1 &&
-		stat[BLK_STAT_WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES;
+	return (stat[READ].nr_samples >= 1 &&
+		stat[WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES);
 }
 
 static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
@@ -293,7 +293,7 @@ static int __latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
 	 */
 	thislat = rwb_sync_issue_lat(rwb);
 	if (thislat > rwb->cur_win_nsec ||
-	    (thislat > rwb->min_lat_nsec && !stat[BLK_STAT_READ].nr_samples)) {
+	    (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
 		trace_wbt_lat(bdi, thislat);
 		return LAT_EXCEEDED;
 	}
@@ -308,7 +308,7 @@ static int __latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
 	 * waited or still has writes in flights, consider us doing
 	 * just writes as well.
 	 */
-	if ((stat[BLK_STAT_WRITE].nr_samples && blk_stat_is_current(stat)) ||
+	if ((stat[WRITE].nr_samples && blk_stat_is_current(stat)) ||
 	    wb_recent_wait(rwb) || wbt_inflight(rwb))
 		return LAT_UNKNOWN_WRITES;
 	return LAT_UNKNOWN;
@@ -317,8 +317,8 @@ static int __latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
 	/*
 	 * If the 'min' latency exceeds our target, step down.
 	 */
-	if (stat[BLK_STAT_READ].min > rwb->min_lat_nsec) {
-		trace_wbt_lat(bdi, stat[BLK_STAT_READ].min);
+	if (stat[READ].min > rwb->min_lat_nsec) {
+		trace_wbt_lat(bdi, stat[READ].min);
 		trace_wbt_stat(bdi, stat);
 		return LAT_EXCEEDED;
 	}
