blk-mq: pass in request/bio flags to queue mapping
Prep patch for being able to place requests based not just on
CPU location, but also on the type of request.

Reviewed-by: Hannes Reinecke <[email protected]>
Reviewed-by: Keith Busch <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
axboe committed Nov 7, 2018
1 parent ff2c566 commit f9afca4
Showing 7 changed files with 57 additions and 37 deletions.
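Note: the mechanical change is that every queue-mapping lookup now carries the command flags (rq->cmd_flags or bio->bi_opf), even though blk_mq_map_queue() does not consult them yet. As a hypothetical sketch of where this is headed — not part of this patch, and the type choice below is purely illustrative — a later patch could route by operation type:

	static struct blk_mq_hw_ctx *map_by_op_type(struct request_queue *q,
						    unsigned int flags,
						    unsigned int cpu)
	{
		/* hypothetical: pick a different queue map for writes */
		unsigned int hctx_type = op_is_write(flags) ? 1 : 0;

		return blk_mq_map_queue_type(q, hctx_type, cpu);
	}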
7 changes: 4 additions & 3 deletions block/blk-flush.c
@@ -215,7 +215,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 
 	/* release the tag's ownership to the req cloned from */
 	spin_lock_irqsave(&fq->mq_flush_lock, flags);
-	hctx = blk_mq_map_queue(q, flush_rq->mq_ctx->cpu);
+	hctx = blk_mq_map_queue(q, flush_rq->cmd_flags, flush_rq->mq_ctx->cpu);
 	if (!q->elevator) {
 		blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
 		flush_rq->tag = -1;
@@ -301,7 +301,8 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
 	if (!q->elevator) {
 		fq->orig_rq = first_rq;
 		flush_rq->tag = first_rq->tag;
-		hctx = blk_mq_map_queue(q, first_rq->mq_ctx->cpu);
+		hctx = blk_mq_map_queue(q, first_rq->cmd_flags,
+					first_rq->mq_ctx->cpu);
 		blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
 	} else {
 		flush_rq->internal_tag = first_rq->internal_tag;
@@ -324,7 +325,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
 	unsigned long flags;
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);
 
-	hctx = blk_mq_map_queue(q, ctx->cpu);
+	hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
 
 	if (q->elevator) {
 		WARN_ON(rq->tag < 0);
4 changes: 3 additions & 1 deletion block/blk-mq-debugfs.c
@@ -427,8 +427,10 @@ struct show_busy_params {
 static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
 {
 	const struct show_busy_params *params = data;
+	struct blk_mq_hw_ctx *hctx;
 
-	if (blk_mq_map_queue(rq->q, rq->mq_ctx->cpu) == params->hctx)
+	hctx = blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu);
+	if (hctx == params->hctx)
 		__blk_mq_debugfs_rq_show(params->m,
 				list_entry_rq(&rq->queuelist));
 }
16 changes: 12 additions & 4 deletions block/blk-mq-sched.c
@@ -310,7 +310,7 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
 {
 	struct elevator_queue *e = q->elevator;
 	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx->cpu);
 	bool ret = false;
 
 	if (e && e->type->ops.bio_merge) {
@@ -366,7 +366,9 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 	struct request_queue *q = rq->q;
 	struct elevator_queue *e = q->elevator;
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+	struct blk_mq_hw_ctx *hctx;
+
+	hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
 
 	/* flush rq in flush machinery need to be dispatched directly */
 	if (!(rq->rq_flags & RQF_FLUSH_SEQ) && op_is_flush(rq->cmd_flags)) {
@@ -399,9 +401,15 @@ void blk_mq_sched_insert_requests(struct request_queue *q,
 				  struct blk_mq_ctx *ctx,
 				  struct list_head *list, bool run_queue_async)
 {
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
-	struct elevator_queue *e = hctx->queue->elevator;
+	struct blk_mq_hw_ctx *hctx;
+	struct elevator_queue *e;
+	struct request *rq;
+
+	/* For list inserts, requests better be on the same hw queue */
+	rq = list_first_entry(list, struct request, queuelist);
+	hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
 
+	e = hctx->queue->elevator;
 	if (e && e->type->ops.insert_requests)
 		e->type->ops.insert_requests(hctx, list, false);
 	else {
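Note the new assumption in blk_mq_sched_insert_requests(): the hardware queue is now derived from the first request on the list, so every request on that list must map to the same hctx. A hedged debug sketch of that invariant — illustrative only, not part of the patch:

	static void check_same_hctx(struct request_queue *q,
				    struct blk_mq_ctx *ctx,
				    struct list_head *list)
	{
		struct blk_mq_hw_ctx *first = NULL;
		struct request *rq;

		list_for_each_entry(rq, list, queuelist) {
			struct blk_mq_hw_ctx *hctx;

			hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
			if (!first)
				first = hctx;
			/* all list entries should share one hw queue */
			WARN_ON_ONCE(hctx != first);
		}
	}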
5 changes: 3 additions & 2 deletions block/blk-mq-tag.c
@@ -168,7 +168,8 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 		io_schedule();
 
 		data->ctx = blk_mq_get_ctx(data->q);
-		data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
+		data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
+					      data->ctx->cpu);
 		tags = blk_mq_tags_from_data(data);
 		if (data->flags & BLK_MQ_REQ_RESERVED)
 			bt = &tags->breserved_tags;
@@ -530,7 +531,7 @@ u32 blk_mq_unique_tag(struct request *rq)
 	struct blk_mq_hw_ctx *hctx;
 	int hwq = 0;
 
-	hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
+	hctx = blk_mq_map_queue(q, rq->cmd_flags, rq->mq_ctx->cpu);
 	hwq = hctx->queue_num;
 
 	return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
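The unique-tag encoding itself is unchanged: the hardware queue index sits above BLK_MQ_UNIQUE_TAG_BITS and the per-queue tag below it. A minimal decode sketch using the existing helpers from include/linux/blk-mq.h:

	static void show_unique_tag(u32 unique_tag)
	{
		/* high bits: hardware queue; low bits: tag within it */
		u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
		u16 tag = blk_mq_unique_tag_to_tag(unique_tag);

		pr_debug("hwq %u tag %u\n", hwq, tag);
	}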
50 changes: 28 additions & 22 deletions block/blk-mq.c
@@ -331,8 +331,8 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 }
 
 static struct request *blk_mq_get_request(struct request_queue *q,
-					  struct bio *bio, unsigned int op,
-					  struct blk_mq_alloc_data *data)
+					  struct bio *bio,
+					  struct blk_mq_alloc_data *data)
 {
 	struct elevator_queue *e = q->elevator;
 	struct request *rq;
@@ -346,8 +346,9 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 		put_ctx_on_error = true;
 	}
 	if (likely(!data->hctx))
-		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
-	if (op & REQ_NOWAIT)
+		data->hctx = blk_mq_map_queue(q, data->cmd_flags,
+					      data->ctx->cpu);
+	if (data->cmd_flags & REQ_NOWAIT)
 		data->flags |= BLK_MQ_REQ_NOWAIT;
 
 	if (e) {
@@ -358,9 +359,10 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 		 * dispatch list. Don't include reserved tags in the
 		 * limiting, as it isn't useful.
 		 */
-		if (!op_is_flush(op) && e->type->ops.limit_depth &&
+		if (!op_is_flush(data->cmd_flags) &&
+		    e->type->ops.limit_depth &&
 		    !(data->flags & BLK_MQ_REQ_RESERVED))
-			e->type->ops.limit_depth(op, data);
+			e->type->ops.limit_depth(data->cmd_flags, data);
 	} else {
 		blk_mq_tag_busy(data->hctx);
 	}
@@ -375,8 +377,8 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 		return NULL;
 	}
 
-	rq = blk_mq_rq_ctx_init(data, tag, op);
-	if (!op_is_flush(op)) {
+	rq = blk_mq_rq_ctx_init(data, tag, data->cmd_flags);
+	if (!op_is_flush(data->cmd_flags)) {
 		rq->elv.icq = NULL;
 		if (e && e->type->ops.prepare_request) {
 			if (e->type->icq_cache && rq_ioc(bio))
@@ -393,15 +395,15 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
 		blk_mq_req_flags_t flags)
 {
-	struct blk_mq_alloc_data alloc_data = { .flags = flags };
+	struct blk_mq_alloc_data alloc_data = { .flags = flags, .cmd_flags = op };
 	struct request *rq;
 	int ret;
 
 	ret = blk_queue_enter(q, flags);
 	if (ret)
 		return ERR_PTR(ret);
 
-	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
+	rq = blk_mq_get_request(q, NULL, &alloc_data);
 	blk_queue_exit(q);
 
 	if (!rq)
@@ -419,7 +421,7 @@ EXPORT_SYMBOL(blk_mq_alloc_request);
 struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
 {
-	struct blk_mq_alloc_data alloc_data = { .flags = flags };
+	struct blk_mq_alloc_data alloc_data = { .flags = flags, .cmd_flags = op };
 	struct request *rq;
 	unsigned int cpu;
 	int ret;
@@ -452,7 +454,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask);
 	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
 
-	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
+	rq = blk_mq_get_request(q, NULL, &alloc_data);
 	blk_queue_exit(q);
 
 	if (!rq)
@@ -466,7 +468,7 @@ static void __blk_mq_free_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
 	const int sched_tag = rq->internal_tag;
 
 	blk_pm_mark_last_busy(rq);
@@ -483,7 +485,7 @@ void blk_mq_free_request(struct request *rq)
 	struct request_queue *q = rq->q;
 	struct elevator_queue *e = q->elevator;
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
 
 	if (rq->rq_flags & RQF_ELVPRIV) {
 		if (e && e->type->ops.finish_request)
@@ -977,8 +979,9 @@ bool blk_mq_get_driver_tag(struct request *rq)
 {
 	struct blk_mq_alloc_data data = {
 		.q = rq->q,
-		.hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
+		.hctx = blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu),
 		.flags = BLK_MQ_REQ_NOWAIT,
+		.cmd_flags = rq->cmd_flags,
 	};
 	bool shared;
 
@@ -1142,7 +1145,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 
 		rq = list_first_entry(list, struct request, queuelist);
 
-		hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
+		hctx = blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu);
 		if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
 			break;
 
@@ -1573,7 +1576,8 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
 {
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, rq->cmd_flags,
+						      ctx->cpu);
 
 	spin_lock(&hctx->lock);
 	list_add_tail(&rq->queuelist, &hctx->dispatch);
@@ -1783,7 +1787,8 @@ blk_status_t blk_mq_request_issue_directly(struct request *rq)
 	int srcu_idx;
 	blk_qc_t unused_cookie;
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, rq->cmd_flags,
+						      ctx->cpu);
 
 	hctx_lock(hctx, &srcu_idx);
 	ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true);
@@ -1817,7 +1822,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = op_is_sync(bio->bi_opf);
 	const int is_flush_fua = op_is_flush(bio->bi_opf);
-	struct blk_mq_alloc_data data = { .flags = 0 };
+	struct blk_mq_alloc_data data = { .flags = 0, .cmd_flags = bio->bi_opf };
 	struct request *rq;
 	unsigned int request_count = 0;
 	struct blk_plug *plug;
@@ -1840,7 +1845,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 	rq_qos_throttle(q, bio, NULL);
 
-	rq = blk_mq_get_request(q, bio, bio->bi_opf, &data);
+	rq = blk_mq_get_request(q, bio, &data);
 	if (unlikely(!rq)) {
 		rq_qos_cleanup(q, bio);
 		if (bio->bi_opf & REQ_NOWAIT)
@@ -1909,6 +1914,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 		if (same_queue_rq) {
 			data.hctx = blk_mq_map_queue(q,
+					same_queue_rq->cmd_flags,
 					same_queue_rq->mq_ctx->cpu);
 			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
 					&cookie);
@@ -2263,7 +2269,7 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
 		 * Set local node, IFF we have more than one hw queue. If
 		 * not, we remain on the home node of the device
 		 */
-		hctx = blk_mq_map_queue(q, i);
+		hctx = blk_mq_map_queue_type(q, 0, i);
 		if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
 			hctx->numa_node = local_memory_node(cpu_to_node(i));
 	}
@@ -2336,7 +2342,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 		}
 
 		ctx = per_cpu_ptr(q->queue_ctx, i);
-		hctx = blk_mq_map_queue(q, i);
+		hctx = blk_mq_map_queue_type(q, 0, i);
 
 		cpumask_set_cpu(i, hctx->cpumask);
 		ctx->index_hw = hctx->nr_ctx;
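External callers are unaffected by the blk_mq_get_request() signature change: the op they already pass to blk_mq_alloc_request() now also seeds alloc_data.cmd_flags, so the hardware queue is picked with the command flags in hand. A minimal caller sketch, assuming a live request_queue q:

	static int alloc_one_read(struct request_queue *q)
	{
		struct request *rq;

		rq = blk_mq_alloc_request(q, REQ_OP_READ, 0);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		/* REQ_OP_READ travelled through the queue mapping */
		blk_mq_free_request(rq);
		return 0;
	}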
6 changes: 4 additions & 2 deletions block/blk-mq.h
@@ -73,6 +73,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);
 
 static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
+						     unsigned int flags,
 						     unsigned int cpu)
 {
 	struct blk_mq_tag_set *set = q->tag_set;
@@ -84,7 +85,7 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *
 							  unsigned int hctx_type,
 							  unsigned int cpu)
 {
-	return blk_mq_map_queue(q, cpu);
+	return blk_mq_map_queue(q, hctx_type, cpu);
 }
 
 /*
@@ -135,6 +136,7 @@ struct blk_mq_alloc_data {
 	struct request_queue *q;
 	blk_mq_req_flags_t flags;
 	unsigned int shallow_depth;
+	unsigned int cmd_flags;
 
 	/* input & output parameter */
 	struct blk_mq_ctx *ctx;
@@ -209,7 +211,7 @@ static inline void blk_mq_put_driver_tag(struct request *rq)
 	if (rq->tag == -1 || rq->internal_tag == -1)
 		return;
 
-	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
+	hctx = blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu);
 	__blk_mq_put_driver_tag(hctx, rq);
 }
 
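After this patch there are two call styles: request-based sites pass rq->cmd_flags, bio-based sites pass bio->bi_opf, and blk_mq_map_queue_type() reuses the flags slot for an explicit type (always 0 for now). The flags are not yet consulted by the lookup itself; that is left to later patches in the series. A sketch of both styles, illustrative only:

	static struct blk_mq_hw_ctx *map_for_rq(struct request *rq)
	{
		/* request-based lookup: flags travel in rq->cmd_flags */
		return blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu);
	}

	static struct blk_mq_hw_ctx *map_for_bio(struct request_queue *q,
						 struct blk_mq_ctx *ctx,
						 struct bio *bio)
	{
		/* bio-based lookup, before a request exists */
		return blk_mq_map_queue(q, bio->bi_opf, ctx->cpu);
	}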
6 changes: 3 additions & 3 deletions block/blk.h
@@ -104,10 +104,10 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 	__clear_bit(flag, &q->queue_flags);
 }
 
-static inline struct blk_flush_queue *blk_get_flush_queue(
-	struct request_queue *q, struct blk_mq_ctx *ctx)
+static inline struct blk_flush_queue *
+blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
 {
-	return blk_mq_map_queue(q, ctx->cpu)->fq;
+	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx->cpu)->fq;
 }
 
 static inline void __blk_get_queue(struct request_queue *q)
