Skip to content

Commit

Permalink
blk-mq: abstract out blk_mq_dispatch_rq_list() helper
Browse files Browse the repository at this point in the history
Takes a list of requests, and dispatches it. Moves any residual
requests to the dispatch list.

Signed-off-by: Jens Axboe <[email protected]>
Reviewed-by: Hannes Reinecke <[email protected]>
  • Loading branch information
axboe committed Dec 9, 2016
1 parent ae911c5 commit f04c3df
Show file tree
Hide file tree
Showing 2 changed files with 48 additions and 38 deletions.
85 changes: 47 additions & 38 deletions block/blk-mq.c
Original file line number Diff line number Diff line change
Expand Up @@ -821,41 +821,13 @@ static inline unsigned int queued_to_index(unsigned int queued)
return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
}

/*
* Run this hardware queue, pulling any software queues mapped to it in.
* Note that this function currently has various problems around ordering
* of IO. In particular, we'd like FIFO behaviour on handling existing
* items on the hctx->dispatch list. Ignore that for now.
*/
static void blk_mq_process_rq_list(struct blk_mq_hw_ctx *hctx)
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
struct request_queue *q = hctx->queue;
struct request *rq;
LIST_HEAD(rq_list);
LIST_HEAD(driver_list);
struct list_head *dptr;
int queued;

if (unlikely(blk_mq_hctx_stopped(hctx)))
return;

hctx->run++;

/*
* Touch any software queue that has pending entries.
*/
flush_busy_ctxs(hctx, &rq_list);

/*
* If we have previous entries on our dispatch list, grab them
* and stuff them at the front for more fair dispatch.
*/
if (!list_empty_careful(&hctx->dispatch)) {
spin_lock(&hctx->lock);
if (!list_empty(&hctx->dispatch))
list_splice_init(&hctx->dispatch, &rq_list);
spin_unlock(&hctx->lock);
}
int queued, ret = BLK_MQ_RQ_QUEUE_OK;

/*
* Start off with dptr being NULL, so we start the first request
Expand All @@ -867,24 +839,23 @@ static void blk_mq_process_rq_list(struct blk_mq_hw_ctx *hctx)
* Now process all the entries, sending them to the driver.
*/
queued = 0;
while (!list_empty(&rq_list)) {
while (!list_empty(list)) {
struct blk_mq_queue_data bd;
int ret;

rq = list_first_entry(&rq_list, struct request, queuelist);
rq = list_first_entry(list, struct request, queuelist);
list_del_init(&rq->queuelist);

bd.rq = rq;
bd.list = dptr;
bd.last = list_empty(&rq_list);
bd.last = list_empty(list);

ret = q->mq_ops->queue_rq(hctx, &bd);
switch (ret) {
case BLK_MQ_RQ_QUEUE_OK:
queued++;
break;
case BLK_MQ_RQ_QUEUE_BUSY:
list_add(&rq->queuelist, &rq_list);
list_add(&rq->queuelist, list);
__blk_mq_requeue_request(rq);
break;
default:
Expand All @@ -902,7 +873,7 @@ static void blk_mq_process_rq_list(struct blk_mq_hw_ctx *hctx)
* We've done the first request. If we have more than 1
* left in the list, set dptr to defer issue.
*/
if (!dptr && rq_list.next != rq_list.prev)
if (!dptr && list->next != list->prev)
dptr = &driver_list;
}

Expand All @@ -912,10 +883,11 @@ static void blk_mq_process_rq_list(struct blk_mq_hw_ctx *hctx)
* Any items that need requeuing? Stuff them into hctx->dispatch,
* that is where we will continue on next queue run.
*/
if (!list_empty(&rq_list)) {
if (!list_empty(list)) {
spin_lock(&hctx->lock);
list_splice(&rq_list, &hctx->dispatch);
list_splice(list, &hctx->dispatch);
spin_unlock(&hctx->lock);

/*
* the queue is expected stopped with BLK_MQ_RQ_QUEUE_BUSY, but
* it's possible the queue is stopped and restarted again
Expand All @@ -927,6 +899,43 @@ static void blk_mq_process_rq_list(struct blk_mq_hw_ctx *hctx)
**/
blk_mq_run_hw_queue(hctx, true);
}

return ret != BLK_MQ_RQ_QUEUE_BUSY;
}

/*
 * Run this hardware queue, pulling any software queues mapped to it in.
 * Note that this function currently has various problems around ordering
 * of IO. In particular, we'd like FIFO behaviour on handling existing
 * items on the hctx->dispatch list. Ignore that for now.
 */
static void blk_mq_process_rq_list(struct blk_mq_hw_ctx *hctx)
{
	LIST_HEAD(rq_list);

	if (unlikely(blk_mq_hctx_stopped(hctx)))
		return;

	hctx->run++;

	/*
	 * Touch any software queue that has pending entries.
	 */
	flush_busy_ctxs(hctx, &rq_list);

	/*
	 * If we have previous entries on our dispatch list, grab them
	 * and stuff them at the front for more fair dispatch. The
	 * unlocked list_empty_careful() is a cheap pre-check so we only
	 * take hctx->lock when there is likely work to splice; the check
	 * is repeated under the lock before the splice.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Hand the whole list to the driver; anything the driver cannot
	 * take is parked back on hctx->dispatch by the helper.
	 */
	blk_mq_dispatch_rq_list(hctx, &rq_list);
}

static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
Expand Down
1 change: 1 addition & 0 deletions block/blk-mq.h
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@ void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *, struct list_head *);

/*
* CPU hotplug helpers
Expand Down

0 comments on commit f04c3df

Please sign in to comment.