Skip to content

Commit

Permalink
blk-mq: avoid excessive boot delays with large lun counts
Browse files Browse the repository at this point in the history
Hi,

Zhangqing Luo reported long boot times on a system with thousands of
LUNs when scsi-mq was enabled.  He narrowed the problem down to
blk_mq_add_queue_tag_set, where every queue is frozen in order to set
the BLK_MQ_F_TAG_SHARED flag.  Each added device will freeze all queues
added before it in sequence, which involves waiting for an RCU grace
period for each one.  We don't need to do this.  After the second queue
is added, only new queues need to be initialized with the shared tag.
We can do that by percolating the flag up to the blk_mq_tag_set, and
updating the newly added queue's hctxs if the flag is set.

This problem was introduced by commit 0d2602c (blk-mq: improve
support for shared tags maps).

Reported-and-tested-by: Jason Luo <[email protected]>
Reviewed-by: Ming Lei <[email protected]>
Signed-off-by: Jeff Moyer <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
  • Loading branch information
JeffMoyer authored and axboe committed Nov 3, 2015
1 parent cdea01b commit 2404e60
Showing 1 changed file with 30 additions and 17 deletions.
47 changes: 30 additions & 17 deletions block/blk-mq.c
Original file line number Diff line number Diff line change
Expand Up @@ -1695,7 +1695,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
INIT_LIST_HEAD(&hctx->dispatch);
hctx->queue = q;
hctx->queue_num = hctx_idx;
hctx->flags = set->flags;
hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;

blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
blk_mq_hctx_notify, hctx);
Expand Down Expand Up @@ -1882,27 +1882,26 @@ static void blk_mq_map_swqueue(struct request_queue *q,
}
}

/*
 * Set or clear BLK_MQ_F_TAG_SHARED on every hardware context of @q.
 *
 * Caller must have frozen the queue (or the queue must not yet be live)
 * so that no requests observe the flag mid-flip.
 */
static void queue_set_hctx_shared(struct request_queue *q, bool shared)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (shared)
			hctx->flags |= BLK_MQ_F_TAG_SHARED;
		else
			hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
	}
}

/*
 * Propagate a shared/unshared tag-map transition to every queue already
 * attached to @set.
 *
 * Each queue is frozen around the flag update; callers hold
 * set->tag_list_lock.  Note this walks only existing queues — newly
 * added queues pick the flag up from set->flags instead, which avoids
 * refreezing every prior queue on each device add (the boot-delay fix).
 */
static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
{
	struct request_queue *q;

	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_freeze_queue(q);
		queue_set_hctx_shared(q, shared);
		blk_mq_unfreeze_queue(q);
	}
}
Expand All @@ -1913,7 +1912,12 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)

mutex_lock(&set->tag_list_lock);
list_del_init(&q->tag_set_list);
blk_mq_update_tag_set_depth(set);
if (list_is_singular(&set->tag_list)) {
/* just transitioned to unshared */
set->flags &= ~BLK_MQ_F_TAG_SHARED;
/* update existing queue */
blk_mq_update_tag_set_depth(set, false);
}
mutex_unlock(&set->tag_list_lock);
}

Expand All @@ -1923,8 +1927,17 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
q->tag_set = set;

mutex_lock(&set->tag_list_lock);

/* Check to see if we're transitioning to shared (from 1 to 2 queues). */
if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
set->flags |= BLK_MQ_F_TAG_SHARED;
/* update existing queue */
blk_mq_update_tag_set_depth(set, true);
}
if (set->flags & BLK_MQ_F_TAG_SHARED)
queue_set_hctx_shared(q, true);
list_add_tail(&q->tag_set_list, &set->tag_list);
blk_mq_update_tag_set_depth(set);

mutex_unlock(&set->tag_list_lock);
}

Expand Down

0 comments on commit 2404e60

Please sign in to comment.