block: Reduce the amount of memory used for tag sets
Instead of allocating an array of size nr_cpu_ids for set->tags, allocate
an array of size set->nr_hw_queues. This reduces the memory footprint of
the dynamic hardware context support introduced by commit 868f2f0
("blk-mq: dynamic h/w context count").
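
For a sense of scale (illustrative numbers, not taken from this commit):
set->tags holds one pointer per entry, so on a host where nr_cpu_ids is 256
and a device exposes 4 hardware queues, the array shrinks from 256 to 4
pointers per tag set. A minimal userspace sketch of that arithmetic,
assuming 8-byte pointers:

    #include <stdio.h>

    int main(void)
    {
        unsigned int nr_cpu_ids = 256;  /* hypothetical host */
        unsigned int nr_hw_queues = 4;  /* hypothetical device */

        /* Old scheme: one slot per possible CPU. */
        printf("before: %zu bytes\n", nr_cpu_ids * sizeof(void *));
        /* New scheme: one slot per hardware queue. */
        printf("after:  %zu bytes\n", nr_hw_queues * sizeof(void *));
        return 0;
    }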

Reallocating tag sets from inside __blk_mq_update_nr_hw_queues() is safe
because:
- All request queues that share the tag sets are frozen before the tag sets
  are reallocated.
- blk_mq_queue_tag_busy_iter() holds q->q_usage_counter while active and
  hence is serialized against __blk_mq_update_nr_hw_queues().

Cc: Keith Busch <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Ming Lei <[email protected]>
Cc: Hannes Reinecke <[email protected]>
Cc: Johannes Thumshirn <[email protected]>
Signed-off-by: Bart Van Assche <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
bvanassche authored and axboe committed Oct 25, 2019
1 parent ac0d6b9 commit f7e76db
1 changed file with 30 additions and 17 deletions: block/blk-mq.c

--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2833,19 +2833,6 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 	mutex_unlock(&q->sysfs_lock);
 }
 
-/*
- * Maximum number of hardware queues we support. For single sets, we'll never
- * have more than the CPUs (software queues). For multiple sets, the tag_set
- * user may have set ->nr_hw_queues larger.
- */
-static unsigned int nr_hw_queues(struct blk_mq_tag_set *set)
-{
-	if (set->nr_maps == 1)
-		return nr_cpu_ids;
-
-	return max(set->nr_hw_queues, nr_cpu_ids);
-}
-
 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 						  struct request_queue *q,
 						  bool elevator_init)
@@ -3012,6 +2999,29 @@ static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
 	}
 }
 
+static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
+				       int cur_nr_hw_queues, int new_nr_hw_queues)
+{
+	struct blk_mq_tags **new_tags;
+
+	if (cur_nr_hw_queues >= new_nr_hw_queues)
+		return 0;
+
+	new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
+				GFP_KERNEL, set->numa_node);
+	if (!new_tags)
+		return -ENOMEM;
+
+	if (set->tags)
+		memcpy(new_tags, set->tags, cur_nr_hw_queues *
+		       sizeof(*set->tags));
+	kfree(set->tags);
+	set->tags = new_tags;
+	set->nr_hw_queues = new_nr_hw_queues;
+
+	return 0;
+}
+
 /*
  * Alloc a tag set to be associated with one or more request queues.
  * May fail with EINVAL for various error conditions. May adjust the
@@ -3065,9 +3075,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 	if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
 		set->nr_hw_queues = nr_cpu_ids;
 
-	set->tags = kcalloc_node(nr_hw_queues(set), sizeof(struct blk_mq_tags *),
-				 GFP_KERNEL, set->numa_node);
-	if (!set->tags)
+	if (blk_mq_realloc_tag_set_tags(set, 0, set->nr_hw_queues) < 0)
 		return -ENOMEM;
 
 	ret = -ENOMEM;
@@ -3108,7 +3116,7 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
 {
 	int i, j;
 
-	for (i = 0; i < nr_hw_queues(set); i++)
+	for (i = 0; i < set->nr_hw_queues; i++)
 		blk_mq_free_map_and_requests(set, i);
 
 	for (j = 0; j < set->nr_maps; j++) {
@@ -3266,6 +3274,10 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 		blk_mq_sysfs_unregister(q);
 	}
 
+	if (blk_mq_realloc_tag_set_tags(set, set->nr_hw_queues, nr_hw_queues) <
+	    0)
+		goto reregister;
+
 	prev_nr_hw_queues = set->nr_hw_queues;
 	set->nr_hw_queues = nr_hw_queues;
 	blk_mq_update_queue_map(set);
@@ -3282,6 +3294,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 		blk_mq_map_swqueue(q);
 	}
 
+reregister:
 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
 		blk_mq_sysfs_register(q);
 		blk_mq_debugfs_register_hctxs(q);
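
Outside the kernel tree, the grow-only reallocation pattern that
blk_mq_realloc_tag_set_tags() implements looks like the sketch below: a
userspace approximation with calloc()/free() standing in for
kcalloc_node()/kfree(), and a plain struct standing in for
struct blk_mq_tag_set. The names here are illustrative, not kernel API.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct tag_set {
        void **tags;               /* one pointer per hardware queue */
        unsigned int nr_hw_queues;
    };

    /*
     * Grow tags to new_nr entries; never shrink. Returns 0 on success,
     * -1 if allocation fails, leaving the old array intact.
     */
    static int realloc_tags(struct tag_set *set, unsigned int new_nr)
    {
        void **new_tags;

        if (set->nr_hw_queues >= new_nr)
            return 0;   /* already large enough */

        new_tags = calloc(new_nr, sizeof(*new_tags));
        if (!new_tags)
            return -1;

        /* Carry over the existing per-queue tag pointers. */
        if (set->tags)
            memcpy(new_tags, set->tags,
                   set->nr_hw_queues * sizeof(*set->tags));
        free(set->tags);
        set->tags = new_tags;
        set->nr_hw_queues = new_nr;
        return 0;
    }

    int main(void)
    {
        struct tag_set set = { 0 };

        realloc_tags(&set, 4);  /* initial allocation, as in blk_mq_alloc_tag_set() */
        realloc_tags(&set, 2);  /* no-op: the helper never shrinks */
        realloc_tags(&set, 8);  /* grows, preserving the first 4 slots */
        printf("nr_hw_queues = %u\n", set.nr_hw_queues);  /* prints 8 */
        free(set.tags);
        return 0;
    }

Because the helper refuses to shrink and leaves the set untouched on
failure, a caller such as __blk_mq_update_nr_hw_queues() can bail out (the
goto reregister above) without ever observing a half-updated tag set.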