Skip to content

Commit

Permalink
dm: always defer request allocation to the owner of the request_queue
Browse files Browse the repository at this point in the history
DM already calls blk_mq_alloc_request on the request_queue of the
underlying device if it is a blk-mq device.  But now that we allow drivers
to allocate additional data and initialize it ahead of time, we need to do
the same for all drivers.  Doing so and using the new cmd_size
infrastructure in the block layer greatly simplifies the dm-rq and mpath
code, and should also make arbitrary combinations of SQ and MQ devices
with SQ or MQ device mapper tables easily possible as a further step.

Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Hannes Reinecke <[email protected]>
Reviewed-by: Mike Snitzer <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
  • Loading branch information
Christoph Hellwig authored and axboe committed Jan 27, 2017
1 parent 4bf5843 commit eb8db83
Show file tree
Hide file tree
Showing 8 changed files with 85 additions and 344 deletions.
1 change: 0 additions & 1 deletion drivers/md/dm-core.h
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,6 @@ struct mapped_device {
* io objects are allocated from here.
*/
mempool_t *io_pool;
mempool_t *rq_pool;

struct bio_set *bs;

Expand Down
132 changes: 20 additions & 112 deletions drivers/md/dm-mpath.c
Original file line number Diff line number Diff line change
Expand Up @@ -92,12 +92,6 @@ struct multipath {

unsigned queue_mode;

/*
* We must use a mempool of dm_mpath_io structs so that we
* can resubmit bios on error.
*/
mempool_t *mpio_pool;

struct mutex work_mutex;
struct work_struct trigger_event;

Expand All @@ -115,8 +109,6 @@ struct dm_mpath_io {

typedef int (*action_fn) (struct pgpath *pgpath);

static struct kmem_cache *_mpio_cache;

static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);
Expand Down Expand Up @@ -209,7 +201,6 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
init_waitqueue_head(&m->pg_init_wait);
mutex_init(&m->work_mutex);

m->mpio_pool = NULL;
m->queue_mode = DM_TYPE_NONE;

m->ti = ti;
Expand All @@ -229,16 +220,7 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
else
m->queue_mode = DM_TYPE_REQUEST_BASED;
}

if (m->queue_mode == DM_TYPE_REQUEST_BASED) {
unsigned min_ios = dm_get_reserved_rq_based_ios();

m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
if (!m->mpio_pool)
return -ENOMEM;
}
else if (m->queue_mode == DM_TYPE_BIO_BASED) {
} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
INIT_WORK(&m->process_queued_bios, process_queued_bios);
/*
* bio-based doesn't support any direct scsi_dh management;
Expand All @@ -263,7 +245,6 @@ static void free_multipath(struct multipath *m)

kfree(m->hw_handler_name);
kfree(m->hw_handler_params);
mempool_destroy(m->mpio_pool);
kfree(m);
}

Expand All @@ -272,38 +253,6 @@ static struct dm_mpath_io *get_mpio(union map_info *info)
return info->ptr;
}

static struct dm_mpath_io *set_mpio(struct multipath *m, union map_info *info)
{
struct dm_mpath_io *mpio;

if (!m->mpio_pool) {
/* Use blk-mq pdu memory requested via per_io_data_size */
mpio = get_mpio(info);
memset(mpio, 0, sizeof(*mpio));
return mpio;
}

mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
if (!mpio)
return NULL;

memset(mpio, 0, sizeof(*mpio));
info->ptr = mpio;

return mpio;
}

static void clear_request_fn_mpio(struct multipath *m, union map_info *info)
{
/* Only needed for non blk-mq (.request_fn) multipath */
if (m->mpio_pool) {
struct dm_mpath_io *mpio = info->ptr;

info->ptr = NULL;
mempool_free(mpio, m->mpio_pool);
}
}

static size_t multipath_per_bio_data_size(void)
{
return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
Expand Down Expand Up @@ -530,16 +479,17 @@ static bool must_push_back_bio(struct multipath *m)
/*
* Map cloned requests (request-based multipath)
*/
static int __multipath_map(struct dm_target *ti, struct request *clone,
union map_info *map_context,
struct request *rq, struct request **__clone)
static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
union map_info *map_context,
struct request **__clone)
{
struct multipath *m = ti->private;
int r = DM_MAPIO_REQUEUE;
size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq);
size_t nr_bytes = blk_rq_bytes(rq);
struct pgpath *pgpath;
struct block_device *bdev;
struct dm_mpath_io *mpio;
struct dm_mpath_io *mpio = get_mpio(map_context);
struct request *clone;

/* Do we need to select a new pgpath? */
pgpath = lockless_dereference(m->current_pgpath);
Expand All @@ -556,42 +506,23 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
return r;
}

mpio = set_mpio(m, map_context);
if (!mpio)
/* ENOMEM, requeue */
return r;

memset(mpio, 0, sizeof(*mpio));
mpio->pgpath = pgpath;
mpio->nr_bytes = nr_bytes;

bdev = pgpath->path.dev->bdev;

if (clone) {
/*
* Old request-based interface: allocated clone is passed in.
* Used by: .request_fn stacked on .request_fn path(s).
*/
clone->q = bdev_get_queue(bdev);
clone->rq_disk = bdev->bd_disk;
clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
} else {
/*
* blk-mq request-based interface; used by both:
* .request_fn stacked on blk-mq path(s) and
* blk-mq stacked on blk-mq path(s).
*/
clone = blk_mq_alloc_request(bdev_get_queue(bdev),
rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
if (IS_ERR(clone)) {
/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
clear_request_fn_mpio(m, map_context);
return r;
}
clone->bio = clone->biotail = NULL;
clone->rq_disk = bdev->bd_disk;
clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
*__clone = clone;
clone = blk_get_request(bdev_get_queue(bdev),
rq->cmd_flags | REQ_NOMERGE,
GFP_ATOMIC);
if (IS_ERR(clone)) {
/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
return r;
}
clone->bio = clone->biotail = NULL;
clone->rq_disk = bdev->bd_disk;
clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
*__clone = clone;

if (pgpath->pg->ps.type->start_io)
pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
Expand All @@ -600,22 +531,9 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
return DM_MAPIO_REMAPPED;
}

static int multipath_map(struct dm_target *ti, struct request *clone,
union map_info *map_context)
{
return __multipath_map(ti, clone, map_context, NULL, NULL);
}

static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
union map_info *map_context,
struct request **clone)
{
return __multipath_map(ti, NULL, map_context, rq, clone);
}

static void multipath_release_clone(struct request *clone)
{
blk_mq_free_request(clone);
blk_put_request(clone);
}

/*
Expand Down Expand Up @@ -1187,7 +1105,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->num_write_same_bios = 1;
if (m->queue_mode == DM_TYPE_BIO_BASED)
ti->per_io_data_size = multipath_per_bio_data_size();
else if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
else
ti->per_io_data_size = sizeof(struct dm_mpath_io);

return 0;
Expand Down Expand Up @@ -1610,7 +1528,6 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
if (ps->type->end_io)
ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
}
clear_request_fn_mpio(m, map_context);

return r;
}
Expand Down Expand Up @@ -2060,7 +1977,6 @@ static struct target_type multipath_target = {
.module = THIS_MODULE,
.ctr = multipath_ctr,
.dtr = multipath_dtr,
.map_rq = multipath_map,
.clone_and_map_rq = multipath_clone_and_map,
.release_clone_rq = multipath_release_clone,
.rq_end_io = multipath_end_io,
Expand All @@ -2080,11 +1996,6 @@ static int __init dm_multipath_init(void)
{
int r;

/* allocate a slab for the dm_mpath_ios */
_mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
if (!_mpio_cache)
return -ENOMEM;

r = dm_register_target(&multipath_target);
if (r < 0) {
DMERR("request-based register failed %d", r);
Expand Down Expand Up @@ -2120,8 +2031,6 @@ static int __init dm_multipath_init(void)
bad_alloc_kmultipathd:
dm_unregister_target(&multipath_target);
bad_register_target:
kmem_cache_destroy(_mpio_cache);

return r;
}

Expand All @@ -2131,7 +2040,6 @@ static void __exit dm_multipath_exit(void)
destroy_workqueue(kmultipathd);

dm_unregister_target(&multipath_target);
kmem_cache_destroy(_mpio_cache);
}

module_init(dm_multipath_init);
Expand Down
Loading

0 comments on commit eb8db83

Please sign in to comment.