dm: convert to bioset_init()/mempool_init()
Convert dm to embedded bio sets.

Acked-by: Mike Snitzer <[email protected]>
Signed-off-by: Kent Overstreet <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
koverstreet authored and axboe committed May 30, 2018
1 parent afeee51 commit 6f1c819
Showing 17 changed files with 197 additions and 206 deletions.
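
Every hunk below follows the same conversion pattern: a bio_set or mempool_t that used to live behind a pointer becomes a field embedded directly in its parent structure; the bioset_create()/mempool_create_*() constructors, which return a pointer or NULL, become bioset_init()/mempool_init_*() calls that initialize the embedded field in place and return 0 or an errno; and teardown switches from bioset_free()/mempool_destroy() to bioset_exit()/mempool_exit(). A minimal sketch of the pattern, assuming a hypothetical struct my_target container, cache argument, and MY_MIN_IOS reserve size (none of these names appear in the commit):

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/slab.h>

#define MY_MIN_IOS 64			/* hypothetical reserve size */

struct my_target {
	struct bio_set bs;		/* was: struct bio_set *bs; */
	mempool_t pool;			/* was: mempool_t *pool; */
};

static int my_target_init(struct my_target *t, struct kmem_cache *cache)
{
	int ret;

	/* Initializes the embedded set in place; returns 0 or an errno. */
	ret = bioset_init(&t->bs, MY_MIN_IOS, 0, BIOSET_NEED_BVECS);
	if (ret)
		return ret;

	ret = mempool_init_slab_pool(&t->pool, MY_MIN_IOS, cache);
	if (ret) {
		bioset_exit(&t->bs);
		return ret;
	}

	return 0;
}

static void my_target_exit(struct my_target *t)
{
	/* No NULL checks needed: the _exit() helpers are no-ops on
	 * zero-filled, never-initialized structures. */
	mempool_exit(&t->pool);
	bioset_exit(&t->bs);
}

Embedding the sets removes one pointer indirection on the I/O path and one allocation per containing object, and because the _exit() helpers tolerate a zero-allocated structure that was never initialized, callers can drop NULL guards, as the dm-cache and dm-crypt destructors below do.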
drivers/md/dm-bio-prison-v1.c (7 additions, 6 deletions)

@@ -19,7 +19,7 @@
 
 struct dm_bio_prison {
 	spinlock_t lock;
-	mempool_t *cell_pool;
+	mempool_t cell_pool;
 	struct rb_root cells;
 };
 
@@ -34,14 +34,15 @@ static struct kmem_cache *_cell_cache;
 struct dm_bio_prison *dm_bio_prison_create(void)
 {
 	struct dm_bio_prison *prison = kmalloc(sizeof(*prison), GFP_KERNEL);
+	int ret;
 
 	if (!prison)
 		return NULL;
 
 	spin_lock_init(&prison->lock);
 
-	prison->cell_pool = mempool_create_slab_pool(MIN_CELLS, _cell_cache);
-	if (!prison->cell_pool) {
+	ret = mempool_init_slab_pool(&prison->cell_pool, MIN_CELLS, _cell_cache);
+	if (ret) {
 		kfree(prison);
 		return NULL;
 	}
@@ -54,21 +55,21 @@ EXPORT_SYMBOL_GPL(dm_bio_prison_create);
 
 void dm_bio_prison_destroy(struct dm_bio_prison *prison)
 {
-	mempool_destroy(prison->cell_pool);
+	mempool_exit(&prison->cell_pool);
 	kfree(prison);
 }
 EXPORT_SYMBOL_GPL(dm_bio_prison_destroy);
 
 struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison, gfp_t gfp)
 {
-	return mempool_alloc(prison->cell_pool, gfp);
+	return mempool_alloc(&prison->cell_pool, gfp);
 }
 EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell);
 
 void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
 			     struct dm_bio_prison_cell *cell)
 {
-	mempool_free(cell, prison->cell_pool);
+	mempool_free(cell, &prison->cell_pool);
 }
 EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell);
drivers/md/dm-bio-prison-v2.c (7 additions, 6 deletions)

@@ -21,7 +21,7 @@ struct dm_bio_prison_v2 {
 	struct workqueue_struct *wq;
 
 	spinlock_t lock;
-	mempool_t *cell_pool;
+	mempool_t cell_pool;
 	struct rb_root cells;
 };
 
@@ -36,15 +36,16 @@ static struct kmem_cache *_cell_cache;
 struct dm_bio_prison_v2 *dm_bio_prison_create_v2(struct workqueue_struct *wq)
 {
 	struct dm_bio_prison_v2 *prison = kmalloc(sizeof(*prison), GFP_KERNEL);
+	int ret;
 
 	if (!prison)
 		return NULL;
 
 	prison->wq = wq;
 	spin_lock_init(&prison->lock);
 
-	prison->cell_pool = mempool_create_slab_pool(MIN_CELLS, _cell_cache);
-	if (!prison->cell_pool) {
+	ret = mempool_init_slab_pool(&prison->cell_pool, MIN_CELLS, _cell_cache);
+	if (ret) {
 		kfree(prison);
 		return NULL;
 	}
@@ -57,21 +58,21 @@ EXPORT_SYMBOL_GPL(dm_bio_prison_create_v2);
 
 void dm_bio_prison_destroy_v2(struct dm_bio_prison_v2 *prison)
 {
-	mempool_destroy(prison->cell_pool);
+	mempool_exit(&prison->cell_pool);
 	kfree(prison);
 }
 EXPORT_SYMBOL_GPL(dm_bio_prison_destroy_v2);
 
 struct dm_bio_prison_cell_v2 *dm_bio_prison_alloc_cell_v2(struct dm_bio_prison_v2 *prison, gfp_t gfp)
 {
-	return mempool_alloc(prison->cell_pool, gfp);
+	return mempool_alloc(&prison->cell_pool, gfp);
 }
 EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell_v2);
 
 void dm_bio_prison_free_cell_v2(struct dm_bio_prison_v2 *prison,
 				struct dm_bio_prison_cell_v2 *cell)
 {
-	mempool_free(cell, prison->cell_pool);
+	mempool_free(cell, &prison->cell_pool);
 }
 EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell_v2);
drivers/md/dm-cache-target.c (12 additions, 13 deletions)

@@ -447,9 +447,9 @@
 	struct work_struct migration_worker;
 	struct delayed_work waker;
 	struct dm_bio_prison_v2 *prison;
-	struct bio_set *bs;
+	struct bio_set bs;
 
-	mempool_t *migration_pool;
+	mempool_t migration_pool;
 
 	struct dm_cache_policy *policy;
 	unsigned policy_nr_args;
@@ -550,7 +550,7 @@ static struct dm_cache_migration *alloc_migration(struct cache *cache)
 {
 	struct dm_cache_migration *mg;
 
-	mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
+	mg = mempool_alloc(&cache->migration_pool, GFP_NOWAIT);
 	if (!mg)
 		return NULL;
 
@@ -569,7 +569,7 @@ static void free_migration(struct dm_cache_migration *mg)
 	if (atomic_dec_and_test(&cache->nr_allocated_migrations))
 		wake_up(&cache->migration_wait);
 
-	mempool_free(mg, cache->migration_pool);
+	mempool_free(mg, &cache->migration_pool);
 }
 
 /*----------------------------------------------------------------*/
@@ -924,7 +924,7 @@ static void issue_op(struct bio *bio, void *context)
 static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
 				      dm_oblock_t oblock, dm_cblock_t cblock)
 {
-	struct bio *origin_bio = bio_clone_fast(bio, GFP_NOIO, cache->bs);
+	struct bio *origin_bio = bio_clone_fast(bio, GFP_NOIO, &cache->bs);
 
 	BUG_ON(!origin_bio);
 
@@ -2011,7 +2011,7 @@ static void destroy(struct cache *cache)
 {
 	unsigned i;
 
-	mempool_destroy(cache->migration_pool);
+	mempool_exit(&cache->migration_pool);
 
 	if (cache->prison)
 		dm_bio_prison_destroy_v2(cache->prison);
@@ -2047,8 +2047,7 @@ static void destroy(struct cache *cache)
 		kfree(cache->ctr_args[i]);
 	kfree(cache->ctr_args);
 
-	if (cache->bs)
-		bioset_free(cache->bs);
+	bioset_exit(&cache->bs);
 
 	kfree(cache);
 }
@@ -2498,8 +2497,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 	cache->features = ca->features;
 	if (writethrough_mode(cache)) {
 		/* Create bioset for writethrough bios issued to origin */
-		cache->bs = bioset_create(BIO_POOL_SIZE, 0, 0);
-		if (!cache->bs)
+		r = bioset_init(&cache->bs, BIO_POOL_SIZE, 0, 0);
+		if (r)
 			goto bad;
 	}
 
@@ -2630,9 +2629,9 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 		goto bad;
 	}
 
-	cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE,
-							 migration_cache);
-	if (!cache->migration_pool) {
+	r = mempool_init_slab_pool(&cache->migration_pool, MIGRATION_POOL_SIZE,
+				   migration_cache);
+	if (r) {
 		*error = "Error creating cache's migration mempool";
 		goto bad;
 	}
drivers/md/dm-core.h (2 additions, 2 deletions)

@@ -91,8 +91,8 @@ struct mapped_device {
 	/*
 	 * io objects are allocated from here.
 	 */
-	struct bio_set *io_bs;
-	struct bio_set *bs;
+	struct bio_set io_bs;
+	struct bio_set bs;
 
 	/*
 	 * freeze/thaw support require holding onto a super block
drivers/md/dm-crypt.c (28 additions, 32 deletions)

@@ -143,14 +143,14 @@
 	 * pool for per bio private data, crypto requests,
 	 * encryption requeusts/buffer pages and integrity tags
 	 */
-	mempool_t *req_pool;
-	mempool_t *page_pool;
-	mempool_t *tag_pool;
+	mempool_t req_pool;
+	mempool_t page_pool;
+	mempool_t tag_pool;
 	unsigned tag_pool_max_sectors;
 
 	struct percpu_counter n_allocated_pages;
 
-	struct bio_set *bs;
+	struct bio_set bs;
 	struct mutex bio_alloc_lock;
 
 	struct workqueue_struct *io_queue;
@@ -1245,7 +1245,7 @@ static void crypt_alloc_req_skcipher(struct crypt_config *cc,
 	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
 
 	if (!ctx->r.req)
-		ctx->r.req = mempool_alloc(cc->req_pool, GFP_NOIO);
+		ctx->r.req = mempool_alloc(&cc->req_pool, GFP_NOIO);
 
 	skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);
 
@@ -1262,7 +1262,7 @@ static void crypt_alloc_req_aead(struct crypt_config *cc,
 				 struct convert_context *ctx)
 {
 	if (!ctx->r.req_aead)
-		ctx->r.req_aead = mempool_alloc(cc->req_pool, GFP_NOIO);
+		ctx->r.req_aead = mempool_alloc(&cc->req_pool, GFP_NOIO);
 
 	aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);
 
@@ -1290,7 +1290,7 @@ static void crypt_free_req_skcipher(struct crypt_config *cc,
 	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
 
 	if ((struct skcipher_request *)(io + 1) != req)
-		mempool_free(req, cc->req_pool);
+		mempool_free(req, &cc->req_pool);
 }
 
 static void crypt_free_req_aead(struct crypt_config *cc,
@@ -1299,7 +1299,7 @@ static void crypt_free_req_aead(struct crypt_config *cc,
 	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
 
 	if ((struct aead_request *)(io + 1) != req)
-		mempool_free(req, cc->req_pool);
+		mempool_free(req, &cc->req_pool);
 }
 
 static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
@@ -1409,7 +1409,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 	if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
 		mutex_lock(&cc->bio_alloc_lock);
 
-	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
+	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, &cc->bs);
 	if (!clone)
 		goto out;
 
@@ -1418,7 +1418,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 	remaining_size = size;
 
 	for (i = 0; i < nr_iovecs; i++) {
-		page = mempool_alloc(cc->page_pool, gfp_mask);
+		page = mempool_alloc(&cc->page_pool, gfp_mask);
 		if (!page) {
 			crypt_free_buffer_pages(cc, clone);
 			bio_put(clone);
@@ -1453,7 +1453,7 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
 
 	bio_for_each_segment_all(bv, clone, i) {
 		BUG_ON(!bv->bv_page);
-		mempool_free(bv->bv_page, cc->page_pool);
+		mempool_free(bv->bv_page, &cc->page_pool);
 	}
 }
 
@@ -1492,7 +1492,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 		crypt_free_req(cc, io->ctx.r.req, base_bio);
 
 	if (unlikely(io->integrity_metadata_from_pool))
-		mempool_free(io->integrity_metadata, io->cc->tag_pool);
+		mempool_free(io->integrity_metadata, &io->cc->tag_pool);
 	else
 		kfree(io->integrity_metadata);
 
@@ -1565,7 +1565,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
 	 * biovecs we don't need to worry about the block layer
 	 * modifying the biovec array; so leverage bio_clone_fast().
 	 */
-	clone = bio_clone_fast(io->base_bio, gfp, cc->bs);
+	clone = bio_clone_fast(io->base_bio, gfp, &cc->bs);
 	if (!clone)
 		return 1;
 
@@ -2219,17 +2219,16 @@
 
 	crypt_free_tfms(cc);
 
-	if (cc->bs)
-		bioset_free(cc->bs);
+	bioset_exit(&cc->bs);
 
-	mempool_destroy(cc->page_pool);
-	mempool_destroy(cc->req_pool);
-	mempool_destroy(cc->tag_pool);
-
-	if (cc->page_pool)
+	if (mempool_initialized(&cc->page_pool))
 		WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0);
 	percpu_counter_destroy(&cc->n_allocated_pages);
 
+	mempool_exit(&cc->page_pool);
+	mempool_exit(&cc->req_pool);
+	mempool_exit(&cc->tag_pool);
+
 	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
 		cc->iv_gen_ops->dtr(cc);
 
@@ -2743,17 +2742,15 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		iv_size_padding = align_mask;
 	}
 
-	ret = -ENOMEM;
-
 	/* ...| IV + padding | original IV | original sec. number | bio tag offset | */
 	additional_req_size = sizeof(struct dm_crypt_request) +
 		iv_size_padding + cc->iv_size +
 		cc->iv_size +
 		sizeof(uint64_t) +
 		sizeof(unsigned int);
 
-	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start + additional_req_size);
-	if (!cc->req_pool) {
+	ret = mempool_init_kmalloc_pool(&cc->req_pool, MIN_IOS, cc->dmreq_start + additional_req_size);
+	if (ret) {
 		ti->error = "Cannot allocate crypt request mempool";
 		goto bad;
 	}
@@ -2762,14 +2759,14 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		      ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
 		      ARCH_KMALLOC_MINALIGN);
 
-	cc->page_pool = mempool_create(BIO_MAX_PAGES, crypt_page_alloc, crypt_page_free, cc);
-	if (!cc->page_pool) {
+	ret = mempool_init(&cc->page_pool, BIO_MAX_PAGES, crypt_page_alloc, crypt_page_free, cc);
+	if (ret) {
 		ti->error = "Cannot allocate page mempool";
 		goto bad;
 	}
 
-	cc->bs = bioset_create(MIN_IOS, 0, BIOSET_NEED_BVECS);
-	if (!cc->bs) {
+	ret = bioset_init(&cc->bs, MIN_IOS, 0, BIOSET_NEED_BVECS);
+	if (ret) {
 		ti->error = "Cannot allocate crypt bioset";
 		goto bad;
 	}
@@ -2806,11 +2803,10 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		if (!cc->tag_pool_max_sectors)
 			cc->tag_pool_max_sectors = 1;
 
-		cc->tag_pool = mempool_create_kmalloc_pool(MIN_IOS,
+		ret = mempool_init_kmalloc_pool(&cc->tag_pool, MIN_IOS,
 			cc->tag_pool_max_sectors * cc->on_disk_tag_size);
-		if (!cc->tag_pool) {
+		if (ret) {
 			ti->error = "Cannot allocate integrity tags mempool";
-			ret = -ENOMEM;
 			goto bad;
 		}
 
@@ -2903,7 +2899,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
 				GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
 			if (bio_sectors(bio) > cc->tag_pool_max_sectors)
 				dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
-			io->integrity_metadata = mempool_alloc(cc->tag_pool, GFP_NOIO);
+			io->integrity_metadata = mempool_alloc(&cc->tag_pool, GFP_NOIO);
 			io->integrity_metadata_from_pool = true;
 		}
 	}
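dm-crypt's page pool above also shows the callback form of the new API: mempool_init() takes the same alloc/free callbacks and pool_data pointer that mempool_create() did, only targeting an embedded pool and returning an errno. A standalone sketch of that form, using hypothetical demo_* names (the real crypt_page_alloc()/crypt_page_free() additionally do per-client page accounting via pool_data, omitted here):

#include <linux/bio.h>		/* BIO_MAX_PAGES */
#include <linux/gfp.h>
#include <linux/mempool.h>

/* pool_data is handed back to both callbacks; dm-crypt passes its
 * crypt_config here, this sketch passes NULL. */
static void *demo_page_alloc(gfp_t gfp_mask, void *pool_data)
{
	return (void *)__get_free_page(gfp_mask);
}

static void demo_page_free(void *element, void *pool_data)
{
	free_page((unsigned long)element);
}

static int demo_pool_init(mempool_t *pool)
{
	/* BIO_MAX_PAGES elements are pre-reserved, as in the hunk above. */
	return mempool_init(pool, BIO_MAX_PAGES, demo_page_alloc,
			    demo_page_free, NULL);
}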
[Diff truncated: the remaining 12 changed files are not shown.]
