Merge tag 'for-5.11/dm-fixes-1' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

 - Fix DM-raid's raid1 discard limits so discards work.

 - Select missing Kconfig dependencies for DM integrity and zoned
   targets.

 - Four fixes for DM crypt target's support to optionally bypass kcryptd
   workqueues (an illustrative dmsetup sketch follows this list).

 - Fix DM snapshot merge support's missing data flushes before committing
   metadata.

 - Fix DM integrity data device flushing when external metadata is used.

 - Fix DM integrity's maximum number of supported constructor arguments
   that a user can request when creating an integrity device.

 - Eliminate DM core ioctl logging noise when an ioctl is issued without
   required CAP_SYS_RAWIO permission.
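
For reference, the kcryptd workqueue bypass that the four dm-crypt fixes above harden is opted into per crypt table line via the optional no_read_workqueue and no_write_workqueue parameters. A minimal sketch of such a table, assuming a placeholder device (/dev/sdb1) and key, neither of which is taken from this commit:

  # Illustrative only: /dev/sdb1 and <hex-key> are placeholders.
  # The trailing "2 no_read_workqueue no_write_workqueue" passes two
  # optional parameters that ask dm-crypt to perform encryption and
  # decryption inline (in softirq/tasklet context where possible)
  # instead of deferring all work to the kcryptd workqueues.
  SIZE=$(blockdev --getsz /dev/sdb1)
  dmsetup create crypt-inline --table \
    "0 $SIZE crypt aes-xts-plain64 <hex-key> 0 /dev/sdb1 0 \
     2 no_read_workqueue no_write_workqueue"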

* tag 'for-5.11/dm-fixes-1' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm crypt: defer decryption to a tasklet if interrupts disabled
  dm integrity: fix the maximum number of arguments
  dm crypt: do not call bio_endio() from the dm-crypt tasklet
  dm integrity: fix flush with external metadata device
  dm: eliminate potential source of excessive kernel log noise
  dm snapshot: flush merged data before committing metadata
  dm crypt: use GFP_ATOMIC when allocating crypto requests from softirq
  dm crypt: do not wait for backlogged crypto request completion in softirq
  dm zoned: select CONFIG_CRC32
  dm integrity: select CRYPTO_SKCIPHER
  dm raid: fix discard limits for raid1
torvalds committed Jan 16, 2021
2 parents b45e2da + c87a95d commit 1d94330
Showing 8 changed files with 239 additions and 34 deletions.
2 changes: 2 additions & 0 deletions drivers/md/Kconfig
@@ -605,6 +605,7 @@ config DM_INTEGRITY
select BLK_DEV_INTEGRITY
select DM_BUFIO
select CRYPTO
select CRYPTO_SKCIPHER
select ASYNC_XOR
help
This device-mapper target emulates a block device that has
@@ -622,6 +623,7 @@ config DM_ZONED
tristate "Drive-managed zoned block device target support"
depends on BLK_DEV_DM
depends on BLK_DEV_ZONED
select CRC32
help
This device-mapper target takes a host-managed or host-aware zoned
block device and exposes most of its capacity as a regular block
6 changes: 6 additions & 0 deletions drivers/md/dm-bufio.c
@@ -1534,6 +1534,12 @@ sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);

struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
{
return c->dm_io;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);

sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
return b->block;
170 changes: 152 additions & 18 deletions drivers/md/dm-crypt.c
@@ -1454,13 +1454,16 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
static void kcryptd_async_done(struct crypto_async_request *async_req,
int error);

static void crypt_alloc_req_skcipher(struct crypt_config *cc,
static int crypt_alloc_req_skcipher(struct crypt_config *cc,
struct convert_context *ctx)
{
unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

if (!ctx->r.req)
ctx->r.req = mempool_alloc(&cc->req_pool, GFP_NOIO);
if (!ctx->r.req) {
ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
if (!ctx->r.req)
return -ENOMEM;
}

skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);

@@ -1471,13 +1474,18 @@ static void crypt_alloc_req_skcipher(struct crypt_config *cc,
skcipher_request_set_callback(ctx->r.req,
CRYPTO_TFM_REQ_MAY_BACKLOG,
kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));

return 0;
}

static void crypt_alloc_req_aead(struct crypt_config *cc,
static int crypt_alloc_req_aead(struct crypt_config *cc,
struct convert_context *ctx)
{
if (!ctx->r.req_aead)
ctx->r.req_aead = mempool_alloc(&cc->req_pool, GFP_NOIO);
if (!ctx->r.req) {
ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
if (!ctx->r.req)
return -ENOMEM;
}

aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);

@@ -1488,15 +1496,17 @@ static void crypt_alloc_req_aead(struct crypt_config *cc,
aead_request_set_callback(ctx->r.req_aead,
CRYPTO_TFM_REQ_MAY_BACKLOG,
kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));

return 0;
}

static void crypt_alloc_req(struct crypt_config *cc,
static int crypt_alloc_req(struct crypt_config *cc,
struct convert_context *ctx)
{
if (crypt_integrity_aead(cc))
crypt_alloc_req_aead(cc, ctx);
return crypt_alloc_req_aead(cc, ctx);
else
crypt_alloc_req_skcipher(cc, ctx);
return crypt_alloc_req_skcipher(cc, ctx);
}

static void crypt_free_req_skcipher(struct crypt_config *cc,
@@ -1529,17 +1539,28 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_
* Encrypt / decrypt data from one bio to another one (can be the same one)
*/
static blk_status_t crypt_convert(struct crypt_config *cc,
struct convert_context *ctx, bool atomic)
struct convert_context *ctx, bool atomic, bool reset_pending)
{
unsigned int tag_offset = 0;
unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
int r;

atomic_set(&ctx->cc_pending, 1);
/*
* if reset_pending is set we are dealing with the bio for the first time,
* else we're continuing to work on the previous bio, so don't mess with
* the cc_pending counter
*/
if (reset_pending)
atomic_set(&ctx->cc_pending, 1);

while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {

crypt_alloc_req(cc, ctx);
r = crypt_alloc_req(cc, ctx);
if (r) {
complete(&ctx->restart);
return BLK_STS_DEV_RESOURCE;
}

atomic_inc(&ctx->cc_pending);

if (crypt_integrity_aead(cc))
@@ -1553,7 +1574,25 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
* but the driver request queue is full, let's wait.
*/
case -EBUSY:
wait_for_completion(&ctx->restart);
if (in_interrupt()) {
if (try_wait_for_completion(&ctx->restart)) {
/*
* we don't have to block to wait for completion,
* so proceed
*/
} else {
/*
* we can't wait for completion without blocking
* exit and continue processing in a workqueue
*/
ctx->r.req = NULL;
ctx->cc_sector += sector_step;
tag_offset++;
return BLK_STS_DEV_RESOURCE;
}
} else {
wait_for_completion(&ctx->restart);
}
reinit_completion(&ctx->restart);
fallthrough;
/*
@@ -1691,6 +1730,12 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
atomic_inc(&io->io_pending);
}

static void kcryptd_io_bio_endio(struct work_struct *work)
{
struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
bio_endio(io->base_bio);
}

/*
* One of the bios was finished. Check for completion of
* the whole request and correctly clean up the buffer.
@@ -1713,7 +1758,23 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
kfree(io->integrity_metadata);

base_bio->bi_status = error;
bio_endio(base_bio);

/*
* If we are running this function from our tasklet,
* we can't call bio_endio() here, because it will call
* clone_endio() from dm.c, which in turn will
* free the current struct dm_crypt_io structure with
* our tasklet. In this case we need to delay bio_endio()
* execution to after the tasklet is done and dequeued.
*/
if (tasklet_trylock(&io->tasklet)) {
tasklet_unlock(&io->tasklet);
bio_endio(base_bio);
return;
}

INIT_WORK(&io->work, kcryptd_io_bio_endio);
queue_work(cc->io_queue, &io->work);
}

/*
@@ -1945,6 +2006,37 @@ static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
}
}

static void kcryptd_crypt_write_continue(struct work_struct *work)
{
struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
struct crypt_config *cc = io->cc;
struct convert_context *ctx = &io->ctx;
int crypt_finished;
sector_t sector = io->sector;
blk_status_t r;

wait_for_completion(&ctx->restart);
reinit_completion(&ctx->restart);

r = crypt_convert(cc, &io->ctx, true, false);
if (r)
io->error = r;
crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
/* Wait for completion signaled by kcryptd_async_done() */
wait_for_completion(&ctx->restart);
crypt_finished = 1;
}

/* Encryption was already finished, submit io now */
if (crypt_finished) {
kcryptd_crypt_write_io_submit(io, 0);
io->sector = sector;
}

crypt_dec_pending(io);
}

static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
struct crypt_config *cc = io->cc;
@@ -1973,7 +2065,17 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)

crypt_inc_pending(io);
r = crypt_convert(cc, ctx,
test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags));
test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
/*
* Crypto API backlogged the request, because its queue was full
* and we're in softirq context, so continue from a workqueue
* (TODO: is it actually possible to be in softirq in the write path?)
*/
if (r == BLK_STS_DEV_RESOURCE) {
INIT_WORK(&io->work, kcryptd_crypt_write_continue);
queue_work(cc->crypt_queue, &io->work);
return;
}
if (r)
io->error = r;
crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
@@ -1998,6 +2100,25 @@ static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
crypt_dec_pending(io);
}

static void kcryptd_crypt_read_continue(struct work_struct *work)
{
struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
struct crypt_config *cc = io->cc;
blk_status_t r;

wait_for_completion(&io->ctx.restart);
reinit_completion(&io->ctx.restart);

r = crypt_convert(cc, &io->ctx, true, false);
if (r)
io->error = r;

if (atomic_dec_and_test(&io->ctx.cc_pending))
kcryptd_crypt_read_done(io);

crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
struct crypt_config *cc = io->cc;
@@ -2009,7 +2130,16 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
io->sector);

r = crypt_convert(cc, &io->ctx,
test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags));
test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
/*
* Crypto API backlogged the request, because its queue was full
* and we're in softirq context, so continue from a workqueue
*/
if (r == BLK_STS_DEV_RESOURCE) {
INIT_WORK(&io->work, kcryptd_crypt_read_continue);
queue_work(cc->crypt_queue, &io->work);
return;
}
if (r)
io->error = r;

@@ -2091,8 +2221,12 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)

if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) ||
(bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) {
if (in_irq()) {
/* Crypto API's "skcipher_walk_first() refuses to work in hard IRQ context */
/*
* in_irq(): Crypto API's skcipher_walk_first() refuses to work in hard IRQ context.
* irqs_disabled(): the kernel may run some IO completion from the idle thread, but
* it is being executed with irqs disabled.
*/
if (in_irq() || irqs_disabled()) {
tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
tasklet_schedule(&io->tasklet);
return;
