Merge tag 'for-5.3/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper updates from Mike Snitzer:

 - Add encrypted byte-offset initialization vector (eboiv) to DM crypt.

 - Add optional discard features to DM snapshot which allow freeing
   space from a DM device whose free space was exhausted.

 - Various small improvements to use struct_size() and kzalloc(); see
   the sketch after this list.

 - Fix to check if DM thin metadata is in fail_io mode before attempting
   to update the superblock to set the needs_check flag. Otherwise the
   DM thin-pool can hang.

 - Fix DM bufio shrinker's potential for ABBA recursion deadlock with DM
   thin provisioning on a loop device.
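
For illustration, here is a minimal sketch of the struct_size() pattern those cleanups move to. The structure and function names (example_cfg, example_alloc) are made up for this sketch and are not kernel code; the real conversions below touch dm-crypt's crypt_config and dm-log-writes' pending_block.

#include <linux/types.h>	/* u8 */
#include <linux/overflow.h>	/* struct_size() */
#include <linux/slab.h>		/* kzalloc(), GFP_KERNEL */

struct example_cfg {
	unsigned int key_size;
	u8 key[];		/* flexible array member */
};

static struct example_cfg *example_alloc(unsigned int key_size)
{
	struct example_cfg *cfg;

	/*
	 * Same size as kzalloc(sizeof(*cfg) + key_size * sizeof(u8), ...),
	 * but struct_size() saturates to SIZE_MAX on overflow so an
	 * undersized allocation cannot slip through, and kzalloc() returns
	 * zeroed memory so no separate memset() is needed (the
	 * kmalloc()+memset() -> kzalloc() change in dm-integrity follows
	 * the same idea).
	 */
	cfg = kzalloc(struct_size(cfg, key, key_size), GFP_KERNEL);
	if (!cfg)
		return NULL;

	cfg->key_size = key_size;
	return cfg;
}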

* tag 'for-5.3/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm bufio: fix deadlock with loop device
  dm snapshot: add optional discard support features
  dm crypt: implement eboiv - encrypted byte-offset initialization vector
  dm crypt: remove obsolete comment about plumb IV
  dm crypt: wipe private IV struct after key invalid flag is set
  dm integrity: use kzalloc() instead of kmalloc() + memset()
  dm: update stale comment in end_clone_bio()
  dm log writes: fix incorrect comment about the logged sequence example
  dm log writes: use struct_size() to calculate size of pending_block
  dm crypt: use struct_size() when allocating encryption context
  dm integrity: always set version on superblock update
  dm thin metadata: check if in fail_io mode when setting needs_check
torvalds committed Jul 13, 2019
2 parents 92adeb6 + bd293d0 commit 2260840
Showing 8 changed files with 285 additions and 44 deletions.
18 changes: 17 additions & 1 deletion Documentation/device-mapper/snapshot.rst
@@ -32,6 +32,7 @@ its visible content unchanged, at least until the <COW device> fills up.


- snapshot <origin> <COW device> <persistent?> <chunksize>
[<# feature args> [<arg>]*]

A snapshot of the <origin> block device is created. Changed chunks of
<chunksize> sectors will be stored on the <COW device>. Writes will
@@ -54,8 +55,23 @@ When loading or unloading the snapshot target, the corresponding
snapshot-origin or snapshot-merge target must be suspended. A failure to
suspend the origin target could result in data corruption.

Optional features:

discard_zeroes_cow - a discard issued to the snapshot device that
maps to entire chunks will zero the corresponding exception(s) in
the snapshot's exception store.

discard_passdown_origin - a discard to the snapshot device is passed
down to the snapshot-origin's underlying device. This doesn't cause
copy-out to the snapshot exception store because the snapshot-origin
target is bypassed.

The discard_passdown_origin feature depends on the discard_zeroes_cow
feature being enabled.


- snapshot-merge <origin> <COW device> <persistent> <chunksize>
  [<# feature args> [<arg>]*]

takes the same table arguments as the snapshot target except it only
works with persistent snapshots. This target assumes the role of the
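
For reference, a hypothetical dmsetup table line using the new snapshot feature arguments could look like the following. The device names, length and chunk size are made up, and the count-then-args convention follows the <# feature args> [<arg>]* form documented above:

0 8388608 snapshot /dev/vg0/base /dev/vg0/cow P 8 2 discard_zeroes_cow discard_passdown_origin

Requesting only the first feature would be "1 discard_zeroes_cow"; per the documentation above, discard_passdown_origin cannot be enabled without discard_zeroes_cow.
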
4 changes: 1 addition & 3 deletions drivers/md/dm-bufio.c
@@ -1599,9 +1599,7 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
unsigned long freed;

c = container_of(shrink, struct dm_bufio_client, shrinker);
if (sc->gfp_mask & __GFP_FS)
dm_bufio_lock(c);
else if (!dm_bufio_trylock(c))
if (!dm_bufio_trylock(c))
return SHRINK_STOP;

freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
101 changes: 90 additions & 11 deletions drivers/md/dm-crypt.c
@@ -120,6 +120,10 @@ struct iv_tcw_private {
u8 *whitening;
};

struct iv_eboiv_private {
struct crypto_cipher *tfm;
};

/*
* Crypt: maps a linear range of a block device
* and encrypts / decrypts at the same time.
@@ -159,6 +163,7 @@ struct crypt_config {
struct iv_benbi_private benbi;
struct iv_lmk_private lmk;
struct iv_tcw_private tcw;
struct iv_eboiv_private eboiv;
} iv_gen_private;
u64 iv_offset;
unsigned int iv_size;
@@ -291,8 +296,9 @@ static struct crypto_aead *any_tfm_aead(struct crypt_config *cc)
* Note that this encryption scheme is vulnerable to watermarking attacks
* and should be used for old compatible containers access only.
*
* plumb: unimplemented, see:
* http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
* eboiv: Encrypted byte-offset IV (used in Bitlocker in CBC mode)
*        The IV is encrypted little-endian byte-offset (with the same key
*        and cipher as the volume).
*/

static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
@@ -841,6 +847,67 @@ static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,
return 0;
}

static void crypt_iv_eboiv_dtr(struct crypt_config *cc)
{
struct iv_eboiv_private *eboiv = &cc->iv_gen_private.eboiv;

crypto_free_cipher(eboiv->tfm);
eboiv->tfm = NULL;
}

static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
const char *opts)
{
struct iv_eboiv_private *eboiv = &cc->iv_gen_private.eboiv;
struct crypto_cipher *tfm;

tfm = crypto_alloc_cipher(cc->cipher, 0, 0);
if (IS_ERR(tfm)) {
ti->error = "Error allocating crypto tfm for EBOIV";
return PTR_ERR(tfm);
}

if (crypto_cipher_blocksize(tfm) != cc->iv_size) {
ti->error = "Block size of EBOIV cipher does "
"not match IV size of block cipher";
crypto_free_cipher(tfm);
return -EINVAL;
}

eboiv->tfm = tfm;
return 0;
}

static int crypt_iv_eboiv_init(struct crypt_config *cc)
{
struct iv_eboiv_private *eboiv = &cc->iv_gen_private.eboiv;
int err;

err = crypto_cipher_setkey(eboiv->tfm, cc->key, cc->key_size);
if (err)
return err;

return 0;
}

static int crypt_iv_eboiv_wipe(struct crypt_config *cc)
{
/* Called after cc->key is set to random key in crypt_wipe() */
return crypt_iv_eboiv_init(cc);
}

static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
struct dm_crypt_request *dmreq)
{
struct iv_eboiv_private *eboiv = &cc->iv_gen_private.eboiv;

memset(iv, 0, cc->iv_size);
*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
crypto_cipher_encrypt_one(eboiv->tfm, iv, iv);

return 0;
}

static const struct crypt_iv_operations crypt_iv_plain_ops = {
.generator = crypt_iv_plain_gen
};
@@ -893,6 +960,14 @@ static struct crypt_iv_operations crypt_iv_random_ops = {
.generator = crypt_iv_random_gen
};

static struct crypt_iv_operations crypt_iv_eboiv_ops = {
.ctr = crypt_iv_eboiv_ctr,
.dtr = crypt_iv_eboiv_dtr,
.init = crypt_iv_eboiv_init,
.wipe = crypt_iv_eboiv_wipe,
.generator = crypt_iv_eboiv_gen
};

/*
* Integrity extensions
*/
@@ -2158,6 +2233,14 @@ static int crypt_wipe_key(struct crypt_config *cc)

clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
get_random_bytes(&cc->key, cc->key_size);

/* Wipe IV private keys */
if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
r = cc->iv_gen_ops->wipe(cc);
if (r)
return r;
}

kzfree(cc->key_string);
cc->key_string = NULL;
r = crypt_setkey(cc);
@@ -2288,6 +2371,8 @@ static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
cc->iv_gen_ops = &crypt_iv_benbi_ops;
else if (strcmp(ivmode, "null") == 0)
cc->iv_gen_ops = &crypt_iv_null_ops;
else if (strcmp(ivmode, "eboiv") == 0)
cc->iv_gen_ops = &crypt_iv_eboiv_ops;
else if (strcmp(ivmode, "lmk") == 0) {
cc->iv_gen_ops = &crypt_iv_lmk_ops;
/*
@@ -2699,7 +2784,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
return -EINVAL;
}

cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
cc = kzalloc(struct_size(cc, key, key_size), GFP_KERNEL);
if (!cc) {
ti->error = "Cannot allocate encryption context";
return -ENOMEM;
@@ -3050,14 +3135,8 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv,
memset(cc->key, 0, cc->key_size * sizeof(u8));
return ret;
}
if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
ret = cc->iv_gen_ops->wipe(cc);
if (ret)
return ret;
}
if (argc == 2 && !strcasecmp(argv[1], "wipe"))
return crypt_wipe_key(cc);
}
}

error:
@@ -3094,7 +3173,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)

static struct target_type crypt_target = {
.name = "crypt",
.version = {1, 18, 1},
.version = {1, 19, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
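
For reference, a hypothetical dm-crypt table line selecting the new IV mode might look like the following, assuming the usual <cipher>-<chainmode>-<ivmode> cipher specification and the standard crypt target parameters; <key in hex> is a placeholder for the hex-encoded volume key, and the device and sector counts are made up:

0 2097152 crypt aes-cbc-eboiv <key in hex> 0 /dev/sdb 0

With this mapping, each sector's IV is its little-endian byte offset encrypted with the volume key, which is what crypt_iv_eboiv_gen() above computes.
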
7 changes: 4 additions & 3 deletions drivers/md/dm-integrity.c
@@ -476,6 +476,9 @@ static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
io_loc.sector = ic->start;
io_loc.count = SB_SECTORS;

if (op == REQ_OP_WRITE)
sb_set_version(ic);

return dm_io(&io_req, 1, &io_loc, NULL);
}

@@ -2317,7 +2320,6 @@ static void recalc_write_super(struct dm_integrity_c *ic)
if (dm_integrity_failed(ic))
return;

sb_set_version(ic);
r = sync_rw_sb(ic, REQ_OP_WRITE, 0);
if (unlikely(r))
dm_integrity_io_error(ic, "writing superblock", r);
@@ -3358,7 +3360,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
goto bad;
}

crypt_iv = kmalloc(ivsize, GFP_KERNEL);
crypt_iv = kzalloc(ivsize, GFP_KERNEL);
if (!crypt_iv) {
*error = "Could not allocate iv";
r = -ENOMEM;
@@ -3387,7 +3389,6 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
sg_set_buf(&sg[i], va, PAGE_SIZE);
}
sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
memset(crypt_iv, 0x00, ivsize);

skcipher_request_set_crypt(req, sg, sg,
PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
4 changes: 2 additions & 2 deletions drivers/md/dm-log-writes.c
@@ -40,7 +40,7 @@
*
* Would result in the log looking like this:
*
* c,a,flush,fuad,b,<other writes>,<next flush>
* c,a,b,flush,fuad,<other writes>,<next flush>
*
* This is meant to help expose problems where file systems do not properly wait
* on data being written before invoking a FLUSH. FUA bypasses cache so once it
@@ -699,7 +699,7 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
if (discard_bio)
alloc_size = sizeof(struct pending_block);
else
alloc_size = sizeof(struct pending_block) + sizeof(struct bio_vec) * bio_segments(bio);
alloc_size = struct_size(block, vecs, bio_segments(bio));

block = kzalloc(alloc_size, GFP_NOIO);
if (!block) {
2 changes: 1 addition & 1 deletion drivers/md/dm-rq.c
@@ -115,7 +115,7 @@ static void end_clone_bio(struct bio *clone)

/*
* Update the original request.
* Do not use blk_end_request() here, because it may complete
* Do not use blk_mq_end_request() here, because it may complete
* the original request before the clone, and break the ordering.
*/
if (is_last)
(Diffs for the remaining two changed files are not shown.)
