Merge tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

Pull ext4 updates from Ted Ts'o:
 "A very large number of cleanups and bug fixes --- in particular for
  the ext4 encryption patches, which is a new feature added in the last
  merge window.  Also fix a number of long-standing xfstest failures.
  (Quota writes failing due to ENOSPC, a race between truncate and
  writepage in data=journalled mode that was causing generic/068 to
  fail, and other corner cases.)

  Also add support for FALLOC_FL_INSERT_RANGE, and improve jbd2
  performance eliminating locking when a buffer is modified more than
  once during a transaction (which is very common for allocation
  bitmaps, for example), in which case the state of the journalled
  buffer head doesn't need to change"
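
The jbd2 speedup mentioned above avoids taking the buffer-head state lock in the common case where a buffer is dirtied again within the same running transaction, since nothing about its journalling state needs to change. A minimal userspace sketch of that double-checked fast path, with illustrative names and types rather than the actual jbd2 structures:

#include <pthread.h>
#include <stdatomic.h>

enum { JLIST_NONE, JLIST_METADATA };

struct jbuffer {
        pthread_mutex_t state_lock;
        _Atomic int jlist;              /* which journal list the buffer is on */
        _Atomic unsigned long tid;      /* transaction that owns the buffer */
};

void mark_dirty_metadata(struct jbuffer *b, unsigned long running_tid)
{
        /* Lock-free fast path: the buffer is already filed as metadata
         * in the running transaction (e.g. an allocation bitmap being
         * modified for the second time), so there is nothing to do. */
        if (atomic_load(&b->jlist) == JLIST_METADATA &&
            atomic_load(&b->tid) == running_tid)
                return;

        /* Slow path: take the lock and re-check, since another thread
         * may have refiled the buffer between the unlocked test and
         * the lock acquisition. */
        pthread_mutex_lock(&b->state_lock);
        if (atomic_load(&b->jlist) != JLIST_METADATA ||
            atomic_load(&b->tid) != running_tid) {
                atomic_store(&b->tid, running_tid);
                atomic_store(&b->jlist, JLIST_METADATA);
                /* the real code would also move the buffer onto the
                 * transaction's metadata list at this point */
        }
        pthread_mutex_unlock(&b->state_lock);
}

The re-check under the lock is what makes the unlocked test safe; the fast path only ever skips work that has already been done.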

[ I renamed "ext4_follow_link()" to "ext4_encrypted_follow_link()" in
  the merge resolution, to make it clear that that function is _only_
  used for encrypted symlinks.  The function doesn't actually work for
  non-encrypted symlinks at all, and they use the generic helpers.
                                         - Linus ]
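
For context on the note above: the split is the usual ops-table dispatch, where the encrypted-symlink operations point follow_link at a crypto-aware helper while plain symlinks keep the generic page-based helper, the choice being made once when the inode is set up. A simplified sketch of that shape, with illustrative types rather than the real VFS definitions:

#include <stdio.h>

struct inode { int encrypted; };

struct symlink_ops {
        const char *(*follow_link)(struct inode *inode);
};

/* Plain symlink: the target is stored in the clear. */
static const char *generic_follow_link(struct inode *inode)
{
        (void)inode;
        return "plain/target";
}

/* Encrypted symlink: the stored target must first be decrypted with
 * the inode's key (decryption elided in this sketch). */
static const char *encrypted_follow_link(struct inode *inode)
{
        (void)inode;
        return "decrypted/target";
}

static const struct symlink_ops plain_ops = { generic_follow_link };
static const struct symlink_ops encrypted_ops = { encrypted_follow_link };

/* Chosen once at inode-setup time, so a non-encrypted symlink never
 * goes anywhere near the crypto path. */
static const struct symlink_ops *symlink_ops_for(struct inode *inode)
{
        return inode->encrypted ? &encrypted_ops : &plain_ops;
}

int main(void)
{
        struct inode plain = { 0 }, enc = { 1 };
        printf("%s\n", symlink_ops_for(&plain)->follow_link(&plain));
        printf("%s\n", symlink_ops_for(&enc)->follow_link(&enc));
        return 0;
}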

* tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4: (52 commits)
  ext4: set lazytime on remount if MS_LAZYTIME is set by mount
  ext4: only call ext4_truncate when size <= isize
  ext4: make online defrag error reporting consistent
  ext4: minor cleanup of ext4_da_reserve_space()
  ext4: don't retry file block mapping on bigalloc fs with non-extent file
  ext4: prevent ext4_quota_write() from failing due to ENOSPC
  ext4: call sync_blockdev() before invalidate_bdev() in put_super()
  jbd2: speedup jbd2_journal_dirty_metadata()
  jbd2: get rid of open coded allocation retry loop
  ext4: improve warning directory handling messages
  jbd2: fix ocfs2 corrupt when updating journal superblock fails
  ext4: mballoc: avoid 20-argument function call
  ext4: wait for existing dio workers in ext4_alloc_file_blocks()
  ext4: recalculate journal credits as inode depth changes
  jbd2: use GFP_NOFS in jbd2_cleanup_journal_tail()
  ext4: use swap() in mext_page_double_lock()
  ext4: use swap() in memswap()
  ext4: fix race between truncate and __ext4_journalled_writepage()
  ext4 crypto: fail the mount if blocksize != pagesize
  ext4: Add support FALLOC_FL_INSERT_RANGE for fallocate
  ...
torvalds committed Jun 25, 2015
2 parents 77d4316 + a2fd66d commit d857da7
Showing 29 changed files with 1,488 additions and 1,355 deletions.
1 change: 1 addition & 0 deletions fs/ext4/Kconfig
@@ -72,6 +72,7 @@ config EXT4_ENCRYPTION
 	select CRYPTO_ECB
 	select CRYPTO_XTS
 	select CRYPTO_CTS
+	select CRYPTO_CTR
 	select CRYPTO_SHA256
 	select KEYS
 	select ENCRYPTED_KEYS
4 changes: 2 additions & 2 deletions fs/ext4/balloc.c
@@ -369,7 +369,7 @@ static void ext4_validate_block_bitmap(struct super_block *sb,
 	struct ext4_group_info *grp = ext4_get_group_info(sb, block_group);
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 
-	if (buffer_verified(bh))
+	if (buffer_verified(bh) || EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
 		return;
 
 	ext4_lock_group(sb, block_group);
@@ -446,7 +446,7 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
 		unlock_buffer(bh);
 		if (err)
 			ext4_error(sb, "Checksum bad for grp %u", block_group);
-		return bh;
+		goto verify;
 	}
 	ext4_unlock_group(sb, block_group);
 	if (buffer_uptodate(bh)) {
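
Both balloc.c hunks implement the same idea: verify a bitmap at most once, and once a group has been flagged corrupt, short-circuit instead of re-verifying or handing back an unverified buffer (the bad-checksum path now falls through to the verify step rather than returning early). A rough sketch of the pattern, with illustrative types in place of the ext4 ones:

#include <stdbool.h>
#include <stddef.h>

struct group_info {
        bool bitmap_verified;   /* plays the role of buffer_verified(bh) */
        bool bitmap_corrupt;    /* plays the role of EXT4_MB_GRP_BBITMAP_CORRUPT(grp) */
};

/* Stand-in checksum; ext4 actually uses a crc32c-based checksum
 * over the bitmap block. */
static bool checksum_ok(const unsigned char *bitmap, size_t len,
                        unsigned char expected)
{
        unsigned char sum = 0;
        for (size_t i = 0; i < len; i++)
                sum ^= bitmap[i];
        return sum == expected;
}

static bool validate_block_bitmap(struct group_info *grp,
                                  const unsigned char *bitmap, size_t len,
                                  unsigned char expected)
{
        /* Already verified, or already known bad: don't re-check. */
        if (grp->bitmap_verified || grp->bitmap_corrupt)
                return !grp->bitmap_corrupt;

        if (!checksum_ok(bitmap, len, expected)) {
                grp->bitmap_corrupt = true;     /* remember the failure */
                return false;
        }
        grp->bitmap_verified = true;
        return true;
}
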
211 changes: 64 additions & 147 deletions fs/ext4/crypto.c
@@ -55,6 +55,9 @@ static mempool_t *ext4_bounce_page_pool;
 static LIST_HEAD(ext4_free_crypto_ctxs);
 static DEFINE_SPINLOCK(ext4_crypto_ctx_lock);
 
+static struct kmem_cache *ext4_crypto_ctx_cachep;
+struct kmem_cache *ext4_crypt_info_cachep;
+
 /**
  * ext4_release_crypto_ctx() - Releases an encryption context
  * @ctx: The encryption context to release.
@@ -68,42 +71,19 @@ void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
 {
 	unsigned long flags;
 
-	if (ctx->bounce_page) {
-		if (ctx->flags & EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL)
-			__free_page(ctx->bounce_page);
-		else
-			mempool_free(ctx->bounce_page, ext4_bounce_page_pool);
-		ctx->bounce_page = NULL;
-	}
-	ctx->control_page = NULL;
+	if (ctx->flags & EXT4_WRITE_PATH_FL && ctx->w.bounce_page)
+		mempool_free(ctx->w.bounce_page, ext4_bounce_page_pool);
+	ctx->w.bounce_page = NULL;
+	ctx->w.control_page = NULL;
 	if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) {
-		if (ctx->tfm)
-			crypto_free_tfm(ctx->tfm);
-		kfree(ctx);
+		kmem_cache_free(ext4_crypto_ctx_cachep, ctx);
 	} else {
 		spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
 		list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
 		spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
 	}
 }
 
-/**
- * ext4_alloc_and_init_crypto_ctx() - Allocates and inits an encryption context
- * @mask: The allocation mask.
- *
- * Return: An allocated and initialized encryption context on success. An error
- * value or NULL otherwise.
- */
-static struct ext4_crypto_ctx *ext4_alloc_and_init_crypto_ctx(gfp_t mask)
-{
-	struct ext4_crypto_ctx *ctx = kzalloc(sizeof(struct ext4_crypto_ctx),
-					      mask);
-
-	if (!ctx)
-		return ERR_PTR(-ENOMEM);
-	return ctx;
-}
-
 /**
  * ext4_get_crypto_ctx() - Gets an encryption context
  * @inode: The inode for which we are doing the crypto
@@ -118,10 +98,10 @@ struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
 	struct ext4_crypto_ctx *ctx = NULL;
 	int res = 0;
 	unsigned long flags;
-	struct ext4_encryption_key *key = &EXT4_I(inode)->i_encryption_key;
+	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
 
-	if (!ext4_read_workqueue)
-		ext4_init_crypto();
+	if (ci == NULL)
+		return ERR_PTR(-ENOKEY);
 
 	/*
 	 * We first try getting the ctx from a free list because in
@@ -140,50 +120,16 @@ struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
 		list_del(&ctx->free_list);
 	spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
 	if (!ctx) {
-		ctx = ext4_alloc_and_init_crypto_ctx(GFP_NOFS);
-		if (IS_ERR(ctx)) {
-			res = PTR_ERR(ctx);
+		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
+		if (!ctx) {
+			res = -ENOMEM;
 			goto out;
 		}
 		ctx->flags |= EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
 	} else {
 		ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
 	}
-
-	/* Allocate a new Crypto API context if we don't already have
-	 * one or if it isn't the right mode. */
-	BUG_ON(key->mode == EXT4_ENCRYPTION_MODE_INVALID);
-	if (ctx->tfm && (ctx->mode != key->mode)) {
-		crypto_free_tfm(ctx->tfm);
-		ctx->tfm = NULL;
-		ctx->mode = EXT4_ENCRYPTION_MODE_INVALID;
-	}
-	if (!ctx->tfm) {
-		switch (key->mode) {
-		case EXT4_ENCRYPTION_MODE_AES_256_XTS:
-			ctx->tfm = crypto_ablkcipher_tfm(
-				crypto_alloc_ablkcipher("xts(aes)", 0, 0));
-			break;
-		case EXT4_ENCRYPTION_MODE_AES_256_GCM:
-			/* TODO(mhalcrow): AEAD w/ gcm(aes);
-			 * crypto_aead_setauthsize() */
-			ctx->tfm = ERR_PTR(-ENOTSUPP);
-			break;
-		default:
-			BUG();
-		}
-		if (IS_ERR_OR_NULL(ctx->tfm)) {
-			res = PTR_ERR(ctx->tfm);
-			ctx->tfm = NULL;
-			goto out;
-		}
-		ctx->mode = key->mode;
-	}
-	BUG_ON(key->size != ext4_encryption_key_size(key->mode));
-
-	/* There shouldn't be a bounce page attached to the crypto
-	 * context at this point. */
-	BUG_ON(ctx->bounce_page);
+	ctx->flags &= ~EXT4_WRITE_PATH_FL;
 
 out:
 	if (res) {
@@ -204,27 +150,21 @@ void ext4_exit_crypto(void)
 {
 	struct ext4_crypto_ctx *pos, *n;
 
-	list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list) {
-		if (pos->bounce_page) {
-			if (pos->flags &
-			    EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL) {
-				__free_page(pos->bounce_page);
-			} else {
-				mempool_free(pos->bounce_page,
-					     ext4_bounce_page_pool);
-			}
-		}
-		if (pos->tfm)
-			crypto_free_tfm(pos->tfm);
-		kfree(pos);
-	}
+	list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list)
+		kmem_cache_free(ext4_crypto_ctx_cachep, pos);
 	INIT_LIST_HEAD(&ext4_free_crypto_ctxs);
 	if (ext4_bounce_page_pool)
 		mempool_destroy(ext4_bounce_page_pool);
 	ext4_bounce_page_pool = NULL;
 	if (ext4_read_workqueue)
 		destroy_workqueue(ext4_read_workqueue);
 	ext4_read_workqueue = NULL;
+	if (ext4_crypto_ctx_cachep)
+		kmem_cache_destroy(ext4_crypto_ctx_cachep);
+	ext4_crypto_ctx_cachep = NULL;
+	if (ext4_crypt_info_cachep)
+		kmem_cache_destroy(ext4_crypt_info_cachep);
+	ext4_crypt_info_cachep = NULL;
 }
 
 /**
@@ -237,23 +177,31 @@ void ext4_exit_crypto(void)
  */
 int ext4_init_crypto(void)
 {
-	int i, res;
+	int i, res = -ENOMEM;
 
 	mutex_lock(&crypto_init);
 	if (ext4_read_workqueue)
 		goto already_initialized;
 	ext4_read_workqueue = alloc_workqueue("ext4_crypto", WQ_HIGHPRI, 0);
-	if (!ext4_read_workqueue) {
-		res = -ENOMEM;
+	if (!ext4_read_workqueue)
 		goto fail;
-	}
+
+	ext4_crypto_ctx_cachep = KMEM_CACHE(ext4_crypto_ctx,
+					    SLAB_RECLAIM_ACCOUNT);
+	if (!ext4_crypto_ctx_cachep)
+		goto fail;
+
+	ext4_crypt_info_cachep = KMEM_CACHE(ext4_crypt_info,
+					    SLAB_RECLAIM_ACCOUNT);
+	if (!ext4_crypt_info_cachep)
+		goto fail;
 
 	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
 		struct ext4_crypto_ctx *ctx;
 
-		ctx = ext4_alloc_and_init_crypto_ctx(GFP_KERNEL);
-		if (IS_ERR(ctx)) {
-			res = PTR_ERR(ctx);
+		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
+		if (!ctx) {
+			res = -ENOMEM;
			goto fail;
 		}
 		list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
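
The reworked ext4_init_crypto() above follows the common kernel error-handling shape: default the return code to -ENOMEM, acquire each resource in turn, and branch to a single fail label that reuses the teardown function, which frees only what was actually created (each destructor call is guarded by a NULL check). A stripped-down userspace sketch of the same shape, with placeholder allocations standing in for alloc_workqueue() and KMEM_CACHE():

#include <stdlib.h>

#define ENOMEM 12

static void *read_workqueue;
static void *crypto_ctx_cachep;
static void *crypt_info_cachep;

/* Teardown frees whatever exists and is safe to call at any point,
 * which is why init can reuse it on partial failure. */
static void exit_crypto(void)
{
        free(crypt_info_cachep);
        crypt_info_cachep = NULL;
        free(crypto_ctx_cachep);
        crypto_ctx_cachep = NULL;
        free(read_workqueue);
        read_workqueue = NULL;
}

static int init_crypto(void)
{
        int res = -ENOMEM;

        read_workqueue = malloc(128);           /* alloc_workqueue() stand-in */
        if (!read_workqueue)
                goto fail;
        crypto_ctx_cachep = malloc(128);        /* KMEM_CACHE() stand-in */
        if (!crypto_ctx_cachep)
                goto fail;
        crypt_info_cachep = malloc(128);        /* KMEM_CACHE() stand-in */
        if (!crypt_info_cachep)
                goto fail;
        return 0;

fail:
        exit_crypto();  /* single cleanup path frees whatever succeeded */
        return res;
}
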
@@ -317,32 +265,11 @@ static int ext4_page_crypto(struct ext4_crypto_ctx *ctx,
 	struct ablkcipher_request *req = NULL;
 	DECLARE_EXT4_COMPLETION_RESULT(ecr);
 	struct scatterlist dst, src;
-	struct ext4_inode_info *ei = EXT4_I(inode);
-	struct crypto_ablkcipher *atfm = __crypto_ablkcipher_cast(ctx->tfm);
+	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
+	struct crypto_ablkcipher *tfm = ci->ci_ctfm;
 	int res = 0;
 
-	BUG_ON(!ctx->tfm);
-	BUG_ON(ctx->mode != ei->i_encryption_key.mode);
-
-	if (ctx->mode != EXT4_ENCRYPTION_MODE_AES_256_XTS) {
-		printk_ratelimited(KERN_ERR
-				   "%s: unsupported crypto algorithm: %d\n",
-				   __func__, ctx->mode);
-		return -ENOTSUPP;
-	}
-
-	crypto_ablkcipher_clear_flags(atfm, ~0);
-	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_REQ_WEAK_KEY);
-
-	res = crypto_ablkcipher_setkey(atfm, ei->i_encryption_key.raw,
-				       ei->i_encryption_key.size);
-	if (res) {
-		printk_ratelimited(KERN_ERR
-				   "%s: crypto_ablkcipher_setkey() failed\n",
-				   __func__);
-		return res;
-	}
-	req = ablkcipher_request_alloc(atfm, GFP_NOFS);
+	req = ablkcipher_request_alloc(tfm, GFP_NOFS);
 	if (!req) {
 		printk_ratelimited(KERN_ERR
 				   "%s: crypto_request_alloc() failed\n",
@@ -384,6 +311,15 @@ static int ext4_page_crypto(struct ext4_crypto_ctx *ctx,
 	return 0;
 }
 
+static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx)
+{
+	ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, GFP_NOWAIT);
+	if (ctx->w.bounce_page == NULL)
+		return ERR_PTR(-ENOMEM);
+	ctx->flags |= EXT4_WRITE_PATH_FL;
+	return ctx->w.bounce_page;
+}
+
 /**
  * ext4_encrypt() - Encrypts a page
  * @inode: The inode for which the encryption should take place
@@ -413,27 +349,17 @@ struct page *ext4_encrypt(struct inode *inode,
 		return (struct page *) ctx;
 
 	/* The encryption operation will require a bounce page. */
-	ciphertext_page = alloc_page(GFP_NOFS);
-	if (!ciphertext_page) {
-		/* This is a potential bottleneck, but at least we'll have
-		 * forward progress. */
-		ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
-						GFP_NOFS);
-		if (WARN_ON_ONCE(!ciphertext_page)) {
-			ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
-							GFP_NOFS | __GFP_WAIT);
-		}
-		ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
-	} else {
-		ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
-	}
-	ctx->bounce_page = ciphertext_page;
-	ctx->control_page = plaintext_page;
+	ciphertext_page = alloc_bounce_page(ctx);
+	if (IS_ERR(ciphertext_page))
+		goto errout;
+	ctx->w.control_page = plaintext_page;
 	err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, plaintext_page->index,
 			       plaintext_page, ciphertext_page);
 	if (err) {
-		ext4_release_crypto_ctx(ctx);
-		return ERR_PTR(err);
+		ciphertext_page = ERR_PTR(err);
+	errout:
+		ext4_release_crypto_ctx(ctx);
+		return ciphertext_page;
 	}
 	SetPagePrivate(ciphertext_page);
 	set_page_private(ciphertext_page, (unsigned long)ctx);
@@ -470,8 +396,8 @@ int ext4_decrypt_one(struct inode *inode, struct page *page)
 
 	struct ext4_crypto_ctx *ctx = ext4_get_crypto_ctx(inode);
 
-	if (!ctx)
-		return -ENOMEM;
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
 	ret = ext4_decrypt(ctx, page);
 	ext4_release_crypto_ctx(ctx);
 	return ret;
@@ -493,21 +419,11 @@ int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	ciphertext_page = alloc_page(GFP_NOFS);
-	if (!ciphertext_page) {
-		/* This is a potential bottleneck, but at least we'll have
-		 * forward progress. */
-		ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
-						GFP_NOFS);
-		if (WARN_ON_ONCE(!ciphertext_page)) {
-			ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
-							GFP_NOFS | __GFP_WAIT);
-		}
-		ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
-	} else {
-		ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
+	ciphertext_page = alloc_bounce_page(ctx);
+	if (IS_ERR(ciphertext_page)) {
+		err = PTR_ERR(ciphertext_page);
+		goto errout;
 	}
-	ctx->bounce_page = ciphertext_page;
 
 	while (len--) {
 		err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, lblk,
@@ -529,6 +445,7 @@ int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
 			goto errout;
 		}
 		err = submit_bio_wait(WRITE, bio);
+		bio_put(bio);
 		if (err)
 			goto errout;
 	}
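
The new alloc_bounce_page() helper above replaces the old alloc_page()-with-fallback dance: the write path now draws bounce pages straight from the preallocated mempool with GFP_NOWAIT, and the EXT4_WRITE_PATH_FL flag tells ext4_release_crypto_ctx() to return the page to the pool. A small userspace sketch of the preallocated-pool idea (a stand-in freelist, not the kernel mempool API):

#include <stdlib.h>

#define POOL_PAGES 32
#define PAGE_SIZE 4096

/* Fixed set of preallocated "pages"; a stand-in for the kernel's
 * ext4_bounce_page_pool mempool. */
static void *pool[POOL_PAGES];
static int pool_top;

static int pool_init(void)
{
        for (pool_top = 0; pool_top < POOL_PAGES; pool_top++) {
                pool[pool_top] = malloc(PAGE_SIZE);
                if (!pool[pool_top])
                        return -1;
        }
        return 0;
}

/* Non-blocking allocation, in the spirit of mempool_alloc() with
 * GFP_NOWAIT: fail immediately rather than sleep when the pool is
 * empty, but always succeed while preallocated pages remain, so the
 * write path keeps making forward progress under memory pressure. */
static void *bounce_page_alloc(void)
{
        return pool_top > 0 ? pool[--pool_top] : NULL;
}

static void bounce_page_free(void *page)
{
        pool[pool_top++] = page;
}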