Merge branch 'for-4.13-part1' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs updates from David Sterba:
 "The core updates improve error handling (mostly related to bios), with
  the usual incremental work on the GFP_NOFS (mis)use removal,
  refactoring or cleanups. Except for the two top patches, all have been in
  for-next for an extensive amount of time.

  User visible changes:

   - statx support

   - quota override tunable

   - improved compression thresholds

   - obsoleted mount option alloc_start

  Core updates:

   - bio-related updates:
       - faster bio cloning
       - no allocation failures
       - preallocated flush bios

   - more kvzalloc use, memalloc_nofs protections, GFP_NOFS updates

   - prep work for btree_inode removal

   - dir-item validation

   - qgroup fixes and updates

   - cleanups:
       - removed unused struct members, unused code, refactoring
       - argument refactoring (fs_info/root, caller -> callee sink)
       - SEARCH_TREE ioctl docs"

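As a quick illustration of the first user-visible item, statx support means btrfs now fills in the fields requested through the statx(2) system call. The snippet below is a generic userspace sketch, not btrfs-specific code from this pull; it assumes a glibc new enough (2.28+) to provide the statx() wrapper and struct statx.

#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>      /* AT_FDCWD */
#include <sys/stat.h>   /* statx(), struct statx, STATX_* masks */

int main(int argc, char **argv)
{
        struct statx stx;
        const char *path = argc > 1 ? argv[1] : ".";

        /* Request the basic stat fields plus the birth (creation) time. */
        if (statx(AT_FDCWD, path, 0, STATX_BASIC_STATS | STATX_BTIME, &stx) != 0) {
                perror("statx");
                return 1;
        }

        printf("size : %llu bytes\n", (unsigned long long)stx.stx_size);
        if (stx.stx_mask & STATX_BTIME)
                printf("btime: %lld\n", (long long)stx.stx_btime.tv_sec);
        return 0;
}
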
* 'for-4.13-part1' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux: (115 commits)
  btrfs: Remove false alert when fiemap range is smaller than on-disk extent
  btrfs: Don't clear SGID when inheriting ACLs
  btrfs: fix integer overflow in calc_reclaim_items_nr
  btrfs: scrub: fix target device initialization while setting up scrub context
  btrfs: qgroup: Fix qgroup reserved space underflow by only freeing reserved ranges
  btrfs: qgroup: Introduce extent changeset for qgroup reserve functions
  btrfs: qgroup: Fix qgroup reserved space underflow caused by buffered write and quotas being enabled
  btrfs: qgroup: Return actually freed bytes for qgroup release or free data
  btrfs: qgroup: Cleanup btrfs_qgroup_prepare_account_extents function
  btrfs: qgroup: Add quick exit for non-fs extents
  Btrfs: rework delayed ref total_bytes_pinned accounting
  Btrfs: return old and new total ref mods when adding delayed refs
  Btrfs: always account pinned bytes when dropping a tree block ref
  Btrfs: update total_bytes_pinned when pinning down extents
  Btrfs: make BUG_ON() in add_pinned_bytes() an ASSERT()
  Btrfs: make add_pinned_bytes() take an s64 num_bytes instead of u64
  btrfs: fix validation of XATTR_ITEM dir items
  btrfs: Verify dir_item in iterate_object_props
  btrfs: Check name_len before in btrfs_del_root_ref
  btrfs: Check name_len before reading btrfs_get_name
  ...
torvalds committed Jul 5, 2017
2 parents 7114f51 + 848c23b commit 8c27cb3
Showing 47 changed files with 1,723 additions and 1,415 deletions.
13 changes: 7 additions & 6 deletions fs/btrfs/acl.c
@@ -78,12 +78,6 @@ static int __btrfs_set_acl(struct btrfs_trans_handle *trans,
switch (type) {
case ACL_TYPE_ACCESS:
name = XATTR_NAME_POSIX_ACL_ACCESS;
if (acl) {
ret = posix_acl_update_mode(inode, &inode->i_mode, &acl);
if (ret)
return ret;
}
ret = 0;
break;
case ACL_TYPE_DEFAULT:
if (!S_ISDIR(inode->i_mode))
@@ -119,6 +113,13 @@ static int __btrfs_set_acl(struct btrfs_trans_handle *trans,

int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
int ret;

if (type == ACL_TYPE_ACCESS && acl) {
ret = posix_acl_update_mode(inode, &inode->i_mode, &acl);
if (ret)
return ret;
}
return __btrfs_set_acl(NULL, inode, acl, type);
}

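The acl.c hunk above is the "Don't clear SGID when inheriting ACLs" fix from the shortlog: posix_acl_update_mode() may strip the setgid bit, which is the right thing when userspace sets an ACL explicitly, but not when a freshly created inode merely inherits its parent's default ACL. Moving the call out of __btrfs_set_acl() and into btrfs_set_acl() keeps the mode rewrite on the explicit ->set_acl path only. A minimal sketch of that split follows; the example_* names are illustrative stand-ins, only posix_acl_update_mode() is the real VFS helper.

#include <linux/fs.h>
#include <linux/posix_acl.h>

/* Writes the ACL xattr; stands in for a low-level helper such as __btrfs_set_acl(). */
static int example_do_set_acl(struct inode *inode, struct posix_acl *acl, int type);

/* ->set_acl path: user-initiated, so SGID/mode adjustments must be applied. */
static int example_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
        int ret;

        if (type == ACL_TYPE_ACCESS && acl) {
                /* May drop SGID when the caller is not in the owning group. */
                ret = posix_acl_update_mode(inode, &inode->i_mode, &acl);
                if (ret)
                        return ret;
        }
        return example_do_set_acl(inode, acl, type);
}

/* Inheritance path for new inodes: take the default ACL as-is, keep SGID. */
static int example_inherit_acl(struct inode *inode, struct posix_acl *acl)
{
        return example_do_set_acl(inode, acl, ACL_TYPE_ACCESS);
}
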
10 changes: 5 additions & 5 deletions fs/btrfs/backref.c
@@ -16,7 +16,7 @@
* Boston, MA 021110-1307, USA.
*/

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include "ctree.h"
#include "disk-io.h"
@@ -2305,7 +2305,7 @@ struct btrfs_data_container *init_data_container(u32 total_bytes)
size_t alloc_bytes;

alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
data = vmalloc(alloc_bytes);
data = kvmalloc(alloc_bytes, GFP_KERNEL);
if (!data)
return ERR_PTR(-ENOMEM);

@@ -2339,9 +2339,9 @@ struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
if (IS_ERR(fspath))
return (void *)fspath;

ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
if (!ifp) {
vfree(fspath);
kvfree(fspath);
return ERR_PTR(-ENOMEM);
}

@@ -2356,6 +2356,6 @@ void free_ipath(struct inode_fs_paths *ipath)
{
if (!ipath)
return;
vfree(ipath->fspath);
kvfree(ipath->fspath);
kfree(ipath);
}
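
The backref.c hunks belong to the "more kvzalloc use" item: kvmalloc()/kvzalloc() try a kmalloc() first and transparently fall back to vmalloc() for large or fragmented requests, and kvfree() releases memory obtained either way, which removes the open-coded vmalloc()/vfree() pairs and the mismatched error paths. A minimal sketch of the pattern, with illustrative example_* names:

#include <linux/types.h>
#include <linux/mm.h>           /* kvzalloc, kvfree */
#include <linux/slab.h>

struct example_container {
        size_t len;
        u8 data[];
};

static struct example_container *example_alloc(size_t payload)
{
        struct example_container *c;

        /* Tries kmalloc() first, falls back to vmalloc() if that fails. */
        c = kvzalloc(sizeof(*c) + payload, GFP_KERNEL);
        if (!c)
                return NULL;
        c->len = payload;
        return c;
}

static void example_free(struct example_container *c)
{
        kvfree(c);      /* correct for both kmalloc'ed and vmalloc'ed memory */
}
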
53 changes: 21 additions & 32 deletions fs/btrfs/check-integrity.c
@@ -94,7 +94,7 @@
#include <linux/mutex.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/string.h>
#include "ctree.h"
#include "disk-io.h"
@@ -1638,12 +1638,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
struct bio *bio;
unsigned int j;

bio = btrfs_io_bio_alloc(GFP_NOFS, num_pages - i);
if (!bio) {
pr_info("btrfsic: bio_alloc() for %u pages failed!\n",
num_pages - i);
return -1;
}
bio = btrfs_io_bio_alloc(num_pages - i);
bio->bi_bdev = block_ctx->dev->bdev;
bio->bi_iter.bi_sector = dev_bytenr >> 9;
bio_set_op_attrs(bio, REQ_OP_READ, 0);
@@ -1668,14 +1663,8 @@ static int btrfsic_read_block(struct btrfsic_state *state,
dev_bytenr += (j - i) * PAGE_SIZE;
i = j;
}
for (i = 0; i < num_pages; i++) {
for (i = 0; i < num_pages; i++)
block_ctx->datav[i] = kmap(block_ctx->pagev[i]);
if (!block_ctx->datav[i]) {
pr_info("btrfsic: kmap() failed (dev %s)!\n",
block_ctx->dev->name);
return -1;
}
}

return block_ctx->len;
}
@@ -2822,44 +2811,47 @@ static void __btrfsic_submit_bio(struct bio *bio)
dev_state = btrfsic_dev_state_lookup(bio->bi_bdev);
if (NULL != dev_state &&
(bio_op(bio) == REQ_OP_WRITE) && bio_has_data(bio)) {
unsigned int i;
unsigned int i = 0;
u64 dev_bytenr;
u64 cur_bytenr;
struct bio_vec *bvec;
struct bio_vec bvec;
struct bvec_iter iter;
int bio_is_patched;
char **mapped_datav;
unsigned int segs = bio_segments(bio);

dev_bytenr = 512 * bio->bi_iter.bi_sector;
bio_is_patched = 0;
if (dev_state->state->print_mask &
BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
pr_info("submit_bio(rw=%d,0x%x, bi_vcnt=%u, bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
bio_op(bio), bio->bi_opf, bio->bi_vcnt,
bio_op(bio), bio->bi_opf, segs,
(unsigned long long)bio->bi_iter.bi_sector,
dev_bytenr, bio->bi_bdev);

mapped_datav = kmalloc_array(bio->bi_vcnt,
mapped_datav = kmalloc_array(segs,
sizeof(*mapped_datav), GFP_NOFS);
if (!mapped_datav)
goto leave;
cur_bytenr = dev_bytenr;

bio_for_each_segment_all(bvec, bio, i) {
BUG_ON(bvec->bv_len != PAGE_SIZE);
mapped_datav[i] = kmap(bvec->bv_page);
bio_for_each_segment(bvec, bio, iter) {
BUG_ON(bvec.bv_len != PAGE_SIZE);
mapped_datav[i] = kmap(bvec.bv_page);
i++;

if (dev_state->state->print_mask &
BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE)
pr_info("#%u: bytenr=%llu, len=%u, offset=%u\n",
i, cur_bytenr, bvec->bv_len, bvec->bv_offset);
cur_bytenr += bvec->bv_len;
i, cur_bytenr, bvec.bv_len, bvec.bv_offset);
cur_bytenr += bvec.bv_len;
}
btrfsic_process_written_block(dev_state, dev_bytenr,
mapped_datav, bio->bi_vcnt,
mapped_datav, segs,
bio, &bio_is_patched,
NULL, bio->bi_opf);
bio_for_each_segment_all(bvec, bio, i)
kunmap(bvec->bv_page);
bio_for_each_segment(bvec, bio, iter)
kunmap(bvec.bv_page);
kfree(mapped_datav);
} else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) {
if (dev_state->state->print_mask &
@@ -2923,13 +2915,10 @@ int btrfsic_mount(struct btrfs_fs_info *fs_info,
fs_info->sectorsize, PAGE_SIZE);
return -1;
}
state = kzalloc(sizeof(*state), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
state = kvzalloc(sizeof(*state), GFP_KERNEL);
if (!state) {
state = vzalloc(sizeof(*state));
if (!state) {
pr_info("btrfs check-integrity: vzalloc() failed!\n");
return -1;
}
pr_info("btrfs check-integrity: allocation failed!\n");
return -1;
}

if (!btrfsic_is_initialized) {
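Two themes show up in the check-integrity.c hunks: the kzalloc-with-vzalloc-fallback in btrfsic_mount() collapses into a single kvzalloc() call, and the write-side data walk moves from bio_for_each_segment_all() (which walks the bio's own bio_vec array and so only suits bios the caller built itself) to bio_for_each_segment(), which iterates struct bio_vec values through a bvec_iter and respects the bio's current iterator. A minimal sketch of the iterator form, with an illustrative function name:

#include <linux/bio.h>

/* Walk a bio's current segments; bvec is filled by value on each step. */
static unsigned int example_sum_bio_bytes(struct bio *bio)
{
        struct bio_vec bvec;
        struct bvec_iter iter;
        unsigned int bytes = 0;

        bio_for_each_segment(bvec, bio, iter)
                bytes += bvec.bv_len;

        return bytes;
}
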
94 changes: 21 additions & 73 deletions fs/btrfs/compression.c
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
@@ -42,48 +43,7 @@
#include "extent_io.h"
#include "extent_map.h"

struct compressed_bio {
/* number of bios pending for this compressed extent */
refcount_t pending_bios;

/* the pages with the compressed data on them */
struct page **compressed_pages;

/* inode that owns this data */
struct inode *inode;

/* starting offset in the inode for our pages */
u64 start;

/* number of bytes in the inode we're working on */
unsigned long len;

/* number of bytes on disk */
unsigned long compressed_len;

/* the compression algorithm for this bio */
int compress_type;

/* number of compressed pages in the array */
unsigned long nr_pages;

/* IO errors */
int errors;
int mirror_num;

/* for reads, this is the bio we are copying the data into */
struct bio *orig_bio;

/*
* the start of a variable length array of checksums only
* used by reads
*/
u32 sums;
};

static int btrfs_decompress_bio(int type, struct page **pages_in,
u64 disk_start, struct bio *orig_bio,
size_t srclen);
static int btrfs_decompress_bio(struct compressed_bio *cb);

static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
unsigned long disk_size)
@@ -94,12 +54,6 @@ static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
}

static struct bio *compressed_bio_alloc(struct block_device *bdev,
u64 first_byte, gfp_t gfp_flags)
{
return btrfs_bio_alloc(bdev, first_byte >> 9, BIO_MAX_PAGES, gfp_flags);
}

static int check_compressed_csum(struct btrfs_inode *inode,
struct compressed_bio *cb,
u64 disk_start)
@@ -173,11 +127,8 @@ static void end_compressed_bio_read(struct bio *bio)
/* ok, we're the last bio for this extent, lets start
* the decompression.
*/
ret = btrfs_decompress_bio(cb->compress_type,
cb->compressed_pages,
cb->start,
cb->orig_bio,
cb->compressed_len);
ret = btrfs_decompress_bio(cb);

csum_failed:
if (ret)
cb->errors = 1;
@@ -355,11 +306,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,

bdev = fs_info->fs_devices->latest_bdev;

bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
if (!bio) {
kfree(cb);
return BLK_STS_RESOURCE;
}
bio = btrfs_bio_alloc(bdev, first_byte);
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio->bi_private = cb;
bio->bi_end_io = end_compressed_bio_write;
@@ -406,8 +353,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,

bio_put(bio);

bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
BUG_ON(!bio);
bio = btrfs_bio_alloc(bdev, first_byte);
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio->bi_private = cb;
bio->bi_end_io = end_compressed_bio_write;
@@ -650,9 +596,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
/* include any pages we added in add_ra-bio_pages */
cb->len = bio->bi_iter.bi_size;

comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
if (!comp_bio)
goto fail2;
comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
bio_set_op_attrs (comp_bio, REQ_OP_READ, 0);
comp_bio->bi_private = cb;
comp_bio->bi_end_io = end_compressed_bio_read;
@@ -703,9 +647,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,

bio_put(comp_bio);

comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
GFP_NOFS);
BUG_ON(!comp_bio);
comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
comp_bio->bi_private = cb;
comp_bio->bi_end_io = end_compressed_bio_read;
@@ -801,6 +743,7 @@ static struct list_head *find_workspace(int type)
struct list_head *workspace;
int cpus = num_online_cpus();
int idx = type - 1;
unsigned nofs_flag;

struct list_head *idle_ws = &btrfs_comp_ws[idx].idle_ws;
spinlock_t *ws_lock = &btrfs_comp_ws[idx].ws_lock;
@@ -830,7 +773,15 @@ static struct list_head *find_workspace(int type)
atomic_inc(total_ws);
spin_unlock(ws_lock);

/*
* Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
* to turn it off here because we might get called from the restricted
* context of btrfs_compress_bio/btrfs_compress_pages
*/
nofs_flag = memalloc_nofs_save();
workspace = btrfs_compress_op[idx]->alloc_workspace();
memalloc_nofs_restore(nofs_flag);

if (IS_ERR(workspace)) {
atomic_dec(total_ws);
wake_up(ws_wait);
@@ -961,19 +912,16 @@ int btrfs_compress_pages(int type, struct address_space *mapping,
* be contiguous. They all correspond to the range of bytes covered by
* the compressed extent.
*/
static int btrfs_decompress_bio(int type, struct page **pages_in,
u64 disk_start, struct bio *orig_bio,
size_t srclen)
static int btrfs_decompress_bio(struct compressed_bio *cb)
{
struct list_head *workspace;
int ret;
int type = cb->compress_type;

workspace = find_workspace(type);

ret = btrfs_compress_op[type-1]->decompress_bio(workspace, pages_in,
disk_start, orig_bio,
srclen);
ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, cb);
free_workspace(type, workspace);

return ret;
}

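The find_workspace() hunk above illustrates the preferred replacement for blanket GFP_NOFS: wrap the section that must not recurse into the filesystem in memalloc_nofs_save()/memalloc_nofs_restore() and let the callees (including vmalloc-based helpers that cannot honour GFP_NOFS directly) allocate with GFP_KERNEL; inside the scope those allocations behave as NOFS. A minimal, illustrative sketch of the same pattern (the example_* name is not from this series):

#include <linux/sched/mm.h>     /* memalloc_nofs_save/restore */
#include <linux/mm.h>

static void *example_alloc_workspace(size_t size)
{
        unsigned int nofs_flag;
        void *ws;

        /*
         * Everything allocated in this scope is implicitly NOFS, even
         * though the callees ask for GFP_KERNEL (vmalloc included).
         */
        nofs_flag = memalloc_nofs_save();
        ws = kvzalloc(size, GFP_KERNEL);
        memalloc_nofs_restore(nofs_flag);

        return ws;
}
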
(diff for the remaining changed files not shown)
