
Merge tag 'for-6.0-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:
 "A few more fixes to zoned mode and one regression fix for chunk limit:

    - Zoned mode fixes:
        - fix how wait/wake up is done when finishing zone
        - fix zone append limit in emulated mode
        - fix mount on devices with conventional zones

   - fix regression, user settable data chunk limit got accidentally
     lowered and causes allocation problems on some profiles (raid0,
     raid1)"

* tag 'for-6.0-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: fix the max chunk size and stripe length calculation
  btrfs: zoned: fix mounting with conventional zones
  btrfs: zoned: set pseudo max append zone limit in zone emulation mode
  btrfs: zoned: fix API misuse of zone finish waiting
torvalds committed Sep 9, 2022
2 parents 725f3f3 + 5da431b commit 9b45094
Showing 6 changed files with 60 additions and 54 deletions.
2 changes: 0 additions & 2 deletions fs/btrfs/ctree.h
@@ -1088,8 +1088,6 @@ struct btrfs_fs_info {
 
         spinlock_t zone_active_bgs_lock;
         struct list_head zone_active_bgs;
-        /* Waiters when BTRFS_FS_NEED_ZONE_FINISH is set */
-        wait_queue_head_t zone_finish_wait;
 
         /* Updates are not protected by any lock */
         struct btrfs_commit_stats commit_stats;
1 change: 0 additions & 1 deletion fs/btrfs/disk-io.c
@@ -3068,7 +3068,6 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
         init_waitqueue_head(&fs_info->transaction_blocked_wait);
         init_waitqueue_head(&fs_info->async_submit_wait);
         init_waitqueue_head(&fs_info->delayed_iputs_wait);
-        init_waitqueue_head(&fs_info->zone_finish_wait);
 
         /* Usable values until the real ones are cached from the superblock */
         fs_info->nodesize = 4096;
7 changes: 3 additions & 4 deletions fs/btrfs/inode.c
@@ -1644,10 +1644,9 @@ static noinline int run_delalloc_zoned(struct btrfs_inode *inode,
                         done_offset = end;
 
                 if (done_offset == start) {
-                        struct btrfs_fs_info *info = inode->root->fs_info;
-
-                        wait_var_event(&info->zone_finish_wait,
-                                       !test_bit(BTRFS_FS_NEED_ZONE_FINISH, &info->flags));
+                        wait_on_bit_io(&inode->root->fs_info->flags,
+                                       BTRFS_FS_NEED_ZONE_FINISH,
+                                       TASK_UNINTERRUPTIBLE);
                         continue;
                 }
 
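The "API misuse" named in the shortlog: wait_var_event() must be paired with
wake_up_var() on the same address, but the waker side used wake_up_all() on a
separate waitqueue head, so these waiters were not reliably woken. The fix keys
both sides on the flag bit itself via the wait-on-bit API. A minimal sketch of
that pairing, not the btrfs code itself (my_flags and MY_FLAG_BUSY are made-up
names):

#include <linux/wait_bit.h>
#include <linux/sched.h>

#define MY_FLAG_BUSY    0               /* bit number inside the flags word */

static unsigned long my_flags;

/* Waiter: sleep (uninterruptibly, accounted as I/O wait) until the bit clears. */
static void waiter(void)
{
        wait_on_bit_io(&my_flags, MY_FLAG_BUSY, TASK_UNINTERRUPTIBLE);
}

/* Waker: clear the bit and wake everyone waiting on that same word/bit. */
static void waker(void)
{
        clear_and_wake_up_bit(MY_FLAG_BUSY, &my_flags);
}

clear_and_wake_up_bit() is exactly what do_zone_finish() switches to in the
zoned.c hunk at the end of this commit, pairing correctly with the
wait_on_bit_io() call here.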
2 changes: 1 addition & 1 deletion fs/btrfs/space-info.c
@@ -199,7 +199,7 @@ static u64 calc_chunk_size(const struct btrfs_fs_info *fs_info, u64 flags)
         ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
 
         if (flags & BTRFS_BLOCK_GROUP_DATA)
-                return SZ_1G;
+                return BTRFS_MAX_DATA_CHUNK_SIZE;
         else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
                 return SZ_32M;
 
3 changes: 3 additions & 0 deletions fs/btrfs/volumes.c
@@ -5267,6 +5267,9 @@ static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
                              ctl->stripe_size);
         }
 
+        /* Stripe size should not go beyond 1G. */
+        ctl->stripe_size = min_t(u64, ctl->stripe_size, SZ_1G);
+
         /* Align to BTRFS_STRIPE_LEN */
         ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
         ctl->chunk_size = ctl->stripe_size * data_stripes;
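Together, these two hunks restore the intended split: calc_chunk_size() returns
BTRFS_MAX_DATA_CHUNK_SIZE (10 GiB) for data chunks again, and the 1 GiB cap is
applied where it belongs, to the per-device stripe, before aligning down to
BTRFS_STRIPE_LEN (64 KiB). A userspace sketch of the clamp-then-align
arithmetic; the constants are written out and the raid0 input values are
illustrative:

#include <stdint.h>
#include <stdio.h>

#define SZ_1G           (1024ULL * 1024 * 1024)
#define STRIPE_LEN      (64ULL * 1024)  /* BTRFS_STRIPE_LEN, 64 KiB */

/* Kernel-style round_down() for a power-of-two alignment. */
static uint64_t round_down_pow2(uint64_t n, uint64_t align)
{
        return n & ~(align - 1);
}

int main(void)
{
        uint64_t stripe_size = 5 * SZ_1G;       /* candidate from free space */
        uint64_t data_stripes = 4;              /* e.g. a 4-device raid0 */

        /* The new clamp: one stripe never exceeds 1 GiB per device... */
        if (stripe_size > SZ_1G)
                stripe_size = SZ_1G;
        /* ...then align to the stripe length, as before. */
        stripe_size = round_down_pow2(stripe_size, STRIPE_LEN);

        /* A raid0 chunk can again grow past 1 GiB (up to the 10 GiB cap). */
        printf("chunk size: %llu GiB\n",
               (unsigned long long)(stripe_size * data_stripes / SZ_1G));
        return 0;
}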
99 changes: 53 additions & 46 deletions fs/btrfs/zoned.c
@@ -421,10 +421,19 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
          * since btrfs adds the pages one by one to a bio, and btrfs cannot
          * increase the metadata reservation even if it increases the number of
          * extents, it is safe to stick with the limit.
+         *
+         * With the zoned emulation, we can have non-zoned device on the zoned
+         * mode. In this case, we don't have a valid max zone append size. So,
+         * use max_segments * PAGE_SIZE as the pseudo max_zone_append_size.
          */
-        zone_info->max_zone_append_size =
-                min_t(u64, (u64)bdev_max_zone_append_sectors(bdev) << SECTOR_SHIFT,
-                      (u64)bdev_max_segments(bdev) << PAGE_SHIFT);
+        if (bdev_is_zoned(bdev)) {
+                zone_info->max_zone_append_size = min_t(u64,
+                        (u64)bdev_max_zone_append_sectors(bdev) << SECTOR_SHIFT,
+                        (u64)bdev_max_segments(bdev) << PAGE_SHIFT);
+        } else {
+                zone_info->max_zone_append_size =
+                        (u64)bdev_max_segments(bdev) << PAGE_SHIFT;
+        }
         if (!IS_ALIGNED(nr_sectors, zone_sectors))
                 zone_info->nr_zones++;
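The point of this hunk: on a regular (non-zoned) device running in zone
emulation mode, bdev_max_zone_append_sectors() reports 0, which the old min_t()
turned into a zero append limit; the fix derives a pseudo limit from the
segment count instead. A userspace sketch of the limit selection, with
hypothetical queue-limit inputs:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT    9       /* 512-byte sectors */
#define PAGE_SHIFT      12      /* 4 KiB pages */

/* Mirror of the selection above; inputs stand in for the bdev queue limits. */
static uint64_t max_zone_append_size(bool is_zoned,
                                     uint64_t max_append_sectors,
                                     uint64_t max_segments)
{
        uint64_t seg_limit = max_segments << PAGE_SHIFT;
        uint64_t hw_limit = max_append_sectors << SECTOR_SHIFT;

        if (is_zoned)   /* real zoned device: honor the hardware limit too */
                return hw_limit < seg_limit ? hw_limit : seg_limit;

        /* emulation: no valid hardware value, use the pseudo limit */
        return seg_limit;
}

int main(void)
{
        /* a regular device with 128 segments gets a 512 KiB pseudo limit */
        printf("%llu\n", (unsigned long long)max_zone_append_size(false, 0, 128));
        return 0;
}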

@@ -1178,7 +1187,7 @@ int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
  * offset.
  */
 static int calculate_alloc_pointer(struct btrfs_block_group *cache,
-                                   u64 *offset_ret)
+                                   u64 *offset_ret, bool new)
 {
         struct btrfs_fs_info *fs_info = cache->fs_info;
         struct btrfs_root *root;
@@ -1188,6 +1197,21 @@ static int calculate_alloc_pointer(struct btrfs_block_group *cache,
         int ret;
         u64 length;
 
+        /*
+         * Avoid tree lookups for a new block group, there's no use for it.
+         * It must always be 0.
+         *
+         * Also, we have a lock chain of extent buffer lock -> chunk mutex.
+         * For a new block group, this function is called from
+         * btrfs_make_block_group() which is already taking the chunk mutex.
+         * Thus, we cannot call calculate_alloc_pointer() which takes extent
+         * buffer locks to avoid deadlock.
+         */
+        if (new) {
+                *offset_ret = 0;
+                return 0;
+        }
+
         path = btrfs_alloc_path();
         if (!path)
                 return -ENOMEM;
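The comment's lock-order argument, restated: everywhere else the extent buffer
lock is taken before the chunk mutex, but btrfs_make_block_group() already
holds the chunk mutex, so doing tree lookups from there would take the locks in
the reverse order. A userspace sketch of that AB-BA inversion (lock names are
made up; the kernel chain is extent-buffer lock -> chunk_mutex):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t eb_lock = PTHREAD_MUTEX_INITIALIZER;     /* "extent buffer lock" */
static pthread_mutex_t chunk_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Established order everywhere else: eb_lock first, then chunk_mutex. */
static void *established_order(void *arg)
{
        pthread_mutex_lock(&eb_lock);
        pthread_mutex_lock(&chunk_mutex);
        pthread_mutex_unlock(&chunk_mutex);
        pthread_mutex_unlock(&eb_lock);
        return NULL;
}

/*
 * What calling calculate_alloc_pointer() under btrfs_make_block_group()
 * would amount to: chunk_mutex already held, extent buffer lock taken
 * second -- the reverse order. Run concurrently with the thread above,
 * this is a classic AB-BA deadlock; the "if (new)" early return above
 * sidesteps it entirely.
 */
static void *inverted_order(void *arg)
{
        pthread_mutex_lock(&chunk_mutex);
        pthread_mutex_lock(&eb_lock);
        pthread_mutex_unlock(&eb_lock);
        pthread_mutex_unlock(&chunk_mutex);
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, established_order, NULL);
        pthread_create(&b, NULL, inverted_order, NULL); /* may deadlock */
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        puts("finished (this time)");
        return 0;
}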
@@ -1323,6 +1347,13 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
                 else
                         num_conventional++;
 
+                /*
+                 * Consider a zone as active if we can allow any number of
+                 * active zones.
+                 */
+                if (!device->zone_info->max_active_zones)
+                        __set_bit(i, active);
+
                 if (!is_sequential) {
                         alloc_offsets[i] = WP_CONVENTIONAL;
                         continue;
@@ -1389,45 +1420,23 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
                         __set_bit(i, active);
                         break;
                 }
-
-                /*
-                 * Consider a zone as active if we can allow any number of
-                 * active zones.
-                 */
-                if (!device->zone_info->max_active_zones)
-                        __set_bit(i, active);
         }
 
         if (num_sequential > 0)
                 cache->seq_zone = true;
 
         if (num_conventional > 0) {
-                /*
-                 * Avoid calling calculate_alloc_pointer() for new BG. It
-                 * is no use for new BG. It must be always 0.
-                 *
-                 * Also, we have a lock chain of extent buffer lock ->
-                 * chunk mutex. For new BG, this function is called from
-                 * btrfs_make_block_group() which is already taking the
-                 * chunk mutex. Thus, we cannot call
-                 * calculate_alloc_pointer() which takes extent buffer
-                 * locks to avoid deadlock.
-                 */
-
                 /* Zone capacity is always zone size in emulation */
                 cache->zone_capacity = cache->length;
-                if (new) {
-                        cache->alloc_offset = 0;
-                        goto out;
-                }
-                ret = calculate_alloc_pointer(cache, &last_alloc);
-                if (ret || map->num_stripes == num_conventional) {
-                        if (!ret)
-                                cache->alloc_offset = last_alloc;
-                        else
-                                btrfs_err(fs_info,
+                ret = calculate_alloc_pointer(cache, &last_alloc, new);
+                if (ret) {
+                        btrfs_err(fs_info,
                 "zoned: failed to determine allocation offset of bg %llu",
-                                          cache->start);
+                                  cache->start);
                         goto out;
+                } else if (map->num_stripes == num_conventional) {
+                        cache->alloc_offset = last_alloc;
+                        cache->zone_is_active = 1;
+                        goto out;
                 }
         }
@@ -1495,13 +1504,6 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
                 goto out;
         }
 
-        if (cache->zone_is_active) {
-                btrfs_get_block_group(cache);
-                spin_lock(&fs_info->zone_active_bgs_lock);
-                list_add_tail(&cache->active_bg_list, &fs_info->zone_active_bgs);
-                spin_unlock(&fs_info->zone_active_bgs_lock);
-        }
-
 out:
         if (cache->alloc_offset > fs_info->zone_size) {
                 btrfs_err(fs_info,
@@ -1526,10 +1528,16 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
                 ret = -EIO;
         }
 
-        if (!ret)
+        if (!ret) {
                 cache->meta_write_pointer = cache->alloc_offset + cache->start;
-
-        if (ret) {
+                if (cache->zone_is_active) {
+                        btrfs_get_block_group(cache);
+                        spin_lock(&fs_info->zone_active_bgs_lock);
+                        list_add_tail(&cache->active_bg_list,
+                                      &fs_info->zone_active_bgs);
+                        spin_unlock(&fs_info->zone_active_bgs_lock);
+                }
+        } else {
                 kfree(cache->physical_map);
                 cache->physical_map = NULL;
         }
@@ -2007,8 +2015,7 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
         /* For active_bg_list */
         btrfs_put_block_group(block_group);
 
-        clear_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);
-        wake_up_all(&fs_info->zone_finish_wait);
+        clear_and_wake_up_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);
 
         return 0;
 }
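The conventional-zone mount fix in this file is largely about ordering: the
"no max_active_zones means every zone counts as active" check used to sit after
the per-zone switch, but conventional zones leave each loop iteration early
through the WP_CONVENTIONAL continue and never reached it, so their block
groups could not be activated. The hunks hoist the check ahead of the early-out
and set zone_is_active for conventional block groups. A minimal userspace
sketch of the ordering point (types and values are made up):

#include <stdbool.h>
#include <stdio.h>

struct zone {
        bool sequential;        /* false = conventional zone */
};

int main(void)
{
        struct zone zones[] = { { .sequential = false }, { .sequential = true } };
        unsigned long active = 0;               /* bitmap, one bit per zone */
        unsigned int max_active_zones = 0;      /* 0 = device imposes no limit */

        for (int i = 0; i < 2; i++) {
                /*
                 * The fix: mark the zone active *before* the conventional
                 * early-out, so both zone types are covered.
                 */
                if (!max_active_zones)
                        active |= 1UL << i;

                if (!zones[i].sequential)
                        continue;       /* conventional: no write-pointer work */

                /* ...sequential-zone handling (write pointer, etc.)... */
        }

        printf("active bitmap: %#lx\n", active);        /* 0x3, not 0x2 */
        return 0;
}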
