Skip to content

Commit

Permalink
btrfs: add dedicated members for start and length of a block group
Browse files Browse the repository at this point in the history
The on-disk format of block group item makes use of the key that stores
the offset and length. This is further used in the code, although this
makes things harder to understand. The key is also packed so the
offset/length is not properly aligned as u64.

Add start (key.objectid) and length (key.offset) members to block group
and remove the embedded key. When the item is searched or written, a
local variable for key is used.

Reviewed-by: Johannes Thumshirn <[email protected]>
Reviewed-by: Nikolay Borisov <[email protected]>
Reviewed-by: Qu Wenruo <[email protected]>
Signed-off-by: David Sterba <[email protected]>
  • Loading branch information
kdave committed Nov 18, 2019
1 parent 0222dfd commit b3470b5
Show file tree
Hide file tree
Showing 15 changed files with 210 additions and 224 deletions.
137 changes: 69 additions & 68 deletions fs/btrfs/block-group.c

Large diffs are not rendered by default.

3 changes: 2 additions & 1 deletion fs/btrfs/block-group.h
Original file line number Diff line number Diff line change
Expand Up @@ -43,10 +43,11 @@ struct btrfs_caching_control {
#define CACHING_CTL_WAKE_UP SZ_2M

struct btrfs_block_group_cache {
struct btrfs_key key;
struct btrfs_fs_info *fs_info;
struct inode *inode;
spinlock_t lock;
u64 start;
u64 length;
u64 pinned;
u64 reserved;
u64 used;
Expand Down
28 changes: 14 additions & 14 deletions fs/btrfs/extent-tree.c
Original file line number Diff line number Diff line change
Expand Up @@ -75,8 +75,8 @@ void btrfs_free_excluded_extents(struct btrfs_block_group_cache *cache)
struct btrfs_fs_info *fs_info = cache->fs_info;
u64 start, end;

start = cache->key.objectid;
end = start + cache->key.offset - 1;
start = cache->start;
end = start + cache->length - 1;

clear_extent_bits(&fs_info->freed_extents[0],
start, end, EXTENT_UPTODATE);
Expand Down Expand Up @@ -2560,7 +2560,7 @@ static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start)
if (!cache)
return 0;

bytenr = cache->key.objectid;
bytenr = cache->start;
btrfs_put_block_group(cache);

return bytenr;
Expand Down Expand Up @@ -2796,7 +2796,7 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
while (start <= end) {
readonly = false;
if (!cache ||
start >= cache->key.objectid + cache->key.offset) {
start >= cache->start + cache->length) {
if (cache)
btrfs_put_block_group(cache);
total_unpinned = 0;
Expand All @@ -2809,7 +2809,7 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
empty_cluster <<= 1;
}

len = cache->key.objectid + cache->key.offset - start;
len = cache->start + cache->length - start;
len = min(len, end + 1 - start);

if (start < cache->last_byte_to_unpin) {
Expand Down Expand Up @@ -2925,8 +2925,8 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
ret = -EROFS;
if (!trans->aborted)
ret = btrfs_discard_extent(fs_info,
block_group->key.objectid,
block_group->key.offset,
block_group->start,
block_group->length,
&trimmed);

list_del_init(&block_group->bg_list);
Expand Down Expand Up @@ -3492,7 +3492,7 @@ static int find_free_extent_clustered(struct btrfs_block_group_cache *bg,
goto release_cluster;

offset = btrfs_alloc_from_cluster(cluster_bg, last_ptr,
ffe_ctl->num_bytes, cluster_bg->key.objectid,
ffe_ctl->num_bytes, cluster_bg->start,
&ffe_ctl->max_extent_size);
if (offset) {
/* We have a block, we're done */
Expand Down Expand Up @@ -3903,7 +3903,7 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
continue;

btrfs_grab_block_group(block_group, delalloc);
ffe_ctl.search_start = block_group->key.objectid;
ffe_ctl.search_start = block_group->start;

/*
* this can happen if we end up cycling through all the
Expand Down Expand Up @@ -3983,7 +3983,7 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,

/* move on to the next group */
if (ffe_ctl.search_start + num_bytes >
block_group->key.objectid + block_group->key.offset) {
block_group->start + block_group->length) {
btrfs_add_free_space(block_group, ffe_ctl.found_offset,
num_bytes);
goto loop;
Expand Down Expand Up @@ -5497,7 +5497,7 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
}

factor = btrfs_bg_type_to_factor(block_group->flags);
free_bytes += (block_group->key.offset -
free_bytes += (block_group->length -
block_group->used) * factor;

spin_unlock(&block_group->lock);
Expand Down Expand Up @@ -5645,13 +5645,13 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)

cache = btrfs_lookup_first_block_group(fs_info, range->start);
for (; cache; cache = btrfs_next_block_group(cache)) {
if (cache->key.objectid >= range_end) {
if (cache->start >= range_end) {
btrfs_put_block_group(cache);
break;
}

start = max(range->start, cache->key.objectid);
end = min(range_end, cache->key.objectid + cache->key.offset);
start = max(range->start, cache->start);
end = min(range_end, cache->start + cache->length);

if (end - start >= range->minlen) {
if (!btrfs_block_group_cache_done(cache)) {
Expand Down
37 changes: 18 additions & 19 deletions fs/btrfs/free-space-cache.c
Original file line number Diff line number Diff line change
Expand Up @@ -107,7 +107,7 @@ struct inode *lookup_free_space_inode(
return inode;

inode = __lookup_free_space_inode(fs_info->tree_root, path,
block_group->key.objectid);
block_group->start);
if (IS_ERR(inode))
return inode;

Expand Down Expand Up @@ -201,7 +201,7 @@ int create_free_space_inode(struct btrfs_trans_handle *trans,
return ret;

return __create_free_space_inode(trans->fs_info->tree_root, trans, path,
ino, block_group->key.objectid);
ino, block_group->start);
}

int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
Expand Down Expand Up @@ -882,21 +882,21 @@ int load_free_space_cache(struct btrfs_block_group_cache *block_group)
spin_unlock(&block_group->lock);

ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
path, block_group->key.objectid);
path, block_group->start);
btrfs_free_path(path);
if (ret <= 0)
goto out;

spin_lock(&ctl->tree_lock);
matched = (ctl->free_space == (block_group->key.offset - used -
matched = (ctl->free_space == (block_group->length - used -
block_group->bytes_super));
spin_unlock(&ctl->tree_lock);

if (!matched) {
__btrfs_remove_free_space_cache(ctl);
btrfs_warn(fs_info,
"block group %llu has wrong amount of free space",
block_group->key.objectid);
block_group->start);
ret = -1;
}
out:
Expand All @@ -909,7 +909,7 @@ int load_free_space_cache(struct btrfs_block_group_cache *block_group)

btrfs_warn(fs_info,
"failed to load free space cache for block group %llu, rebuilding it now",
block_group->key.objectid);
block_group->start);
}

iput(inode);
Expand Down Expand Up @@ -1067,23 +1067,22 @@ static noinline_for_stack int write_pinned_extent_entries(
*/
unpin = block_group->fs_info->pinned_extents;

start = block_group->key.objectid;
start = block_group->start;

while (start < block_group->key.objectid + block_group->key.offset) {
while (start < block_group->start + block_group->length) {
ret = find_first_extent_bit(unpin, start,
&extent_start, &extent_end,
EXTENT_DIRTY, NULL);
if (ret)
return 0;

/* This pinned extent is out of our range */
if (extent_start >= block_group->key.objectid +
block_group->key.offset)
if (extent_start >= block_group->start + block_group->length)
return 0;

extent_start = max(extent_start, start);
extent_end = min(block_group->key.objectid +
block_group->key.offset, extent_end + 1);
extent_end = min(block_group->start + block_group->length,
extent_end + 1);
len = extent_end - extent_start;

*entries += 1;
Expand Down Expand Up @@ -1174,7 +1173,7 @@ static int __btrfs_wait_cache_io(struct btrfs_root *root,
#ifdef DEBUG
btrfs_err(root->fs_info,
"failed to write free space cache for block group %llu",
block_group->key.objectid);
block_group->start);
#endif
}
}
Expand Down Expand Up @@ -1221,7 +1220,7 @@ int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
{
return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans,
block_group, &block_group->io_ctl,
path, block_group->key.objectid);
path, block_group->start);
}

/**
Expand Down Expand Up @@ -1400,7 +1399,7 @@ int btrfs_write_out_cache(struct btrfs_trans_handle *trans,
#ifdef DEBUG
btrfs_err(fs_info,
"failed to write free space cache for block group %llu",
block_group->key.objectid);
block_group->start);
#endif
spin_lock(&block_group->lock);
block_group->disk_cache_state = BTRFS_DC_ERROR;
Expand Down Expand Up @@ -1657,7 +1656,7 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
u64 max_bytes;
u64 bitmap_bytes;
u64 extent_bytes;
u64 size = block_group->key.offset;
u64 size = block_group->length;
u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

Expand Down Expand Up @@ -2034,7 +2033,7 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
* so allow those block groups to still be allowed to have a bitmap
* entry.
*/
if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->length)
return false;

return true;
Expand Down Expand Up @@ -2516,7 +2515,7 @@ void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)

spin_lock_init(&ctl->tree_lock);
ctl->unit = fs_info->sectorsize;
ctl->start = block_group->key.objectid;
ctl->start = block_group->start;
ctl->private = block_group;
ctl->op = &free_space_op;
INIT_LIST_HEAD(&ctl->trimming_ranges);
Expand Down Expand Up @@ -3379,7 +3378,7 @@ void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
mutex_lock(&fs_info->chunk_mutex);
em_tree = &fs_info->mapping_tree;
write_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, block_group->key.objectid,
em = lookup_extent_mapping(em_tree, block_group->start,
1);
BUG_ON(!em); /* logic error, can't happen */
remove_extent_mapping(em_tree, em);
Expand Down
Loading

0 comments on commit b3470b5

Please sign in to comment.