Skip to content

Commit

Permalink
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable
Browse files Browse the repository at this point in the history

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable:
  Btrfs: use join_transaction in btrfs_evict_inode()
  Btrfs - use %pU to print fsid
  Btrfs: fix extent state leak on failed nodatasum reads
  btrfs: fix unlocked access of delalloc_inodes
  Btrfs: avoid stack bloat in btrfs_ioctl_fs_info()
  btrfs: remove 64bit alignment padding to allow extent_buffer to fit into one fewer cacheline
  Btrfs: clear current->journal_info on async transaction commit
  Btrfs: make sure to recheck for bitmaps in clusters
  btrfs: remove unneeded includes from scrub.c
  btrfs: reinitialize scrub workers
  btrfs: scrub: errors in tree enumeration
  Btrfs: don't map extent buffer if path->skip_locking is set
  Btrfs: unlock the trans lock properly
  Btrfs: don't map extent buffer if path->skip_locking is set
  Btrfs: fix duplicate checking logic
  Btrfs: fix the allocator loop logic
  Btrfs: fix bitmap regression
  Btrfs: don't commit the transaction if we dont have enough pinned bytes
  Btrfs: noinline the cluster searching functions
  Btrfs: cache bitmaps when searching for a cluster
  • Loading branch information
torvalds committed Jun 12, 2011
2 parents 9d6fa8f + 30b4caf commit 3c25fa7
Show file tree
Hide file tree
Showing 10 changed files with 233 additions and 113 deletions.
10 changes: 7 additions & 3 deletions fs/btrfs/ctree.c
Original file line number Diff line number Diff line change
Expand Up @@ -1228,6 +1228,7 @@ static void reada_for_search(struct btrfs_root *root,
u32 nr;
u32 blocksize;
u32 nscan = 0;
bool map = true;

if (level != 1)
return;
Expand All @@ -1249,8 +1250,11 @@ static void reada_for_search(struct btrfs_root *root,

nritems = btrfs_header_nritems(node);
nr = slot;
if (node->map_token || path->skip_locking)
map = false;

while (1) {
if (!node->map_token) {
if (map && !node->map_token) {
unsigned long offset = btrfs_node_key_ptr_offset(nr);
map_private_extent_buffer(node, offset,
sizeof(struct btrfs_key_ptr),
Expand All @@ -1277,7 +1281,7 @@ static void reada_for_search(struct btrfs_root *root,
if ((search <= target && target - search <= 65536) ||
(search > target && search - target <= 65536)) {
gen = btrfs_node_ptr_generation(node, nr);
if (node->map_token) {
if (map && node->map_token) {
unmap_extent_buffer(node, node->map_token,
KM_USER1);
node->map_token = NULL;
Expand All @@ -1289,7 +1293,7 @@ static void reada_for_search(struct btrfs_root *root,
if ((nread > 65536 || nscan > 32))
break;
}
if (node->map_token) {
if (map && node->map_token) {
unmap_extent_buffer(node, node->map_token, KM_USER1);
node->map_token = NULL;
}
Expand Down
5 changes: 1 addition & 4 deletions fs/btrfs/disk-io.c
Original file line number Diff line number Diff line change
Expand Up @@ -1668,8 +1668,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
init_waitqueue_head(&fs_info->scrub_pause_wait);
init_rwsem(&fs_info->scrub_super_lock);
fs_info->scrub_workers_refcnt = 0;
btrfs_init_workers(&fs_info->scrub_workers, "scrub",
fs_info->thread_pool_size, &fs_info->generic_worker);

sb->s_blocksize = 4096;
sb->s_blocksize_bits = blksize_bits(4096);
Expand Down Expand Up @@ -2911,9 +2909,8 @@ static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root)

INIT_LIST_HEAD(&splice);

list_splice_init(&root->fs_info->delalloc_inodes, &splice);

spin_lock(&root->fs_info->delalloc_lock);
list_splice_init(&root->fs_info->delalloc_inodes, &splice);

while (!list_empty(&splice)) {
btrfs_inode = list_entry(splice.next, struct btrfs_inode,
Expand Down
55 changes: 32 additions & 23 deletions fs/btrfs/extent-tree.c
Original file line number Diff line number Diff line change
Expand Up @@ -3089,6 +3089,13 @@ int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
}
goto again;
}

/*
* If we have less pinned bytes than we want to allocate then
* don't bother committing the transaction, it won't help us.
*/
if (data_sinfo->bytes_pinned < bytes)
committed = 1;
spin_unlock(&data_sinfo->lock);

/* commit the current transaction and try again */
Expand Down Expand Up @@ -5211,9 +5218,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
* LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
* again
*/
if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
(found_uncached_bg || empty_size || empty_cluster ||
allowed_chunk_alloc)) {
if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
index = 0;
if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
found_uncached_bg = false;
Expand Down Expand Up @@ -5253,32 +5258,36 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
goto search;
}

if (loop < LOOP_CACHING_WAIT) {
loop++;
goto search;
}
loop++;

if (loop == LOOP_ALLOC_CHUNK) {
empty_size = 0;
empty_cluster = 0;
}
if (allowed_chunk_alloc) {
ret = do_chunk_alloc(trans, root, num_bytes +
2 * 1024 * 1024, data,
CHUNK_ALLOC_LIMITED);
allowed_chunk_alloc = 0;
if (ret == 1)
done_chunk_alloc = 1;
} else if (!done_chunk_alloc &&
space_info->force_alloc ==
CHUNK_ALLOC_NO_FORCE) {
space_info->force_alloc = CHUNK_ALLOC_LIMITED;
}

if (allowed_chunk_alloc) {
ret = do_chunk_alloc(trans, root, num_bytes +
2 * 1024 * 1024, data,
CHUNK_ALLOC_LIMITED);
allowed_chunk_alloc = 0;
done_chunk_alloc = 1;
} else if (!done_chunk_alloc &&
space_info->force_alloc == CHUNK_ALLOC_NO_FORCE) {
space_info->force_alloc = CHUNK_ALLOC_LIMITED;
/*
* We didn't allocate a chunk, go ahead and drop the
* empty size and loop again.
*/
if (!done_chunk_alloc)
loop = LOOP_NO_EMPTY_SIZE;
}

if (loop < LOOP_NO_EMPTY_SIZE) {
loop++;
goto search;
if (loop == LOOP_NO_EMPTY_SIZE) {
empty_size = 0;
empty_cluster = 0;
}
ret = -ENOSPC;

goto search;
} else if (!ins->objectid) {
ret = -ENOSPC;
} else if (ins->objectid) {
Expand Down
2 changes: 1 addition & 1 deletion fs/btrfs/extent_io.h
Original file line number Diff line number Diff line change
Expand Up @@ -126,9 +126,9 @@ struct extent_buffer {
unsigned long map_len;
struct page *first_page;
unsigned long bflags;
atomic_t refs;
struct list_head leak_list;
struct rcu_head rcu_head;
atomic_t refs;

/* the spinlock is used to protect most operations */
spinlock_t lock;
Expand Down
Loading

0 comments on commit 3c25fa7

Please sign in to comment.