Commit 7ce14f6f authored by Linus Torvalds

Merge branch 'for-linus-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs

Pull btrfs fixes from Chris Mason:
 "I fixed up a regression from 4.0 where conversion between different
  raid levels would sometimes bail out without converting.

  Filipe tracked down a race where it was possible to double allocate
  chunks on the drive.

  Mark has a fix for fiemap.  All three will get bundled off for stable
  as well"

* 'for-linus-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
  Btrfs: fix regression in raid level conversion
  Btrfs: fix racy system chunk allocation when setting block group ro
  btrfs: clear 'ret' in btrfs_check_shared() loop
parents cf539cbd 153c35b6
@@ -880,6 +880,8 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
  * indirect refs to their parent bytenr.
  * When roots are found, they're added to the roots list
  *
+ * NOTE: This can return values > 0
+ *
  * FIXME some caching might speed things up
  */
 static int find_parent_nodes(struct btrfs_trans_handle *trans,
@@ -1198,6 +1200,19 @@ int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
 	return ret;
 }
 
+/**
+ * btrfs_check_shared - tell us whether an extent is shared
+ *
+ * @trans: optional trans handle
+ *
+ * btrfs_check_shared uses the backref walking code but will short
+ * circuit as soon as it finds a root or inode that doesn't match the
+ * one passed in. This provides a significant performance benefit for
+ * callers (such as fiemap) which want to know whether the extent is
+ * shared but do not need a ref count.
+ *
+ * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
+ */
 int btrfs_check_shared(struct btrfs_trans_handle *trans,
 		       struct btrfs_fs_info *fs_info, u64 root_objectid,
 		       u64 inum, u64 bytenr)
@@ -1226,11 +1241,13 @@ int btrfs_check_shared(struct btrfs_trans_handle *trans,
 		ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
 					roots, NULL, root_objectid, inum);
 		if (ret == BACKREF_FOUND_SHARED) {
+			/* this is the only condition under which we return 1 */
 			ret = 1;
 			break;
 		}
 		if (ret < 0 && ret != -ENOENT)
 			break;
+		ret = 0;
 		node = ulist_next(tmp, &uiter);
 		if (!node)
 			break;
...
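The one-line `ret = 0;` above (with its comment) is the whole btrfs_check_shared() fix: find_parent_nodes() can return -ENOENT for a missing node, which the walk tolerates, and per the new NOTE it can also return values > 0. Without clearing `ret` on each iteration, a leftover value from the last lookup leaks out as the function's result. A minimal userspace sketch of the pattern, using hypothetical stand-ins (lookup_node, check_shared) rather than real btrfs APIs:

#include <errno.h>
#include <stdio.h>

/* Hypothetical lookup: the last node is absent, a tolerated -ENOENT. */
static int lookup_node(int i, int nr_nodes)
{
	return (i == nr_nodes - 1) ? -ENOENT : 0;
}

/* Returns 0 if "not shared", < 0 on error (the shared case is omitted). */
static int check_shared(int nr_nodes)
{
	int ret = 0;
	int i;

	for (i = 0; i < nr_nodes; i++) {
		ret = lookup_node(i, nr_nodes);
		if (ret < 0 && ret != -ENOENT)
			break;	/* real error: propagate it */
		/*
		 * Reset, as the fix above does: otherwise the tolerated
		 * -ENOENT from the final iteration becomes our return
		 * value even though nothing actually went wrong.
		 */
		ret = 0;
	}
	return ret;
}

int main(void)
{
	/* Prints 0; without the reset it would print -2 (-ENOENT). */
	printf("check_shared() = %d\n", check_shared(3));
	return 0;
}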
@@ -8829,6 +8829,24 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
 		goto again;
 	}
 
+	/*
+	 * if we are changing raid levels, try to allocate a corresponding
+	 * block group with the new raid level.
+	 */
+	alloc_flags = update_block_group_flags(root, cache->flags);
+	if (alloc_flags != cache->flags) {
+		ret = do_chunk_alloc(trans, root, alloc_flags,
+				     CHUNK_ALLOC_FORCE);
+		/*
+		 * ENOSPC is allowed here, we may have enough space
+		 * already allocated at the new raid level to
+		 * carry on
+		 */
+		if (ret == -ENOSPC)
+			ret = 0;
+		if (ret < 0)
+			goto out;
+	}
 	ret = set_block_group_ro(cache, 0);
 	if (!ret)
@@ -8842,7 +8860,9 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
 out:
 	if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
 		alloc_flags = update_block_group_flags(root, cache->flags);
+		lock_chunks(root->fs_info->chunk_root);
 		check_system_chunk(trans, root, alloc_flags);
+		unlock_chunks(root->fs_info->chunk_root);
 	}
 	mutex_unlock(&root->fs_info->ro_block_group_mutex);
...
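Taking chunk_mutex (via lock_chunks()/unlock_chunks()) around check_system_chunk() closes the race Filipe tracked down: the free-space check and any resulting system chunk allocation must be atomic with respect to concurrent allocators, otherwise two tasks can each decide space is needed and double allocate. A minimal userspace pthread sketch of that check-then-allocate race (ensure_system_chunk and the counters are illustrative, not btrfs code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t chunk_mutex = PTHREAD_MUTEX_INITIALIZER;
static long free_system_space = 1;	/* room for exactly one more chunk */
static long chunks_allocated;

/* Rough analogue of check_system_chunk(): allocate only if space is low. */
static void *ensure_system_chunk(void *arg)
{
	pthread_mutex_lock(&chunk_mutex);	/* lock_chunks() analogue */
	if (free_system_space > 0) {
		/* check ... then allocate: must not interleave with others */
		free_system_space--;
		chunks_allocated++;
	}
	pthread_mutex_unlock(&chunk_mutex);	/* unlock_chunks() analogue */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, ensure_system_chunk, NULL);
	pthread_create(&b, NULL, ensure_system_chunk, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	/* With the lock: always 1. Without it: sometimes 2 (double alloc). */
	printf("chunks allocated: %ld\n", chunks_allocated);
	return 0;
}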
@@ -4625,6 +4625,7 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 {
 	u64 chunk_offset;
 
+	ASSERT(mutex_is_locked(&extent_root->fs_info->chunk_mutex));
 	chunk_offset = find_next_chunk(extent_root->fs_info);
 	return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
 }
...
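The new ASSERT() documents and enforces the locking rule the previous hunk establishes: btrfs_alloc_chunk() must now be entered with chunk_mutex held, so find_next_chunk() and the allocation itself are serialized. Userspace pthreads have no mutex_is_locked(), but an owner-tracking wrapper lets a callee make the same assertion; a hedged sketch, where owned_mutex, chunk_lock() and alloc_chunk() are invented for illustration:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct owned_mutex {
	pthread_mutex_t lock;
	pthread_t owner;
	int locked;
};

static struct owned_mutex chunk_mutex = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

static void chunk_lock(struct owned_mutex *m)
{
	pthread_mutex_lock(&m->lock);
	m->owner = pthread_self();
	m->locked = 1;
}

static void chunk_unlock(struct owned_mutex *m)
{
	m->locked = 0;
	pthread_mutex_unlock(&m->lock);
}

/* The callee enforces the locking rule instead of silently relying on it. */
static void alloc_chunk(void)
{
	assert(chunk_mutex.locked &&
	       pthread_equal(chunk_mutex.owner, pthread_self()));
	printf("allocating chunk with lock held\n");
}

int main(void)
{
	chunk_lock(&chunk_mutex);
	alloc_chunk();		/* OK: lock held by this thread */
	chunk_unlock(&chunk_mutex);
	return 0;
}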