Commit a94794d5 authored by Naohiro Aota, committed by David Sterba

btrfs: zoned: calculate allocation offset for conventional zones

Conventional zones do not have a write pointer, so the write pointer
cannot be used to determine the allocation offset for sequential
allocation when a block group contains a conventional zone.

Instead, we can use the end of the highest addressed extent in the
block group as the allocation offset.
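
The idea reduces to scanning for the highest extent end inside the block
group. A minimal userspace sketch of that calculation (the struct extent
type, the alloc_offset() helper, and the extent array are hypothetical
illustrations; the real patch walks the extent tree instead):

#include <stdint.h>
#include <stdio.h>

struct extent { uint64_t start; uint64_t len; };	/* hypothetical */

/*
 * Sketch: the allocation offset of a conventional-zone block group is
 * the end of its highest addressed extent, relative to the block group
 * start. No extents (e.g. a brand new block group) means offset 0.
 */
static uint64_t alloc_offset(uint64_t bg_start, uint64_t bg_len,
			     const struct extent *ext, int nr)
{
	uint64_t end = bg_start;	/* no extents -> offset 0 */

	for (int i = 0; i < nr; i++) {
		uint64_t e = ext[i].start + ext[i].len;

		if (ext[i].start >= bg_start &&
		    e <= bg_start + bg_len && e > end)
			end = e;
	}
	return end - bg_start;
}

int main(void)
{
	const struct extent ext[] = { { 4096, 8192 }, { 65536, 4096 } };

	/* Highest extent ends at 69632, so the offset is 69632. */
	printf("%llu\n",
	       (unsigned long long)alloc_offset(0, 1 << 20, ext, 2));
	return 0;
}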

For a new block group, we cannot calculate the allocation offset by
consulting the extent tree: doing so takes an extent buffer lock after
the chunk mutex, which is already held in btrfs_make_block_group(), and
that lock ordering can deadlock. Since the block group is new anyway,
we can simply set the allocation offset to 0.
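
The deadlock in question is a plain ABBA lock inversion. A generic
pthread illustration of the hazard (the mutex names only mirror the
btrfs locks described above; this is not kernel code):

#include <pthread.h>

static pthread_mutex_t chunk_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t extent_buffer_lock = PTHREAD_MUTEX_INITIALIZER;

/* Loading an existing block group: the extent tree search takes extent
 * buffer locks first, so the established order is
 * extent buffer lock -> chunk mutex. */
static void load_existing_bg(void)
{
	pthread_mutex_lock(&extent_buffer_lock);
	pthread_mutex_lock(&chunk_mutex);
	pthread_mutex_unlock(&chunk_mutex);
	pthread_mutex_unlock(&extent_buffer_lock);
}

/* btrfs_make_block_group() already holds the chunk mutex; consulting
 * the extent tree from here would take an extent buffer lock second,
 * the reverse order. Two threads running these paths concurrently can
 * deadlock, hence the new-BG shortcut in the patch. */
static void make_new_bg(void)
{
	pthread_mutex_lock(&chunk_mutex);
	pthread_mutex_lock(&extent_buffer_lock);	/* inversion */
	pthread_mutex_unlock(&extent_buffer_lock);
	pthread_mutex_unlock(&chunk_mutex);
}

int main(void)
{
	/* Run sequentially so this example terminates; the hazard only
	 * exists when both paths run concurrently. */
	load_existing_bg();
	make_new_bg();
	return 0;
}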
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: Anand Jain <anand.jain@oracle.com>
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 08e11a3d
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -1856,7 +1856,7 @@ static int read_one_block_group(struct btrfs_fs_info *info,
 		goto error;
 	}
 
-	ret = btrfs_load_block_group_zone_info(cache);
+	ret = btrfs_load_block_group_zone_info(cache, false);
 	if (ret) {
 		btrfs_err(info, "zoned: failed to load zone info of bg %llu",
 			  cache->start);
@@ -2150,7 +2150,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
 	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
 		cache->needs_free_space = 1;
 
-	ret = btrfs_load_block_group_zone_info(cache);
+	ret = btrfs_load_block_group_zone_info(cache, true);
 	if (ret) {
 		btrfs_put_block_group(cache);
 		return ret;
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -927,7 +927,68 @@ int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
 	return 0;
 }
 
-int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache)
+/*
+ * Calculate an allocation pointer from the extent allocation information
+ * for a block group consisting of conventional zones. It points to the
+ * end of the highest addressed extent in the block group as an allocation
+ * offset.
+ */
+static int calculate_alloc_pointer(struct btrfs_block_group *cache,
+				   u64 *offset_ret)
+{
+	struct btrfs_fs_info *fs_info = cache->fs_info;
+	struct btrfs_root *root = fs_info->extent_root;
+	struct btrfs_path *path;
+	struct btrfs_key key;
+	struct btrfs_key found_key;
+	int ret;
+	u64 length;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	key.objectid = cache->start + cache->length;
+	key.type = 0;
+	key.offset = 0;
+
+	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+	/* We should not find the exact match */
+	if (!ret)
+		ret = -EUCLEAN;
+	if (ret < 0)
+		goto out;
+
+	ret = btrfs_previous_extent_item(root, path, cache->start);
+	if (ret) {
+		if (ret == 1) {
+			ret = 0;
+			*offset_ret = 0;
+		}
+		goto out;
+	}
+
+	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
+
+	if (found_key.type == BTRFS_EXTENT_ITEM_KEY)
+		length = found_key.offset;
+	else
+		length = fs_info->nodesize;
+
+	if (!(found_key.objectid >= cache->start &&
+	      found_key.objectid + length <= cache->start + cache->length)) {
+		ret = -EUCLEAN;
+		goto out;
+	}
+	*offset_ret = found_key.objectid + length - cache->start;
+	ret = 0;
+
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
+int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 {
 	struct btrfs_fs_info *fs_info = cache->fs_info;
 	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
@@ -941,6 +1002,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache)
 	int i;
 	unsigned int nofs_flag;
 	u64 *alloc_offsets = NULL;
+	u64 last_alloc = 0;
 	u32 num_sequential = 0, num_conventional = 0;
 
 	if (!btrfs_is_zoned(fs_info))
@@ -1040,11 +1102,30 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache)
 
 	if (num_conventional > 0) {
 		/*
-		 * Since conventional zones do not have a write pointer, we
-		 * cannot determine alloc_offset from the pointer
+		 * Avoid calling calculate_alloc_pointer() for a new BG. It
+		 * is of no use for a new BG; it must always be 0.
+		 *
+		 * Also, we have a lock chain of extent buffer lock ->
+		 * chunk mutex. For a new BG, this function is called from
+		 * btrfs_make_block_group() which is already taking the
+		 * chunk mutex. Thus, we cannot call
+		 * calculate_alloc_pointer() which takes extent buffer
+		 * locks to avoid deadlock.
 		 */
-		ret = -EINVAL;
-		goto out;
+		if (new) {
+			cache->alloc_offset = 0;
+			goto out;
+		}
+		ret = calculate_alloc_pointer(cache, &last_alloc);
+		if (ret || map->num_stripes == num_conventional) {
+			if (!ret)
+				cache->alloc_offset = last_alloc;
+			else
+				btrfs_err(fs_info,
+			"zoned: failed to determine allocation offset of bg %llu",
+					  cache->start);
+			goto out;
+		}
 	}
 
 	switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
@@ -1066,6 +1147,14 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache)
 	}
 
 out:
+	/* An extent is allocated after the write pointer */
+	if (!ret && num_conventional && last_alloc > cache->alloc_offset) {
+		btrfs_err(fs_info,
+			"zoned: got wrong write pointer in BG %llu: %llu > %llu",
+			  logical, last_alloc, cache->alloc_offset);
+		ret = -EIO;
+	}
+
 	kfree(alloc_offsets);
 	free_extent_map(em);
--- a/fs/btrfs/zoned.h
+++ b/fs/btrfs/zoned.h
@@ -41,7 +41,7 @@ u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
 int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
 			    u64 length, u64 *bytes);
 int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size);
-int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache);
+int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new);
 #else /* CONFIG_BLK_DEV_ZONED */
 static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
 				     struct blk_zone *zone)
@@ -118,7 +118,7 @@ static inline int btrfs_ensure_empty_zones(struct btrfs_device *device,
 }
 
 static inline int btrfs_load_block_group_zone_info(
-		struct btrfs_block_group *cache)
+		struct btrfs_block_group *cache, bool new)
 {
 	return 0;
 }