Commit bf38be65 authored by David Sterba's avatar David Sterba

btrfs: move block_group_item::used to block group

For unknown reasons, the member 'used' in the block group struct is
stored in the b-tree item and accessed everywhere using the special
accessor helper. Let's unify it and make it a regular member and only
update the item before writing it to the tree.

The item is still being used for flags and chunk_objectid, there's some
duplication until the item is removed in following patches.
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 34b127ae
...@@ -656,8 +656,7 @@ static noinline void caching_thread(struct btrfs_work *work) ...@@ -656,8 +656,7 @@ static noinline void caching_thread(struct btrfs_work *work)
spin_lock(&block_group->space_info->lock); spin_lock(&block_group->space_info->lock);
spin_lock(&block_group->lock); spin_lock(&block_group->lock);
bytes_used = block_group->key.offset - bytes_used = block_group->key.offset - block_group->used;
btrfs_block_group_used(&block_group->item);
block_group->space_info->bytes_used += bytes_used >> 1; block_group->space_info->bytes_used += bytes_used >> 1;
spin_unlock(&block_group->lock); spin_unlock(&block_group->lock);
spin_unlock(&block_group->space_info->lock); spin_unlock(&block_group->space_info->lock);
...@@ -762,8 +761,7 @@ int btrfs_cache_block_group(struct btrfs_block_group_cache *cache, ...@@ -762,8 +761,7 @@ int btrfs_cache_block_group(struct btrfs_block_group_cache *cache,
spin_lock(&cache->space_info->lock); spin_lock(&cache->space_info->lock);
spin_lock(&cache->lock); spin_lock(&cache->lock);
bytes_used = cache->key.offset - bytes_used = cache->key.offset - cache->used;
btrfs_block_group_used(&cache->item);
cache->space_info->bytes_used += bytes_used >> 1; cache->space_info->bytes_used += bytes_used >> 1;
spin_unlock(&cache->lock); spin_unlock(&cache->lock);
spin_unlock(&cache->space_info->lock); spin_unlock(&cache->space_info->lock);
...@@ -1209,7 +1207,7 @@ static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force) ...@@ -1209,7 +1207,7 @@ static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
} }
num_bytes = cache->key.offset - cache->reserved - cache->pinned - num_bytes = cache->key.offset - cache->reserved - cache->pinned -
cache->bytes_super - btrfs_block_group_used(&cache->item); cache->bytes_super - cache->used;
sinfo_used = btrfs_space_info_used(sinfo, true); sinfo_used = btrfs_space_info_used(sinfo, true);
/* /*
...@@ -1278,8 +1276,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) ...@@ -1278,8 +1276,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
down_write(&space_info->groups_sem); down_write(&space_info->groups_sem);
spin_lock(&block_group->lock); spin_lock(&block_group->lock);
if (block_group->reserved || block_group->pinned || if (block_group->reserved || block_group->pinned ||
btrfs_block_group_used(&block_group->item) || block_group->used || block_group->ro ||
block_group->ro ||
list_is_singular(&block_group->list)) { list_is_singular(&block_group->list)) {
/* /*
* We want to bail if we made new allocations or have * We want to bail if we made new allocations or have
...@@ -1719,6 +1716,8 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) ...@@ -1719,6 +1716,8 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
need_clear = 1; need_clear = 1;
while (1) { while (1) {
struct btrfs_block_group_item bgi;
ret = find_first_block_group(info, path, &key); ret = find_first_block_group(info, path, &key);
if (ret > 0) if (ret > 0)
break; break;
...@@ -1750,9 +1749,12 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) ...@@ -1750,9 +1749,12 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
cache->disk_cache_state = BTRFS_DC_CLEAR; cache->disk_cache_state = BTRFS_DC_CLEAR;
} }
read_extent_buffer(leaf, &cache->item, read_extent_buffer(leaf, &bgi,
btrfs_item_ptr_offset(leaf, path->slots[0]), btrfs_item_ptr_offset(leaf, path->slots[0]),
sizeof(cache->item)); sizeof(bgi));
/* Duplicate as the item is still partially used */
memcpy(&cache->item, &bgi, sizeof(bgi));
cache->used = btrfs_block_group_used(&bgi);
cache->flags = btrfs_block_group_flags(&cache->item); cache->flags = btrfs_block_group_flags(&cache->item);
if (!mixed && if (!mixed &&
((cache->flags & BTRFS_BLOCK_GROUP_METADATA) && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
...@@ -1791,11 +1793,11 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) ...@@ -1791,11 +1793,11 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
* the space in and be done with it. This saves us _a_lot_ of * the space in and be done with it. This saves us _a_lot_ of
* time, particularly in the full case. * time, particularly in the full case.
*/ */
if (found_key.offset == btrfs_block_group_used(&cache->item)) { if (found_key.offset == cache->used) {
cache->last_byte_to_unpin = (u64)-1; cache->last_byte_to_unpin = (u64)-1;
cache->cached = BTRFS_CACHE_FINISHED; cache->cached = BTRFS_CACHE_FINISHED;
btrfs_free_excluded_extents(cache); btrfs_free_excluded_extents(cache);
} else if (btrfs_block_group_used(&cache->item) == 0) { } else if (cache->used == 0) {
cache->last_byte_to_unpin = (u64)-1; cache->last_byte_to_unpin = (u64)-1;
cache->cached = BTRFS_CACHE_FINISHED; cache->cached = BTRFS_CACHE_FINISHED;
add_new_free_space(cache, found_key.objectid, add_new_free_space(cache, found_key.objectid,
...@@ -1813,7 +1815,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) ...@@ -1813,7 +1815,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
trace_btrfs_add_block_group(info, cache, 0); trace_btrfs_add_block_group(info, cache, 0);
btrfs_update_space_info(info, cache->flags, found_key.offset, btrfs_update_space_info(info, cache->flags, found_key.offset,
btrfs_block_group_used(&cache->item), cache->used,
cache->bytes_super, &space_info); cache->bytes_super, &space_info);
cache->space_info = space_info; cache->space_info = space_info;
...@@ -1823,7 +1825,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) ...@@ -1823,7 +1825,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
set_avail_alloc_bits(info, cache->flags); set_avail_alloc_bits(info, cache->flags);
if (btrfs_chunk_readonly(info, cache->key.objectid)) { if (btrfs_chunk_readonly(info, cache->key.objectid)) {
inc_block_group_ro(cache, 1); inc_block_group_ro(cache, 1);
} else if (btrfs_block_group_used(&cache->item) == 0) { } else if (cache->used == 0) {
ASSERT(list_empty(&cache->bg_list)); ASSERT(list_empty(&cache->bg_list));
btrfs_mark_bg_unused(cache); btrfs_mark_bg_unused(cache);
} }
...@@ -1877,7 +1879,12 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) ...@@ -1877,7 +1879,12 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
goto next; goto next;
spin_lock(&block_group->lock); spin_lock(&block_group->lock);
/*
* Copy partially filled item from the cache and overwrite used
* that has the correct value
*/
memcpy(&item, &block_group->item, sizeof(item)); memcpy(&item, &block_group->item, sizeof(item));
btrfs_set_block_group_used(&item, block_group->used);
memcpy(&key, &block_group->key, sizeof(key)); memcpy(&key, &block_group->key, sizeof(key));
spin_unlock(&block_group->lock); spin_unlock(&block_group->lock);
...@@ -1910,7 +1917,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, ...@@ -1910,7 +1917,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
if (!cache) if (!cache)
return -ENOMEM; return -ENOMEM;
btrfs_set_block_group_used(&cache->item, bytes_used); cache->used = bytes_used;
btrfs_set_block_group_chunk_objectid(&cache->item, btrfs_set_block_group_chunk_objectid(&cache->item,
BTRFS_FIRST_CHUNK_TREE_OBJECTID); BTRFS_FIRST_CHUNK_TREE_OBJECTID);
btrfs_set_block_group_flags(&cache->item, type); btrfs_set_block_group_flags(&cache->item, type);
...@@ -2102,8 +2109,7 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache) ...@@ -2102,8 +2109,7 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache)
spin_lock(&cache->lock); spin_lock(&cache->lock);
if (!--cache->ro) { if (!--cache->ro) {
num_bytes = cache->key.offset - cache->reserved - num_bytes = cache->key.offset - cache->reserved -
cache->pinned - cache->bytes_super - cache->pinned - cache->bytes_super - cache->used;
btrfs_block_group_used(&cache->item);
sinfo->bytes_readonly -= num_bytes; sinfo->bytes_readonly -= num_bytes;
list_del_init(&cache->ro_list); list_del_init(&cache->ro_list);
} }
...@@ -2120,6 +2126,7 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans, ...@@ -2120,6 +2126,7 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root = fs_info->extent_root; struct btrfs_root *extent_root = fs_info->extent_root;
unsigned long bi; unsigned long bi;
struct extent_buffer *leaf; struct extent_buffer *leaf;
struct btrfs_block_group_item bgi;
ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1); ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
if (ret) { if (ret) {
...@@ -2130,7 +2137,10 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans, ...@@ -2130,7 +2137,10 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
leaf = path->nodes[0]; leaf = path->nodes[0];
bi = btrfs_item_ptr_offset(leaf, path->slots[0]); bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item)); /* Partial copy of item, update the rest from memory */
memcpy(&bgi, &cache->item, sizeof(bgi));
btrfs_set_block_group_used(&bgi, cache->used);
write_extent_buffer(leaf, &bgi, bi, sizeof(bgi));
btrfs_mark_buffer_dirty(leaf); btrfs_mark_buffer_dirty(leaf);
fail: fail:
btrfs_release_path(path); btrfs_release_path(path);
...@@ -2674,11 +2684,11 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans, ...@@ -2674,11 +2684,11 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
cache->disk_cache_state < BTRFS_DC_CLEAR) cache->disk_cache_state < BTRFS_DC_CLEAR)
cache->disk_cache_state = BTRFS_DC_CLEAR; cache->disk_cache_state = BTRFS_DC_CLEAR;
old_val = btrfs_block_group_used(&cache->item); old_val = cache->used;
num_bytes = min(total, cache->key.offset - byte_in_group); num_bytes = min(total, cache->key.offset - byte_in_group);
if (alloc) { if (alloc) {
old_val += num_bytes; old_val += num_bytes;
btrfs_set_block_group_used(&cache->item, old_val); cache->used = old_val;
cache->reserved -= num_bytes; cache->reserved -= num_bytes;
cache->space_info->bytes_reserved -= num_bytes; cache->space_info->bytes_reserved -= num_bytes;
cache->space_info->bytes_used += num_bytes; cache->space_info->bytes_used += num_bytes;
...@@ -2687,7 +2697,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans, ...@@ -2687,7 +2697,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
spin_unlock(&cache->space_info->lock); spin_unlock(&cache->space_info->lock);
} else { } else {
old_val -= num_bytes; old_val -= num_bytes;
btrfs_set_block_group_used(&cache->item, old_val); cache->used = old_val;
cache->pinned += num_bytes; cache->pinned += num_bytes;
btrfs_space_info_update_bytes_pinned(info, btrfs_space_info_update_bytes_pinned(info,
cache->space_info, num_bytes); cache->space_info, num_bytes);
......
...@@ -50,6 +50,7 @@ struct btrfs_block_group_cache { ...@@ -50,6 +50,7 @@ struct btrfs_block_group_cache {
spinlock_t lock; spinlock_t lock;
u64 pinned; u64 pinned;
u64 reserved; u64 reserved;
u64 used;
u64 delalloc_bytes; u64 delalloc_bytes;
u64 bytes_super; u64 bytes_super;
u64 flags; u64 flags;
......
...@@ -5498,8 +5498,7 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo) ...@@ -5498,8 +5498,7 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
factor = btrfs_bg_type_to_factor(block_group->flags); factor = btrfs_bg_type_to_factor(block_group->flags);
free_bytes += (block_group->key.offset - free_bytes += (block_group->key.offset -
btrfs_block_group_used(&block_group->item)) * block_group->used) * factor;
factor;
spin_unlock(&block_group->lock); spin_unlock(&block_group->lock);
} }
......
...@@ -828,7 +828,7 @@ int load_free_space_cache(struct btrfs_block_group_cache *block_group) ...@@ -828,7 +828,7 @@ int load_free_space_cache(struct btrfs_block_group_cache *block_group)
struct btrfs_path *path; struct btrfs_path *path;
int ret = 0; int ret = 0;
bool matched; bool matched;
u64 used = btrfs_block_group_used(&block_group->item); u64 used = block_group->used;
/* /*
* If this block group has been marked to be cleared for one reason or * If this block group has been marked to be cleared for one reason or
......
...@@ -4039,8 +4039,7 @@ static void get_block_group_info(struct list_head *groups_list, ...@@ -4039,8 +4039,7 @@ static void get_block_group_info(struct list_head *groups_list,
list_for_each_entry(block_group, groups_list, list) { list_for_each_entry(block_group, groups_list, list) {
space->flags = block_group->flags; space->flags = block_group->flags;
space->total_bytes += block_group->key.offset; space->total_bytes += block_group->key.offset;
space->used_bytes += space->used_bytes += block_group->used;
btrfs_block_group_used(&block_group->item);
} }
} }
......
...@@ -4405,7 +4405,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start) ...@@ -4405,7 +4405,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
WARN_ON(rc->block_group->pinned > 0); WARN_ON(rc->block_group->pinned > 0);
WARN_ON(rc->block_group->reserved > 0); WARN_ON(rc->block_group->reserved > 0);
WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0); WARN_ON(rc->block_group->used > 0);
out: out:
if (err && rw) if (err && rw)
btrfs_dec_block_group_ro(rc->block_group); btrfs_dec_block_group_ro(rc->block_group);
......
...@@ -3678,7 +3678,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, ...@@ -3678,7 +3678,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
*/ */
spin_lock(&cache->lock); spin_lock(&cache->lock);
if (!cache->removed && !cache->ro && cache->reserved == 0 && if (!cache->removed && !cache->ro && cache->reserved == 0 &&
btrfs_block_group_used(&cache->item) == 0) { cache->used == 0) {
spin_unlock(&cache->lock); spin_unlock(&cache->lock);
btrfs_mark_bg_unused(cache); btrfs_mark_bg_unused(cache);
} else { } else {
......
...@@ -302,7 +302,7 @@ void btrfs_dump_space_info(struct btrfs_fs_info *fs_info, ...@@ -302,7 +302,7 @@ void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
btrfs_info(fs_info, btrfs_info(fs_info,
"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s", "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s",
cache->key.objectid, cache->key.offset, cache->key.objectid, cache->key.offset,
btrfs_block_group_used(&cache->item), cache->pinned, cache->used, cache->pinned,
cache->reserved, cache->ro ? "[readonly]" : ""); cache->reserved, cache->ro ? "[readonly]" : "");
btrfs_dump_free_space(cache, bytes); btrfs_dump_free_space(cache, bytes);
spin_unlock(&cache->lock); spin_unlock(&cache->lock);
......
...@@ -404,7 +404,7 @@ static ssize_t raid_bytes_show(struct kobject *kobj, ...@@ -404,7 +404,7 @@ static ssize_t raid_bytes_show(struct kobject *kobj,
if (&attr->attr == BTRFS_ATTR_PTR(raid, total_bytes)) if (&attr->attr == BTRFS_ATTR_PTR(raid, total_bytes))
val += block_group->key.offset; val += block_group->key.offset;
else else
val += btrfs_block_group_used(&block_group->item); val += block_group->used;
} }
up_read(&sinfo->groups_sem); up_read(&sinfo->groups_sem);
return snprintf(buf, PAGE_SIZE, "%llu\n", val); return snprintf(buf, PAGE_SIZE, "%llu\n", val);
......
...@@ -3189,7 +3189,7 @@ static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_off ...@@ -3189,7 +3189,7 @@ static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_off
int ret = 1; int ret = 1;
cache = btrfs_lookup_block_group(fs_info, chunk_offset); cache = btrfs_lookup_block_group(fs_info, chunk_offset);
chunk_used = btrfs_block_group_used(&cache->item); chunk_used = cache->used;
if (bargs->usage_min == 0) if (bargs->usage_min == 0)
user_thresh_min = 0; user_thresh_min = 0;
...@@ -3220,7 +3220,7 @@ static int chunk_usage_filter(struct btrfs_fs_info *fs_info, ...@@ -3220,7 +3220,7 @@ static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
int ret = 1; int ret = 1;
cache = btrfs_lookup_block_group(fs_info, chunk_offset); cache = btrfs_lookup_block_group(fs_info, chunk_offset);
chunk_used = btrfs_block_group_used(&cache->item); chunk_used = cache->used;
if (bargs->usage_min == 0) if (bargs->usage_min == 0)
user_thresh = 1; user_thresh = 1;
......
...@@ -716,8 +716,7 @@ TRACE_EVENT(btrfs_add_block_group, ...@@ -716,8 +716,7 @@ TRACE_EVENT(btrfs_add_block_group,
__entry->offset = block_group->key.objectid; __entry->offset = block_group->key.objectid;
__entry->size = block_group->key.offset; __entry->size = block_group->key.offset;
__entry->flags = block_group->flags; __entry->flags = block_group->flags;
__entry->bytes_used = __entry->bytes_used = block_group->used;
btrfs_block_group_used(&block_group->item);
__entry->bytes_super = block_group->bytes_super; __entry->bytes_super = block_group->bytes_super;
__entry->create = create; __entry->create = create;
), ),
...@@ -1859,7 +1858,7 @@ DECLARE_EVENT_CLASS(btrfs__block_group, ...@@ -1859,7 +1858,7 @@ DECLARE_EVENT_CLASS(btrfs__block_group,
TP_fast_assign_btrfs(bg_cache->fs_info, TP_fast_assign_btrfs(bg_cache->fs_info,
__entry->bytenr = bg_cache->key.objectid, __entry->bytenr = bg_cache->key.objectid,
__entry->len = bg_cache->key.offset, __entry->len = bg_cache->key.offset,
__entry->used = btrfs_block_group_used(&bg_cache->item); __entry->used = bg_cache->used;
__entry->flags = bg_cache->flags; __entry->flags = bg_cache->flags;
), ),
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment