Commit 138a12d8 authored by Josef Bacik, committed by David Sterba

btrfs: rip out btrfs_space_info::total_bytes_pinned

We used this in may_commit_transaction() to determine if we needed to
commit the transaction.  However we no longer have that logic and thus
have no use for this counter anymore, so delete it.
Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 3ffad696
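
For context before the diff: total_bytes_pinned was a batched percpu counter on struct btrfs_space_info, maintained through the __btrfs_mod_total_bytes_pinned()/btrfs_mod_total_bytes_pinned() helpers removed in the space-info.h hunk below. Apart from the sysfs file also deleted here, its consumer was the already-removed may_commit_transaction(), which roughly compared the counter against the bytes it needed before forcing a transaction commit. Below is a minimal illustrative sketch of that consumer pattern; the helper name and the exact check are assumptions for illustration, not the removed code.

#include <linux/percpu_counter.h>

/*
 * Illustrative sketch only: commit_would_free_enough() is a made-up name
 * approximating how the previously removed may_commit_transaction()
 * consumed the counter.  It assumes the btrfs-internal struct
 * btrfs_space_info with the total_bytes_pinned member shown in the
 * space-info.h hunk below.
 */
static bool commit_would_free_enough(struct btrfs_space_info *space_info,
				     u64 bytes_needed)
{
	/*
	 * percpu_counter_compare() only folds the per-CPU deltas into an
	 * exact sum when the approximate count is close to bytes_needed,
	 * so the common case stays cheap.
	 */
	return percpu_counter_compare(&space_info->total_bytes_pinned,
				      bytes_needed) >= 0;
}

With that caller gone, nothing meaningful reads the counter anymore, which is why every update site in the hunks below can simply be deleted.
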
@@ -1399,7 +1399,6 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
 		btrfs_space_info_update_bytes_pinned(fs_info, space_info,
 						     -block_group->pinned);
 		space_info->bytes_readonly += block_group->pinned;
-		__btrfs_mod_total_bytes_pinned(space_info, -block_group->pinned);
 		block_group->pinned = 0;
 		spin_unlock(&block_group->lock);
@@ -3068,8 +3067,6 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
 			spin_unlock(&cache->lock);
 			spin_unlock(&cache->space_info->lock);
-			__btrfs_mod_total_bytes_pinned(cache->space_info,
-						       num_bytes);
 			set_extent_dirty(&trans->transaction->pinned_extents,
 					 bytenr, bytenr + num_bytes - 1,
 					 GFP_NOFS | __GFP_NOFAIL);
...
@@ -641,7 +641,6 @@ static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
 	struct btrfs_delayed_ref_root *delayed_refs =
 		&trans->transaction->delayed_refs;
 	struct btrfs_fs_info *fs_info = trans->fs_info;
-	u64 flags = btrfs_ref_head_to_space_flags(existing);
 	int old_ref_mod;
 	BUG_ON(existing->is_data != update->is_data);
@@ -711,26 +710,6 @@ static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
 		}
 	}
-	/*
-	 * This handles the following conditions:
-	 *
-	 * 1. We had a ref mod of 0 or more and went negative, indicating that
-	 *    we may be freeing space, so add our space to the
-	 *    total_bytes_pinned counter.
-	 * 2. We were negative and went to 0 or positive, so no longer can say
-	 *    that the space would be pinned, decrement our counter from the
-	 *    total_bytes_pinned counter.
-	 * 3. We are now at 0 and have ->must_insert_reserved set, which means
-	 *    this was a new allocation and then we dropped it, and thus must
-	 *    add our space to the total_bytes_pinned counter.
-	 */
-	if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
-		btrfs_mod_total_bytes_pinned(fs_info, flags, existing->num_bytes);
-	else if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
-		btrfs_mod_total_bytes_pinned(fs_info, flags, -existing->num_bytes);
-	else if (existing->total_ref_mod == 0 && existing->must_insert_reserved)
-		btrfs_mod_total_bytes_pinned(fs_info, flags, existing->num_bytes);
 	spin_unlock(&existing->lock);
 }
@@ -835,17 +814,12 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
 		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
 		head_ref = existing;
 	} else {
-		u64 flags = btrfs_ref_head_to_space_flags(head_ref);
 		if (head_ref->is_data && head_ref->ref_mod < 0) {
 			delayed_refs->pending_csums += head_ref->num_bytes;
 			trans->delayed_ref_updates +=
 				btrfs_csum_bytes_to_leaves(trans->fs_info,
 							   head_ref->num_bytes);
 		}
-		if (head_ref->ref_mod < 0)
-			btrfs_mod_total_bytes_pinned(trans->fs_info, flags,
-						     head_ref->num_bytes);
 		delayed_refs->num_heads++;
 		delayed_refs->num_heads_ready++;
 		atomic_inc(&delayed_refs->num_entries);
...
@@ -4680,9 +4680,6 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
 			cache->space_info->bytes_reserved -= head->num_bytes;
 			spin_unlock(&cache->lock);
 			spin_unlock(&cache->space_info->lock);
-			percpu_counter_add_batch(
-				&cache->space_info->total_bytes_pinned,
-				head->num_bytes, BTRFS_TOTAL_BYTES_PINNED_BATCH);
 			btrfs_put_block_group(cache);
...
@@ -1804,19 +1804,6 @@ void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
 		nr_items += btrfs_csum_bytes_to_leaves(fs_info, head->num_bytes);
 	}
-	/*
-	 * We were dropping refs, or had a new ref and dropped it, and thus must
-	 * adjust down our total_bytes_pinned, the space may or may not have
-	 * been pinned and so is accounted for properly in the pinned space by
-	 * now.
-	 */
-	if (head->total_ref_mod < 0 ||
-	    (head->total_ref_mod == 0 && head->must_insert_reserved)) {
-		u64 flags = btrfs_ref_head_to_space_flags(head);
-		btrfs_mod_total_bytes_pinned(fs_info, flags, -head->num_bytes);
-	}
 	btrfs_delayed_refs_rsv_release(fs_info, nr_items);
 }
@@ -2551,7 +2538,6 @@ static int pin_down_extent(struct btrfs_trans_handle *trans,
 	spin_unlock(&cache->lock);
 	spin_unlock(&cache->space_info->lock);
-	__btrfs_mod_total_bytes_pinned(cache->space_info, num_bytes);
 	set_extent_dirty(&trans->transaction->pinned_extents, bytenr,
 			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
 	return 0;
@@ -2762,7 +2748,6 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
 		cache->pinned -= len;
 		btrfs_space_info_update_bytes_pinned(fs_info, space_info, -len);
 		space_info->max_extent_size = 0;
-		__btrfs_mod_total_bytes_pinned(space_info, -len);
 		if (cache->ro) {
 			space_info->bytes_readonly += len;
 			readonly = true;
...
@@ -192,13 +192,6 @@ static int create_space_info(struct btrfs_fs_info *info, u64 flags)
 	if (!space_info)
 		return -ENOMEM;
-	ret = percpu_counter_init(&space_info->total_bytes_pinned, 0,
-				  GFP_KERNEL);
-	if (ret) {
-		kfree(space_info);
-		return ret;
-	}
 	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
 		INIT_LIST_HEAD(&space_info->block_groups[i]);
 	init_rwsem(&space_info->groups_sem);
...
@@ -43,18 +43,6 @@ struct btrfs_space_info {
 	u64 flags;
-	/*
-	 * bytes_pinned is kept in line with what is actually pinned, as in
-	 * we've called update_block_group and dropped the bytes_used counter
-	 * and increased the bytes_pinned counter. However this means that
-	 * bytes_pinned does not reflect the bytes that will be pinned once the
-	 * delayed refs are flushed, so this counter is inc'ed every time we
-	 * call btrfs_free_extent so it is a realtime count of what will be
-	 * freed once the transaction is committed. It will be zeroed every
-	 * time the transaction commits.
-	 */
-	struct percpu_counter total_bytes_pinned;
 	struct list_head list;
 	/* Protected by the spinlock 'lock'. */
 	struct list_head ro_bgs;
@@ -157,22 +145,4 @@ static inline void btrfs_space_info_free_bytes_may_use(
 }
 int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
 			     enum btrfs_reserve_flush_enum flush);
-static inline void __btrfs_mod_total_bytes_pinned(
-					struct btrfs_space_info *space_info,
-					s64 mod)
-{
-	percpu_counter_add_batch(&space_info->total_bytes_pinned, mod,
-				 BTRFS_TOTAL_BYTES_PINNED_BATCH);
-}
-static inline void btrfs_mod_total_bytes_pinned(struct btrfs_fs_info *fs_info,
-						u64 flags, s64 mod)
-{
-	struct btrfs_space_info *space_info = btrfs_find_space_info(fs_info, flags);
-	ASSERT(space_info);
-	__btrfs_mod_total_bytes_pinned(space_info, mod);
-}
 #endif /* BTRFS_SPACE_INFO_H */
@@ -665,15 +665,6 @@ static ssize_t btrfs_space_info_show_##field(struct kobject *kobj, \
 }									\
 BTRFS_ATTR(space_info, field, btrfs_space_info_show_##field)
-static ssize_t btrfs_space_info_show_total_bytes_pinned(struct kobject *kobj,
-						       struct kobj_attribute *a,
-						       char *buf)
-{
-	struct btrfs_space_info *sinfo = to_space_info(kobj);
-	s64 val = percpu_counter_sum(&sinfo->total_bytes_pinned);
-	return scnprintf(buf, PAGE_SIZE, "%lld\n", val);
-}
 SPACE_INFO_ATTR(flags);
 SPACE_INFO_ATTR(total_bytes);
 SPACE_INFO_ATTR(bytes_used);
@@ -684,8 +675,6 @@ SPACE_INFO_ATTR(bytes_readonly);
 SPACE_INFO_ATTR(bytes_zone_unusable);
 SPACE_INFO_ATTR(disk_used);
 SPACE_INFO_ATTR(disk_total);
-BTRFS_ATTR(space_info, total_bytes_pinned,
-	   btrfs_space_info_show_total_bytes_pinned);
 static struct attribute *space_info_attrs[] = {
 	BTRFS_ATTR_PTR(space_info, flags),
@@ -698,7 +687,6 @@ static struct attribute *space_info_attrs[] = {
 	BTRFS_ATTR_PTR(space_info, bytes_zone_unusable),
 	BTRFS_ATTR_PTR(space_info, disk_used),
 	BTRFS_ATTR_PTR(space_info, disk_total),
-	BTRFS_ATTR_PTR(space_info, total_bytes_pinned),
 	NULL,
 };
 ATTRIBUTE_GROUPS(space_info);
@@ -706,7 +694,6 @@ ATTRIBUTE_GROUPS(space_info);
 static void space_info_release(struct kobject *kobj)
 {
 	struct btrfs_space_info *sinfo = to_space_info(kobj);
-	percpu_counter_destroy(&sinfo->total_bytes_pinned);
 	kfree(sinfo);
 }
...