Commit 3cce39a8 authored by Junchao Sun, committed by David Sterba

btrfs: qgroup: use xarray to track dirty extents in transaction

Use xarray to track dirty extents to reduce the size of the struct
btrfs_qgroup_extent_record from 64 bytes to 40 bytes.  The xarray is
more cache-line friendly, and it also reduces the complexity of the
insertion and search code compared to the rb-tree.
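
For illustration only (not part of the patch), a minimal sketch of the
xarray insert/lookup/iterate pattern that replaces the rb-tree walk,
using a hypothetical demo_record type keyed by its bytenr:

	#include <linux/xarray.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	struct demo_record {
		unsigned long bytenr;	/* doubles as the xarray index */
		u64 num_bytes;
	};

	static DEFINE_XARRAY(dirty_extents);

	/* Insert: xa_store() returns the old entry or an xa_err() pointer. */
	static int demo_insert(struct demo_record *rec)
	{
		return xa_err(xa_store(&dirty_extents, rec->bytenr, rec, GFP_NOFS));
	}

	/* Lookup by index replaces the explicit rb-tree search loop. */
	static struct demo_record *demo_lookup(unsigned long bytenr)
	{
		return xa_load(&dirty_extents, bytenr);
	}

	/* Iterate in index order and drop every entry. */
	static void demo_drain(void)
	{
		struct demo_record *rec;
		unsigned long index;

		xa_for_each(&dirty_extents, index, rec) {
			xa_erase(&dirty_extents, index);
			kfree(rec);
		}
	}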

Another change concerns error handling.  Before this patch,
btrfs_qgroup_trace_extent_nolock() always succeeded.  With this patch it
calls xa_store(), which can fail, so on error the qgroup is marked
inconsistent and the preallocated memory is freed.  The memory is
preallocated before taking spin_lock(); if the preallocation fails, the
error handling is the same as in the existing code.
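
For illustration only (not part of the patch), a condensed sketch of that
preallocation pattern with hypothetical names: the slot is reserved with
GFP_NOFS while no lock is held, so the later __xa_store() under the
xarray lock can use GFP_ATOMIC and never needs to allocate:

	/* 'dirty' and demo_record are stand-ins, not the btrfs names. */
	static int demo_trace_extent(struct xarray *dirty, struct demo_record *rec)
	{
		void *ret;

		/* May sleep and allocate; done before any spinlock is taken. */
		if (xa_reserve(dirty, rec->bytenr, GFP_NOFS))
			return -ENOMEM;

		xa_lock(dirty);
		ret = __xa_store(dirty, rec->bytenr, rec, GFP_ATOMIC);
		xa_unlock(dirty);

		if (xa_is_err(ret)) {
			/* Insertion failed: give back the reserved slot. */
			xa_release(dirty, rec->bytenr);
			return xa_err(ret);
		}
		return 0;
	}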
Suggested-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: Junchao Sun <sunjunchao2870@gmail.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 14ed830d
@@ -855,11 +855,17 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
 	/* Record qgroup extent info if provided */
 	if (qrecord) {
-		if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
-					delayed_refs, qrecord))
+		int ret;
+
+		ret = btrfs_qgroup_trace_extent_nolock(trans->fs_info,
+						       delayed_refs, qrecord);
+		if (ret) {
+			/* Clean up if insertion fails or item exists. */
+			xa_release(&delayed_refs->dirty_extents, qrecord->bytenr);
 			kfree(qrecord);
-		else
+		} else {
 			qrecord_inserted = true;
+		}
 	}
 
 	trace_add_delayed_ref_head(trans->fs_info, head_ref, action);
@@ -1012,6 +1018,9 @@ static int add_delayed_ref(struct btrfs_trans_handle *trans,
 		record = kzalloc(sizeof(*record), GFP_NOFS);
 		if (!record)
 			goto free_head_ref;
+		if (xa_reserve(&trans->transaction->delayed_refs.dirty_extents,
+			       generic_ref->bytenr, GFP_NOFS))
+			goto free_record;
 	}
 
 	init_delayed_ref_common(fs_info, node, generic_ref);
@@ -1048,6 +1057,8 @@ static int add_delayed_ref(struct btrfs_trans_handle *trans,
 		return btrfs_qgroup_trace_extent_post(trans, record);
 	return 0;
 
+free_record:
+	kfree(record);
 free_head_ref:
 	kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
 free_node:
......
@@ -202,8 +202,8 @@ struct btrfs_delayed_ref_root {
 	/* head ref rbtree */
 	struct rb_root_cached href_root;
 
-	/* dirty extent records */
-	struct rb_root dirty_extent_root;
+	/* Track dirty extent records. */
+	struct xarray dirty_extents;
 
 	/* this spin lock protects the rbtree and the entries inside */
 	spinlock_t lock;
......
@@ -1998,16 +1998,14 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
  *
  * Return 0 for success insert
  * Return >0 for existing record, caller can free @record safely.
- * Error is not possible
+ * Return <0 for insertion failure, caller can free @record safely.
  */
 int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
 				struct btrfs_delayed_ref_root *delayed_refs,
 				struct btrfs_qgroup_extent_record *record)
 {
-	struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
-	struct rb_node *parent_node = NULL;
-	struct btrfs_qgroup_extent_record *entry;
-	u64 bytenr = record->bytenr;
+	struct btrfs_qgroup_extent_record *existing, *ret;
+	unsigned long bytenr = record->bytenr;
 
 	if (!btrfs_qgroup_full_accounting(fs_info))
 		return 1;
@@ -2015,26 +2013,24 @@ int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
 	lockdep_assert_held(&delayed_refs->lock);
 	trace_btrfs_qgroup_trace_extent(fs_info, record);
 
-	while (*p) {
-		parent_node = *p;
-		entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
-				 node);
-		if (bytenr < entry->bytenr) {
-			p = &(*p)->rb_left;
-		} else if (bytenr > entry->bytenr) {
-			p = &(*p)->rb_right;
-		} else {
-			if (record->data_rsv && !entry->data_rsv) {
-				entry->data_rsv = record->data_rsv;
-				entry->data_rsv_refroot =
-					record->data_rsv_refroot;
-			}
-			return 1;
+	xa_lock(&delayed_refs->dirty_extents);
+	existing = xa_load(&delayed_refs->dirty_extents, bytenr);
+	if (existing) {
+		if (record->data_rsv && !existing->data_rsv) {
+			existing->data_rsv = record->data_rsv;
+			existing->data_rsv_refroot = record->data_rsv_refroot;
 		}
+		xa_unlock(&delayed_refs->dirty_extents);
+		return 1;
+	}
+
+	ret = __xa_store(&delayed_refs->dirty_extents, record->bytenr, record, GFP_ATOMIC);
+	xa_unlock(&delayed_refs->dirty_extents);
+	if (xa_is_err(ret)) {
+		qgroup_mark_inconsistent(fs_info);
+		return xa_err(ret);
 	}
 
-	rb_link_node(&record->node, parent_node, p);
-	rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
 	return 0;
 }
@@ -2141,6 +2137,11 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
 	if (!record)
 		return -ENOMEM;
 
+	if (xa_reserve(&trans->transaction->delayed_refs.dirty_extents, bytenr, GFP_NOFS)) {
+		kfree(record);
+		return -ENOMEM;
+	}
+
 	delayed_refs = &trans->transaction->delayed_refs;
 	record->bytenr = bytenr;
 	record->num_bytes = num_bytes;
@@ -2149,7 +2150,9 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
 	spin_lock(&delayed_refs->lock);
 	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
 	spin_unlock(&delayed_refs->lock);
-	if (ret > 0) {
+	if (ret) {
+		/* Clean up if insertion fails or item exists. */
+		xa_release(&delayed_refs->dirty_extents, record->bytenr);
 		kfree(record);
 		return 0;
 	}
@@ -3018,7 +3021,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
 	struct btrfs_qgroup_extent_record *record;
 	struct btrfs_delayed_ref_root *delayed_refs;
 	struct ulist *new_roots = NULL;
-	struct rb_node *node;
+	unsigned long index;
 	u64 num_dirty_extents = 0;
 	u64 qgroup_to_skip;
 	int ret = 0;
@@ -3028,10 +3031,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
 	delayed_refs = &trans->transaction->delayed_refs;
 	qgroup_to_skip = delayed_refs->qgroup_to_skip;
-	while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
-		record = rb_entry(node, struct btrfs_qgroup_extent_record,
-				  node);
+	xa_for_each(&delayed_refs->dirty_extents, index, record) {
 		num_dirty_extents++;
 		trace_btrfs_qgroup_account_extents(fs_info, record);
@@ -3097,7 +3097,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
 		ulist_free(record->old_roots);
 		ulist_free(new_roots);
 		new_roots = NULL;
-		rb_erase(node, &delayed_refs->dirty_extent_root);
+		xa_erase(&delayed_refs->dirty_extents, index);
 		kfree(record);
 	}
@@ -4874,15 +4874,13 @@ int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
 void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
 {
 	struct btrfs_qgroup_extent_record *entry;
-	struct btrfs_qgroup_extent_record *next;
-	struct rb_root *root;
+	unsigned long index;
 
-	root = &trans->delayed_refs.dirty_extent_root;
-	rbtree_postorder_for_each_entry_safe(entry, next, root, node) {
+	xa_for_each(&trans->delayed_refs.dirty_extents, index, entry) {
 		ulist_free(entry->old_roots);
 		kfree(entry);
 	}
-	*root = RB_ROOT;
+	xa_destroy(&trans->delayed_refs.dirty_extents);
 }
void btrfs_free_squota_rsv(struct btrfs_fs_info *fs_info, u64 root, u64 rsv_bytes)
......
@@ -125,7 +125,6 @@ struct btrfs_inode;
  * Record a dirty extent, and info qgroup to update quota on it
  */
 struct btrfs_qgroup_extent_record {
-	struct rb_node node;
 	u64 bytenr;
 	u64 num_bytes;
......
@@ -143,8 +143,7 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
 	BUG_ON(!list_empty(&transaction->list));
 	WARN_ON(!RB_EMPTY_ROOT(
 			&transaction->delayed_refs.href_root.rb_root));
-	WARN_ON(!RB_EMPTY_ROOT(
-			&transaction->delayed_refs.dirty_extent_root));
+	WARN_ON(!xa_empty(&transaction->delayed_refs.dirty_extents));
 	if (transaction->delayed_refs.pending_csums)
 		btrfs_err(transaction->fs_info,
 			  "pending csums is %llu",
@@ -351,7 +350,7 @@ static noinline int join_transaction(struct btrfs_fs_info *fs_info,
 	memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));
 	cur_trans->delayed_refs.href_root = RB_ROOT_CACHED;
-	cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
+	xa_init(&cur_trans->delayed_refs.dirty_extents);
 	atomic_set(&cur_trans->delayed_refs.num_entries, 0);
 	/*
......