Commit 1e8f9158 authored by Wang Shilong, committed by Josef Bacik

Btrfs: introduce qgroup_ulist to avoid frequently allocating/freeing ulist

When doing qgroup accounting, we call ulist_alloc()/ulist_free() every time
we want to walk the qgroup tree.

By introducing 'qgroup_ulist', we only need to call ulist_alloc()/ulist_free()
once. This reduces the sys time spent allocating memory; see the measurements below:

fsstress -p 4 -n 10000 -d $dir

With this patch:

real    0m50.153s
user    0m0.081s
sys     0m6.294s

real    0m51.113s
user    0m0.092s
sys     0m6.220s

real    0m52.610s
user    0m0.096s
sys     0m6.125s	avg 6.213
-----------------------------------------------------
Without the patch:

real    0m54.825s
user    0m0.061s
sys     0m10.665s

real    1m6.401s
user    0m0.089s
sys     0m11.218s

real    1m13.768s
user    0m0.087s
sys     0m10.665s       avg 10.849

We can see the sys time is reduced by ~43%.
Signed-off-by: Wang Shilong <wangsl-fnst@cn.fujitsu.com>
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
parent 85965600
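
Before the diff, here is a minimal user-space C sketch of the pattern the patch applies: allocate one reusable scratch list up front, reinitialize it for each accounting pass, and free it only once at teardown, instead of allocating and freeing a temporary list on every walk. The `scratch_list` type and helper names are hypothetical stand-ins for the kernel's ulist API (ulist_alloc()/ulist_reinit()/ulist_add()/ulist_free()), not the actual btrfs code.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the kernel's struct ulist: a growable list of ids. */
struct scratch_list {
	unsigned long long *ids;
	size_t nnodes;
	size_t capacity;
};

/* Allocate once, e.g. when quota is enabled (mirrors ulist_alloc()). */
static struct scratch_list *scratch_alloc(size_t capacity)
{
	struct scratch_list *l = malloc(sizeof(*l));

	if (!l)
		return NULL;
	l->ids = malloc(capacity * sizeof(*l->ids));
	if (!l->ids) {
		free(l);
		return NULL;
	}
	l->nnodes = 0;
	l->capacity = capacity;
	return l;
}

/* Reset between accounting passes without freeing memory (mirrors ulist_reinit()). */
static void scratch_reinit(struct scratch_list *l)
{
	l->nnodes = 0;
}

/* Record one qgroup id for the current pass (simplified ulist_add()). */
static int scratch_add(struct scratch_list *l, unsigned long long id)
{
	if (l->nnodes == l->capacity)
		return -1;	/* the real ulist grows instead of failing */
	l->ids[l->nnodes++] = id;
	return 0;
}

/* Free once, e.g. when quota is disabled or at unmount (mirrors ulist_free()). */
static void scratch_free(struct scratch_list *l)
{
	if (!l)
		return;
	free(l->ids);
	free(l);
}

int main(void)
{
	struct scratch_list *qgroup_ulist = scratch_alloc(64);

	if (!qgroup_ulist)
		return 1;

	/*
	 * Each "accounting pass" reuses the same buffer instead of calling
	 * scratch_alloc()/scratch_free() every time, which is where the
	 * sys-time saving in the measurements above comes from.
	 */
	for (int pass = 0; pass < 3; pass++) {
		scratch_reinit(qgroup_ulist);
		scratch_add(qgroup_ulist, 5);
		scratch_add(qgroup_ulist, 257);
		printf("pass %d walked %zu qgroups\n", pass, qgroup_ulist->nnodes);
	}

	scratch_free(qgroup_ulist);
	return 0;
}

Because the single list is shared by all accounting operations, the kernel version must serialize access to it; that is what the new comment in struct btrfs_fs_info means by "it must be protected by qgroup_lock".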
@@ -1594,6 +1594,12 @@ struct btrfs_fs_info {
 	struct rb_root qgroup_tree;
 	spinlock_t qgroup_lock;
 
+	/*
+	 * used to avoid frequently calling ulist_alloc()/ulist_free()
+	 * when doing qgroup accounting, it must be protected by qgroup_lock.
+	 */
+	struct ulist *qgroup_ulist;
+
 	/* protect user change for quota operations */
 	struct mutex qgroup_ioctl_lock;
@@ -2274,6 +2274,7 @@ int open_ctree(struct super_block *sb,
 	fs_info->qgroup_seq = 1;
 	fs_info->quota_enabled = 0;
 	fs_info->pending_quota_state = 0;
+	fs_info->qgroup_ulist = NULL;
 	mutex_init(&fs_info->qgroup_rescan_lock);
 
 	btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
@@ -259,6 +259,12 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
 	if (!fs_info->quota_enabled)
 		return 0;
 
+	fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
+	if (!fs_info->qgroup_ulist) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
 	path = btrfs_alloc_path();
 	if (!path) {
 		ret = -ENOMEM;
@@ -424,6 +430,9 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
 	}
 
 	btrfs_free_path(path);
+	if (ret)
+		ulist_free(fs_info->qgroup_ulist);
+
 	return ret < 0 ? ret : 0;
 }
@@ -460,6 +469,7 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
 		}
 		kfree(qgroup);
 	}
+	ulist_free(fs_info->qgroup_ulist);
 }
 
 static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
@@ -819,6 +829,12 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
 		goto out;
 	}
 
+	fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
+	if (!fs_info->qgroup_ulist) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
 	/*
 	 * initially create the quota tree
 	 */
@@ -916,6 +932,8 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
 		kfree(quota_root);
 	}
 out:
+	if (ret)
+		ulist_free(fs_info->qgroup_ulist);
 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
 	return ret;
 }
@@ -1355,7 +1373,6 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
 	u64 ref_root;
 	struct btrfs_qgroup *qgroup;
 	struct ulist *roots = NULL;
-	struct ulist *tmp = NULL;
 	u64 seq;
 	int ret = 0;
 	int sgn;
@@ -1448,31 +1465,28 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
 	/*
 	 * step 1: for each old ref, visit all nodes once and inc refcnt
 	 */
-	tmp = ulist_alloc(GFP_ATOMIC);
-	if (!tmp) {
-		ret = -ENOMEM;
-		goto unlock;
-	}
+	ulist_reinit(fs_info->qgroup_ulist);
 	seq = fs_info->qgroup_seq;
 	fs_info->qgroup_seq += roots->nnodes + 1; /* max refcnt */
 
-	ret = qgroup_account_ref_step1(fs_info, roots, tmp, seq);
+	ret = qgroup_account_ref_step1(fs_info, roots, fs_info->qgroup_ulist,
+				       seq);
 	if (ret)
 		goto unlock;
 
 	/*
 	 * step 2: walk from the new root
 	 */
-	ret = qgroup_account_ref_step2(fs_info, roots, tmp, seq, sgn,
-				       node->num_bytes, qgroup);
+	ret = qgroup_account_ref_step2(fs_info, roots, fs_info->qgroup_ulist,
+				       seq, sgn, node->num_bytes, qgroup);
 	if (ret)
 		goto unlock;
 
 	/*
 	 * step 3: walk again from old refs
 	 */
-	ret = qgroup_account_ref_step3(fs_info, roots, tmp, seq, sgn,
-				       node->num_bytes);
+	ret = qgroup_account_ref_step3(fs_info, roots, fs_info->qgroup_ulist,
+				       seq, sgn, node->num_bytes);
 	if (ret)
 		goto unlock;
@@ -1480,7 +1494,6 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
 	spin_unlock(&fs_info->qgroup_lock);
 	mutex_unlock(&fs_info->qgroup_rescan_lock);
 	ulist_free(roots);
-	ulist_free(tmp);
 
 	return ret;
 }
@@ -1720,7 +1733,6 @@ int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
 	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 ref_root = root->root_key.objectid;
 	int ret = 0;
-	struct ulist *ulist = NULL;
 	struct ulist_node *unode;
 	struct ulist_iterator uiter;
@@ -1743,17 +1755,13 @@ int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
 	 * in a first step, we check all affected qgroups if any limits would
 	 * be exceeded
 	 */
-	ulist = ulist_alloc(GFP_ATOMIC);
-	if (!ulist) {
-		ret = -ENOMEM;
-		goto out;
-	}
-	ret = ulist_add(ulist, qgroup->qgroupid,
+	ulist_reinit(fs_info->qgroup_ulist);
+	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
 			(uintptr_t)qgroup, GFP_ATOMIC);
 	if (ret < 0)
 		goto out;
 	ULIST_ITER_INIT(&uiter);
-	while ((unode = ulist_next(ulist, &uiter))) {
+	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
 		struct btrfs_qgroup *qg;
 		struct btrfs_qgroup_list *glist;
@@ -1774,7 +1782,8 @@ int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
 		}
 
 		list_for_each_entry(glist, &qg->groups, next_group) {
-			ret = ulist_add(ulist, glist->group->qgroupid,
+			ret = ulist_add(fs_info->qgroup_ulist,
+					glist->group->qgroupid,
 					(uintptr_t)glist->group, GFP_ATOMIC);
 			if (ret < 0)
 				goto out;
@@ -1785,7 +1794,7 @@ int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
 	 * no limits exceeded, now record the reservation into all qgroups
 	 */
 	ULIST_ITER_INIT(&uiter);
-	while ((unode = ulist_next(ulist, &uiter))) {
+	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
 		struct btrfs_qgroup *qg;
 
 		qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
@@ -1795,8 +1804,6 @@ int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
 
 out:
 	spin_unlock(&fs_info->qgroup_lock);
-	ulist_free(ulist);
-
 	return ret;
 }
@@ -1805,7 +1812,6 @@ void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
 	struct btrfs_root *quota_root;
 	struct btrfs_qgroup *qgroup;
 	struct btrfs_fs_info *fs_info = root->fs_info;
-	struct ulist *ulist = NULL;
 	struct ulist_node *unode;
 	struct ulist_iterator uiter;
 	u64 ref_root = root->root_key.objectid;
@@ -1827,17 +1833,13 @@ void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
 	if (!qgroup)
 		goto out;
 
-	ulist = ulist_alloc(GFP_ATOMIC);
-	if (!ulist) {
-		btrfs_std_error(fs_info, -ENOMEM);
-		goto out;
-	}
-	ret = ulist_add(ulist, qgroup->qgroupid,
+	ulist_reinit(fs_info->qgroup_ulist);
+	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
 			(uintptr_t)qgroup, GFP_ATOMIC);
 	if (ret < 0)
 		goto out;
 	ULIST_ITER_INIT(&uiter);
-	while ((unode = ulist_next(ulist, &uiter))) {
+	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
 		struct btrfs_qgroup *qg;
 		struct btrfs_qgroup_list *glist;
@@ -1846,7 +1848,8 @@ void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
 		qg->reserved -= num_bytes;
 
 		list_for_each_entry(glist, &qg->groups, next_group) {
-			ret = ulist_add(ulist, glist->group->qgroupid,
+			ret = ulist_add(fs_info->qgroup_ulist,
+					glist->group->qgroupid,
 					(uintptr_t)glist->group, GFP_ATOMIC);
 			if (ret < 0)
 				goto out;
@@ -1855,7 +1858,6 @@ void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
 
 out:
 	spin_unlock(&fs_info->qgroup_lock);
-	ulist_free(ulist);
 }
 
 void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)