Commit 963d678b authored by Miao Xie, committed by Josef Bacik

Btrfs: use percpu counter for fs_info->delalloc_bytes

fs_info->delalloc_bytes is accessed very frequently, so use a percpu
counter instead of the plain u64 variable for it to reduce the lock
contention.

This patch also fixes the problem that we accessed the variable
without lock protection. At worst, we would not flush the
delalloc inodes, and would just return an ENOSPC error even though
we still had some free space in the fs.
Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
parent e2d84521
...@@ -1392,6 +1392,7 @@ struct btrfs_fs_info { ...@@ -1392,6 +1392,7 @@ struct btrfs_fs_info {
*/ */
struct list_head ordered_extents; struct list_head ordered_extents;
spinlock_t delalloc_lock;
/* /*
* all of the inodes that have delalloc bytes. It is possible for * all of the inodes that have delalloc bytes. It is possible for
* this list to be empty even when there is still dirty data=ordered * this list to be empty even when there is still dirty data=ordered
...@@ -1452,7 +1453,10 @@ struct btrfs_fs_info { ...@@ -1452,7 +1453,10 @@ struct btrfs_fs_info {
/* used to keep from writing metadata until there is a nice batch */ /* used to keep from writing metadata until there is a nice batch */
struct percpu_counter dirty_metadata_bytes; struct percpu_counter dirty_metadata_bytes;
struct percpu_counter delalloc_bytes;
s32 dirty_metadata_batch; s32 dirty_metadata_batch;
s32 delalloc_batch;
struct list_head dirty_cowonly_roots; struct list_head dirty_cowonly_roots;
struct btrfs_fs_devices *fs_devices; struct btrfs_fs_devices *fs_devices;
...@@ -1468,9 +1472,6 @@ struct btrfs_fs_info { ...@@ -1468,9 +1472,6 @@ struct btrfs_fs_info {
struct reloc_control *reloc_ctl; struct reloc_control *reloc_ctl;
spinlock_t delalloc_lock;
u64 delalloc_bytes;
/* data_alloc_cluster is only used in ssd mode */ /* data_alloc_cluster is only used in ssd mode */
struct btrfs_free_cluster data_alloc_cluster; struct btrfs_free_cluster data_alloc_cluster;
......
...@@ -2010,10 +2010,16 @@ int open_ctree(struct super_block *sb, ...@@ -2010,10 +2010,16 @@ int open_ctree(struct super_block *sb,
fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE * fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE *
(1 + ilog2(nr_cpu_ids)); (1 + ilog2(nr_cpu_ids));
ret = percpu_counter_init(&fs_info->delalloc_bytes, 0);
if (ret) {
err = ret;
goto fail_dirty_metadata_bytes;
}
fs_info->btree_inode = new_inode(sb); fs_info->btree_inode = new_inode(sb);
if (!fs_info->btree_inode) { if (!fs_info->btree_inode) {
err = -ENOMEM; err = -ENOMEM;
goto fail_dirty_metadata_bytes; goto fail_delalloc_bytes;
} }
mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS); mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
...@@ -2269,6 +2275,7 @@ int open_ctree(struct super_block *sb, ...@@ -2269,6 +2275,7 @@ int open_ctree(struct super_block *sb,
sectorsize = btrfs_super_sectorsize(disk_super); sectorsize = btrfs_super_sectorsize(disk_super);
stripesize = btrfs_super_stripesize(disk_super); stripesize = btrfs_super_stripesize(disk_super);
fs_info->dirty_metadata_batch = leafsize * (1 + ilog2(nr_cpu_ids)); fs_info->dirty_metadata_batch = leafsize * (1 + ilog2(nr_cpu_ids));
fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
/* /*
* mixed block groups end up with duplicate but slightly offset * mixed block groups end up with duplicate but slightly offset
...@@ -2731,6 +2738,8 @@ int open_ctree(struct super_block *sb, ...@@ -2731,6 +2738,8 @@ int open_ctree(struct super_block *sb,
invalidate_inode_pages2(fs_info->btree_inode->i_mapping); invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
iput(fs_info->btree_inode); iput(fs_info->btree_inode);
fail_delalloc_bytes:
percpu_counter_destroy(&fs_info->delalloc_bytes);
fail_dirty_metadata_bytes: fail_dirty_metadata_bytes:
percpu_counter_destroy(&fs_info->dirty_metadata_bytes); percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
fail_bdi: fail_bdi:
...@@ -3362,9 +3371,9 @@ int close_ctree(struct btrfs_root *root) ...@@ -3362,9 +3371,9 @@ int close_ctree(struct btrfs_root *root)
btrfs_free_qgroup_config(root->fs_info); btrfs_free_qgroup_config(root->fs_info);
if (fs_info->delalloc_bytes) { if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n", printk(KERN_INFO "btrfs: at unmount delalloc count %lld\n",
(unsigned long long)fs_info->delalloc_bytes); percpu_counter_sum(&fs_info->delalloc_bytes));
} }
free_extent_buffer(fs_info->extent_root->node); free_extent_buffer(fs_info->extent_root->node);
...@@ -3412,6 +3421,7 @@ int close_ctree(struct btrfs_root *root) ...@@ -3412,6 +3421,7 @@ int close_ctree(struct btrfs_root *root)
btrfs_mapping_tree_free(&fs_info->mapping_tree); btrfs_mapping_tree_free(&fs_info->mapping_tree);
percpu_counter_destroy(&fs_info->dirty_metadata_bytes); percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
percpu_counter_destroy(&fs_info->delalloc_bytes);
bdi_destroy(&fs_info->bdi); bdi_destroy(&fs_info->bdi);
cleanup_srcu_struct(&fs_info->subvol_srcu); cleanup_srcu_struct(&fs_info->subvol_srcu);
......
...@@ -3760,7 +3760,8 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig, ...@@ -3760,7 +3760,8 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
space_info = block_rsv->space_info; space_info = block_rsv->space_info;
smp_mb(); smp_mb();
delalloc_bytes = root->fs_info->delalloc_bytes; delalloc_bytes = percpu_counter_sum_positive(
&root->fs_info->delalloc_bytes);
if (delalloc_bytes == 0) { if (delalloc_bytes == 0) {
if (trans) if (trans)
return; return;
...@@ -3799,7 +3800,8 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig, ...@@ -3799,7 +3800,8 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
break; break;
} }
smp_mb(); smp_mb();
delalloc_bytes = root->fs_info->delalloc_bytes; delalloc_bytes = percpu_counter_sum_positive(
&root->fs_info->delalloc_bytes);
} }
} }
......
...@@ -1516,7 +1516,8 @@ static void btrfs_set_bit_hook(struct inode *inode, ...@@ -1516,7 +1516,8 @@ static void btrfs_set_bit_hook(struct inode *inode,
spin_lock(&root->fs_info->delalloc_lock); spin_lock(&root->fs_info->delalloc_lock);
BTRFS_I(inode)->delalloc_bytes += len; BTRFS_I(inode)->delalloc_bytes += len;
root->fs_info->delalloc_bytes += len; __percpu_counter_add(&root->fs_info->delalloc_bytes, len,
root->fs_info->delalloc_batch);
if (do_list && list_empty(&BTRFS_I(inode)->delalloc_inodes)) { if (do_list && list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
list_add_tail(&BTRFS_I(inode)->delalloc_inodes, list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
&root->fs_info->delalloc_inodes); &root->fs_info->delalloc_inodes);
...@@ -1557,7 +1558,8 @@ static void btrfs_clear_bit_hook(struct inode *inode, ...@@ -1557,7 +1558,8 @@ static void btrfs_clear_bit_hook(struct inode *inode,
btrfs_free_reserved_data_space(inode, len); btrfs_free_reserved_data_space(inode, len);
spin_lock(&root->fs_info->delalloc_lock); spin_lock(&root->fs_info->delalloc_lock);
root->fs_info->delalloc_bytes -= len; __percpu_counter_add(&root->fs_info->delalloc_bytes, -len,
root->fs_info->delalloc_batch);
BTRFS_I(inode)->delalloc_bytes -= len; BTRFS_I(inode)->delalloc_bytes -= len;
if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 && if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment