Commit 74fd8d99 authored by Jaegeuk Kim

f2fs: speed up shrinking extent tree entries

If there are no candidates for shrinking slab entries, we don't need to traverse
any trees at all.
Reviewed-by: Chao Yu <chao2.yu@samsung.com>
[Jaegeuk Kim: fix missing initialization reported by Yunlei He]
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent 7441ccef
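
The optimization boils down to one pattern: keep an atomic count of "zombie" extent trees (trees kept around after their inode was destroyed) and let the shrinker skip the radix-tree walk whenever that count is zero. The sketch below is a minimal userspace illustration of that pattern, not the f2fs code itself: the names (extent_cache, mark_zombie, shrink_trees) are made up for illustration, and unlike the real patch, which jumps to the LRU-trimming step via the new free_node label, it simply returns early.

    /* Illustrative sketch only: a counter-guarded fast path for a shrinker. */
    #include <stdatomic.h>
    #include <stdio.h>

    struct extent_cache {
            atomic_int total_trees;         /* all cached extent trees */
            atomic_int zombie_trees;        /* trees whose inode is already gone */
    };

    /* Called when an inode goes away but its extent tree keeps cached entries. */
    static void mark_zombie(struct extent_cache *c)
    {
            atomic_fetch_add(&c->zombie_trees, 1);
    }

    /* Shrinker entry: reclaim up to nr_shrink zombie trees. */
    static unsigned int shrink_trees(struct extent_cache *c, int nr_shrink)
    {
            unsigned int freed = 0;

            /* Fast path: no zombies, so skip the expensive traversal entirely. */
            if (!atomic_load(&c->zombie_trees))
                    return 0;

            /* Slow-path stand-in: in f2fs this walks the radix tree of extent trees. */
            while (atomic_load(&c->zombie_trees) > 0 && freed < (unsigned int)nr_shrink) {
                    atomic_fetch_sub(&c->zombie_trees, 1);
                    atomic_fetch_sub(&c->total_trees, 1);
                    freed++;
            }
            return freed;
    }

    int main(void)
    {
            struct extent_cache c = { 3, 0 };       /* three live trees, no zombies */

            printf("freed %u with no zombies\n", shrink_trees(&c, 8));
            mark_zombie(&c);
            printf("freed %u with one zombie\n", shrink_trees(&c, 8));
            return 0;
    }
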
@@ -71,6 +71,8 @@ static struct extent_tree *__grab_extent_tree(struct inode *inode)
                 atomic_set(&et->refcount, 0);
                 et->count = 0;
                 atomic_inc(&sbi->total_ext_tree);
+        } else {
+                atomic_dec(&sbi->total_zombie_tree);
         }
         atomic_inc(&et->refcount);
         up_write(&sbi->extent_tree_lock);
@@ -547,10 +549,14 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
         unsigned int found;
         unsigned int node_cnt = 0, tree_cnt = 0;
         int remained;
+        bool do_free = false;

         if (!test_opt(sbi, EXTENT_CACHE))
                 return 0;

+        if (!atomic_read(&sbi->total_zombie_tree))
+                goto free_node;
+
         if (!down_write_trylock(&sbi->extent_tree_lock))
                 goto out;
@@ -571,6 +577,7 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
                                 radix_tree_delete(root, et->ino);
                                 kmem_cache_free(extent_tree_slab, et);
                                 atomic_dec(&sbi->total_ext_tree);
+                                atomic_dec(&sbi->total_zombie_tree);
                                 tree_cnt++;

                                 if (node_cnt + tree_cnt >= nr_shrink)
@@ -580,6 +587,7 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
         }
         up_write(&sbi->extent_tree_lock);

+free_node:
         /* 2. remove LRU extent entries */
         if (!down_write_trylock(&sbi->extent_tree_lock))
                 goto out;
@@ -591,9 +599,13 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
                 if (!remained--)
                         break;
                 list_del_init(&en->list);
+                do_free = true;
         }
         spin_unlock(&sbi->extent_lock);

+        if (do_free == false)
+                goto unlock_out;
+
         /*
          * reset ino for searching victims from beginning of global extent tree.
          */
@@ -651,6 +663,7 @@ void f2fs_destroy_extent_tree(struct inode *inode)
         if (inode->i_nlink && !is_bad_inode(inode) && et->count) {
                 atomic_dec(&et->refcount);
+                atomic_inc(&sbi->total_zombie_tree);
                 return;
         }
@@ -716,6 +729,7 @@ void init_extent_cache_info(struct f2fs_sb_info *sbi)
         INIT_LIST_HEAD(&sbi->extent_list);
         spin_lock_init(&sbi->extent_lock);
         atomic_set(&sbi->total_ext_tree, 0);
+        atomic_set(&sbi->total_zombie_tree, 0);
         atomic_set(&sbi->total_ext_node, 0);
 }
@@ -763,6 +763,7 @@ struct f2fs_sb_info {
         struct list_head extent_list;           /* lru list for shrinker */
         spinlock_t extent_lock;                 /* locking extent lru list */
         atomic_t total_ext_tree;                /* extent tree count */
+        atomic_t total_zombie_tree;             /* extent zombie tree count */
         atomic_t total_ext_node;                /* extent info count */

         /* basic filesystem units */
@@ -32,7 +32,7 @@ static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
 static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
 {
-        return atomic_read(&sbi->total_ext_tree) +
+        return atomic_read(&sbi->total_zombie_tree) +
                 atomic_read(&sbi->total_ext_node);
 }