Commit 38727786 authored by Ojaswin Mujoo, committed by Theodore Ts'o

ext4: Use rbtrees to manage PAs instead of inode i_prealloc_list

Currently, the kernel uses i_prealloc_list to hold all the inode
preallocations. This is known to cause performance degradation in
workloads which perform a large number of sparse writes on a single
file. This is mainly because functions like ext4_mb_normalize_request()
and ext4_mb_use_preallocated() iterate over this complete list,
resulting in slowdowns when a large number of PAs are present.

Patch 27bc446e partially fixed this by enforcing a limit of 512 entries
on the inode preallocation list and adding logic to continually trim
the list if it grows above that threshold. However, our testing
revealed that a hardcoded value is not suitable for all kinds of
workloads.

To optimize this, add an rbtree to the inode and hold the inode
preallocations in this rbtree. This makes iterating over inode PAs
faster and scales much better than a linked list. Additionally, the
LRU logic that was used when trimming the list (in
ext4_mb_release_context()) is removed, since maintaining it in an
rbtree would add extra overhead. Discards now happen in
lowest-logical-offset-first order.
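
As an illustration of the data-structure change, here is a minimal
sketch (not the exact patch code: the helper name below is made up,
while the struct and field names match the ones this patch introduces)
of how an inode PA can be linked into the per-inode tree keyed by its
logical start block:

#include <linux/rbtree.h>

/*
 * Sketch: insert 'new' into the inode's PA tree ordered by pa_lstart,
 * so that a walk of the tree visits PAs lowest-logical-offset-first.
 */
static void pa_tree_insert(struct rb_root *root,
			   struct ext4_prealloc_space *new)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;

	while (*link) {
		struct ext4_prealloc_space *cur;

		parent = *link;
		cur = rb_entry(parent, struct ext4_prealloc_space,
			       pa_node.inode_node);
		if (new->pa_lstart < cur->pa_lstart)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	rb_link_node(&new->pa_node.inode_node, parent, link);
	rb_insert_color(&new->pa_node.inode_node, root);
}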

** Locking notes **

With the introduction of the rbtree to maintain inode PAs, we can't use
RCU to walk the tree while searching, since it can result in partial
traversals that miss some nodes (or entire subtrees) while discards
happen in parallel (discards themselves run under a lock). Hence, this
patch converts the ei->i_prealloc_lock spinlock to an rwlock.

Almost all the codepaths that read/modify the PA rbtree are protected
by the higher-level inode->i_data_sem (except
ext4_mb_discard_group_preallocations() and ext4_clear_inode()). As far
as we can tell, the only place we need lock protection is when one
thread is searching the PA rbtree (earlier protected under
rcu_read_lock()) while another is deleting PAs in
ext4_mb_discard_group_preallocations(), which iterates over all the PAs
using grp->bb_prealloc_list and deletes PAs from the tree without
taking any inode lock (i_data_sem).

So, this patch converts all rcu_read_lock/unlock() paths for the inode
PA tree to use read_lock(), and all places that previously took the
ei->i_prealloc_lock spinlock now use write_lock().
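
For illustration, the read side now looks roughly like the following
(a simplified sketch using the names introduced by this patch; the
real lookup code may descend the tree rather than iterate it in
order):

/*
 * Illustrative only: walk the inode PA tree under read_lock(), where
 * the old code used rcu_read_lock() over i_prealloc_list.
 */
struct ext4_inode_info *ei = EXT4_I(inode);
struct ext4_prealloc_space *pa;
struct rb_node *node;

read_lock(&ei->i_prealloc_lock);
for (node = rb_first(&ei->i_prealloc_node); node; node = rb_next(node)) {
	pa = rb_entry(node, struct ext4_prealloc_space,
		      pa_node.inode_node);
	/* ... check whether 'pa' covers the requested logical range ... */
}
read_unlock(&ei->i_prealloc_lock);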

Note that this makes the fast path (searching for the right PA, e.g.
in ext4_mb_use_preallocated() or ext4_mb_normalize_request()) use
read_lock() instead of rcu_read_lock()/rcu_read_unlock(). The fast
path can therefore now block on the slow discard path
(ext4_mb_discard_group_preallocations()), which uses write_lock().

But this is not as bad as it looks, because:

1. The slow path only occurs when the normal allocation fails, which
   means we are low on disk space. One can argue this scenario won't
   be very frequent.

2. ext4_mb_discard_group_preallocations() locks and unlocks the rwlock
   around the deletion of each individual PA. This gives the fast path
   enough opportunity to acquire the read_lock for searching the PA
   inode tree, as sketched below.
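
For illustration, the discard side then behaves roughly as follows
(heavily simplified: the deleted-PA bookkeeping, group locking, and
block freeing done by the real ext4_mb_discard_group_preallocations()
are omitted). The point is that the writer lock is cycled per PA:

struct ext4_prealloc_space *pa, *tmp;

list_for_each_entry_safe(pa, tmp, &grp->bb_prealloc_list, pa_group_list) {
	struct ext4_inode_info *ei = EXT4_I(pa->pa_inode);

	/* Lock, unlink a single PA from the inode tree, unlock. */
	write_lock(pa->pa_node_lock.inode_lock);
	rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
	write_unlock(pa->pa_node_lock.inode_lock);

	/* ... readers may grab the read_lock before the next PA ... */
}
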
Suggested-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Signed-off-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/4137bce8f6948fedd8bae134dabae24acfe699c6.1679731817.git.ojaswin@linux.ibm.com
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
parent a8e38fd3
fs/ext4/ext4.h:
@@ -1120,8 +1120,8 @@ struct ext4_inode_info {
 	/* mballoc */
 	atomic_t i_prealloc_active;
-	struct list_head i_prealloc_list;
-	spinlock_t i_prealloc_lock;
+	struct rb_root i_prealloc_node;
+	rwlock_t i_prealloc_lock;
 	/* extents status tree */
 	struct ext4_es_tree i_es_tree;
...
This diff (fs/ext4/mballoc.c) is collapsed.
fs/ext4/mballoc.h:
@@ -115,7 +115,7 @@ struct ext4_free_data {
 struct ext4_prealloc_space {
 	union {
-		struct list_head inode_list;	/* for inode PAs */
+		struct rb_node inode_node;	/* for inode PA rbtree */
 		struct list_head lg_list;	/* for lg PAs */
 	} pa_node;
 	struct list_head pa_group_list;
@@ -132,10 +132,10 @@ struct ext4_prealloc_space {
 	ext4_grpblk_t pa_free;		/* how many blocks are free */
 	unsigned short pa_type;		/* pa type. inode or group */
 	union {
-		spinlock_t *inode_lock;	/* locks the inode list holding this PA */
+		rwlock_t *inode_lock;	/* locks the rbtree holding this PA */
 		spinlock_t *lg_lock;	/* locks the lg list holding this PA */
 	} pa_node_lock;
-	struct inode *pa_inode;	/* hack, for history only */
+	struct inode *pa_inode;	/* used to get the inode during group discard */
 };

 enum {
...
fs/ext4/super.c:
@@ -1325,9 +1325,9 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
 	inode_set_iversion(&ei->vfs_inode, 1);
 	ei->i_flags = 0;
 	spin_lock_init(&ei->i_raw_lock);
-	INIT_LIST_HEAD(&ei->i_prealloc_list);
+	ei->i_prealloc_node = RB_ROOT;
 	atomic_set(&ei->i_prealloc_active, 0);
-	spin_lock_init(&ei->i_prealloc_lock);
+	rwlock_init(&ei->i_prealloc_lock);
 	ext4_es_init_tree(&ei->i_es_tree);
 	rwlock_init(&ei->i_es_lock);
 	INIT_LIST_HEAD(&ei->i_es_list);
...