Commit 09cc9fc7 authored by Dave Chinner, committed by Al Viro

inode: move to per-sb LRU locks

With the inode LRUs moving to per-sb structures, there is no longer
a need for a global inode_lru_lock. The locking can be made more
fine-grained by moving to a per-sb LRU lock, isolating the LRU
operations of different filesystems completely from each other.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 98b745c6
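
To make the shape of the change clear before the diff: the unused-inode LRU list and the lock protecting it now both live in struct super_block, so LRU and reclaim traffic on one filesystem never touches another filesystem's lock. Below is a minimal userspace sketch of that pattern, not kernel code: pthread mutexes stand in for spinlocks, the two structs are heavily simplified, hypothetical stand-ins, and the per-cpu nr_unused counter is omitted.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Stripped-down stand-ins for the kernel structures touched by the patch. */
struct super_block {
        pthread_mutex_t s_inode_lru_lock;   /* per-sb lock, replaces one global lock */
        struct inode *s_inode_lru;          /* head of this sb's unused-inode list */
        int s_nr_inodes_unused;
};

struct inode {
        struct super_block *i_sb;
        struct inode *i_lru_next;           /* stand-in for the i_lru list linkage */
        int on_lru;
};

/* Mirrors inode_lru_list_add(): only the owning superblock's lock is taken. */
static void inode_lru_list_add(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;

        pthread_mutex_lock(&sb->s_inode_lru_lock);
        if (!inode->on_lru) {
                inode->i_lru_next = sb->s_inode_lru;
                sb->s_inode_lru = inode;
                sb->s_nr_inodes_unused++;
                inode->on_lru = 1;
        }
        pthread_mutex_unlock(&sb->s_inode_lru_lock);
}

/* Each worker hammers the LRU of its own superblock; with per-sb locks the
 * two workers never contend on the same lock, which is the point of the patch. */
static void *worker(void *arg)
{
        struct super_block *sb = arg;

        for (int i = 0; i < 100000; i++) {
                struct inode *inode = calloc(1, sizeof(*inode));

                inode->i_sb = sb;
                inode_lru_list_add(inode);
        }
        return NULL;
}

int main(void)
{
        static struct super_block sb[2];    /* zero-initialized */
        pthread_t t[2];

        for (int i = 0; i < 2; i++) {
                pthread_mutex_init(&sb[i].s_inode_lru_lock, NULL);
                pthread_create(&t[i], NULL, worker, &sb[i]);
        }
        for (int i = 0; i < 2; i++)
                pthread_join(t[i], NULL);

        printf("sb0 unused=%d, sb1 unused=%d\n",
               sb[0].s_nr_inodes_unused, sb[1].s_nr_inodes_unused);
        return 0;
}

Build with gcc -O2 -pthread. With a single global mutex in place of the per-sb one, the two workers would serialize on every list insertion; that serialization is what the patch removes.
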
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -33,7 +33,7 @@
  *
  * inode->i_lock protects:
  *   inode->i_state, inode->i_hash, __iget()
- * inode_lru_lock protects:
+ * inode->i_sb->s_inode_lru_lock protects:
  *   inode->i_sb->s_inode_lru, inode->i_lru
  * inode_sb_list_lock protects:
  *   sb->s_inodes, inode->i_sb_list
@@ -46,7 +46,7 @@
  *
  * inode_sb_list_lock
  *   inode->i_lock
- *     inode_lru_lock
+ *     inode->i_sb->s_inode_lru_lock
  *
  * inode_wb_list_lock
  *   inode->i_lock
@@ -64,8 +64,6 @@ static unsigned int i_hash_shift __read_mostly;
 static struct hlist_head *inode_hashtable __read_mostly;
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
 
-static DEFINE_SPINLOCK(inode_lru_lock);
-
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock);
@@ -342,24 +340,24 @@ EXPORT_SYMBOL(ihold);
 static void inode_lru_list_add(struct inode *inode)
 {
-        spin_lock(&inode_lru_lock);
+        spin_lock(&inode->i_sb->s_inode_lru_lock);
         if (list_empty(&inode->i_lru)) {
                 list_add(&inode->i_lru, &inode->i_sb->s_inode_lru);
                 inode->i_sb->s_nr_inodes_unused++;
                 this_cpu_inc(nr_unused);
         }
-        spin_unlock(&inode_lru_lock);
+        spin_unlock(&inode->i_sb->s_inode_lru_lock);
 }
 
 static void inode_lru_list_del(struct inode *inode)
 {
-        spin_lock(&inode_lru_lock);
+        spin_lock(&inode->i_sb->s_inode_lru_lock);
         if (!list_empty(&inode->i_lru)) {
                 list_del_init(&inode->i_lru);
                 inode->i_sb->s_nr_inodes_unused--;
                 this_cpu_dec(nr_unused);
         }
-        spin_unlock(&inode_lru_lock);
+        spin_unlock(&inode->i_sb->s_inode_lru_lock);
 }
 
 /**
@@ -615,7 +613,8 @@ static int can_unuse(struct inode *inode)
 /*
  * Scan `goal' inodes on the unused list for freeable ones. They are moved to a
- * temporary list and then are freed outside inode_lru_lock by dispose_list().
+ * temporary list and then are freed outside sb->s_inode_lru_lock by
+ * dispose_list().
  *
  * Any inodes which are pinned purely because of attached pagecache have their
  * pagecache removed.  If the inode has metadata buffers attached to
@@ -635,7 +634,7 @@ static void shrink_icache_sb(struct super_block *sb, int *nr_to_scan)
         int nr_scanned;
         unsigned long reap = 0;
 
-        spin_lock(&inode_lru_lock);
+        spin_lock(&sb->s_inode_lru_lock);
         for (nr_scanned = *nr_to_scan; nr_scanned >= 0; nr_scanned--) {
                 struct inode *inode;
@@ -645,7 +644,7 @@ static void shrink_icache_sb(struct super_block *sb, int *nr_to_scan)
                 inode = list_entry(sb->s_inode_lru.prev, struct inode, i_lru);
 
                 /*
-                 * we are inverting the inode_lru_lock/inode->i_lock here,
+                 * we are inverting the sb->s_inode_lru_lock/inode->i_lock here,
                  * so use a trylock. If we fail to get the lock, just move the
                  * inode to the back of the list so we don't spin on it.
                  */
@@ -677,12 +676,12 @@ static void shrink_icache_sb(struct super_block *sb, int *nr_to_scan)
                 if (inode_has_buffers(inode) || inode->i_data.nrpages) {
                         __iget(inode);
                         spin_unlock(&inode->i_lock);
-                        spin_unlock(&inode_lru_lock);
+                        spin_unlock(&sb->s_inode_lru_lock);
                         if (remove_inode_buffers(inode))
                                 reap += invalidate_mapping_pages(&inode->i_data,
                                                                 0, -1);
                         iput(inode);
-                        spin_lock(&inode_lru_lock);
+                        spin_lock(&sb->s_inode_lru_lock);
 
                         if (inode != list_entry(sb->s_inode_lru.next,
                                                 struct inode, i_lru))
@@ -707,7 +706,7 @@ static void shrink_icache_sb(struct super_block *sb, int *nr_to_scan)
                 __count_vm_events(KSWAPD_INODESTEAL, reap);
         else
                 __count_vm_events(PGINODESTEAL, reap);
-        spin_unlock(&inode_lru_lock);
+        spin_unlock(&sb->s_inode_lru_lock);
         *nr_to_scan = nr_scanned;
 
         dispose_list(&freeable);
......
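
The trylock in shrink_icache_sb() above deserves a note: the documented order is inode->i_lock before the per-sb LRU lock, but the shrinker already holds sb->s_inode_lru_lock when it wants i_lock, so it may only trylock and, on failure, rotates the inode to the back of the list instead of spinning. Here is a rough userspace sketch of that pattern under the same caveats as the sketch above (pthread mutexes and hypothetical lru/item types, not the kernel API; the kernel additionally moves freeable inodes to a private list for dispose_list()).

#include <pthread.h>
#include <stdio.h>

struct item {                           /* hypothetical stand-in for an inode */
        pthread_mutex_t lock;           /* analogue of inode->i_lock */
        struct item *next;
};

struct lru {                            /* hypothetical stand-in for the per-sb LRU */
        pthread_mutex_t lock;           /* analogue of sb->s_inode_lru_lock */
        struct item *head, *tail;
};

/*
 * Walk the LRU with lru->lock held.  Taking item->lock here inverts the
 * documented item-then-list order, so only a trylock is safe; a busy item
 * is rotated to the tail and the scan moves on instead of spinning.
 */
static void shrink(struct lru *lru, int nr_to_scan)
{
        pthread_mutex_lock(&lru->lock);
        while (nr_to_scan-- > 0 && lru->head) {
                struct item *it = lru->head;

                if (pthread_mutex_trylock(&it->lock) != 0) {
                        /* busy: unlink from the head ... */
                        lru->head = it->next;
                        if (!lru->head)
                                lru->tail = NULL;
                        it->next = NULL;
                        /* ... and append at the tail */
                        if (lru->tail)
                                lru->tail->next = it;
                        else
                                lru->head = it;
                        lru->tail = it;
                        continue;
                }
                /* got the item lock: reclaim it (the kernel would move it to
                 * a private list and free it after dropping lru->lock) */
                pthread_mutex_unlock(&it->lock);
                lru->head = it->next;
                if (!lru->head)
                        lru->tail = NULL;
        }
        pthread_mutex_unlock(&lru->lock);
}

int main(void)
{
        struct lru lru = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct item a = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct item b = { .lock = PTHREAD_MUTEX_INITIALIZER };

        a.next = &b;
        lru.head = &a;
        lru.tail = &b;

        pthread_mutex_lock(&b.lock);    /* pretend another CPU holds b's lock */
        shrink(&lru, 4);
        pthread_mutex_unlock(&b.lock);

        printf("after scan, head is %s\n",
               lru.head == &b ? "b (rotated, still on the list)" : "empty");
        return 0;
}
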
--- a/fs/super.c
+++ b/fs/super.c
@@ -78,6 +78,7 @@ static struct super_block *alloc_super(struct file_system_type *type)
                 INIT_LIST_HEAD(&s->s_inodes);
                 INIT_LIST_HEAD(&s->s_dentry_lru);
                 INIT_LIST_HEAD(&s->s_inode_lru);
+                spin_lock_init(&s->s_inode_lru_lock);
                 init_rwsem(&s->s_umount);
                 mutex_init(&s->s_lock);
                 lockdep_set_class(&s->s_umount, &type->s_umount_key);
......
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1397,7 +1397,8 @@ struct super_block {
         struct list_head        s_dentry_lru;           /* unused dentry lru */
         int                     s_nr_dentry_unused;     /* # of dentry on lru */
 
-        /* inode_lru_lock protects s_inode_lru and s_nr_inodes_unused */
+        /* s_inode_lru_lock protects s_inode_lru and s_nr_inodes_unused */
+        spinlock_t              s_inode_lru_lock ____cacheline_aligned_in_smp;
         struct list_head        s_inode_lru;            /* unused inode lru */
         int                     s_nr_inodes_unused;     /* # of inodes on lru */
......
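
Note that the new field is annotated ____cacheline_aligned_in_smp: on SMP builds it is aligned to the start of a cache line so that bouncing this lock between CPUs does not drag the neighbouring super_block fields along with it (false sharing). A small, hypothetical userspace illustration using C11 alignas follows; the struct and the 64-byte line size are assumptions, not the real kernel layout.

#include <pthread.h>
#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Userspace analogue of the layout change: give the LRU lock its own
 * (assumed 64-byte) cache line so contention on it does not false-share
 * with the fields packed around it.
 */
struct sb_like {
        int s_nr_dentry_unused;                         /* unrelated hot field */
        alignas(64) pthread_mutex_t s_inode_lru_lock;   /* starts a fresh line */
        void *s_inode_lru;
        int s_nr_inodes_unused;
};

int main(void)
{
        size_t off = offsetof(struct sb_like, s_inode_lru_lock);

        printf("s_inode_lru_lock offset = %zu (64-byte aligned: %s)\n",
               off, off % 64 == 0 ? "yes" : "no");
        return 0;
}
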