Commit eb6ef3df authored by Konstantin Khlebnikov, committed by Al Viro

trylock_super(): replacement for grab_super_passive()

I've noticed significant lock contention in the memory reclaimer around
sb_lock inside grab_super_passive(). grab_super_passive() is called from
two places: the icache/dcache shrinkers (super_cache_scan()) and
writeback (__writeback_inodes_wb()). Both are required for the memory
allocator to make progress.

grab_super_passive() acquires sb_lock to increment sb->s_count and to
check sb->s_instances. Holding sb->s_umount for read is enough here:
super-block deactivation always runs with sb->s_umount held for write.
Protecting the super-block itself isn't a problem either: in
super_cache_scan() the sb is protected by shrinker_rwsem and cannot be
freed while its slab shrinkers are still registered; in writeback the
super-block comes from an inode taken off the bdi writeback list under
wb->list_lock.
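
To make that ordering concrete, here is a minimal sketch (illustrative
only, not the literal kernel source; the two function names below are
hypothetical, standing in for the generic_shutdown_super() and
trylock_super() paths):

	/* Shutdown side: ->s_instances is unhashed only while
	 * ->s_umount is held for write. */
	static void shutdown_side(struct super_block *sb)
	{
		down_write(&sb->s_umount);
		spin_lock(&sb_lock);
		hlist_del_init(&sb->s_instances);	/* sb is going away */
		spin_unlock(&sb_lock);
		/* ... evict inodes, tear down the filesystem ... */
		up_write(&sb->s_umount);
	}

	/* Reader side: once we hold ->s_umount for read and the sb is
	 * still hashed, shutdown cannot start until we up_read(). */
	static bool reader_side(struct super_block *sb)
	{
		if (down_read_trylock(&sb->s_umount)) {
			if (!hlist_unhashed(&sb->s_instances))
				return true;	/* safe to use sb */
			up_read(&sb->s_umount);
		}
		return false;
	}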

This patch drops the sb_lock locking and instead checks s_instances
under s_umount: generic_shutdown_super() unlinks it while holding
sb->s_umount for write. The new variant is called trylock_super(), and
since it only takes the semaphore, callers must call
up_read(&sb->s_umount) instead of drop_super(sb) when they're done.
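
In caller terms the conversion looks like this (illustrative sketch;
the actual call sites are in the diff below):

	/* before: passive reference, released with drop_super() */
	if (!grab_super_passive(sb))
		return SHRINK_STOP;
	/* ... scan the caches ... */
	drop_super(sb);			/* drops ->s_umount and ->s_count */

	/* after: only the rwsem is taken, so a plain up_read() pairs it */
	if (!trylock_super(sb))
		return SHRINK_STOP;
	/* ... scan the caches ... */
	up_read(&sb->s_umount);		/* no ->s_count reference to drop */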
Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 54f2a2f4
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -769,9 +769,9 @@ static long __writeback_inodes_wb(struct bdi_writeback *wb,
 		struct inode *inode = wb_inode(wb->b_io.prev);
 		struct super_block *sb = inode->i_sb;
 
-		if (!grab_super_passive(sb)) {
+		if (!trylock_super(sb)) {
 			/*
-			 * grab_super_passive() may fail consistently due to
+			 * trylock_super() may fail consistently due to
 			 * s_umount being grabbed by someone else. Don't use
 			 * requeue_io() to avoid busy retrying the inode/sb.
 			 */
@@ -779,7 +779,7 @@ static long __writeback_inodes_wb(struct bdi_writeback *wb,
 			continue;
 		}
 		wrote += writeback_sb_inodes(sb, wb, work);
-		drop_super(sb);
+		up_read(&sb->s_umount);
 
 		/* refer to the same tests at the end of writeback_sb_inodes */
 		if (wrote) {
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -84,7 +84,7 @@ extern struct file *get_empty_filp(void);
  * super.c
  */
 extern int do_remount_sb(struct super_block *, int, void *, int);
-extern bool grab_super_passive(struct super_block *sb);
+extern bool trylock_super(struct super_block *sb);
 extern struct dentry *mount_fs(struct file_system_type *,
 			       int, const char *, void *);
 extern struct super_block *user_get_super(dev_t);
--- a/fs/super.c
+++ b/fs/super.c
@@ -71,7 +71,7 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
 	if (!(sc->gfp_mask & __GFP_FS))
 		return SHRINK_STOP;
 
-	if (!grab_super_passive(sb))
+	if (!trylock_super(sb))
 		return SHRINK_STOP;
 
 	if (sb->s_op->nr_cached_objects)
@@ -105,7 +105,7 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
 		freed += sb->s_op->free_cached_objects(sb, sc);
 	}
 
-	drop_super(sb);
+	up_read(&sb->s_umount);
 	return freed;
 }
 
@@ -118,7 +118,7 @@ static unsigned long super_cache_count(struct shrinker *shrink,
 	sb = container_of(shrink, struct super_block, s_shrink);
 
 	/*
-	 * Don't call grab_super_passive as it is a potential
+	 * Don't call trylock_super as it is a potential
 	 * scalability bottleneck. The counts could get updated
 	 * between super_cache_count and super_cache_scan anyway.
 	 * Call to super_cache_count with shrinker_rwsem held
@@ -348,35 +348,31 @@ static int grab_super(struct super_block *s) __releases(sb_lock)
 }
 
 /*
- *	grab_super_passive - acquire a passive reference
+ *	trylock_super - try to grab ->s_umount shared
  *	@sb: reference we are trying to grab
  *
- *	Tries to acquire a passive reference. This is used in places where we
+ *	Try to prevent fs shutdown.  This is used in places where we
  *	cannot take an active reference but we need to ensure that the
- *	superblock does not go away while we are working on it. It returns
- *	false if a reference was not gained, and returns true with the s_umount
- *	lock held in read mode if a reference is gained. On successful return,
- *	the caller must drop the s_umount lock and the passive reference when
- *	done.
+ *	filesystem is not shut down while we are working on it. It returns
+ *	false if we cannot acquire s_umount or if we lose the race and
+ *	filesystem already got into shutdown, and returns true with the s_umount
+ *	lock held in read mode in case of success.  On successful return,
+ *	the caller must drop the s_umount lock when done.
+ *
+ *	Note that unlike get_super() et.al. this one does *not* bump ->s_count.
+ *	The reason why it's safe is that we are OK with doing trylock instead
+ *	of down_read().  There's a couple of places that are OK with that, but
+ *	it's very much not a general-purpose interface.
 */
-bool grab_super_passive(struct super_block *sb)
+bool trylock_super(struct super_block *sb)
 {
-	spin_lock(&sb_lock);
-	if (hlist_unhashed(&sb->s_instances)) {
-		spin_unlock(&sb_lock);
-		return false;
-	}
-
-	sb->s_count++;
-	spin_unlock(&sb_lock);
-
 	if (down_read_trylock(&sb->s_umount)) {
-		if (sb->s_root && (sb->s_flags & MS_BORN))
+		if (!hlist_unhashed(&sb->s_instances) &&
+		    sb->s_root && (sb->s_flags & MS_BORN))
 			return true;
 		up_read(&sb->s_umount);
 	}
 
-	put_super(sb);
 	return false;
 }