Commit 468e28d4 authored by Linus Torvalds

Merge tag 'v6.6-vfs.super.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs

Pull superblock fixes from Christian Brauner:
 "Two follow-up fixes for the super work this cycle:

   - Move a misplaced lockdep assertion before we potentially free the
     object containing the lock.

   - Ensure that filesystems which match superblocks in sget{_fc}()
     based on sb->s_fs_info are guaranteed to see a valid sb->s_fs_info
     as long as a superblock still appears on the filesystem type's
     superblock list (see the sketch after this message).

     What we want as a proper solution for next cycle is to split
     sb->free_sb() out of sb->kill_sb() so that we can simply call
     kill_super_notify() after sb->kill_sb() but before sb->free_sb().

     Currently, this is lumped together in sb->kill_sb()"
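
As an illustration of the second fix: it matters for any filesystem whose
sget{_fc}() test callback looks at sb->s_fs_info while walking the filesystem
type's superblock list. Below is a minimal sketch of such a callback; the
example_* names and the id field are hypothetical and not taken from this
commit, only the fs_context/super_block fields are real.

#include <linux/fs.h>
#include <linux/fs_context.h>

struct example_fs_info {
        u64 id;
};

/* Hypothetical sget_fc() test callback; example_* names are made up. */
static int example_test_super(struct super_block *sb, struct fs_context *fc)
{
        struct example_fs_info *cur = sb->s_fs_info;
        struct example_fs_info *want = fc->s_fs_info;

        /*
         * sb->s_fs_info is dereferenced while the superblock is still on
         * the fs type's list. If ->kill_sb() freed it before the superblock
         * was unhashed, this would read freed memory; kill_super_notify()
         * now removes the superblock from the list before that can happen.
         */
        return cur->id == want->id;
}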

* tag 'v6.6-vfs.super.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs:
  super: ensure valid info
  super: move lockdep assert
parents bd6c11bc dc3216b1
@@ -434,6 +434,33 @@ void put_super(struct super_block *sb)
 	spin_unlock(&sb_lock);
 }
 
+static void kill_super_notify(struct super_block *sb)
+{
+	lockdep_assert_not_held(&sb->s_umount);
+
+	/* already notified earlier */
+	if (sb->s_flags & SB_DEAD)
+		return;
+
+	/*
+	 * Remove it from @fs_supers so it isn't found by new
+	 * sget{_fc}() walkers anymore. Any concurrent mounter still
+	 * managing to grab a temporary reference is guaranteed to
+	 * already see SB_DYING and will wait until we notify them about
+	 * SB_DEAD.
+	 */
+	spin_lock(&sb_lock);
+	hlist_del_init(&sb->s_instances);
+	spin_unlock(&sb_lock);
+
+	/*
+	 * Let concurrent mounts know that this thing is really dead.
+	 * We don't need @sb->s_umount here as every concurrent caller
+	 * will see SB_DYING and either discard the superblock or wait
+	 * for SB_DEAD.
+	 */
+	super_wake(sb, SB_DEAD);
+}
+
 /**
  * deactivate_locked_super - drop an active reference to superblock
@@ -453,6 +480,8 @@ void deactivate_locked_super(struct super_block *s)
 		unregister_shrinker(&s->s_shrink);
 		fs->kill_sb(s);
 
+		kill_super_notify(s);
+
 		/*
 		 * Since list_lru_destroy() may sleep, we cannot call it from
 		 * put_super(), where we hold the sb_lock. Therefore we destroy
@@ -461,25 +490,6 @@ void deactivate_locked_super(struct super_block *s)
 		list_lru_destroy(&s->s_dentry_lru);
 		list_lru_destroy(&s->s_inode_lru);
 
-		/*
-		 * Remove it from @fs_supers so it isn't found by new
-		 * sget{_fc}() walkers anymore. Any concurrent mounter still
-		 * managing to grab a temporary reference is guaranteed to
-		 * already see SB_DYING and will wait until we notify them about
-		 * SB_DEAD.
-		 */
-		spin_lock(&sb_lock);
-		hlist_del_init(&s->s_instances);
-		spin_unlock(&sb_lock);
-
-		/*
-		 * Let concurrent mounts know that this thing is really dead.
-		 * We don't need @sb->s_umount here as every concurrent caller
-		 * will see SB_DYING and either discard the superblock or wait
-		 * for SB_DEAD.
-		 */
-		super_wake(s, SB_DEAD);
-
 		put_filesystem(fs);
 		put_super(s);
 	} else {
@@ -570,8 +580,8 @@ static bool grab_super_dead(struct super_block *sb)
 		return true;
 	}
 	wait_var_event(&sb->s_flags, wait_dead(sb));
-	put_super(sb);
 	lockdep_assert_not_held(&sb->s_umount);
+	put_super(sb);
 	return false;
 }
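
The hunk above is the assertion move from the first fix: put_super() can drop
the last reference and free the super_block, and s_umount is embedded in that
object, so the lockdep check has to run while sb is still guaranteed to be
alive. Roughly (a simplified sketch, not the kernel function itself):

static bool example_release(struct super_block *sb)
{
        lockdep_assert_not_held(&sb->s_umount); /* sb is still valid here */
        put_super(sb);                          /* may free sb */
        /* no sb-> accesses allowed past this point */
        return false;
}
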
@@ -1278,6 +1288,7 @@ void kill_anon_super(struct super_block *sb)
 {
 	dev_t dev = sb->s_dev;
 	generic_shutdown_super(sb);
+	kill_super_notify(sb);
 	free_anon_bdev(dev);
 }
 EXPORT_SYMBOL(kill_anon_super);
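
For filesystems that call kill_anon_super() from their ->kill_sb() and then
free their private data, the added kill_super_notify() call above means the
superblock is already gone from the type's list before s_fs_info is released.
A hypothetical ->kill_sb() showing that ordering, reusing the made-up
example_fs_info from the earlier sketch (assumes <linux/slab.h> for kfree()):

static void example_kill_sb(struct super_block *sb)
{
        struct example_fs_info *info = sb->s_fs_info;

        /*
         * kill_anon_super() now runs kill_super_notify() after
         * generic_shutdown_super(), so no sget{_fc}() walker can still
         * find this superblock by the time its private info is freed.
         */
        kill_anon_super(sb);
        kfree(info);
}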