Commit e02f08e2 authored by Linus Torvalds

Merge tag 'fsnotify_for_v6.12-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs

Pull fsnotify fixes from Jan Kara:
 "Fixes for an inotify deadlock and a data race in fsnotify"

* tag 'fsnotify_for_v6.12-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs:
  inotify: Fix possible deadlock in fsnotify_destroy_mark
  fsnotify: Avoid data race between fsnotify_recalc_mask() and fsnotify_object_watched()
parents 4770119d cad3f4a2
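
For context on the first fix: fsnotify backends allocate memory while holding a group's mark_mutex, and the same mutex is taken again when an inode with marks is torn down. If an allocation made under the mutex enters direct reclaim and reclaim ends up back in fsnotify code, the task deadlocks on the mutex it already holds. Until now only groups that passed FSNOTIFY_GROUP_NOFS were protected; the diffs below drop that flag and make the protection unconditional. A minimal sketch of the pattern, with a hypothetical demo_lock standing in for mark_mutex (illustrative, not the patched code):

#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

static DEFINE_MUTEX(demo_lock);	/* stand-in for a group's mark_mutex */

static void *demo_alloc_under_lock(size_t size)
{
	unsigned int nofs;
	void *p;

	mutex_lock(&demo_lock);
	/*
	 * Inside a memalloc_nofs_save() scope every allocation behaves
	 * as GFP_NOFS, so direct reclaim cannot re-enter filesystem
	 * code that would need to take demo_lock again.
	 */
	nofs = memalloc_nofs_save();
	p = kmalloc(size, GFP_KERNEL);
	memalloc_nofs_restore(nofs);
	mutex_unlock(&demo_lock);
	return p;
}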
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -792,7 +792,7 @@ nfsd_file_cache_init(void)
 	}
 
 	nfsd_file_fsnotify_group = fsnotify_alloc_group(&nfsd_file_fsnotify_ops,
-							FSNOTIFY_GROUP_NOFS);
+							0);
 	if (IS_ERR(nfsd_file_fsnotify_group)) {
 		pr_err("nfsd: unable to create fsnotify group: %ld\n",
 			PTR_ERR(nfsd_file_fsnotify_group));
--- a/fs/notify/dnotify/dnotify.c
+++ b/fs/notify/dnotify/dnotify.c
@@ -406,8 +406,7 @@ static int __init dnotify_init(void)
 					  SLAB_PANIC|SLAB_ACCOUNT);
 	dnotify_mark_cache = KMEM_CACHE(dnotify_mark, SLAB_PANIC|SLAB_ACCOUNT);
 
-	dnotify_group = fsnotify_alloc_group(&dnotify_fsnotify_ops,
-					     FSNOTIFY_GROUP_NOFS);
+	dnotify_group = fsnotify_alloc_group(&dnotify_fsnotify_ops, 0);
 	if (IS_ERR(dnotify_group))
 		panic("unable to allocate fsnotify group for dnotify\n");
 	dnotify_sysctl_init();
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -1480,7 +1480,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
 
 	/* fsnotify_alloc_group takes a ref.  Dropped in fanotify_release */
 	group = fsnotify_alloc_group(&fanotify_fsnotify_ops,
-				     FSNOTIFY_GROUP_USER | FSNOTIFY_GROUP_NOFS);
+				     FSNOTIFY_GROUP_USER);
 	if (IS_ERR(group)) {
 		return PTR_ERR(group);
 	}
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -183,8 +183,10 @@ static bool fsnotify_event_needs_parent(struct inode *inode, __u32 mnt_mask,
 	BUILD_BUG_ON(FS_EVENTS_POSS_ON_CHILD & ~FS_EVENTS_POSS_TO_PARENT);
 
 	/* Did either inode/sb/mount subscribe for events with parent/name? */
-	marks_mask |= fsnotify_parent_needed_mask(inode->i_fsnotify_mask);
-	marks_mask |= fsnotify_parent_needed_mask(inode->i_sb->s_fsnotify_mask);
+	marks_mask |= fsnotify_parent_needed_mask(
+				READ_ONCE(inode->i_fsnotify_mask));
+	marks_mask |= fsnotify_parent_needed_mask(
+				READ_ONCE(inode->i_sb->s_fsnotify_mask));
 	marks_mask |= fsnotify_parent_needed_mask(mnt_mask);
 
 	/* Did they subscribe for this event with parent/name info? */
@@ -195,8 +197,8 @@ static bool fsnotify_event_needs_parent(struct inode *inode, __u32 mnt_mask,
 static inline bool fsnotify_object_watched(struct inode *inode, __u32 mnt_mask,
 					   __u32 mask)
 {
-	__u32 marks_mask = inode->i_fsnotify_mask | mnt_mask |
-			   inode->i_sb->s_fsnotify_mask;
+	__u32 marks_mask = READ_ONCE(inode->i_fsnotify_mask) | mnt_mask |
+			   READ_ONCE(inode->i_sb->s_fsnotify_mask);
 
 	return mask & marks_mask & ALL_FSNOTIFY_EVENTS;
 }
@@ -213,7 +215,8 @@ int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data,
 			  int data_type)
 {
 	const struct path *path = fsnotify_data_path(data, data_type);
-	__u32 mnt_mask = path ? real_mount(path->mnt)->mnt_fsnotify_mask : 0;
+	__u32 mnt_mask = path ?
+		READ_ONCE(real_mount(path->mnt)->mnt_fsnotify_mask) : 0;
 	struct inode *inode = d_inode(dentry);
 	struct dentry *parent;
 	bool parent_watched = dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED;
@@ -557,13 +560,13 @@ int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir,
 	    (!inode || !inode->i_fsnotify_marks) &&
 	    (!inode2 || !inode2->i_fsnotify_marks))
 		return 0;
 
-	marks_mask = sb->s_fsnotify_mask;
+	marks_mask = READ_ONCE(sb->s_fsnotify_mask);
 	if (mnt)
-		marks_mask |= mnt->mnt_fsnotify_mask;
+		marks_mask |= READ_ONCE(mnt->mnt_fsnotify_mask);
 	if (inode)
-		marks_mask |= inode->i_fsnotify_mask;
+		marks_mask |= READ_ONCE(inode->i_fsnotify_mask);
 	if (inode2)
-		marks_mask |= inode2->i_fsnotify_mask;
+		marks_mask |= READ_ONCE(inode2->i_fsnotify_mask);
 
 	/*
--- a/fs/notify/group.c
+++ b/fs/notify/group.c
@@ -115,7 +115,6 @@ static struct fsnotify_group *__fsnotify_alloc_group(
 				const struct fsnotify_ops *ops,
 				int flags, gfp_t gfp)
 {
-	static struct lock_class_key nofs_marks_lock;
 	struct fsnotify_group *group;
 
 	group = kzalloc(sizeof(struct fsnotify_group), gfp);
@@ -136,16 +135,6 @@ static struct fsnotify_group *__fsnotify_alloc_group(
 
 	group->ops = ops;
 	group->flags = flags;
-	/*
-	 * For most backends, eviction of inode with a mark is not expected,
-	 * because marks hold a refcount on the inode against eviction.
-	 *
-	 * Use a different lockdep class for groups that support evictable
-	 * inode marks, because with evictable marks, mark_mutex is NOT
-	 * fs-reclaim safe - the mutex is taken when evicting inodes.
-	 */
-	if (flags & FSNOTIFY_GROUP_NOFS)
-		lockdep_set_class(&group->mark_mutex, &nofs_marks_lock);
 
 	return group;
 }
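
The comment deleted above documented the old two-class lockdep scheme: only groups with evictable inode marks could take mark_mutex from inode-eviction context, so they were given their own lock class to keep lockdep's dependency tracking honest. With the NOFS scope now applied to every group (see the fsnotify_backend.h hunk below), one class suffices and the special case goes away. For reference, a hedged sketch of the mechanism being removed, with hypothetical demo_* names:

#include <linux/lockdep.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_mutex);

static void demo_use_separate_lockdep_class(void)
{
	/* keys live in static storage; each key is a distinct lock class */
	static struct lock_class_key demo_key;

	/* lockdep now tracks demo_mutex separately from its siblings */
	lockdep_set_class(&demo_mutex, &demo_key);
}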
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -569,7 +569,7 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
 	/* more bits in old than in new? */
 	int dropped = (old_mask & ~new_mask);
 	/* more bits in this fsn_mark than the inode's mask? */
-	int do_inode = (new_mask & ~inode->i_fsnotify_mask);
+	int do_inode = (new_mask & ~READ_ONCE(inode->i_fsnotify_mask));
 
 	/* update the inode with this new fsn_mark */
 	if (dropped || do_inode)
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -128,7 +128,7 @@ __u32 fsnotify_conn_mask(struct fsnotify_mark_connector *conn)
 	if (WARN_ON(!fsnotify_valid_obj_type(conn->type)))
 		return 0;
 
-	return *fsnotify_conn_mask_p(conn);
+	return READ_ONCE(*fsnotify_conn_mask_p(conn));
 }
 
 static void fsnotify_get_sb_watched_objects(struct super_block *sb)
@@ -245,7 +245,11 @@ static void *__fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
 		    !(mark->flags & FSNOTIFY_MARK_FLAG_NO_IREF))
 			want_iref = true;
 	}
-	*fsnotify_conn_mask_p(conn) = new_mask;
+	/*
+	 * We use WRITE_ONCE() to prevent silly compiler optimizations from
+	 * confusing readers not holding conn->lock with partial updates.
+	 */
+	WRITE_ONCE(*fsnotify_conn_mask_p(conn), new_mask);
 
 	return fsnotify_update_iref(conn, want_iref);
 }
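
The WRITE_ONCE() above pairs with the READ_ONCE() calls added in fsnotify.c: the connector's mask is always written under conn->lock, but the event fast path samples it locklessly, so both sides need annotations to stop the compiler from tearing, fusing, or re-loading the plain accesses. A minimal sketch of the pattern, with hypothetical demo_* names (not the patched code):

#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(demo_lock);	/* stand-in for conn->lock */
static u32 demo_mask;			/* stand-in for the connector's mask */

static void demo_recalc_mask(u32 new_mask)
{
	spin_lock(&demo_lock);
	/* one full-width store; lockless readers never see a partial update */
	WRITE_ONCE(demo_mask, new_mask);
	spin_unlock(&demo_lock);
}

static bool demo_object_watched(u32 event_mask)
{
	/* lockless read; pairs with the WRITE_ONCE() in demo_recalc_mask() */
	return event_mask & READ_ONCE(demo_mask);
}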
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -217,7 +217,6 @@ struct fsnotify_group {
 
 #define FSNOTIFY_GROUP_USER	0x01 /* user allocated group */
 #define FSNOTIFY_GROUP_DUPS	0x02 /* allow multiple marks per object */
-#define FSNOTIFY_GROUP_NOFS	0x04 /* group lock is not direct reclaim safe */
 	int flags;
 	unsigned int owner_flags;	/* stored flags of mark_mutex owner */
 
@@ -268,13 +267,11 @@ struct fsnotify_group {
 static inline void fsnotify_group_lock(struct fsnotify_group *group)
 {
 	mutex_lock(&group->mark_mutex);
-	if (group->flags & FSNOTIFY_GROUP_NOFS)
-		group->owner_flags = memalloc_nofs_save();
+	group->owner_flags = memalloc_nofs_save();
 }
 
 static inline void fsnotify_group_unlock(struct fsnotify_group *group)
 {
-	if (group->flags & FSNOTIFY_GROUP_NOFS)
-		memalloc_nofs_restore(group->owner_flags);
+	memalloc_nofs_restore(group->owner_flags);
 	mutex_unlock(&group->mark_mutex);
 }
@@ -282,7 +279,6 @@ static inline void fsnotify_group_unlock(struct fsnotify_group *group)
 
 static inline void fsnotify_group_assert_locked(struct fsnotify_group *group)
 {
 	WARN_ON_ONCE(!mutex_is_locked(&group->mark_mutex));
-	if (group->flags & FSNOTIFY_GROUP_NOFS)
-		WARN_ON_ONCE(!(current->flags & PF_MEMALLOC_NOFS));
+	WARN_ON_ONCE(!(current->flags & PF_MEMALLOC_NOFS));
 }
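
Taken together, the header changes mean a backend no longer opts in to reclaim safety: it passes its remaining flags (or 0) to fsnotify_alloc_group(), and the lock helpers enter a NOFS scope every time. A hedged sketch of backend setup after this merge, with hypothetical demo_* names:

#include <linux/err.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>

static const struct fsnotify_ops demo_fsnotify_ops;	/* callbacks elided */
static struct fsnotify_group *demo_group;

static int __init demo_backend_init(void)
{
	/* no FSNOTIFY_GROUP_NOFS bit exists to pass anymore */
	demo_group = fsnotify_alloc_group(&demo_fsnotify_ops, 0);
	if (IS_ERR(demo_group))
		return PTR_ERR(demo_group);

	/* fsnotify_group_lock() now always wraps the critical section in NOFS */
	fsnotify_group_lock(demo_group);
	fsnotify_group_unlock(demo_group);
	return 0;
}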