Commit 13d34ac6 authored by Jeff Layton, committed by Linus Torvalds

Revert "fsnotify: destroy marks with call_srcu instead of dedicated thread"

This reverts commit c510eff6 ("fsnotify: destroy marks with
call_srcu instead of dedicated thread").

Eryu reported that he was seeing some OOM kills kick in when running a
testcase that adds and removes inotify marks on a file in a tight loop.

The above commit changed the code to use call_srcu to clean up the
marks.  While that does (in principle) work, the srcu callback job is
limited to cleaning up entries in small batches and only once per jiffy.
It's easily possible to overwhelm that machinery with too many call_srcu
callbacks, and Eryu's reproducer did just that.
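
As an illustration, a minimal userspace sketch of that sort of reproducer
might look like this (the path /tmp/testfile and the iteration count are
invented for the example, not taken from Eryu's testcase). Each
inotify_rm_watch() frees one mark, which with the reverted commit in place
queued one more call_srcu callback:

	#include <stdio.h>
	#include <sys/inotify.h>
	#include <unistd.h>

	int main(void)
	{
		int fd, wd;
		long i;

		fd = inotify_init1(0);
		if (fd < 0) {
			perror("inotify_init1");
			return 1;
		}

		/* Add and remove a watch on one file in a tight loop;
		 * /tmp/testfile must already exist. */
		for (i = 0; i < 10000000L; i++) {
			wd = inotify_add_watch(fd, "/tmp/testfile", IN_MODIFY);
			if (wd < 0) {
				perror("inotify_add_watch");
				break;
			}
			inotify_rm_watch(fd, wd);	/* frees the fsnotify mark */
		}

		close(fd);
		return 0;
	}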

There's also another potential problem with using call_srcu here.  While
you can obviously sleep while holding the srcu_read_lock, the callbacks
run under local_bh_disable, so you can't sleep there.
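
To make the asymmetry concrete, here is a kernel-style sketch (not code
from this patch; example_mutex and both functions are invented for
illustration):

	static DEFINE_MUTEX(example_mutex);

	/* Reader side: an srcu_read_lock() critical section may sleep. */
	static void example_reader(void)
	{
		int idx = srcu_read_lock(&fsnotify_mark_srcu);

		mutex_lock(&example_mutex);	/* sleeping here is legal */
		mutex_unlock(&example_mutex);

		srcu_read_unlock(&fsnotify_mark_srcu, idx);
	}

	/* Callback side: call_srcu() callbacks run with BHs disabled, so
	 * nothing reachable from the callback may sleep. */
	static void example_free_cb(struct rcu_head *rcu)
	{
		mutex_lock(&example_mutex);	/* BUG: may sleep under local_bh_disable() */
		mutex_unlock(&example_mutex);
	}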

It's possible when putting the last reference to the fsnotify_mark that
we'll end up putting a chain of references including the fsnotify_group,
uid, and associated keys.  While I don't see any obvious way that
could occur, it's probably still best to avoid using call_srcu here
after all.
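
For instance, a worst-case teardown from the final put could fan out
roughly like this (a sketch of the concern, not a chain traced through
the actual code paths):

	fsnotify_put_mark(mark)
	  -> mark->free_mark(mark)              /* backend-specific cleanup */
	  -> fsnotify_put_group(group)          /* possibly the last group ref */
	       -> free_uid(...), key_put(...)   /* uid and key refs go too */

Any step in such a chain that can sleep would be unsafe from a
BH-disabled callback.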

This patch reverts the above patch.  A later patch will take a different
approach to eliminate the dedicated thread here.
Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
Reported-by: Eryu Guan <guaneryu@gmail.com>
Tested-by: Eryu Guan <guaneryu@gmail.com>
Cc: Jan Kara <jack@suse.com>
Cc: Eric Paris <eparis@parisplace.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 48f7df32
fs/notify/mark.c
@@ -92,6 +92,9 @@
 #include "fsnotify.h"
 
 struct srcu_struct fsnotify_mark_srcu;
+static DEFINE_SPINLOCK(destroy_lock);
+static LIST_HEAD(destroy_list);
+static DECLARE_WAIT_QUEUE_HEAD(destroy_waitq);
 
 void fsnotify_get_mark(struct fsnotify_mark *mark)
 {
@@ -165,19 +168,10 @@ void fsnotify_detach_mark(struct fsnotify_mark *mark)
 	atomic_dec(&group->num_marks);
 }
 
-static void
-fsnotify_mark_free_rcu(struct rcu_head *rcu)
-{
-	struct fsnotify_mark	*mark;
-
-	mark = container_of(rcu, struct fsnotify_mark, g_rcu);
-	fsnotify_put_mark(mark);
-}
-
 /*
- * Free fsnotify mark. The freeing is actually happening from a call_srcu
- * callback. Caller must have a reference to the mark or be protected by
- * fsnotify_mark_srcu.
+ * Free fsnotify mark. The freeing is actually happening from a kthread which
+ * first waits for srcu period end. Caller must have a reference to the mark
+ * or be protected by fsnotify_mark_srcu.
  */
 void fsnotify_free_mark(struct fsnotify_mark *mark)
 {
@@ -192,7 +186,10 @@ void fsnotify_free_mark(struct fsnotify_mark *mark)
 	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
 	spin_unlock(&mark->lock);
 
-	call_srcu(&fsnotify_mark_srcu, &mark->g_rcu, fsnotify_mark_free_rcu);
+	spin_lock(&destroy_lock);
+	list_add(&mark->g_list, &destroy_list);
+	spin_unlock(&destroy_lock);
+	wake_up(&destroy_waitq);
 
 	/*
 	 * Some groups like to know that marks are being freed.  This is a
@@ -388,7 +385,11 @@ int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
 
 	spin_unlock(&mark->lock);
 
-	call_srcu(&fsnotify_mark_srcu, &mark->g_rcu, fsnotify_mark_free_rcu);
+	spin_lock(&destroy_lock);
+	list_add(&mark->g_list, &destroy_list);
+	spin_unlock(&destroy_lock);
+	wake_up(&destroy_waitq);
+
 	return ret;
 }
@@ -491,3 +492,40 @@ void fsnotify_init_mark(struct fsnotify_mark *mark,
 	atomic_set(&mark->refcnt, 1);
 	mark->free_mark = free_mark;
 }
+
+static int fsnotify_mark_destroy(void *ignored)
+{
+	struct fsnotify_mark *mark, *next;
+	struct list_head private_destroy_list;
+
+	for (;;) {
+		spin_lock(&destroy_lock);
+		/* exchange the list head */
+		list_replace_init(&destroy_list, &private_destroy_list);
+		spin_unlock(&destroy_lock);
+
+		synchronize_srcu(&fsnotify_mark_srcu);
+
+		list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) {
+			list_del_init(&mark->g_list);
+			fsnotify_put_mark(mark);
+		}
+
+		wait_event_interruptible(destroy_waitq, !list_empty(&destroy_list));
+	}
+
+	return 0;
+}
+
+static int __init fsnotify_mark_init(void)
+{
+	struct task_struct *thread;
+
+	thread = kthread_run(fsnotify_mark_destroy, NULL,
+			     "fsnotify_mark");
+	if (IS_ERR(thread))
+		panic("unable to start fsnotify mark destruction thread.");
+
+	return 0;
+}
+device_initcall(fsnotify_mark_init);

include/linux/fsnotify_backend.h
@@ -220,10 +220,7 @@ struct fsnotify_mark {
 	/* List of marks by group->i_fsnotify_marks. Also reused for queueing
 	 * mark into destroy_list when it's waiting for the end of SRCU period
 	 * before it can be freed. [group->mark_mutex] */
-	union {
-		struct list_head g_list;
-		struct rcu_head g_rcu;
-	};
+	struct list_head g_list;
 	/* Protects inode / mnt pointers, flags, masks */
 	spinlock_t lock;
 	/* List of marks for inode / vfsmount [obj_lock] */