Commit 8f7bb368 authored by Miklos Szeredi

fuse: allow interrupt queuing without fc->lock

Interrupt is only queued after the request has been sent to userspace.
This is either done in request_wait_answer() or fuse_dev_do_read()
depending on which state the request is in at the time of the interrupt.
If it's not yet sent, then queuing the interrupt is postponed until the
request is read.  Otherwise (the request has already been read and is
waiting for an answer) the interrupt is queued immediately.

We want to call queue_interrupt() without fc->lock protection, in which
case there can be a race between the two functions:

 - neither of them queues the interrupt (each thinking the other one has
   already done it).

 - both of them queue the interrupt

The first case is prevented by adding memory barriers; the second by checking
(under fiq->waitq.lock) whether the interrupt has already been queued.
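
To make the barrier pairing concrete, here is a minimal user-space sketch of the
pattern, not the kernel code itself: C11 atomics and a pthread mutex stand in for
set_bit()/test_bit(), smp_mb__after_atomic() and fiq->waitq.lock, and the names
queue_interrupt_once(), waiter_side() and reader_side() are invented here to
mirror queue_interrupt(), request_wait_answer() and fuse_dev_do_read().  With
both fences in place at least one thread is guaranteed to see the other's flag,
and the locked "already queued" check keeps the interrupt from being queued twice.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool interrupted = false;	/* stands in for FR_INTERRUPTED */
static atomic_bool sent = false;	/* stands in for FR_SENT */

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static bool queued;			/* stands in for !list_empty(&req->intr_entry) */
static int queue_count;

/* Like queue_interrupt(): safe to call from both sides, because the
 * "already queued" test is made under the queue lock. */
static void queue_interrupt_once(void)
{
	pthread_mutex_lock(&queue_lock);
	if (!queued) {			/* the list_empty() check in the patch */
		queued = true;
		queue_count++;
	}
	pthread_mutex_unlock(&queue_lock);
}

/* Mirrors the request_wait_answer() side. */
static void *waiter_side(void *arg)
{
	(void)arg;
	atomic_store_explicit(&interrupted, true, memory_order_relaxed);
	/* matches the fence in reader_side(); like smp_mb__after_atomic() */
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load_explicit(&sent, memory_order_relaxed))
		queue_interrupt_once();
	return NULL;
}

/* Mirrors the fuse_dev_do_read() side. */
static void *reader_side(void *arg)
{
	(void)arg;
	atomic_store_explicit(&sent, true, memory_order_relaxed);
	/* matches the fence in waiter_side() */
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load_explicit(&interrupted, memory_order_relaxed))
		queue_interrupt_once();
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, waiter_side, NULL);
	pthread_create(&b, NULL, reader_side, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* At least one side sees the other's flag, and the locked check
	 * stops double queuing, so the count is always exactly 1. */
	printf("queued %d time(s)\n", queue_count);
	return queue_count == 1 ? 0 : 1;
}

Built with e.g. "cc -pthread sketch.c", the count stays at 1 across runs.  Remove
either fence and the "queued 0 time(s)" outcome becomes permitted by the memory
model, which is the "neither of them queues it" race above; remove the
list_empty()-style check instead and both threads may queue it.
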
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
parent 4ce60812
@@ -423,8 +423,10 @@ __releases(fc->lock)
 static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
 {
 	spin_lock(&fiq->waitq.lock);
-	list_add_tail(&req->intr_entry, &fiq->interrupts);
-	wake_up_locked(&fiq->waitq);
+	if (list_empty(&req->intr_entry)) {
+		list_add_tail(&req->intr_entry, &fiq->interrupts);
+		wake_up_locked(&fiq->waitq);
+	}
 	spin_unlock(&fiq->waitq.lock);
 	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
 }
@@ -443,6 +445,8 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 		spin_lock(&fc->lock);
 		set_bit(FR_INTERRUPTED, &req->flags);
+		/* matches barrier in fuse_dev_do_read() */
+		smp_mb__after_atomic();
 		if (test_bit(FR_SENT, &req->flags))
 			queue_interrupt(fiq, req);
 		spin_unlock(&fc->lock);
@@ -1358,8 +1362,10 @@ static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
 	if (!test_bit(FR_ISREPLY, &req->flags)) {
 		request_end(fc, req);
 	} else {
-		set_bit(FR_SENT, &req->flags);
 		list_move_tail(&req->list, &fc->processing);
+		set_bit(FR_SENT, &req->flags);
+		/* matches barrier in request_wait_answer() */
+		smp_mb__after_atomic();
 		if (test_bit(FR_INTERRUPTED, &req->flags))
 			queue_interrupt(fiq, req);
 		spin_unlock(&fc->lock);