Commit 68b11e8b authored by Pavel Begunkov, committed by Jens Axboe

io_uring: explicitly count entries for poll reqs

If __io_queue_proc() fails to add a second poll entry, e.g. kmalloc()
failed, but it goes on with a third waitqueue, it may succeed and
overwrite the error status. Count the number of poll entries we added,
so we can set pt->error to zero at the beginning and find out when the
mentioned scenario happens.

Cc: stable@vger.kernel.org
Fixes: 18bceab1 ("io_uring: allow POLL_ADD with double poll_wait() users")
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/9d6b9e561f88bcc0163623b74a76c39f712151c3.1626774457.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 1b48773f
@@ -4802,6 +4802,7 @@ IO_NETOP_FN(recv);
 struct io_poll_table {
 	struct poll_table_struct pt;
 	struct io_kiocb *req;
+	int nr_entries;
 	int error;
 };
@@ -4995,11 +4996,11 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
 	struct io_kiocb *req = pt->req;

 	/*
-	 * If poll->head is already set, it's because the file being polled
-	 * uses multiple waitqueues for poll handling (eg one for read, one
-	 * for write). Setup a separate io_poll_iocb if this happens.
+	 * The file being polled uses multiple waitqueues for poll handling
+	 * (e.g. one for read, one for write). Setup a separate io_poll_iocb
+	 * if this happens.
 	 */
-	if (unlikely(poll->head)) {
+	if (unlikely(pt->nr_entries)) {
 		struct io_poll_iocb *poll_one = poll;

 		/* already have a 2nd entry, fail a third attempt */
@@ -5027,7 +5028,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
 		*poll_ptr = poll;
 	}

-	pt->error = 0;
+	pt->nr_entries++;
 	poll->head = head;

 	if (poll->events & EPOLLEXCLUSIVE)
@@ -5104,9 +5105,12 @@ static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
 	ipt->pt._key = mask;
 	ipt->req = req;
-	ipt->error = -EINVAL;
+	ipt->error = 0;
+	ipt->nr_entries = 0;

 	mask = vfs_poll(req->file, &ipt->pt) & poll->events;
+	if (unlikely(!ipt->nr_entries) && !ipt->error)
+		ipt->error = -EINVAL;

 	spin_lock_irq(&ctx->completion_lock);
 	if (likely(poll->head)) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment