Commit 3bd7d748 authored by Linus Torvalds

Merge tag 'io_uring-6.7-2023-12-15' of git://git.kernel.dk/linux

Pull io_uring fixes from Jens Axboe:
 "Just two minor fixes:

   - Fix for the io_uring socket option commands using the wrong value
     on some archs (Al)

   - Tweak to the poll lazy wake enable (me)"

* tag 'io_uring-6.7-2023-12-15' of git://git.kernel.dk/linux:
  io_uring/cmd: fix breakage in SOCKET_URING_OP_SIOC* implementation
  io_uring/poll: don't enable lazy wake for POLLEXCLUSIVE
parents a62aa88b 1ba0e9d6
@@ -434,6 +434,7 @@ enum {
 	/* keep async read/write and isreg together and in order */
 	REQ_F_SUPPORT_NOWAIT_BIT,
 	REQ_F_ISREG_BIT,
+	REQ_F_POLL_NO_LAZY_BIT,
 
 	/* not a real bit, just to check we're not overflowing the space */
 	__REQ_F_LAST_BIT,
@@ -501,6 +502,8 @@ enum {
 	REQ_F_CLEAR_POLLIN	= BIT(REQ_F_CLEAR_POLLIN_BIT),
 	/* hashed into ->cancel_hash_locked, protected by ->uring_lock */
 	REQ_F_HASH_LOCKED	= BIT(REQ_F_HASH_LOCKED_BIT),
+	/* don't use lazy poll wake for this request */
+	REQ_F_POLL_NO_LAZY	= BIT(REQ_F_POLL_NO_LAZY_BIT),
 };
 
 typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);
......
@@ -366,11 +366,16 @@ void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
 
 static void __io_poll_execute(struct io_kiocb *req, int mask)
 {
+	unsigned flags = 0;
+
 	io_req_set_res(req, mask, 0);
 	req->io_task_work.func = io_poll_task_func;
 
 	trace_io_uring_task_add(req, mask);
-	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
+
+	if (!(req->flags & REQ_F_POLL_NO_LAZY))
+		flags = IOU_F_TWQ_LAZY_WAKE;
+	__io_req_task_work_add(req, flags);
 }
 
 static inline void io_poll_execute(struct io_kiocb *req, int res)
@@ -526,10 +531,19 @@ static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
 	poll->head = head;
 	poll->wait.private = (void *) wqe_private;
 
-	if (poll->events & EPOLLEXCLUSIVE)
+	if (poll->events & EPOLLEXCLUSIVE) {
+		/*
+		 * Exclusive waits may only wake a limited amount of entries
+		 * rather than all of them, this may interfere with lazy
+		 * wake if someone does wait(events > 1). Ensure we don't do
+		 * lazy wake for those, as we need to process each one as they
+		 * come in.
+		 */
+		req->flags |= REQ_F_POLL_NO_LAZY;
 		add_wait_queue_exclusive(head, &poll->wait);
-	else
+	} else {
 		add_wait_queue(head, &poll->wait);
+	}
 }
 
 static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
......
@@ -7,7 +7,7 @@
 #include <linux/nospec.h>
 
 #include <uapi/linux/io_uring.h>
-#include <uapi/asm-generic/ioctls.h>
+#include <asm/ioctls.h>
 
 #include "io_uring.h"
 #include "rsrc.h"
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment