Commit cce5fe5e authored by Linus Torvalds

Merge tag 'for-6.3/io_uring-2023-02-16' of git://git.kernel.dk/linux

Pull io_uring updates from Jens Axboe:

 - Cleanup series making the async prep and handling of
   REQ_F_FORCE_ASYNC easier to follow and verify (Dylan)

 - Enable specifying specific flags for OP_MSG_RING (Breno)

 - Enable use of KASAN with the internal request cache (Breno)

 - Split the opcode definition structs into a hot and cold part (Breno)

 - OP_MSG_RING fixes (Pavel, me)

 - Fix an issue with IOPOLL cancelation and PREEMPT_NONE (me)

 - Handle TIF_NOTIFY_RESUME for the io-wq threads that never return to
   userspace (me)

 - Add support for using io_uring_register() with a registered ring fd
   (Josh)

 - Improve handling of poll on the ring fd (Pavel)

 - Series improving the task_work handling (Pavel)

 - Misc cleanups, fixes, improvements (Dmitrii, Quanfa, Richard, Pavel,
   me)

* tag 'for-6.3/io_uring-2023-02-16' of git://git.kernel.dk/linux: (51 commits)
  io_uring: Support calling io_uring_register with a registered ring fd
  io_uring,audit: don't log IORING_OP_MADVISE
  io_uring: mark task TASK_RUNNING before handling resume/task work
  io_uring: always go async for unsupported open flags
  io_uring: always go async for unsupported fadvise flags
  io_uring: for requests that require async, force it
  io_uring: if a linked request has REQ_F_FORCE_ASYNC then run it async
  io_uring: add reschedule point to handle_tw_list()
  io_uring: add a conditional reschedule to the IOPOLL cancelation loop
  io_uring: return normal tw run linking optimisation
  io_uring: refactor tctx_task_work
  io_uring: refactor io_put_task helpers
  io_uring: refactor req allocation
  io_uring: improve io_get_sqe
  io_uring: kill outdated comment about overflow flush
  io_uring: use user visible tail in io_uring_poll()
  io_uring: pass in io_issue_def to io_assign_file()
  io_uring: Enable KASAN for request cache
  io_uring: handle TIF_NOTIFY_RESUME when checking for task_work
  io_uring/msg-ring: ensure flags passing works for task_work completions
  ...
parents eca3a04f 7d3fd88d
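
Before the diff itself, a usage illustration for the registered-ring-fd item above: a minimal, hedged userspace sketch (not taken from this series) that registers the ring fd in slot 0 via IORING_REGISTER_RING_FDS and then issues a further io_uring_register(2) call through that slot by OR'ing IORING_REGISTER_USE_REGISTERED_RING into the opcode. The choice of slot 0 and of IORING_REGISTER_PROBE as the follow-up opcode are illustrative assumptions; error handling is elided.

#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Sketch: use io_uring_register(2) through a registered ring fd (slot 0). */
static int register_probe_via_ring_slot(int ring_fd)
{
	struct io_uring_rsrc_update reg = {
		.offset = 0,				/* ask for slot 0 */
		.data	= (__u64)(unsigned long)ring_fd,
	};
	struct io_uring_probe probe = {};

	/* Make the ring reachable by index rather than by file descriptor. */
	if (syscall(__NR_io_uring_register, ring_fd,
		    IORING_REGISTER_RING_FDS, &reg, 1) < 0)
		return -1;

	/* The fd argument is now the registered slot, not an fd. */
	return syscall(__NR_io_uring_register, 0 /* slot */,
		       IORING_REGISTER_PROBE | IORING_REGISTER_USE_REGISTERED_RING,
		       &probe, 0);
}
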
@@ -195,21 +195,23 @@ struct io_alloc_cache {
 struct io_ring_ctx {
 	/* const or read-mostly hot data */
 	struct {
-		struct percpu_ref refs;
-		struct io_rings *rings;
 		unsigned int flags;
-		enum task_work_notify_mode notify_method;
-		unsigned int compat: 1;
 		unsigned int drain_next: 1;
 		unsigned int restricted: 1;
 		unsigned int off_timeout_used: 1;
 		unsigned int drain_active: 1;
-		unsigned int drain_disabled: 1;
 		unsigned int has_evfd: 1;
-		unsigned int syscall_iopoll: 1;
 		/* all CQEs should be posted only by the submitter task */
 		unsigned int task_complete: 1;
+		unsigned int syscall_iopoll: 1;
+		unsigned int poll_activated: 1;
+		unsigned int drain_disabled: 1;
+		unsigned int compat: 1;
+		enum task_work_notify_mode notify_method;
+		struct io_rings *rings;
+		struct task_struct *submitter_task;
+		struct percpu_ref refs;
 	} ____cacheline_aligned_in_smp;
 	/* submission data */
@@ -293,6 +295,7 @@ struct io_ring_ctx {
 	spinlock_t completion_lock;
 	bool poll_multi_queue;
+	bool cq_waiting;
 	/*
 	 * ->iopoll_list is protected by the ctx->uring_lock for
@@ -318,9 +321,8 @@ struct io_ring_ctx {
 	} ____cacheline_aligned_in_smp;
 	/* Keep this last, we don't need it for the fast path */
+	struct wait_queue_head poll_wq;
 	struct io_restriction restrictions;
-	struct task_struct *submitter_task;
 	/* slow path rsrc auxilary data, used by update/register */
 	struct io_rsrc_node *rsrc_backup_node;
@@ -357,6 +359,7 @@ struct io_ring_ctx {
 	u32 iowq_limits[2];
 	bool iowq_limits_set;
+	struct callback_head poll_wq_task_work;
 	struct list_head defer_list;
 	unsigned sq_thread_idle;
 	/* protected by ->completion_lock */
...
@@ -347,6 +347,8 @@ enum {
  * applicable for IORING_MSG_DATA, obviously.
  */
 #define IORING_MSG_RING_CQE_SKIP	(1U << 0)
+/* Pass through the flags from sqe->file_index to cqe->flags */
+#define IORING_MSG_RING_FLAGS_PASS	(1U << 1)
 /*
  * IO completion data structure (Completion Queue Entry)
@@ -470,6 +472,7 @@ struct io_uring_params {
 #define IORING_FEAT_RSRC_TAGS		(1U << 10)
 #define IORING_FEAT_CQE_SKIP		(1U << 11)
 #define IORING_FEAT_LINKED_FILE		(1U << 12)
+#define IORING_FEAT_REG_REG_RING	(1U << 13)
 /*
  * io_uring_register(2) opcodes and arguments
@@ -517,7 +520,10 @@ enum {
 	IORING_REGISTER_FILE_ALLOC_RANGE	= 25,
 	/* this goes last */
-	IORING_REGISTER_LAST
+	IORING_REGISTER_LAST,
+	/* flag added to the opcode to use a registered ring fd */
+	IORING_REGISTER_USE_REGISTERED_RING	= 1U << 31
 };
 /* io-wq worker categories */
...
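
As an aside on the IORING_FEAT_REG_REG_RING bit added above: userspace can check the features field that io_uring_setup(2) fills in before relying on the registered-ring register path. A small, hedged sketch (the helper name and ring size are made up for illustration):

#include <linux/io_uring.h>
#include <stdbool.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hypothetical helper: create a small ring and report whether the kernel
 * advertises IORING_FEAT_REG_REG_RING in io_uring_params.features. */
static bool kernel_supports_reg_reg_ring(int *ring_fd_out)
{
	struct io_uring_params p = {};
	int fd = syscall(__NR_io_uring_setup, 8, &p);

	if (fd < 0)
		return false;
	*ring_fd_out = fd;
	return p.features & IORING_FEAT_REG_REG_RING;
}
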
@@ -39,6 +39,7 @@ int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	ma->addr = READ_ONCE(sqe->addr);
 	ma->len = READ_ONCE(sqe->len);
 	ma->advice = READ_ONCE(sqe->fadvise_advice);
+	req->flags |= REQ_F_FORCE_ASYNC;
 	return 0;
 #else
 	return -EOPNOTSUPP;
@@ -51,8 +52,7 @@ int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_madvise *ma = io_kiocb_to_cmd(req, struct io_madvise);
 	int ret;
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 	ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
 	io_req_set_res(req, ret, 0);
@@ -62,6 +62,18 @@ int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
 #endif
 }
+static bool io_fadvise_force_async(struct io_fadvise *fa)
+{
+	switch (fa->advice) {
+	case POSIX_FADV_NORMAL:
+	case POSIX_FADV_RANDOM:
+	case POSIX_FADV_SEQUENTIAL:
+		return false;
+	default:
+		return true;
+	}
+}
 int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_fadvise *fa = io_kiocb_to_cmd(req, struct io_fadvise);
@@ -72,6 +84,8 @@ int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	fa->offset = READ_ONCE(sqe->off);
 	fa->len = READ_ONCE(sqe->len);
 	fa->advice = READ_ONCE(sqe->fadvise_advice);
+	if (io_fadvise_force_async(fa))
+		req->flags |= REQ_F_FORCE_ASYNC;
 	return 0;
 }
@@ -80,16 +94,7 @@ int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_fadvise *fa = io_kiocb_to_cmd(req, struct io_fadvise);
 	int ret;
-	if (issue_flags & IO_URING_F_NONBLOCK) {
-		switch (fa->advice) {
-		case POSIX_FADV_NORMAL:
-		case POSIX_FADV_RANDOM:
-		case POSIX_FADV_SEQUENTIAL:
-			break;
-		default:
-			return -EAGAIN;
-		}
-	}
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK && io_fadvise_force_async(fa));
 	ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
 	if (ret < 0)
...
@@ -74,6 +74,7 @@ int io_renameat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	}
 	req->flags |= REQ_F_NEED_CLEANUP;
+	req->flags |= REQ_F_FORCE_ASYNC;
 	return 0;
 }
@@ -82,8 +83,7 @@ int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_rename *ren = io_kiocb_to_cmd(req, struct io_rename);
 	int ret;
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 	ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
 				ren->newpath, ren->flags);
@@ -123,6 +123,7 @@ int io_unlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		return PTR_ERR(un->filename);
 	req->flags |= REQ_F_NEED_CLEANUP;
+	req->flags |= REQ_F_FORCE_ASYNC;
 	return 0;
 }
@@ -131,8 +132,7 @@ int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_unlink *un = io_kiocb_to_cmd(req, struct io_unlink);
 	int ret;
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 	if (un->flags & AT_REMOVEDIR)
 		ret = do_rmdir(un->dfd, un->filename);
@@ -170,6 +170,7 @@ int io_mkdirat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		return PTR_ERR(mkd->filename);
 	req->flags |= REQ_F_NEED_CLEANUP;
+	req->flags |= REQ_F_FORCE_ASYNC;
 	return 0;
 }
@@ -178,8 +179,7 @@ int io_mkdirat(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_mkdir *mkd = io_kiocb_to_cmd(req, struct io_mkdir);
 	int ret;
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 	ret = do_mkdirat(mkd->dfd, mkd->filename, mkd->mode);
@@ -220,6 +220,7 @@ int io_symlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	}
 	req->flags |= REQ_F_NEED_CLEANUP;
+	req->flags |= REQ_F_FORCE_ASYNC;
 	return 0;
 }
@@ -228,8 +229,7 @@ int io_symlinkat(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_link *sl = io_kiocb_to_cmd(req, struct io_link);
 	int ret;
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 	ret = do_symlinkat(sl->oldpath, sl->new_dfd, sl->newpath);
@@ -265,6 +265,7 @@ int io_linkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	}
 	req->flags |= REQ_F_NEED_CLEANUP;
+	req->flags |= REQ_F_FORCE_ASYNC;
 	return 0;
 }
@@ -273,8 +274,7 @@ int io_linkat(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_link *lnk = io_kiocb_to_cmd(req, struct io_link);
 	int ret;
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 	ret = do_linkat(lnk->old_dfd, lnk->oldpath, lnk->new_dfd,
 				lnk->newpath, lnk->flags);
...
[diff collapsed]
@@ -3,6 +3,8 @@
 #include <linux/errno.h>
 #include <linux/lockdep.h>
+#include <linux/resume_user_mode.h>
+#include <linux/kasan.h>
 #include <linux/io_uring_types.h>
 #include <uapi/linux/eventpoll.h>
 #include "io-wq.h"
@@ -28,8 +30,6 @@ enum {
 struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow);
 bool io_req_cqe_overflow(struct io_kiocb *req);
 int io_run_task_work_sig(struct io_ring_ctx *ctx);
-int __io_run_local_work(struct io_ring_ctx *ctx, bool *locked);
-int io_run_local_work(struct io_ring_ctx *ctx);
 void io_req_defer_failed(struct io_kiocb *req, s32 res);
 void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
 bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
@@ -72,7 +72,6 @@ void io_wq_submit_work(struct io_wq_work *work);
 void io_free_req(struct io_kiocb *req);
 void io_queue_next(struct io_kiocb *req);
-void __io_put_task(struct task_struct *task, int nr);
 void io_task_refs_refill(struct io_uring_task *tctx);
 bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
@@ -222,6 +221,13 @@ static inline void io_commit_cqring(struct io_ring_ctx *ctx)
 	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
 }
+static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
+{
+	if (wq_has_sleeper(&ctx->poll_wq))
+		__wake_up(&ctx->poll_wq, TASK_NORMAL, 0,
+				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
+}
 /* requires smb_mb() prior, see wq_has_sleeper() */
 static inline void __io_cqring_wake(struct io_ring_ctx *ctx)
 {
@@ -270,6 +276,15 @@ static inline int io_run_task_work(void)
 	 */
 	if (test_thread_flag(TIF_NOTIFY_SIGNAL))
 		clear_notify_signal();
+	/*
+	 * PF_IO_WORKER never returns to userspace, so check here if we have
+	 * notify work that needs processing.
+	 */
+	if (current->flags & PF_IO_WORKER &&
+	    test_thread_flag(TIF_NOTIFY_RESUME)) {
+		__set_current_state(TASK_RUNNING);
+		resume_user_mode_work(NULL);
+	}
 	if (task_work_pending(current)) {
 		__set_current_state(TASK_RUNNING);
 		task_work_run();
@@ -284,42 +299,6 @@ static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
 	return task_work_pending(current) || !wq_list_empty(&ctx->work_llist);
 }
-static inline int io_run_task_work_ctx(struct io_ring_ctx *ctx)
-{
-	int ret = 0;
-	int ret2;
-	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
-		ret = io_run_local_work(ctx);
-	/* want to run this after in case more is added */
-	ret2 = io_run_task_work();
-	/* Try propagate error in favour of if tasks were run,
-	 * but still make sure to run them if requested
-	 */
-	if (ret >= 0)
-		ret += ret2;
-	return ret;
-}
-static inline int io_run_local_work_locked(struct io_ring_ctx *ctx)
-{
-	bool locked;
-	int ret;
-	if (llist_empty(&ctx->work_llist))
-		return 0;
-	locked = true;
-	ret = __io_run_local_work(ctx, &locked);
-	/* shouldn't happen! */
-	if (WARN_ON_ONCE(!locked))
-		mutex_lock(&ctx->uring_lock);
-	return ret;
-}
 static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
 {
 	if (!*locked) {
@@ -345,19 +324,11 @@ static inline void io_req_complete_defer(struct io_kiocb *req)
 static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
 {
-	if (unlikely(ctx->off_timeout_used || ctx->drain_active || ctx->has_evfd))
+	if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
+		     ctx->has_evfd || ctx->poll_activated))
 		__io_commit_cqring_flush(ctx);
 }
-/* must to be called somewhat shortly after putting a request */
-static inline void io_put_task(struct task_struct *task, int nr)
-{
-	if (likely(task == current))
-		task->io_uring->cached_refs += nr;
-	else
-		__io_put_task(task, nr);
-}
 static inline void io_get_task_refs(int nr)
 {
 	struct io_uring_task *tctx = current->io_uring;
@@ -372,19 +343,31 @@ static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
 	return !ctx->submit_state.free_list.next;
 }
-static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx)
+extern struct kmem_cache *req_cachep;
+static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
 {
-	if (unlikely(io_req_cache_empty(ctx)))
-		return __io_alloc_req_refill(ctx);
-	return true;
+	struct io_kiocb *req;
+	req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
+	kasan_unpoison_object_data(req_cachep, req);
+	wq_stack_extract(&ctx->submit_state.free_list);
+	return req;
 }
-static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
+static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req)
 {
-	struct io_wq_work_node *node;
-	node = wq_stack_extract(&ctx->submit_state.free_list);
-	return container_of(node, struct io_kiocb, comp_list);
+	if (unlikely(io_req_cache_empty(ctx))) {
+		if (!__io_alloc_req_refill(ctx))
+			return false;
+	}
+	*req = io_extract_req(ctx);
+	return true;
+}
+static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
+{
+	return likely(ctx->submitter_task == current);
 }
 static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
...
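
The io_extract_req()/io_alloc_req() rework above is where Breno's KASAN-for-the-request-cache change lands in this header: requests parked on the free list stay poisoned and are only unpoisoned when extracted. The add-to-cache side lives in the collapsed io_uring.c diff, so the helper below is an inferred sketch of that counterpart, not a copy of the upstream hunk:

/*
 * Inferred counterpart of io_extract_req() (assumption: the real version in
 * io_uring.c may differ in detail). Park the request on the free stack,
 * then poison it so any access while it sits in the cache trips KASAN.
 */
static inline void io_req_add_to_cache_sketch(struct io_kiocb *req,
					      struct io_ring_ctx *ctx)
{
	wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
	kasan_poison_object_data(req_cachep, req);
}
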
@@ -13,6 +13,11 @@
 #include "filetable.h"
 #include "msg_ring.h"
+/* All valid masks for MSG_RING */
+#define IORING_MSG_RING_MASK		(IORING_MSG_RING_CQE_SKIP | \
+					IORING_MSG_RING_FLAGS_PASS)
 struct io_msg {
 	struct file *file;
 	struct file *src_file;
@@ -21,7 +26,10 @@ struct io_msg {
 	u32 len;
 	u32 cmd;
 	u32 src_fd;
+	union {
 		u32 dst_fd;
+		u32 cqe_flags;
+	};
 	u32 flags;
 };
@@ -91,6 +99,11 @@ static void io_msg_tw_complete(struct callback_head *head)
 	if (current->flags & PF_EXITING) {
 		ret = -EOWNERDEAD;
 	} else {
+		u32 flags = 0;
+		if (msg->flags & IORING_MSG_RING_FLAGS_PASS)
+			flags = msg->cqe_flags;
 		/*
 		 * If the target ring is using IOPOLL mode, then we need to be
 		 * holding the uring_lock for posting completions. Other ring
@@ -99,7 +112,7 @@ static void io_msg_tw_complete(struct callback_head *head)
 		 */
 		if (target_ctx->flags & IORING_SETUP_IOPOLL)
 			mutex_lock(&target_ctx->uring_lock);
-		if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
+		if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, flags))
 			ret = -EOVERFLOW;
 		if (target_ctx->flags & IORING_SETUP_IOPOLL)
 			mutex_unlock(&target_ctx->uring_lock);
@@ -114,9 +127,12 @@ static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_ring_ctx *target_ctx = req->file->private_data;
 	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
+	u32 flags = 0;
 	int ret;
-	if (msg->src_fd || msg->dst_fd || msg->flags)
+	if (msg->src_fd || msg->flags & ~IORING_MSG_RING_FLAGS_PASS)
+		return -EINVAL;
+	if (!(msg->flags & IORING_MSG_RING_FLAGS_PASS) && msg->dst_fd)
 		return -EINVAL;
 	if (target_ctx->flags & IORING_SETUP_R_DISABLED)
 		return -EBADFD;
@@ -124,15 +140,18 @@ static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
 	if (io_msg_need_remote(target_ctx))
 		return io_msg_exec_remote(req, io_msg_tw_complete);
+	if (msg->flags & IORING_MSG_RING_FLAGS_PASS)
+		flags = msg->cqe_flags;
 	ret = -EOVERFLOW;
 	if (target_ctx->flags & IORING_SETUP_IOPOLL) {
 		if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
 			return -EAGAIN;
-		if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
+		if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, flags))
 			ret = 0;
 		io_double_unlock_ctx(target_ctx);
 	} else {
-		if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
+		if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, flags))
 			ret = 0;
 	}
 	return ret;
@@ -241,7 +260,7 @@ int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	msg->src_fd = READ_ONCE(sqe->addr3);
 	msg->dst_fd = READ_ONCE(sqe->file_index);
 	msg->flags = READ_ONCE(sqe->msg_ring_flags);
-	if (msg->flags & ~IORING_MSG_RING_CQE_SKIP)
+	if (msg->flags & ~IORING_MSG_RING_MASK)
 		return -EINVAL;
 	return 0;
...
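
Tying the msg_ring.c changes back to the new uapi flag: with IORING_MSG_RING_FLAGS_PASS set, the sender stashes the CQE flags it wants the target ring to see in sqe->file_index, which io_msg_ring_prep() reads into the dst_fd/cqe_flags union above. A hedged userspace sketch using raw SQE fields (liburing may offer a dedicated prep helper, which is not assumed here); the sender's own user_data and submission are elided:

#include <liburing.h>
#include <string.h>

/* Sketch: post a CQE with caller-chosen cqe->flags into another ring.
 * 'sqe' comes from io_uring_get_sqe() on the sending ring, 'target_fd'
 * is the fd of the receiving ring. */
static void prep_msg_ring_with_cqe_flags(struct io_uring_sqe *sqe,
					 int target_fd, __u64 peer_user_data,
					 __u32 res, __u32 cqe_flags)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_MSG_RING;
	sqe->fd = target_fd;			/* target ring */
	sqe->off = peer_user_data;		/* becomes cqe->user_data there */
	sqe->len = res;				/* becomes cqe->res there */
	sqe->msg_ring_flags = IORING_MSG_RING_FLAGS_PASS;
	sqe->file_index = cqe_flags;		/* passed through to cqe->flags */
}
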
@@ -90,6 +90,7 @@ int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		return -EINVAL;
 	shutdown->how = READ_ONCE(sqe->len);
+	req->flags |= REQ_F_FORCE_ASYNC;
 	return 0;
 }
@@ -99,8 +100,7 @@ int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
 	struct socket *sock;
 	int ret;
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 	sock = sock_from_file(req->file);
 	if (unlikely(!sock))
...
@@ -68,9 +68,8 @@ struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx)
 	struct io_kiocb *notif;
 	struct io_notif_data *nd;
-	if (unlikely(!io_alloc_req_refill(ctx)))
+	if (unlikely(!io_alloc_req(ctx, &notif)))
 		return NULL;
-	notif = io_alloc_req(ctx);
 	notif->opcode = IORING_OP_NOP;
 	notif->flags = 0;
 	notif->file = NULL;
...
[diff collapsed]
@@ -2,7 +2,7 @@
 #ifndef IOU_OP_DEF_H
 #define IOU_OP_DEF_H
-struct io_op_def {
+struct io_issue_def {
 	/* needs req->file assigned */
 	unsigned needs_file : 1;
 	/* should block plug */
@@ -29,19 +29,24 @@ struct io_op_def {
 	unsigned iopoll_queue : 1;
 	/* opcode specific path will handle ->async_data allocation if needed */
 	unsigned manual_alloc : 1;
+	int (*issue)(struct io_kiocb *, unsigned int);
+	int (*prep)(struct io_kiocb *, const struct io_uring_sqe *);
+};
+struct io_cold_def {
 	/* size of async data needed, if any */
 	unsigned short async_size;
 	const char *name;
-	int (*prep)(struct io_kiocb *, const struct io_uring_sqe *);
-	int (*issue)(struct io_kiocb *, unsigned int);
 	int (*prep_async)(struct io_kiocb *);
 	void (*cleanup)(struct io_kiocb *);
 	void (*fail)(struct io_kiocb *);
 };
-extern const struct io_op_def io_op_defs[];
+extern const struct io_issue_def io_issue_defs[];
+extern const struct io_cold_def io_cold_defs[];
 void io_uring_optable_init(void);
 #endif
@@ -31,6 +31,15 @@ struct io_close {
 	u32 file_slot;
 };
+static bool io_openat_force_async(struct io_open *open)
+{
+	/*
+	 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
+	 * it'll always -EAGAIN
+	 */
+	return open->how.flags & (O_TRUNC | O_CREAT | O_TMPFILE);
+}
 static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
@@ -61,6 +70,8 @@ static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
 	open->nofile = rlimit(RLIMIT_NOFILE);
 	req->flags |= REQ_F_NEED_CLEANUP;
+	if (io_openat_force_async(open))
+		req->flags |= REQ_F_FORCE_ASYNC;
 	return 0;
 }
@@ -108,12 +119,7 @@ int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
 	nonblock_set = op.open_flag & O_NONBLOCK;
 	resolve_nonblock = open->how.resolve & RESOLVE_CACHED;
 	if (issue_flags & IO_URING_F_NONBLOCK) {
-		/*
-		 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
-		 * it'll always -EAGAIN
-		 */
-		if (open->how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
-			return -EAGAIN;
+		WARN_ON_ONCE(io_openat_force_async(open));
 		op.lookup_flags |= LOOKUP_CACHED;
 		op.open_flag |= O_NONBLOCK;
 	}
...
@@ -678,7 +678,7 @@ static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
 int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
 {
-	const struct io_op_def *def = &io_op_defs[req->opcode];
+	const struct io_issue_def *def = &io_issue_defs[req->opcode];
 	struct async_poll *apoll;
 	struct io_poll_table ipt;
 	__poll_t mask = POLLPRI | POLLERR | EPOLLET;
...
@@ -410,7 +410,7 @@ static inline int io_import_iovec(int rw, struct io_kiocb *req,
 				  unsigned int issue_flags)
 {
 	*iovec = __io_import_iovec(rw, req, s, issue_flags);
-	if (unlikely(IS_ERR(*iovec)))
+	if (IS_ERR(*iovec))
 		return PTR_ERR(*iovec);
 	iov_iter_save_state(&s->iter, &s->iter_state);
@@ -516,7 +516,7 @@ static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
 static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
 			     struct io_rw_state *s, bool force)
 {
-	if (!force && !io_op_defs[req->opcode].prep_async)
+	if (!force && !io_cold_defs[req->opcode].prep_async)
 		return 0;
 	if (!req_has_async_data(req)) {
 		struct io_async_rw *iorw;
...
@@ -34,6 +34,7 @@ static int __io_splice_prep(struct io_kiocb *req,
 	if (unlikely(sp->flags & ~valid_flags))
 		return -EINVAL;
 	sp->splice_fd_in = READ_ONCE(sqe->splice_fd_in);
+	req->flags |= REQ_F_FORCE_ASYNC;
 	return 0;
 }
@@ -52,8 +53,7 @@ int io_tee(struct io_kiocb *req, unsigned int issue_flags)
 	struct file *in;
 	long ret = 0;
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 	if (sp->flags & SPLICE_F_FD_IN_FIXED)
 		in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);
@@ -94,8 +94,7 @@ int io_splice(struct io_kiocb *req, unsigned int issue_flags)
 	struct file *in;
 	long ret = 0;
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 	if (sp->flags & SPLICE_F_FD_IN_FIXED)
 		in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);
...
@@ -312,7 +312,7 @@ static int io_sq_thread(void *data)
 	do_exit(0);
 }
-int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
+void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
 {
 	DEFINE_WAIT(wait);
@@ -327,7 +327,6 @@ int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
 	} while (!signal_pending(current));
 	finish_wait(&ctx->sqo_sq_wait, &wait);
-	return 0;
 }
 __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
...
@@ -26,4 +26,4 @@ void io_sq_thread_stop(struct io_sq_data *sqd);
 void io_sq_thread_park(struct io_sq_data *sqd);
 void io_sq_thread_unpark(struct io_sq_data *sqd);
 void io_put_sq_data(struct io_sq_data *sqd);
-int io_sqpoll_wait_sq(struct io_ring_ctx *ctx);
+void io_sqpoll_wait_sq(struct io_ring_ctx *ctx);
@@ -48,6 +48,7 @@ int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	}
 	req->flags |= REQ_F_NEED_CLEANUP;
+	req->flags |= REQ_F_FORCE_ASYNC;
 	return 0;
 }
@@ -56,8 +57,7 @@ int io_statx(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_statx *sx = io_kiocb_to_cmd(req, struct io_statx);
 	int ret;
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 	ret = do_statx(sx->dfd, sx->filename, sx->flags, sx->mask, sx->buffer);
 	io_req_set_res(req, ret, 0);
...
@@ -32,6 +32,8 @@ int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	sync->off = READ_ONCE(sqe->off);
 	sync->len = READ_ONCE(sqe->len);
 	sync->flags = READ_ONCE(sqe->sync_range_flags);
+	req->flags |= REQ_F_FORCE_ASYNC;
 	return 0;
 }
@@ -41,8 +43,7 @@ int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
 	int ret;
 	/* sync_file_range always requires a blocking context */
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 	ret = sync_file_range(req->file, sync->off, sync->len, sync->flags);
 	io_req_set_res(req, ret, 0);
@@ -62,6 +63,7 @@ int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	sync->off = READ_ONCE(sqe->off);
 	sync->len = READ_ONCE(sqe->len);
+	req->flags |= REQ_F_FORCE_ASYNC;
 	return 0;
 }
@@ -72,8 +74,7 @@ int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
 	int ret;
 	/* fsync always requires a blocking context */
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 	ret = vfs_fsync_range(req->file, sync->off, end > 0 ? end : LLONG_MAX,
 				sync->flags & IORING_FSYNC_DATASYNC);
@@ -91,6 +92,7 @@ int io_fallocate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	sync->off = READ_ONCE(sqe->off);
 	sync->len = READ_ONCE(sqe->addr);
 	sync->mode = READ_ONCE(sqe->len);
+	req->flags |= REQ_F_FORCE_ASYNC;
 	return 0;
 }
@@ -100,8 +102,8 @@ int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
 	int ret;
 	/* fallocate always requiring blocking context */
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 	ret = vfs_fallocate(req->file, sync->mode, sync->off, sync->len);
 	if (ret >= 0)
 		fsnotify_modify(req->file);
...
@@ -75,6 +75,7 @@ static int __io_getxattr_prep(struct io_kiocb *req,
 	}
 	req->flags |= REQ_F_NEED_CLEANUP;
+	req->flags |= REQ_F_FORCE_ASYNC;
 	return 0;
 }
@@ -109,8 +110,7 @@ int io_fgetxattr(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
 	int ret;
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 	ret = do_getxattr(mnt_idmap(req->file->f_path.mnt),
 			req->file->f_path.dentry,
@@ -127,8 +127,7 @@ int io_getxattr(struct io_kiocb *req, unsigned int issue_flags)
 	struct path path;
 	int ret;
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 retry:
 	ret = filename_lookup(AT_FDCWD, ix->filename, lookup_flags, &path, NULL);
@@ -174,6 +173,7 @@ static int __io_setxattr_prep(struct io_kiocb *req,
 	}
 	req->flags |= REQ_F_NEED_CLEANUP;
+	req->flags |= REQ_F_FORCE_ASYNC;
 	return 0;
 }
@@ -222,8 +222,7 @@ int io_fsetxattr(struct io_kiocb *req, unsigned int issue_flags)
 {
 	int ret;
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 	ret = __io_setxattr(req, issue_flags, &req->file->f_path);
 	io_xattr_finish(req, ret);
@@ -237,8 +236,7 @@ int io_setxattr(struct io_kiocb *req, unsigned int issue_flags)
 	struct path path;
 	int ret;
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		return -EAGAIN;
+	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 retry:
 	ret = filename_lookup(AT_FDCWD, ix->filename, lookup_flags, &path, NULL);
...