Commit 05b538c1 authored by Pavel Begunkov

io_uring: fix not locked access to fixed buf table

We can only look inside the fixed buffer table while holding
->uring_lock. However, in some cases we don't do the right async prep
for IORING_OP_{WRITE,READ}_FIXED, ending up with a NULL req->imu and
forcing an io-wq worker to try to resolve the fixed buffer without
proper locking.

Move req->imu setup into early req init paths, i.e. io_prep_rw(), which
is called unconditionally for rw requests and under uring_lock.

Fixes: 634d00df ("io_uring: add full-fledged dynamic buffers support")
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
parent d11d31fc
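
For illustration, here is a minimal userspace sketch of the request pattern this patch hardens: a read into a registered (fixed) buffer forced onto an io-wq worker via IOSQE_ASYNC, i.e. executed outside ->uring_lock. It assumes liburing; "testfile", the 4KB buffer size, and the omitted error handling are illustrative only, and this is not the reproducer from the original report.

/* Sketch: drive IORING_OP_READ_FIXED through an io-wq worker (liburing). */
#include <liburing.h>
#include <fcntl.h>
#include <stdlib.h>
#include <sys/uio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct iovec iov = { .iov_base = malloc(4096), .iov_len = 4096 };
	int fd = open("testfile", O_RDONLY);

	io_uring_queue_init(8, &ring, 0);
	/* populate the fixed buffer table that io_prep_rw() now resolves early */
	io_uring_register_buffers(&ring, &iov, 1);

	sqe = io_uring_get_sqe(&ring);
	/* last argument is buf_index into the registered buffer table */
	io_uring_prep_read_fixed(sqe, fd, iov.iov_base, 4096, 0, 0);
	/* punt the request to an io-wq worker, i.e. run without ->uring_lock */
	sqe->flags |= IOSQE_ASYNC;

	io_uring_submit(&ring);
	io_uring_wait_cqe(&ring, &cqe);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}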
@@ -3636,6 +3636,20 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	int ret;
 
 	kiocb->ki_pos = READ_ONCE(sqe->off);
+	/* used for fixed read/write too - just read unconditionally */
+	req->buf_index = READ_ONCE(sqe->buf_index);
+
+	if (req->opcode == IORING_OP_READ_FIXED ||
+	    req->opcode == IORING_OP_WRITE_FIXED) {
+		struct io_ring_ctx *ctx = req->ctx;
+		u16 index;
+
+		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
+			return -EFAULT;
+		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
+		req->imu = ctx->user_bufs[index];
+		io_req_set_rsrc_node(req, ctx, 0);
+	}
 
 	ioprio = READ_ONCE(sqe->ioprio);
 	if (ioprio) {
@@ -3648,12 +3662,9 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		kiocb->ki_ioprio = get_current_ioprio();
 	}
 
-	req->imu = NULL;
 	req->rw.addr = READ_ONCE(sqe->addr);
 	req->rw.len = READ_ONCE(sqe->len);
 	req->rw.flags = READ_ONCE(sqe->rw_flags);
-	/* used for fixed read/write too - just read unconditionally */
-	req->buf_index = READ_ONCE(sqe->buf_index);
 	return 0;
 }
 
@@ -3785,20 +3796,9 @@ static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter
 static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
 			   unsigned int issue_flags)
 {
-	struct io_mapped_ubuf *imu = req->imu;
-	u16 index, buf_index = req->buf_index;
-
-	if (likely(!imu)) {
-		struct io_ring_ctx *ctx = req->ctx;
-
-		if (unlikely(buf_index >= ctx->nr_user_bufs))
-			return -EFAULT;
-		io_req_set_rsrc_node(req, ctx, issue_flags);
-		index = array_index_nospec(buf_index, ctx->nr_user_bufs);
-		imu = READ_ONCE(ctx->user_bufs[index]);
-		req->imu = imu;
-	}
-	return __io_import_fixed(req, rw, iter, imu);
+	if (WARN_ON_ONCE(!req->imu))
+		return -EFAULT;
+	return __io_import_fixed(req, rw, iter, req->imu);
 }
 
 static int io_buffer_add_list(struct io_ring_ctx *ctx,
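
A note on the design: after this change, the buffer table lookup and io_req_set_rsrc_node() both happen at prep time in io_prep_rw(), which always runs under ->uring_lock, with issue_flags passed as 0 since the lock is known to be held there. By the time a request reaches io_import_fixed(), possibly on an io-wq worker, req->imu is already resolved, so the import path shrinks to a WARN_ON_ONCE() sanity check and never touches ctx->user_bufs without the lock.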