Commit 7b29f92d authored by Jens Axboe

io_uring: cache async and regular file state for fixed files

We have to dig quite deep to check whether or not a file supports a
fast-path nonblock attempt. For fixed files, we can do this lookup once
and cache the state instead.

This adds two new bits to track whether we support an async read/write
attempt, and lines up the REQ_F_ISREG bit with those two. The file slot
re-uses the last 3 bits (or 2, on 32-bit) of the file pointer to cache
that state, and then we mask it in when we go and use a fixed file.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent d44f554e
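For readers new to the underlying trick: struct file allocations are at least 8-byte aligned on 64-bit (4-byte on 32-bit), so the low 3 (or 2) bits of a pointer to one are always zero and can carry flags, provided every reader masks them off before dereferencing. A minimal userspace sketch of that pointer-tagging idea; tag_ptr, untag_ptr and TAG_MASK are illustrative names, not part of the patch:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TAG_MASK 0x7UL  /* low 3 bits are spare given 8-byte alignment */

static void *tag_ptr(void *p, unsigned long tags)
{
        assert(((uintptr_t)p & TAG_MASK) == 0); /* alignment gives us the bits */
        assert((tags & ~TAG_MASK) == 0);
        return (void *)((uintptr_t)p | tags);
}

static void *untag_ptr(void *p)
{
        return (void *)((uintptr_t)p & ~TAG_MASK);
}

static unsigned long ptr_tags(void *p)
{
        return (unsigned long)((uintptr_t)p & TAG_MASK);
}

int main(void)
{
        int *v = malloc(sizeof(*v));     /* malloc returns suitably aligned memory */
        void *slot = tag_ptr(v, 0x3UL);  /* stash two flag bits in the pointer */

        *(int *)untag_ptr(slot) = 42;    /* always mask before dereferencing */
        printf("value=%d tags=%#lx\n", *(int *)untag_ptr(slot), ptr_tags(slot));
        free(v);
        return 0;
}

The diff below applies the same shape to the fixed-file table: tag once at registration time, mask on every lookup.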
@@ -690,7 +690,6 @@ enum {
         REQ_F_CUR_POS_BIT,
         REQ_F_NOWAIT_BIT,
         REQ_F_LINK_TIMEOUT_BIT,
-        REQ_F_ISREG_BIT,
         REQ_F_NEED_CLEANUP_BIT,
         REQ_F_POLLED_BIT,
         REQ_F_BUFFER_SELECTED_BIT,
@@ -698,6 +697,10 @@ enum {
         REQ_F_LTIMEOUT_ACTIVE_BIT,
         REQ_F_COMPLETE_INLINE_BIT,
         REQ_F_REISSUE_BIT,
+        /* keep async read/write and isreg together and in order */
+        REQ_F_ASYNC_READ_BIT,
+        REQ_F_ASYNC_WRITE_BIT,
+        REQ_F_ISREG_BIT,

         /* not a real bit, just to check we're not overflowing the space */
         __REQ_F_LAST_BIT,
@@ -727,8 +730,6 @@ enum {
         REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT),
         /* has or had linked timeout */
         REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
-        /* regular file */
-        REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
         /* needs cleanup */
         REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT),
         /* already went through poll handler */
@@ -743,6 +744,12 @@ enum {
         REQ_F_COMPLETE_INLINE = BIT(REQ_F_COMPLETE_INLINE_BIT),
         /* caller should reissue async */
         REQ_F_REISSUE = BIT(REQ_F_REISSUE_BIT),
+        /* supports async reads */
+        REQ_F_ASYNC_READ = BIT(REQ_F_ASYNC_READ_BIT),
+        /* supports async writes */
+        REQ_F_ASYNC_WRITE = BIT(REQ_F_ASYNC_WRITE_BIT),
+        /* regular file */
+        REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
 };

 struct async_poll {
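The "keep async read/write and isreg together and in order" comment above is load-bearing: the three FFS_* tag values defined later in this patch (0x1, 0x2, 0x4) appear in the same order as these three bit positions, so a single shift by REQ_F_ASYNC_READ_BIT converts cached file-slot bits into request flags. A compile-time sketch of that invariant, using made-up bit positions (the real values depend on the enum entries above):

#include <assert.h>

/* Illustrative positions only; in the kernel they come from the enum above. */
enum {
        REQ_F_ASYNC_READ_BIT = 12,
        REQ_F_ASYNC_WRITE_BIT = 13,
        REQ_F_ISREG_BIT = 14,
};

#define FFS_ASYNC_READ 0x1UL
#define FFS_ASYNC_WRITE 0x2UL
#define FFS_ISREG 0x4UL

/* One shift moves every cached FFS bit to its REQ_F position at once. */
static_assert((FFS_ASYNC_READ << REQ_F_ASYNC_READ_BIT) == (1UL << REQ_F_ASYNC_READ_BIT),
              "read bits must line up");
static_assert((FFS_ASYNC_WRITE << REQ_F_ASYNC_READ_BIT) == (1UL << REQ_F_ASYNC_WRITE_BIT),
              "write bits must line up");
static_assert((FFS_ISREG << REQ_F_ASYNC_READ_BIT) == (1UL << REQ_F_ISREG_BIT),
              "isreg bits must line up");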
@@ -2651,7 +2658,7 @@ static bool io_bdev_nowait(struct block_device *bdev)
  * any file. For now, just ensure that anything potentially problematic is done
  * inline.
  */
-static bool io_file_supports_async(struct file *file, int rw)
+static bool __io_file_supports_async(struct file *file, int rw)
 {
         umode_t mode = file_inode(file)->i_mode;

@@ -2684,6 +2691,16 @@ static bool io_file_supports_async(struct file *file, int rw)
         return file->f_op->write_iter != NULL;
 }

+static bool io_file_supports_async(struct io_kiocb *req, int rw)
+{
+        if (rw == READ && (req->flags & REQ_F_ASYNC_READ))
+                return true;
+        else if (rw == WRITE && (req->flags & REQ_F_ASYNC_WRITE))
+                return true;
+
+        return __io_file_supports_async(req->file, rw);
+}
+
 static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
         struct io_ring_ctx *ctx = req->ctx;
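The new wrapper gives fixed files a constant-time answer: registration (later in this patch) probes the file once and the result reaches req->flags via the file slot, so the common case is a bit test, while non-fixed files still pay for the full __io_file_supports_async() walk on every call. The same check-cache-then-compute shape, reduced to a userspace sketch with illustrative names (CAP_READ, probe_capability and supports are not from the patch):

#include <stdbool.h>
#include <stdio.h>

#define CAP_READ 0x1u
#define CAP_WRITE 0x2u

struct request {
        unsigned int flags;     /* capability bits cached at setup time */
        int fd;
};

/* Stand-in for the expensive probe (__io_file_supports_async in the patch). */
static bool probe_capability(int fd, unsigned int cap)
{
        (void)fd; (void)cap;
        return true;            /* pretend we walked f_op, inode mode, ... */
}

static bool supports(struct request *req, unsigned int cap)
{
        if (req->flags & cap)   /* fast path: cached for fixed files */
                return true;
        return probe_capability(req->fd, cap); /* slow path on every call */
}

int main(void)
{
        struct request fixed = { .flags = CAP_READ | CAP_WRITE, .fd = 3 };
        struct request plain = { .flags = 0, .fd = 4 };

        printf("fixed read: %d (no probe)\n", supports(&fixed, CAP_READ));
        printf("plain read: %d (probed)\n", supports(&plain, CAP_READ));
        return 0;
}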
@@ -2692,7 +2709,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
         unsigned ioprio;
         int ret;

-        if (S_ISREG(file_inode(file)->i_mode))
+        if (!(req->flags & REQ_F_ISREG) && S_ISREG(file_inode(file)->i_mode))
                 req->flags |= REQ_F_ISREG;

         kiocb->ki_pos = READ_ONCE(sqe->off);
@@ -3289,7 +3306,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
                 kiocb->ki_flags |= IOCB_NOWAIT;

         /* If the file doesn't support async, just async punt */
-        if (force_nonblock && !io_file_supports_async(req->file, READ)) {
+        if (force_nonblock && !io_file_supports_async(req, READ)) {
                 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
                 return ret ?: -EAGAIN;
         }
@@ -3394,7 +3411,7 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
                 kiocb->ki_flags |= IOCB_NOWAIT;

         /* If the file doesn't support async, just async punt */
-        if (force_nonblock && !io_file_supports_async(req->file, WRITE))
+        if (force_nonblock && !io_file_supports_async(req, WRITE))
                 goto copy_iov;

         /* file path doesn't support NOWAIT for non-direct_IO */
@@ -5173,7 +5190,7 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
         else
                 return false;
         /* if we can't nonblock try, then no point in arming a poll handler */
-        if (!io_file_supports_async(req->file, rw))
+        if (!io_file_supports_async(req, rw))
                 return false;

         apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
@@ -6182,6 +6199,15 @@ static void io_wq_submit_work(struct io_wq_work *work)
         }
 }

+#define FFS_ASYNC_READ 0x1UL
+#define FFS_ASYNC_WRITE 0x2UL
+#ifdef CONFIG_64BIT
+#define FFS_ISREG 0x4UL
+#else
+#define FFS_ISREG 0x0UL
+#endif
+#define FFS_MASK ~(FFS_ASYNC_READ|FFS_ASYNC_WRITE|FFS_ISREG)
+
 static inline struct file **io_fixed_file_slot(struct fixed_rsrc_data *file_data,
                                                unsigned i)
 {
@@ -6194,7 +6220,9 @@ static inline struct file **io_fixed_file_slot(struct fixed_rsrc_data *file_data
 static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
                                               int index)
 {
-        return *io_fixed_file_slot(ctx->file_data, index);
+        struct file **file_slot = io_fixed_file_slot(ctx->file_data, index);
+
+        return (struct file *) ((unsigned long) *file_slot & FFS_MASK);
 }

 static struct file *io_file_get(struct io_submit_state *state,
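Two details worth noting here. First, FFS_MASK is the complement of the tag bits, so slot & FFS_MASK yields the clean pointer while slot & ~FFS_MASK (a double negation, used in io_file_get() below) isolates the tags. Second, FFS_ISREG collapses to 0 on 32-bit, where only 4-byte pointer alignment is guaranteed and just two low bits are spare. A standalone illustration of both operations, with a made-up slot value:

#include <stdio.h>

#define FFS_ASYNC_READ 0x1UL
#define FFS_ASYNC_WRITE 0x2UL
#define FFS_ISREG 0x4UL
#define FFS_MASK ~(FFS_ASYNC_READ|FFS_ASYNC_WRITE|FFS_ISREG)

int main(void)
{
        /* A fake 8-byte-aligned "pointer" with two tag bits set. */
        unsigned long slot = 0x7f0000001000UL | FFS_ASYNC_READ | FFS_ISREG;

        printf("pointer: %#lx\n", slot & FFS_MASK);  /* 0x7f0000001000 */
        printf("tags:    %#lx\n", slot & ~FFS_MASK); /* 0x5 */
        return 0;
}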
@@ -6204,10 +6232,16 @@ static struct file *io_file_get(struct io_submit_state *state,
         struct file *file;

         if (fixed) {
+                unsigned long file_ptr;
+
                 if (unlikely((unsigned int)fd >= ctx->nr_user_files))
                         return NULL;
                 fd = array_index_nospec(fd, ctx->nr_user_files);
-                file = io_file_from_index(ctx, fd);
+                file_ptr = (unsigned long) *io_fixed_file_slot(ctx->file_data, fd);
+                file = (struct file *) (file_ptr & FFS_MASK);
+                file_ptr &= ~FFS_MASK;
+                /* mask in overlapping REQ_F and FFS bits */
+                req->flags |= (file_ptr << REQ_F_ASYNC_READ_BIT);
                 io_set_resource_node(req);
         } else {
                 trace_io_uring_file_get(ctx, fd);
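This is where the enum ordering pays off: after masking, file_ptr holds only the FFS bits (1, 2, 4), and one left shift by REQ_F_ASYNC_READ_BIT drops them onto REQ_F_ASYNC_READ, REQ_F_ASYNC_WRITE and REQ_F_ISREG in a single OR. The same transfer in a standalone sketch, again with an illustrative bit position:

#include <stdio.h>

/* Illustrative position; the real one comes from the REQ_F_* enum. */
#define ASYNC_READ_BIT 12

#define FFS_ASYNC_READ 0x1UL
#define FFS_ASYNC_WRITE 0x2UL
#define FFS_ISREG 0x4UL
#define FFS_MASK ~(FFS_ASYNC_READ|FFS_ASYNC_WRITE|FFS_ISREG)

int main(void)
{
        unsigned long slot = 0x1000UL | FFS_ASYNC_READ | FFS_ISREG;
        unsigned int req_flags = 0;

        unsigned long tags = slot & ~FFS_MASK;  /* 0x5 */
        req_flags |= (tags << ASYNC_READ_BIT);  /* one shift sets both flags */

        printf("req_flags = %#x\n", req_flags); /* 0x5000: bits 12 and 14 */
        return 0;
}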
@@ -7556,6 +7590,8 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
                 goto out_free;

         for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
+                unsigned long file_ptr;
+
                 if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
                         ret = -EFAULT;
                         goto out_fput;
@@ -7580,7 +7616,14 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
                         fput(file);
                         goto out_fput;
                 }
-                *io_fixed_file_slot(file_data, i) = file;
+                file_ptr = (unsigned long) file;
+                if (__io_file_supports_async(file, READ))
+                        file_ptr |= FFS_ASYNC_READ;
+                if (__io_file_supports_async(file, WRITE))
+                        file_ptr |= FFS_ASYNC_WRITE;
+                if (S_ISREG(file_inode(file)->i_mode))
+                        file_ptr |= FFS_ISREG;
+                *io_fixed_file_slot(file_data, i) = (struct file *) file_ptr;
         }

         ret = io_sqe_files_scm(ctx);
@@ -7713,7 +7756,8 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
                 file_slot = io_fixed_file_slot(ctx->file_data, i);
                 if (*file_slot) {
-                        err = io_queue_file_removal(data, *file_slot);
+                        file = (struct file *) ((unsigned long) *file_slot & FFS_MASK);
+                        err = io_queue_file_removal(data, file);
                         if (err)
                                 break;
                         *file_slot = NULL;
                 }
@@ -9288,7 +9332,7 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
         seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
         seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
         for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
-                struct file *f = *io_fixed_file_slot(ctx->file_data, i);
+                struct file *f = io_file_from_index(ctx, i);

                 if (f)
                         seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);