Commit 4e17aaab authored by Stefan Roesch, committed by Jens Axboe

io_uring: Add support for async buffered writes

This enables async buffered writes in io_uring for filesystems that
support them. Buffered writes are enabled for blocks that are already
in the page cache or that can be acquired with noio.
Signed-off-by: Stefan Roesch <shr@fb.com>
Link: https://lore.kernel.org/r/20220616212221.2024518-12-shr@fb.com
[axboe: adapt to 5.20 branch]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 66fa3ced
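
For context, a minimal userspace sketch (not part of the commit) of the path
this patch targets: a plain buffered write submitted through io_uring via the
standard liburing prep/submit/wait API. The file name "testfile" and the
abbreviated error handling are illustrative assumptions.

	/* Sketch: buffered (non-O_DIRECT) write submitted through io_uring. */
	#include <fcntl.h>
	#include <liburing.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		struct io_uring ring;
		struct io_uring_sqe *sqe;
		struct io_uring_cqe *cqe;
		char buf[] = "hello, buffered io_uring write\n";
		int fd;

		/* No O_DIRECT: this goes through the page cache. */
		fd = open("testfile", O_WRONLY | O_CREAT, 0644);
		if (fd < 0 || io_uring_queue_init(8, &ring, 0) < 0)
			return 1;

		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_write(sqe, fd, buf, strlen(buf), 0);

		io_uring_submit(&ring);
		io_uring_wait_cqe(&ring, &cqe);
		printf("write returned %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);

		io_uring_queue_exit(&ring);
		return 0;
	}

Before this series, such a write on a regular file was always punted to the
io-wq worker pool; with it, filesystems that set FMODE_BUF_WASYNC can complete
the write inline via the NOWAIT path when the pages are available.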
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -641,7 +641,7 @@ static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
 	return -EINVAL;
 }
 
-static bool need_read_all(struct io_kiocb *req)
+static bool need_complete_io(struct io_kiocb *req)
 {
 	return req->flags & REQ_F_ISREG ||
 		S_ISBLK(file_inode(req->file)->i_mode);
@@ -775,7 +775,7 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
 		kfree(iovec);
 		return IOU_ISSUE_SKIP_COMPLETE;
 	} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
-		   (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) {
+		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
 		/* read all, failed, already did sync or don't want to retry */
 		goto done;
 	}
@@ -870,9 +870,10 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
 		if (unlikely(!io_file_supports_nowait(req)))
 			goto copy_iov;
 
-		/* file path doesn't support NOWAIT for non-direct_IO */
-		if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
-		    (req->flags & REQ_F_ISREG))
+		/* File path supports NOWAIT for non-direct_IO only for block devices. */
+		if (!(kiocb->ki_flags & IOCB_DIRECT) &&
+		    !(kiocb->ki_filp->f_mode & FMODE_BUF_WASYNC) &&
+		    (req->flags & REQ_F_ISREG))
 			goto copy_iov;
 
 		kiocb->ki_flags |= IOCB_NOWAIT;
@@ -928,6 +929,24 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
 		/* IOPOLL retry should happen for io-wq threads */
 		if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
 			goto copy_iov;
+
+		if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
+			struct io_async_rw *rw;
+
+			/* This is a partial write. The file pos has already been
+			 * updated, setup the async struct to complete the request
+			 * in the worker. Also update bytes_done to account for
+			 * the bytes already written.
+			 */
+			iov_iter_save_state(&s->iter, &s->iter_state);
+			ret = io_setup_async_rw(req, iovec, s, true);
+
+			rw = req->async_data;
+			if (rw)
+				rw->bytes_done += ret2;
+
+			return ret ? ret : -EAGAIN;
+		}
 done:
 		ret = kiocb_done(req, ret2, issue_flags);
 	} else {
...
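
The new FMODE_BUF_WASYNC check in io_write() above is the opt-in gate. A
hedged kernel-side sketch of how a filesystem signals support, modeled on how
XFS opted in later in this same patch series; example_file_open is a
hypothetical ->open() implementation, not code from this commit.

	#include <linux/fs.h>

	static int example_file_open(struct inode *inode, struct file *file)
	{
		/* Advertise async buffered write support so io_write() keeps
		 * IOCB_NOWAIT set instead of punting straight to io-wq.
		 */
		file->f_mode |= FMODE_BUF_WASYNC;
		return generic_file_open(inode, file);
	}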