Commit ef99b2d3 authored by Christoph Hellwig, committed by Jens Axboe

block: replace the spin argument to blk_iopoll with a flags argument

Switch the boolean spin argument to blk_poll to a set of flags instead.
This allows polling behavior to be controlled in a more fine-grained way.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Mark Wunderlich <mark.wunderlich@intel.com>
Link: https://lore.kernel.org/r/20211012111226.760968-10-hch@lst.de
[axboe: adapt to changed io_uring iopoll]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 28a1ae6b
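
For orientation, a minimal, hypothetical caller sketch (not part of this patch) of how the old boolean maps onto the new interface; only blk_poll() and BLK_POLL_ONESHOT come from this change, the wrapper name and its bool parameter are illustrative:

#include <linux/blkdev.h>

/*
 * Hypothetical helper, for illustration only: the old spin == true case
 * becomes a flags value of 0 (keep polling until a completion is found),
 * while the old spin == false case becomes BLK_POLL_ONESHOT (poll the
 * hardware once and return).
 */
static int example_poll_once_or_spin(struct request_queue *q, blk_qc_t cookie,
				     bool may_spin)
{
	unsigned int flags = may_spin ? 0 : BLK_POLL_ONESHOT;

	return blk_poll(q, cookie, flags);
}
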
@@ -71,7 +71,7 @@ static bool blk_rq_is_poll(struct request *rq)
 static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
 {
 	do {
-		blk_poll(rq->q, request_to_qc_t(rq->mq_hctx, rq), true);
+		blk_poll(rq->q, request_to_qc_t(rq->mq_hctx, rq), 0);
 		cond_resched();
 	} while (!completion_done(wait));
 }
......
@@ -4052,7 +4052,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc)
 }
 
 static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
-		bool spin)
+		unsigned int flags)
 {
 	struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie);
 	long state = get_current_state();
@@ -4075,7 +4075,7 @@ static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
 		if (task_is_running(current))
 			return 1;
 
-		if (ret < 0 || !spin)
+		if (ret < 0 || (flags & BLK_POLL_ONESHOT))
 			break;
 		cpu_relax();
 	} while (!need_resched());
@@ -4088,15 +4088,13 @@ static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
  * blk_poll - poll for IO completions
  * @q: the queue
  * @cookie: cookie passed back at IO submission time
- * @spin: whether to spin for completions
+ * @flags: BLK_POLL_* flags that control the behavior
  *
  * Description:
  *    Poll for completions on the passed in queue. Returns number of
- *    completed entries found. If @spin is true, then blk_poll will continue
- *    looping until at least one completion is found, unless the task is
- *    otherwise marked running (or we need to reschedule).
+ *    completed entries found.
  */
-int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
+int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags)
 {
 	if (cookie == BLK_QC_T_NONE ||
 	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
@@ -4105,12 +4103,11 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
 	if (current->plug)
 		blk_flush_plug_list(current->plug, false);
 
-	/* If specified not to spin, we also should not sleep. */
-	if (spin && q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
+	if (q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
 		if (blk_mq_poll_hybrid(q, cookie))
 			return 1;
 	}
-	return blk_mq_poll_classic(q, cookie, spin);
+	return blk_mq_poll_classic(q, cookie, flags);
 }
 EXPORT_SYMBOL_GPL(blk_poll);
......
@@ -108,7 +108,7 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
 		if (!READ_ONCE(bio.bi_private))
 			break;
 		if (!(iocb->ki_flags & IOCB_HIPRI) ||
-		    !blk_poll(bdev_get_queue(bdev), qc, true))
+		    !blk_poll(bdev_get_queue(bdev), qc, 0))
 			blk_io_schedule();
 	}
 	__set_current_state(TASK_RUNNING);
@@ -141,12 +141,12 @@ struct blkdev_dio {
 
 static struct bio_set blkdev_dio_pool;
 
-static int blkdev_iopoll(struct kiocb *kiocb, bool wait)
+static int blkdev_iopoll(struct kiocb *kiocb, unsigned int flags)
 {
 	struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host);
 	struct request_queue *q = bdev_get_queue(bdev);
 
-	return blk_poll(q, READ_ONCE(kiocb->ki_cookie), wait);
+	return blk_poll(q, READ_ONCE(kiocb->ki_cookie), flags);
 }
 
 static void blkdev_bio_end_io(struct bio *bio)
@@ -297,7 +297,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 		if (!READ_ONCE(dio->waiter))
 			break;
 
-		if (!do_poll || !blk_poll(bdev_get_queue(bdev), qc, true))
+		if (!do_poll || !blk_poll(bdev_get_queue(bdev), qc, 0))
 			blk_io_schedule();
 	}
 	__set_current_state(TASK_RUNNING);
......
@@ -2457,14 +2457,15 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
 		long min)
 {
 	struct io_kiocb *req, *tmp;
+	unsigned int poll_flags = 0;
 	LIST_HEAD(done);
-	bool spin;
 
 	/*
 	 * Only spin for completions if we don't have multiple devices hanging
 	 * off our complete list, and we're under the requested amount.
 	 */
-	spin = !ctx->poll_multi_queue && *nr_events < min;
+	if (ctx->poll_multi_queue || *nr_events >= min)
+		poll_flags |= BLK_POLL_ONESHOT;
 
 	list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
 		struct kiocb *kiocb = &req->rw.kiocb;
@@ -2482,11 +2483,11 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
 		if (!list_empty(&done))
 			break;
 
-		ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
+		ret = kiocb->ki_filp->f_op->iopoll(kiocb, poll_flags);
 		if (unlikely(ret < 0))
 			return ret;
 		else if (ret)
-			spin = false;
+			poll_flags |= BLK_POLL_ONESHOT;
 
 		/* iopoll may have completed current req */
 		if (READ_ONCE(req->iopoll_completed))
......
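
Aside (illustrative, not from the commit): the io_uring hunk above simply inverts the old spin condition via De Morgan's law. A standalone userspace sketch of the equivalence, with a local copy of the flag value so the sketch compiles on its own:

#include <stdbool.h>

#define BLK_POLL_ONESHOT	(1 << 0)	/* local copy of the new flag, for this sketch only */

/* Old logic: compute "spin" and translate it to flags afterwards. */
static unsigned int old_style(bool poll_multi_queue, long nr_events, long min)
{
	bool spin = !poll_multi_queue && nr_events < min;

	return spin ? 0 : BLK_POLL_ONESHOT;
}

/* New logic from the patch: set BLK_POLL_ONESHOT in the negated case. */
static unsigned int new_style(bool poll_multi_queue, long nr_events, long min)
{
	unsigned int poll_flags = 0;

	if (poll_multi_queue || nr_events >= min)
		poll_flags |= BLK_POLL_ONESHOT;
	return poll_flags;
}

/* For every input, old_style() and new_style() return the same value. */
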
@@ -49,13 +49,13 @@ struct iomap_dio {
 	};
 };
 
-int iomap_dio_iopoll(struct kiocb *kiocb, bool spin)
+int iomap_dio_iopoll(struct kiocb *kiocb, unsigned int flags)
 {
 	struct request_queue *q = READ_ONCE(kiocb->private);
 
 	if (!q)
 		return 0;
-	return blk_poll(q, READ_ONCE(kiocb->ki_cookie), spin);
+	return blk_poll(q, READ_ONCE(kiocb->ki_cookie), flags);
 }
 EXPORT_SYMBOL_GPL(iomap_dio_iopoll);
@@ -642,7 +642,7 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 			if (!(iocb->ki_flags & IOCB_HIPRI) ||
 			    !dio->submit.last_queue ||
 			    !blk_poll(dio->submit.last_queue,
-					 dio->submit.cookie, true))
+					 dio->submit.cookie, 0))
 				blk_io_schedule();
 		}
 		__set_current_state(TASK_RUNNING);
......
@@ -564,7 +564,9 @@ extern const char *blk_op_str(unsigned int op);
 int blk_status_to_errno(blk_status_t status);
 blk_status_t errno_to_blk_status(int errno);
 
-int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin);
+/* only poll the hardware once, don't continue until a completion was found */
+#define BLK_POLL_ONESHOT		(1 << 0)
+int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags);
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {
......
@@ -2075,7 +2075,7 @@ struct file_operations {
 	ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
 	ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
 	ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
-	int (*iopoll)(struct kiocb *kiocb, bool spin);
+	int (*iopoll)(struct kiocb *kiocb, unsigned int flags);
 	int (*iterate) (struct file *, struct dir_context *);
 	int (*iterate_shared) (struct file *, struct dir_context *);
 	__poll_t (*poll) (struct file *, struct poll_table_struct *);
......
@@ -337,7 +337,7 @@ struct iomap_dio *__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
 		unsigned int dio_flags);
 ssize_t iomap_dio_complete(struct iomap_dio *dio);
-int iomap_dio_iopoll(struct kiocb *kiocb, bool spin);
+int iomap_dio_iopoll(struct kiocb *kiocb, unsigned int flags);
 
 #ifdef CONFIG_SWAP
 struct file;
......
@@ -428,7 +428,7 @@ int swap_readpage(struct page *page, bool synchronous)
 		if (!READ_ONCE(bio->bi_private))
 			break;
 
-		if (!blk_poll(disk->queue, qc, true))
+		if (!blk_poll(disk->queue, qc, 0))
 			blk_io_schedule();
 	}
 	__set_current_state(TASK_RUNNING);
......