Commit 40d09b53 authored by Christoph Hellwig, committed by Jens Axboe

blk-mq: add a new blk_mq_complete_request_remote API

This is a variant of blk_mq_complete_request that only completes the
request if it needs to be bounced to another CPU or a softirq.  If the
request can be completed locally, the function returns false and lets
the driver complete it without requiring an indirect function call.
Reviewed-by: Daniel Wagner <dwagner@suse.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 96339526
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -632,8 +632,11 @@ static int blk_softirq_cpu_dead(unsigned int cpu)
 	return 0;
 }
 
-static void __blk_mq_complete_request(struct request *rq)
+static void __blk_mq_complete_request_remote(void *data)
 {
+	struct request *rq = data;
+
 	/*
 	 * For most of single queue controllers, there is only one irq vector
 	 * for handling I/O completion, and the only irq's affinity is set
@@ -649,11 +652,6 @@ static void __blk_mq_complete_request(struct request *rq)
 		rq->q->mq_ops->complete(rq);
 }
 
-static void __blk_mq_complete_request_remote(void *data)
-{
-	__blk_mq_complete_request(data);
-}
-
 static inline bool blk_mq_complete_need_ipi(struct request *rq)
 {
 	int cpu = raw_smp_processor_id();
@@ -672,14 +670,7 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq)
 	return cpu_online(rq->mq_ctx->cpu);
 }
 
-/**
- * blk_mq_complete_request - end I/O on a request
- * @rq: the request being processed
- *
- * Description:
- *	Complete a request by scheduling the ->complete_rq operation.
- **/
-void blk_mq_complete_request(struct request *rq)
+bool blk_mq_complete_request_remote(struct request *rq)
 {
 	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
 
@@ -687,10 +678,8 @@ void blk_mq_complete_request(struct request *rq)
 	 * For a polled request, always complete locally, it's pointless
 	 * to redirect the completion.
 	 */
-	if (rq->cmd_flags & REQ_HIPRI) {
-		rq->q->mq_ops->complete(rq);
-		return;
-	}
+	if (rq->cmd_flags & REQ_HIPRI)
+		return false;
 
 	if (blk_mq_complete_need_ipi(rq)) {
 		rq->csd.func = __blk_mq_complete_request_remote;
@@ -698,8 +687,26 @@ void blk_mq_complete_request(struct request *rq)
 		rq->csd.flags = 0;
 		smp_call_function_single_async(rq->mq_ctx->cpu, &rq->csd);
 	} else {
-		__blk_mq_complete_request(rq);
+		if (rq->q->nr_hw_queues > 1)
+			return false;
+		blk_mq_trigger_softirq(rq);
 	}
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);
+
+/**
+ * blk_mq_complete_request - end I/O on a request
+ * @rq: the request being processed
+ *
+ * Description:
+ *	Complete a request by scheduling the ->complete_rq operation.
+ **/
+void blk_mq_complete_request(struct request *rq)
+{
+	if (!blk_mq_complete_request_remote(rq))
+		rq->q->mq_ops->complete(rq);
 }
 EXPORT_SYMBOL(blk_mq_complete_request);

--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -504,6 +504,7 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
 void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
 void blk_mq_complete_request(struct request *rq);
+bool blk_mq_complete_request_remote(struct request *rq);
 bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
 		struct bio *bio, unsigned int nr_segs);
 bool blk_mq_queue_stopped(struct request_queue *q);
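As a usage illustration (not part of the commit): a minimal sketch of how a
driver's completion path could call the new API, assuming the driver also
wires the same completion handler into the .complete hook of its blk_mq_ops
so the bounced (IPI/softirq) path does the same work.  The mydrv_* names are
hypothetical.

/*
 * Illustrative sketch only: "mydrv" and its helpers are hypothetical;
 * only blk_mq_complete_request_remote() comes from this commit.
 */
static void mydrv_complete_rq(struct request *rq)
{
	/*
	 * Also wired up as .complete in the driver's blk_mq_ops, so the
	 * IPI/softirq path performs the same work via ->complete().
	 */
	blk_mq_end_request(rq, BLK_STS_OK);
}

static void mydrv_handle_completion(struct request *rq)
{
	/*
	 * blk_mq_complete_request_remote() marks the request complete and
	 * returns true if the completion was bounced to another CPU or a
	 * softirq.  If it returns false, the request can be completed
	 * locally, so call the handler directly and skip the indirect
	 * ->complete() call.
	 */
	if (!blk_mq_complete_request_remote(rq))
		mydrv_complete_rq(rq);
}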