Commit 639d3531 authored by Sebastian Andrzej Siewior, committed by Jens Axboe

mmc: core: Use blk_mq_complete_request_direct().

The completion callback for the sdhci-pci device is invoked from a
kworker.
I couldn't identify in which context mmc_blk_mq_req_done() is invoked,
but the remaining callers are invoked from preemptible context. Here it
would make sense to complete the request directly instead of scheduling
ksoftirqd for its completion.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
Acked-by: Adrian Hunter <adrian.hunter@intel.com>
Link: https://lore.kernel.org/r/20211025070658.1565848-3-bigeasy@linutronix.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent e8dc17e2
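For illustration only, the decision that the new can_sleep parameter encodes can be modelled in plain user-space C. The sketch below is not kernel code: struct fake_request, complete_now() and defer_completion() are invented stand-ins, with complete_now() playing the role of blk_mq_complete_request_direct() (complete in the caller's preemptible context) and defer_completion() the role of blk_mq_complete_request() (punt to ksoftirqd, as described in the commit message).

/*
 * Hypothetical user-space sketch of the pattern this patch introduces:
 * callers known to run in a sleepable (preemptible) context pass
 * can_sleep = true and the request is completed immediately; callers in
 * unknown context pass false and the completion is deferred.
 * None of the names below are kernel interfaces.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_request {
        int id;
        bool done;
};

/* Complete the request directly in the caller's context. */
static void complete_now(struct fake_request *rq)
{
        rq->done = true;
        printf("request %d completed directly\n", rq->id);
}

/* Hand the request off to be completed later (softirq stand-in). */
static void defer_completion(struct fake_request *rq)
{
        printf("request %d queued for deferred completion\n", rq->id);
}

/* Mirrors the shape of the completion path after this change. */
static void post_req(struct fake_request *rq, bool can_sleep)
{
        if (can_sleep)
                complete_now(rq);       /* blk_mq_complete_request_direct() */
        else
                defer_completion(rq);   /* blk_mq_complete_request() */
}

int main(void)
{
        struct fake_request a = { .id = 1 }, b = { .id = 2 };

        post_req(&a, true);     /* e.g. called from a kworker */
        post_req(&b, false);    /* calling context unknown */
        return 0;
}

The actual change keeps the deferred path for mmc_blk_mq_req_done(), whose calling context is not guaranteed to be preemptible, as the /* context unknown */ comment in the hunks below notes.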
@@ -2051,7 +2051,8 @@ static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
         mmc_put_card(mq->card, &mq->ctx);
 }
 
-static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
+static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req,
+                                bool can_sleep)
 {
         struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
         struct mmc_request *mrq = &mqrq->brq.mrq;
@@ -2063,10 +2064,14 @@ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
          * Block layer timeouts race with completions which means the normal
          * completion path cannot be used during recovery.
          */
-        if (mq->in_recovery)
+        if (mq->in_recovery) {
                 mmc_blk_mq_complete_rq(mq, req);
-        else if (likely(!blk_should_fake_timeout(req->q)))
-                blk_mq_complete_request(req);
+        } else if (likely(!blk_should_fake_timeout(req->q))) {
+                if (can_sleep)
+                        blk_mq_complete_request_direct(req, mmc_blk_mq_complete);
+                else
+                        blk_mq_complete_request(req);
+        }
 
         mmc_blk_mq_dec_in_flight(mq, req);
 }
@@ -2087,7 +2092,7 @@ void mmc_blk_mq_recovery(struct mmc_queue *mq)
 
         mmc_blk_urgent_bkops(mq, mqrq);
 
-        mmc_blk_mq_post_req(mq, req);
+        mmc_blk_mq_post_req(mq, req, true);
 }
 
 static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq,
@@ -2106,7 +2111,7 @@ static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq,
         if (prev_req)
                 *prev_req = mq->complete_req;
         else
-                mmc_blk_mq_post_req(mq, mq->complete_req);
+                mmc_blk_mq_post_req(mq, mq->complete_req, true);
 
         mq->complete_req = NULL;
 
@@ -2178,7 +2183,8 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
         mq->rw_wait = false;
         wake_up(&mq->wait);
 
-        mmc_blk_mq_post_req(mq, req);
+        /* context unknown */
+        mmc_blk_mq_post_req(mq, req, false);
 }
 
 static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
@@ -2238,7 +2244,7 @@ static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq,
         err = mmc_start_request(host, &mqrq->brq.mrq);
 
         if (prev_req)
-                mmc_blk_mq_post_req(mq, prev_req);
+                mmc_blk_mq_post_req(mq, prev_req, true);
 
         if (err)
                 mq->rw_wait = false;