Commit 81ada09c authored by Chengming Zhou, committed by Jens Axboe

blk-flush: reuse rq queuelist in flush state machine

Since we no longer need to maintain an inflight flush_data requests
list, we can reuse rq->queuelist for the flush pending list.

Note that in mq_flush_data_end_io(), we need to re-initialize
rq->queuelist before reusing it in the state machine, since
rq->rq_next shares the same storage and the driver may have
corrupted rq->queuelist through it.

This patch decreases the size of struct request by 16 bytes.
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20230717040058.3993930-5-chengming.zhou@linux.dev
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b175c867
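As background for the INIT_LIST_HEAD() call added below: in struct request,
rq->queuelist shares a union with rq->rq_next, so a driver that walks its
completion batch through rq_next overwrites queuelist.next. Here is a minimal
userspace sketch of that hazard (the list helpers are simplified stand-ins for
the kernel API, and struct req is a hypothetical reduction of struct request):

/*
 * Userspace sketch, not kernel code: INIT_LIST_HEAD/list_move_tail
 * are simplified stand-ins for the kernel helpers of the same name.
 */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h;
	h->prev = h;
}

static void list_move_tail(struct list_head *entry, struct list_head *head)
{
	/* unlink from the current list (requires valid next/prev!) ... */
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	/* ... then link in at the tail of @head */
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

struct req {
	union {                          /* same layout as struct request */
		struct list_head queuelist;  /* block layer: doubly linked */
		struct req *rq_next;         /* driver: singly linked batch */
	};
};

int main(void)
{
	struct list_head pending;
	struct req r;

	INIT_LIST_HEAD(&pending);
	INIT_LIST_HEAD(&r.queuelist);

	r.rq_next = NULL;             /* driver view clobbers queuelist.next */

	/*
	 * Without this re-init, list_move_tail() would chase the clobbered
	 * pointers (here, a NULL dereference). This is what the new
	 * INIT_LIST_HEAD() in mq_flush_data_end_io() guards against.
	 */
	INIT_LIST_HEAD(&r.queuelist);
	list_move_tail(&r.queuelist, &pending);

	printf("queued: %d\n", pending.next == &r.queuelist);
	return 0;
}

The same guarantee, that rq->queuelist is always a valid list_head while a
request sits in the flush state machine, is what lets the REQ_FSEQ_DATA case
below switch from list_add() to list_move() and drop the separate
list_del_init().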
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -183,14 +183,13 @@ static void blk_flush_complete_seq(struct request *rq,
 		/* queue for flush */
 		if (list_empty(pending))
 			fq->flush_pending_since = jiffies;
-		list_move_tail(&rq->flush.list, pending);
+		list_move_tail(&rq->queuelist, pending);
 		break;
 
 	case REQ_FSEQ_DATA:
-		list_del_init(&rq->flush.list);
 		fq->flush_data_in_flight++;
 		spin_lock(&q->requeue_lock);
-		list_add(&rq->queuelist, &q->requeue_list);
+		list_move(&rq->queuelist, &q->requeue_list);
 		spin_unlock(&q->requeue_lock);
 		blk_mq_kick_requeue_list(q);
 		break;
@@ -202,7 +201,7 @@ static void blk_flush_complete_seq(struct request *rq,
 		 * flush data request completion path.  Restore @rq for
 		 * normal completion and end it.
 		 */
-		list_del_init(&rq->flush.list);
+		list_del_init(&rq->queuelist);
 		blk_flush_restore_request(rq);
 		blk_mq_end_request(rq, error);
 		break;
@@ -258,7 +257,7 @@ static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
 	fq->flush_running_idx ^= 1;
 
 	/* and push the waiting requests to the next stage */
-	list_for_each_entry_safe(rq, n, running, flush.list) {
+	list_for_each_entry_safe(rq, n, running, queuelist) {
 		unsigned int seq = blk_flush_cur_seq(rq);
 
 		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
@@ -292,7 +291,7 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
 {
 	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
 	struct request *first_rq =
-		list_first_entry(pending, struct request, flush.list);
+		list_first_entry(pending, struct request, queuelist);
 	struct request *flush_rq = fq->flush_rq;
 
 	/* C1 described at the top of this file */
@@ -376,6 +375,11 @@ static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
 	 */
 	spin_lock_irqsave(&fq->mq_flush_lock, flags);
 	fq->flush_data_in_flight--;
+	/*
+	 * May have been corrupted by rq->rq_next reuse, we need to
+	 * re-initialize rq->queuelist before reusing it here.
+	 */
+	INIT_LIST_HEAD(&rq->queuelist);
 	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
 	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 
@@ -386,7 +390,6 @@ static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
 static void blk_rq_init_flush(struct request *rq)
 {
 	rq->flush.seq = 0;
-	INIT_LIST_HEAD(&rq->flush.list);
 	rq->rq_flags |= RQF_FLUSH_SEQ;
 	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
 	rq->end_io = mq_flush_data_end_io;
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -178,7 +178,6 @@ struct request {
 
 	struct {
 		unsigned int		seq;
-		struct list_head	list;
 		rq_end_io_fn		*saved_end_io;
 	} flush;
 
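On the commit message's figure of 16 bytes: struct list_head is two pointers,
so dropping flush.list saves 16 bytes per request on a 64-bit build. A quick
userspace check of that arithmetic, using simplified stand-ins for the kernel
types (layouts approximated, not the real struct request):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };
typedef void rq_end_io_fn;        /* stand-in: only used as a pointer target */

struct flush_old {                /* the flush struct before this commit */
	unsigned int seq;
	struct list_head list;    /* removed by this commit */
	rq_end_io_fn *saved_end_io;
};

struct flush_new {                /* the flush struct after this commit */
	unsigned int seq;
	rq_end_io_fn *saved_end_io;
};

int main(void)
{
	/* On an LP64 target this prints 16, matching the commit message. */
	printf("saved: %zu bytes\n",
	       sizeof(struct flush_old) - sizeof(struct flush_new));
	return 0;
}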