Commit d4b186ed authored by Chaitanya Kulkarni's avatar Chaitanya Kulkarni Committed by Jens Axboe

null_blk: move duplicate code to callers

This is a preparation patch which moves the duplicate code for sectors
and nr_sectors calculations for bio vs request mode into their
respective callers (null_queue_bio(), null_queue_rq()). Now the core
function only deals with the respective actions and commands instead of
having to calculate the bio vs req operations and different sector
related variables. We also move the flush command handling at the top
which significantly simplifies the rest of the code.
Reviewed-by: default avatarChristoph Hellwig <hch@lst.de>
Signed-off-by: default avatarChaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Signed-off-by: default avatarJens Axboe <axboe@kernel.dk>
parent d1916c86
...@@ -1133,7 +1133,8 @@ static void null_restart_queue_async(struct nullb *nullb) ...@@ -1133,7 +1133,8 @@ static void null_restart_queue_async(struct nullb *nullb)
blk_mq_start_stopped_hw_queues(q, true); blk_mq_start_stopped_hw_queues(q, true);
} }
static blk_status_t null_handle_cmd(struct nullb_cmd *cmd) static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
sector_t nr_sectors, enum req_opf op)
{ {
struct nullb_device *dev = cmd->nq->dev; struct nullb_device *dev = cmd->nq->dev;
struct nullb *nullb = dev->nullb; struct nullb *nullb = dev->nullb;
...@@ -1156,60 +1157,31 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd) ...@@ -1156,60 +1157,31 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
} }
} }
if (op == REQ_OP_FLUSH) {
cmd->error = errno_to_blk_status(null_handle_flush(nullb));
goto out;
}
if (nullb->dev->badblocks.shift != -1) { if (nullb->dev->badblocks.shift != -1) {
int bad_sectors; int bad_sectors;
sector_t sector, size, first_bad; sector_t first_bad;
bool is_flush = true;
if (badblocks_check(&nullb->dev->badblocks, sector, nr_sectors,
if (dev->queue_mode == NULL_Q_BIO && &first_bad, &bad_sectors)) {
bio_op(cmd->bio) != REQ_OP_FLUSH) {
is_flush = false;
sector = cmd->bio->bi_iter.bi_sector;
size = bio_sectors(cmd->bio);
}
if (dev->queue_mode != NULL_Q_BIO &&
req_op(cmd->rq) != REQ_OP_FLUSH) {
is_flush = false;
sector = blk_rq_pos(cmd->rq);
size = blk_rq_sectors(cmd->rq);
}
if (!is_flush && badblocks_check(&nullb->dev->badblocks, sector,
size, &first_bad, &bad_sectors)) {
cmd->error = BLK_STS_IOERR; cmd->error = BLK_STS_IOERR;
goto out; goto out;
} }
} }
if (dev->memory_backed) { if (dev->memory_backed) {
if (dev->queue_mode == NULL_Q_BIO) { if (dev->queue_mode == NULL_Q_BIO)
if (bio_op(cmd->bio) == REQ_OP_FLUSH)
err = null_handle_flush(nullb);
else
err = null_handle_bio(cmd); err = null_handle_bio(cmd);
} else {
if (req_op(cmd->rq) == REQ_OP_FLUSH)
err = null_handle_flush(nullb);
else else
err = null_handle_rq(cmd); err = null_handle_rq(cmd);
} }
}
cmd->error = errno_to_blk_status(err); cmd->error = errno_to_blk_status(err);
if (!cmd->error && dev->zoned) { if (!cmd->error && dev->zoned) {
sector_t sector;
unsigned int nr_sectors;
enum req_opf op;
if (dev->queue_mode == NULL_Q_BIO) {
op = bio_op(cmd->bio);
sector = cmd->bio->bi_iter.bi_sector;
nr_sectors = cmd->bio->bi_iter.bi_size >> 9;
} else {
op = req_op(cmd->rq);
sector = blk_rq_pos(cmd->rq);
nr_sectors = blk_rq_sectors(cmd->rq);
}
if (op == REQ_OP_WRITE) if (op == REQ_OP_WRITE)
null_zone_write(cmd, sector, nr_sectors); null_zone_write(cmd, sector, nr_sectors);
else if (op == REQ_OP_ZONE_RESET) else if (op == REQ_OP_ZONE_RESET)
...@@ -1282,6 +1254,8 @@ static struct nullb_queue *nullb_to_queue(struct nullb *nullb) ...@@ -1282,6 +1254,8 @@ static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio) static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{ {
sector_t sector = bio->bi_iter.bi_sector;
sector_t nr_sectors = bio_sectors(bio);
struct nullb *nullb = q->queuedata; struct nullb *nullb = q->queuedata;
struct nullb_queue *nq = nullb_to_queue(nullb); struct nullb_queue *nq = nullb_to_queue(nullb);
struct nullb_cmd *cmd; struct nullb_cmd *cmd;
...@@ -1289,7 +1263,7 @@ static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio) ...@@ -1289,7 +1263,7 @@ static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
cmd = alloc_cmd(nq, 1); cmd = alloc_cmd(nq, 1);
cmd->bio = bio; cmd->bio = bio;
null_handle_cmd(cmd); null_handle_cmd(cmd, sector, nr_sectors, bio_op(bio));
return BLK_QC_T_NONE; return BLK_QC_T_NONE;
} }
...@@ -1323,6 +1297,8 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, ...@@ -1323,6 +1297,8 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
{ {
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
struct nullb_queue *nq = hctx->driver_data; struct nullb_queue *nq = hctx->driver_data;
sector_t nr_sectors = blk_rq_sectors(bd->rq);
sector_t sector = blk_rq_pos(bd->rq);
might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
...@@ -1351,7 +1327,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, ...@@ -1351,7 +1327,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
if (should_timeout_request(bd->rq)) if (should_timeout_request(bd->rq))
return BLK_STS_OK; return BLK_STS_OK;
return null_handle_cmd(cmd); return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));
} }
static const struct blk_mq_ops null_mq_ops = { static const struct blk_mq_ops null_mq_ops = {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment