Commit 49d24398, authored by Uday Shankar, committed by Jens Axboe

blk-mq: enforce op-specific segment limits in blk_insert_cloned_request

The block layer might merge together discard requests up until the
max_discard_segments limit is hit, but blk_insert_cloned_request checks
the segment count against max_segments regardless of the req op. This
can result in errors like the following when discards are issued through
a DM device and max_discard_segments exceeds max_segments for the queue
of the chosen underlying device.

blk_insert_cloned_request: over max segments limit. (256 > 129)

Fix this by looking at the req_op and enforcing the appropriate segment
limit - max_discard_segments for REQ_OP_DISCARDs and max_segments for
everything else.
Signed-off-by: Uday Shankar <ushankar@purestorage.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20230301000655.48112-1-ushankar@purestorage.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 326ac2c5
...@@ -586,13 +586,6 @@ int __blk_rq_map_sg(struct request_queue *q, struct request *rq, ...@@ -586,13 +586,6 @@ int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
} }
EXPORT_SYMBOL(__blk_rq_map_sg); EXPORT_SYMBOL(__blk_rq_map_sg);
/*
 * Return the segment limit that applies to @rq: discard requests are
 * bounded by the queue's discard-specific segment limit, every other
 * op by the general max_segments limit.
 */
static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
	return (req_op(rq) == REQ_OP_DISCARD) ?
		queue_max_discard_segments(rq->q) :
		queue_max_segments(rq->q);
}
static inline unsigned int blk_rq_get_max_sectors(struct request *rq, static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
sector_t offset) sector_t offset)
{ {
......
...@@ -3000,6 +3000,7 @@ blk_status_t blk_insert_cloned_request(struct request *rq) ...@@ -3000,6 +3000,7 @@ blk_status_t blk_insert_cloned_request(struct request *rq)
{ {
struct request_queue *q = rq->q; struct request_queue *q = rq->q;
unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq)); unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
unsigned int max_segments = blk_rq_get_max_segments(rq);
blk_status_t ret; blk_status_t ret;
if (blk_rq_sectors(rq) > max_sectors) { if (blk_rq_sectors(rq) > max_sectors) {
...@@ -3026,9 +3027,9 @@ blk_status_t blk_insert_cloned_request(struct request *rq) ...@@ -3026,9 +3027,9 @@ blk_status_t blk_insert_cloned_request(struct request *rq)
* original queue. * original queue.
*/ */
rq->nr_phys_segments = blk_recalc_rq_segments(rq); rq->nr_phys_segments = blk_recalc_rq_segments(rq);
if (rq->nr_phys_segments > queue_max_segments(q)) { if (rq->nr_phys_segments > max_segments) {
printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n", printk(KERN_ERR "%s: over max segments limit. (%u > %u)\n",
__func__, rq->nr_phys_segments, queue_max_segments(q)); __func__, rq->nr_phys_segments, max_segments);
return BLK_STS_IOERR; return BLK_STS_IOERR;
} }
......
...@@ -156,6 +156,13 @@ static inline bool blk_discard_mergable(struct request *req) ...@@ -156,6 +156,13 @@ static inline bool blk_discard_mergable(struct request *req)
return false; return false;
} }
/*
 * Segment limit for @rq's operation type: REQ_OP_DISCARD requests use
 * the queue's discard segment limit, all other ops the general one.
 */
static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
	if (req_op(rq) != REQ_OP_DISCARD)
		return queue_max_segments(rq->q);
	return queue_max_discard_segments(rq->q);
}
static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
enum req_op op) enum req_op op)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment