Commit 8dd2cb7e authored by Shaohua Li, committed by Jens Axboe

block: discard granularity might not be power of 2

In MD raid case, discard granularity might not be power of 2, for example, a
4-disk raid5 has 3*chunk_size discard granularity. Correct the calculation for
such cases.
Reported-by: Neil Brown <neilb@suse.de>
Signed-off-by: Shaohua Li <shli@fusionio.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 75274551
...@@ -43,8 +43,8 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, ...@@ -43,8 +43,8 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
DECLARE_COMPLETION_ONSTACK(wait); DECLARE_COMPLETION_ONSTACK(wait);
struct request_queue *q = bdev_get_queue(bdev); struct request_queue *q = bdev_get_queue(bdev);
int type = REQ_WRITE | REQ_DISCARD; int type = REQ_WRITE | REQ_DISCARD;
unsigned int max_discard_sectors; sector_t max_discard_sectors;
unsigned int granularity, alignment, mask; sector_t granularity, alignment;
struct bio_batch bb; struct bio_batch bb;
struct bio *bio; struct bio *bio;
int ret = 0; int ret = 0;
...@@ -57,15 +57,16 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, ...@@ -57,15 +57,16 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
/* Zero-sector (unknown) and one-sector granularities are the same. */ /* Zero-sector (unknown) and one-sector granularities are the same. */
granularity = max(q->limits.discard_granularity >> 9, 1U); granularity = max(q->limits.discard_granularity >> 9, 1U);
mask = granularity - 1; alignment = bdev_discard_alignment(bdev) >> 9;
alignment = (bdev_discard_alignment(bdev) >> 9) & mask; alignment = sector_div(alignment, granularity);
/* /*
* Ensure that max_discard_sectors is of the proper * Ensure that max_discard_sectors is of the proper
* granularity, so that requests stay aligned after a split. * granularity, so that requests stay aligned after a split.
*/ */
max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9); max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
max_discard_sectors = round_down(max_discard_sectors, granularity); sector_div(max_discard_sectors, granularity);
max_discard_sectors *= granularity;
if (unlikely(!max_discard_sectors)) { if (unlikely(!max_discard_sectors)) {
/* Avoid infinite loop below. Being cautious never hurts. */ /* Avoid infinite loop below. Being cautious never hurts. */
return -EOPNOTSUPP; return -EOPNOTSUPP;
...@@ -83,7 +84,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, ...@@ -83,7 +84,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
while (nr_sects) { while (nr_sects) {
unsigned int req_sects; unsigned int req_sects;
sector_t end_sect; sector_t end_sect, tmp;
bio = bio_alloc(gfp_mask, 1); bio = bio_alloc(gfp_mask, 1);
if (!bio) { if (!bio) {
...@@ -98,10 +99,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, ...@@ -98,10 +99,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
* misaligned, stop the discard at the previous aligned sector. * misaligned, stop the discard at the previous aligned sector.
*/ */
end_sect = sector + req_sects; end_sect = sector + req_sects;
if (req_sects < nr_sects && (end_sect & mask) != alignment) { tmp = end_sect;
end_sect = if (req_sects < nr_sects &&
round_down(end_sect - alignment, granularity) sector_div(tmp, granularity) != alignment) {
+ alignment; end_sect = end_sect - alignment;
sector_div(end_sect, granularity);
end_sect = end_sect * granularity + alignment;
req_sects = end_sect - sector; req_sects = end_sect - sector;
} }
......
...@@ -611,7 +611,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, ...@@ -611,7 +611,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
bottom = b->discard_granularity + alignment; bottom = b->discard_granularity + alignment;
/* Verify that top and bottom intervals line up */ /* Verify that top and bottom intervals line up */
if (max(top, bottom) & (min(top, bottom) - 1)) if ((max(top, bottom) % min(top, bottom)) != 0)
t->discard_misaligned = 1; t->discard_misaligned = 1;
} }
...@@ -619,8 +619,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, ...@@ -619,8 +619,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
b->max_discard_sectors); b->max_discard_sectors);
t->discard_granularity = max(t->discard_granularity, t->discard_granularity = max(t->discard_granularity,
b->discard_granularity); b->discard_granularity);
t->discard_alignment = lcm(t->discard_alignment, alignment) & t->discard_alignment = lcm(t->discard_alignment, alignment) %
(t->discard_granularity - 1); t->discard_granularity;
} }
return ret; return ret;
......
...@@ -1188,13 +1188,14 @@ static inline int queue_discard_alignment(struct request_queue *q) ...@@ -1188,13 +1188,14 @@ static inline int queue_discard_alignment(struct request_queue *q)
static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector) static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{ {
unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1); sector_t alignment = sector << 9;
alignment = sector_div(alignment, lim->discard_granularity);
if (!lim->max_discard_sectors) if (!lim->max_discard_sectors)
return 0; return 0;
return (lim->discard_granularity + lim->discard_alignment - alignment) alignment = lim->discard_granularity + lim->discard_alignment - alignment;
& (lim->discard_granularity - 1); return sector_div(alignment, lim->discard_granularity);
} }
static inline int bdev_discard_alignment(struct block_device *bdev) static inline int bdev_discard_alignment(struct block_device *bdev)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment