Commit 2bb4cd5c authored by Jens Axboe

block: have drivers use blk_queue_max_discard_sectors()

Some drivers use it now, others just set the limits field manually.
But in preparation for splitting this into a hard and soft limit,
ensure that they all call the proper function for setting the hw
limit for discards.
Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 6c71013e
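For context: blk_queue_max_discard_sectors() currently does nothing more than write q->limits.max_discard_sectors, so each hunk below is behaviourally equivalent to the open-coded assignment it replaces. The point of funnelling every driver through the helper is that, once the limit is split into a hard (hw) and a soft limit, the helper is the single place that can keep both in sync, which direct stores into the limits field would bypass. A minimal sketch of what the post-split helper could look like follows; the max_hw_discard_sectors field name is an assumption about the follow-up change, not something this commit introduces:

void blk_queue_max_discard_sectors(struct request_queue *q,
                                   unsigned int max_discard_sectors)
{
        /* hard limit: what the driver/device says it can handle
         * (field name assumed for the planned hw/soft split) */
        q->limits.max_hw_discard_sectors = max_discard_sectors;
        /* soft limit: what the block layer actually enforces; starts
         * equal to the hw limit and can later be capped independently */
        q->limits.max_discard_sectors = max_discard_sectors;
}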
@@ -500,7 +500,7 @@ static struct brd_device *brd_alloc(int i)
         blk_queue_physical_block_size(brd->brd_queue, PAGE_SIZE);
         brd->brd_queue->limits.discard_granularity = PAGE_SIZE;
-        brd->brd_queue->limits.max_discard_sectors = UINT_MAX;
+        blk_queue_max_discard_sectors(brd->brd_queue, UINT_MAX);
         brd->brd_queue->limits.discard_zeroes_data = 1;
         queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);
...
@@ -1156,14 +1156,14 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
                 /* For now, don't allow more than one activity log extent worth of data
                  * to be discarded in one go. We may need to rework drbd_al_begin_io()
                  * to allow for even larger discard ranges */
-                q->limits.max_discard_sectors = DRBD_MAX_DISCARD_SECTORS;
+                blk_queue_max_discard_sectors(q, DRBD_MAX_DISCARD_SECTORS);
                 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
                 /* REALLY? Is stacking secdiscard "legal"? */
                 if (blk_queue_secdiscard(b))
                         queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
         } else {
-                q->limits.max_discard_sectors = 0;
+                blk_queue_max_discard_sectors(q, 0);
                 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
                 queue_flag_clear_unlocked(QUEUE_FLAG_SECDISCARD, q);
         }
...
@@ -675,7 +675,7 @@ static void loop_config_discard(struct loop_device *lo)
             lo->lo_encrypt_key_size) {
                 q->limits.discard_granularity = 0;
                 q->limits.discard_alignment = 0;
-                q->limits.max_discard_sectors = 0;
+                blk_queue_max_discard_sectors(q, 0);
                 q->limits.discard_zeroes_data = 0;
                 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
                 return;
@@ -683,7 +683,7 @@ static void loop_config_discard(struct loop_device *lo)
         q->limits.discard_granularity = inode->i_sb->s_blocksize;
         q->limits.discard_alignment = 0;
-        q->limits.max_discard_sectors = UINT_MAX >> 9;
+        blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
         q->limits.discard_zeroes_data = 1;
         queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 }
...
@@ -822,7 +822,7 @@ static int __init nbd_init(void)
         queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
         queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
         disk->queue->limits.discard_granularity = 512;
-        disk->queue->limits.max_discard_sectors = UINT_MAX;
+        blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
         disk->queue->limits.discard_zeroes_data = 0;
         blk_queue_max_hw_sectors(disk->queue, 65536);
         disk->queue->limits.max_sectors = 256;
...
@@ -1935,7 +1935,7 @@ static void nvme_config_discard(struct nvme_ns *ns)
         ns->queue->limits.discard_zeroes_data = 0;
         ns->queue->limits.discard_alignment = logical_block_size;
         ns->queue->limits.discard_granularity = logical_block_size;
-        ns->queue->limits.max_discard_sectors = 0xffffffff;
+        blk_queue_max_discard_sectors(ns->queue, 0xffffffff);
         queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
 }
...
@@ -3803,7 +3803,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
         queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
         q->limits.discard_granularity = segment_size;
         q->limits.discard_alignment = segment_size;
-        q->limits.max_discard_sectors = segment_size / SECTOR_SIZE;
+        blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
         q->limits.discard_zeroes_data = 1;
         blk_queue_merge_bvec(q, rbd_merge_bvec);
...
@@ -4422,7 +4422,7 @@ static int skd_cons_disk(struct skd_device *skdev)
         /* DISCARD Flag initialization. */
         q->limits.discard_granularity = 8192;
         q->limits.discard_alignment = 0;
-        q->limits.max_discard_sectors = UINT_MAX >> 9;
+        blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
         q->limits.discard_zeroes_data = 1;
         queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
         queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
...
@@ -1244,7 +1244,7 @@ static int zram_add(void)
         blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
         blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
         zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
-        zram->disk->queue->limits.max_discard_sectors = UINT_MAX;
+        blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
         /*
          * zram_bio_discard() will clear all logical blocks if logical block
          * size is identical with physical block size(PAGE_SIZE). But if it is
...
@@ -830,7 +830,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
         q->limits.max_sectors = UINT_MAX;
         q->limits.max_segment_size = UINT_MAX;
         q->limits.max_segments = BIO_MAX_PAGES;
-        q->limits.max_discard_sectors = UINT_MAX;
+        blk_queue_max_discard_sectors(q, UINT_MAX);
         q->limits.discard_granularity = 512;
         q->limits.io_min = block_size;
         q->limits.logical_block_size = block_size;
...
@@ -165,7 +165,7 @@ static void mmc_queue_setup_discard(struct request_queue *q,
                 return;
         queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
-        q->limits.max_discard_sectors = max_discard;
+        blk_queue_max_discard_sectors(q, max_discard);
         if (card->erased_byte == 0 && !mmc_can_discard(card))
                 q->limits.discard_zeroes_data = 1;
         q->limits.discard_granularity = card->pref_erase << 9;
...
@@ -423,7 +423,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
         if (tr->discard) {
                 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);
-                new->rq->limits.max_discard_sectors = UINT_MAX;
+                blk_queue_max_discard_sectors(new->rq, UINT_MAX);
         }
         gd->queue = new->rq;
...
@@ -647,7 +647,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
         switch (mode) {
         case SD_LBP_DISABLE:
-                q->limits.max_discard_sectors = 0;
+                blk_queue_max_discard_sectors(q, 0);
                 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
                 return;
@@ -675,7 +675,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
                 break;
         }
-        q->limits.max_discard_sectors = max_blocks * (logical_block_size >> 9);
+        blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9));
         queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 }
...