Commit 7b47ef52 authored by Christoph Hellwig, committed by Jens Axboe

block: add a bdev_discard_granularity helper

Abstract away implementation details from file systems by providing a
block_device based helper to retrieve the discard granularity.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Acked-by: Christoph Böhmwalder <christoph.boehmwalder@linbit.com> [drbd]
Acked-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
Acked-by: David Sterba <dsterba@suse.com> [btrfs]
Link: https://lore.kernel.org/r/20220415045258.199825-26-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 70200574
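
Before the individual hunks, a quick sketch of the conversion pattern the patch applies (illustrative only, not part of the patch text): the new inline helper, added in the final hunk of the diff, hides the bdev_get_queue() lookup, and call sites such as the many FITRIM ioctl handlers stop reaching into the queue limits and call the helper instead. The sb variable below stands for whichever super_block the handler already holds.

/* New helper, as introduced in the final hunk of this patch. */
static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
{
        return bdev_get_queue(bdev)->limits.discard_granularity;
}

/* Typical call-site conversion, repeated across the FITRIM handlers below. */

/* before: reach through the request_queue */
struct request_queue *q = bdev_get_queue(sb->s_bdev);
range.minlen = max_t(u64, range.minlen, q->limits.discard_granularity);

/* after: ask the block_device directly */
range.minlen = max_t(u64, range.minlen, bdev_discard_granularity(sb->s_bdev));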
@@ -12,8 +12,7 @@
 static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)
 {
-        unsigned int discard_granularity =
-                bdev_get_queue(bdev)->limits.discard_granularity;
+        unsigned int discard_granularity = bdev_discard_granularity(bdev);
         sector_t granularity_aligned_sector;

         if (bdev_is_partition(bdev))
@@ -59,7 +58,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
         }

         /* In case the discard granularity isn't set by buggy device driver */
-        if (WARN_ON_ONCE(!q->limits.discard_granularity)) {
+        if (WARN_ON_ONCE(!bdev_discard_granularity(bdev))) {
                 char dev_name[BDEVNAME_SIZE];

                 bdevname(bdev, dev_name);
...
@@ -1425,7 +1425,6 @@ static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *dis
                                struct drbd_backing_dev *nbc)
 {
         struct block_device *bdev = nbc->backing_bdev;
-        struct request_queue *q = bdev->bd_disk->queue;

         if (disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
                 disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
@@ -1442,12 +1441,14 @@ static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *dis
         if (disk_conf->rs_discard_granularity) {
                 int orig_value = disk_conf->rs_discard_granularity;
                 sector_t discard_size = bdev_max_discard_sectors(bdev) << 9;
+                unsigned int discard_granularity = bdev_discard_granularity(bdev);
                 int remainder;

-                if (q->limits.discard_granularity > disk_conf->rs_discard_granularity)
-                        disk_conf->rs_discard_granularity = q->limits.discard_granularity;
+                if (discard_granularity > disk_conf->rs_discard_granularity)
+                        disk_conf->rs_discard_granularity = discard_granularity;

-                remainder = disk_conf->rs_discard_granularity % q->limits.discard_granularity;
+                remainder = disk_conf->rs_discard_granularity %
+                        discard_granularity;
                 disk_conf->rs_discard_granularity += remainder;

                 if (disk_conf->rs_discard_granularity > discard_size)
...
@@ -1511,7 +1511,6 @@ void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backin
 int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, unsigned int nr_sectors, int flags)
 {
         struct block_device *bdev = device->ldev->backing_bdev;
-        struct request_queue *q = bdev_get_queue(bdev);
         sector_t tmp, nr;
         unsigned int max_discard_sectors, granularity;
         int alignment;
@@ -1521,7 +1520,7 @@ int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, u
                 goto zero_out;

         /* Zero-sector (unknown) and one-sector granularities are the same. */
-        granularity = max(q->limits.discard_granularity >> 9, 1U);
+        granularity = max(bdev_discard_granularity(bdev) >> 9, 1U);
         alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
         max_discard_sectors = min(bdev_max_discard_sectors(bdev), (1U << 22));
...
@@ -759,7 +759,7 @@ static void loop_config_discard(struct loop_device *lo)
                 struct request_queue *backingq = bdev_get_queue(I_BDEV(inode));

                 max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
-                granularity = backingq->limits.discard_granularity ?:
+                granularity = bdev_discard_granularity(I_BDEV(inode)) ?:
                         queue_physical_block_size(backingq);

         /*
...
@@ -835,7 +835,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
 bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
                                        struct block_device *bdev)
 {
-        struct request_queue *q = bdev_get_queue(bdev);
         int block_size = bdev_logical_block_size(bdev);

         if (!bdev_max_discard_sectors(bdev))
@@ -847,7 +846,7 @@ bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
          * Currently hardcoded to 1 in Linux/SCSI code..
          */
         attrib->max_unmap_block_desc_count = 1;
-        attrib->unmap_granularity = q->limits.discard_granularity / block_size;
+        attrib->unmap_granularity = bdev_discard_granularity(bdev) / block_size;
         attrib->unmap_granularity_alignment =
                                 bdev_discard_alignment(bdev) / block_size;
         return true;
...
@@ -468,7 +468,6 @@ static noinline int btrfs_ioctl_fitrim(struct btrfs_fs_info *fs_info,
                                        void __user *arg)
 {
         struct btrfs_device *device;
-        struct request_queue *q;
         struct fstrim_range range;
         u64 minlen = ULLONG_MAX;
         u64 num_devices = 0;
@@ -498,15 +497,12 @@ static noinline int btrfs_ioctl_fitrim(struct btrfs_fs_info *fs_info,
         rcu_read_lock();
         list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
                                 dev_list) {
-                if (!device->bdev)
+                if (!device->bdev || !bdev_max_discard_sectors(device->bdev))
                         continue;
-                q = bdev_get_queue(device->bdev);
-                if (bdev_max_discard_sectors(device->bdev)) {
-                        num_devices++;
-                        minlen = min_t(u64, q->limits.discard_granularity,
-                                       minlen);
-                }
+                num_devices++;
+                minlen = min_t(u64, bdev_discard_granularity(device->bdev),
+                               minlen);
         }
         rcu_read_unlock();

         if (!num_devices)
...
@@ -351,7 +351,6 @@ int exfat_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
 static int exfat_ioctl_fitrim(struct inode *inode, unsigned long arg)
 {
-        struct request_queue *q = bdev_get_queue(inode->i_sb->s_bdev);
         struct fstrim_range range;
         int ret = 0;
@@ -365,7 +364,7 @@ static int exfat_ioctl_fitrim(struct inode *inode, unsigned long arg)
                 return -EFAULT;

         range.minlen = max_t(unsigned int, range.minlen,
-                                q->limits.discard_granularity);
+                                bdev_discard_granularity(inode->i_sb->s_bdev));

         ret = exfat_trim_fs(inode, &range);
         if (ret < 0)
...
@@ -6455,7 +6455,7 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
  */
 int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
 {
-        struct request_queue *q = bdev_get_queue(sb->s_bdev);
+        unsigned int discard_granularity = bdev_discard_granularity(sb->s_bdev);
         struct ext4_group_info *grp;
         ext4_group_t group, first_group, last_group;
         ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
@@ -6475,9 +6475,9 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
             range->len < sb->s_blocksize)
                 return -EINVAL;
         /* No point to try to trim less than discard granularity */
-        if (range->minlen < q->limits.discard_granularity) {
+        if (range->minlen < discard_granularity) {
                 minlen = EXT4_NUM_B2C(EXT4_SB(sb),
-                        q->limits.discard_granularity >> sb->s_blocksize_bits);
+                        discard_granularity >> sb->s_blocksize_bits);
                 if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
                         goto out;
         }
...
@@ -2285,7 +2285,6 @@ static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
 {
         struct inode *inode = file_inode(filp);
         struct super_block *sb = inode->i_sb;
-        struct request_queue *q = bdev_get_queue(sb->s_bdev);
         struct fstrim_range range;
         int ret;
@@ -2304,7 +2303,7 @@ static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
                 return ret;

         range.minlen = max((unsigned int)range.minlen,
-                                q->limits.discard_granularity);
+                                bdev_discard_granularity(sb->s_bdev));
         ret = f2fs_trim_fs(F2FS_SB(sb), &range);
         mnt_drop_write_file(filp);
         if (ret < 0)
...
@@ -127,7 +127,6 @@ static int fat_ioctl_fitrim(struct inode *inode, unsigned long arg)
         struct super_block *sb = inode->i_sb;
         struct fstrim_range __user *user_range;
         struct fstrim_range range;
-        struct request_queue *q = bdev_get_queue(sb->s_bdev);
         int err;

         if (!capable(CAP_SYS_ADMIN))
@@ -141,7 +140,7 @@ static int fat_ioctl_fitrim(struct inode *inode, unsigned long arg)
                 return -EFAULT;

         range.minlen = max_t(unsigned int, range.minlen,
-                             q->limits.discard_granularity);
+                             bdev_discard_granularity(sb->s_bdev));

         err = fat_trim_fs(inode, &range);
         if (err < 0)
...
@@ -1386,7 +1386,7 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
 {
         struct inode *inode = file_inode(filp);
         struct gfs2_sbd *sdp = GFS2_SB(inode);
-        struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
+        struct block_device *bdev = sdp->sd_vfs->s_bdev;
         struct buffer_head *bh;
         struct gfs2_rgrpd *rgd;
         struct gfs2_rgrpd *rgd_end;
@@ -1405,7 +1405,7 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
         if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
                 return -EROFS;

-        if (!bdev_max_discard_sectors(sdp->sd_vfs->s_bdev))
+        if (!bdev_max_discard_sectors(bdev))
                 return -EOPNOTSUPP;

         if (copy_from_user(&r, argp, sizeof(r)))
@@ -1418,8 +1418,7 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
         start = r.start >> bs_shift;
         end = start + (r.len >> bs_shift);
         minlen = max_t(u64, r.minlen, sdp->sd_sb.sb_bsize);
-        minlen = max_t(u64, minlen,
-                       q->limits.discard_granularity) >> bs_shift;
+        minlen = max_t(u64, minlen, bdev_discard_granularity(bdev)) >> bs_shift;

         if (end <= start || minlen > sdp->sd_max_rg_data)
                 return -EINVAL;
...
@@ -110,7 +110,6 @@ long jfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
         case FITRIM:
         {
                 struct super_block *sb = inode->i_sb;
-                struct request_queue *q = bdev_get_queue(sb->s_bdev);
                 struct fstrim_range range;
                 s64 ret = 0;
@@ -127,7 +126,7 @@ long jfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                         return -EFAULT;

                 range.minlen = max_t(unsigned int, range.minlen,
-                                     q->limits.discard_granularity);
+                                     bdev_discard_granularity(sb->s_bdev));

                 ret = jfs_ioc_trim(inode, &range);
                 if (ret < 0)
...
@@ -1052,7 +1052,6 @@ static int nilfs_ioctl_resize(struct inode *inode, struct file *filp,
 static int nilfs_ioctl_trim_fs(struct inode *inode, void __user *argp)
 {
         struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
-        struct request_queue *q = bdev_get_queue(nilfs->ns_bdev);
         struct fstrim_range range;
         int ret;
@@ -1065,7 +1064,8 @@ static int nilfs_ioctl_trim_fs(struct inode *inode, void __user *argp)
         if (copy_from_user(&range, argp, sizeof(range)))
                 return -EFAULT;

-        range.minlen = max_t(u64, range.minlen, q->limits.discard_granularity);
+        range.minlen = max_t(u64, range.minlen,
+                             bdev_discard_granularity(nilfs->ns_bdev));

         down_read(&nilfs->ns_segctor_sem);
         ret = nilfs_sufile_trim_fs(nilfs->ns_sufile, &range);
...
@@ -22,7 +22,6 @@ static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
 {
         struct fstrim_range __user *user_range;
         struct fstrim_range range;
-        struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev);
         int err;

         if (!capable(CAP_SYS_ADMIN))
@@ -35,7 +34,8 @@ static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
         if (copy_from_user(&range, user_range, sizeof(range)))
                 return -EFAULT;

-        range.minlen = max_t(u32, range.minlen, q->limits.discard_granularity);
+        range.minlen = max_t(u32, range.minlen,
+                             bdev_discard_granularity(sbi->sb->s_bdev));

         err = ntfs_trim_fs(sbi, &range);
         if (err < 0)
...
@@ -882,7 +882,6 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
         int err;
         struct ntfs_sb_info *sbi = sb->s_fs_info;
         struct block_device *bdev = sb->s_bdev;
-        struct request_queue *rq;
         struct inode *inode;
         struct ntfs_inode *ni;
         size_t i, tt;
@@ -912,9 +911,8 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
                 goto out;
         }

-        rq = bdev_get_queue(bdev);
-        if (bdev_max_discard_sectors(bdev) && rq->limits.discard_granularity) {
-                sbi->discard_granularity = rq->limits.discard_granularity;
+        if (bdev_max_discard_sectors(bdev) && bdev_discard_granularity(bdev)) {
+                sbi->discard_granularity = bdev_discard_granularity(bdev);
                 sbi->discard_granularity_mask_inv =
                         ~(u64)(sbi->discard_granularity - 1);
         }
...
@@ -903,7 +903,6 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
         case FITRIM:
         {
                 struct super_block *sb = inode->i_sb;
-                struct request_queue *q = bdev_get_queue(sb->s_bdev);
                 struct fstrim_range range;
                 int ret = 0;
@@ -916,7 +915,7 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                 if (copy_from_user(&range, argp, sizeof(range)))
                         return -EFAULT;

-                range.minlen = max_t(u64, q->limits.discard_granularity,
+                range.minlen = max_t(u64, bdev_discard_granularity(sb->s_bdev),
                                      range.minlen);
                 ret = ocfs2_trim_fs(sb, &range);
                 if (ret < 0)
...
@@ -152,8 +152,8 @@ xfs_ioc_trim(
         struct xfs_mount                *mp,
         struct fstrim_range __user      *urange)
 {
-        struct request_queue    *q = bdev_get_queue(mp->m_ddev_targp->bt_bdev);
-        unsigned int            granularity = q->limits.discard_granularity;
+        unsigned int            granularity =
+                bdev_discard_granularity(mp->m_ddev_targp->bt_bdev);
         struct fstrim_range     range;
         xfs_daddr_t             start, end, minlen;
         xfs_agnumber_t          start_agno, end_agno, agno;
...
@@ -1257,6 +1257,11 @@ static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
         return bdev_get_queue(bdev)->limits.max_discard_sectors;
 }

+static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
+{
+        return bdev_get_queue(bdev)->limits.discard_granularity;
+}
+
 static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
 {
         struct request_queue *q = bdev_get_queue(bdev);
...