Commit dd3932ed authored by Christoph Hellwig, committed by Jens Axboe

block: remove BLKDEV_IFL_WAIT

All the blkdev_issue_* helpers can only sanely be used for synchronous
callers.  To issue cache flushes or barriers asynchronously the caller needs
to set up a bio by itself with a completion callback to move the asynchronous
state machine ahead.  So drop the BLKDEV_IFL_WAIT flag that is always
specified when calling blkdev_issue_* and also remove the now unused flags
argument to blkdev_issue_flush and blkdev_issue_zeroout.  For
blkdev_issue_discard we need to keep it for the secure discard flag, which
gains a more descriptive name and loses the bitops vs flag confusion.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
parent 8786fb70
...@@ -205,7 +205,6 @@ static void bio_end_flush(struct bio *bio, int err) ...@@ -205,7 +205,6 @@ static void bio_end_flush(struct bio *bio, int err)
* @bdev: blockdev to issue flush for * @bdev: blockdev to issue flush for
* @gfp_mask: memory allocation flags (for bio_alloc) * @gfp_mask: memory allocation flags (for bio_alloc)
* @error_sector: error sector * @error_sector: error sector
* @flags: BLKDEV_IFL_* flags to control behaviour
* *
* Description: * Description:
* Issue a flush for the block device in question. Caller can supply * Issue a flush for the block device in question. Caller can supply
...@@ -214,7 +213,7 @@ static void bio_end_flush(struct bio *bio, int err) ...@@ -214,7 +213,7 @@ static void bio_end_flush(struct bio *bio, int err)
* request was pushed in some internal queue for later handling. * request was pushed in some internal queue for later handling.
*/ */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
sector_t *error_sector, unsigned long flags) sector_t *error_sector)
{ {
DECLARE_COMPLETION_ONSTACK(wait); DECLARE_COMPLETION_ONSTACK(wait);
struct request_queue *q; struct request_queue *q;
...@@ -240,13 +239,12 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, ...@@ -240,13 +239,12 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
bio = bio_alloc(gfp_mask, 0); bio = bio_alloc(gfp_mask, 0);
bio->bi_end_io = bio_end_flush; bio->bi_end_io = bio_end_flush;
bio->bi_bdev = bdev; bio->bi_bdev = bdev;
if (test_bit(BLKDEV_WAIT, &flags))
bio->bi_private = &wait; bio->bi_private = &wait;
bio_get(bio); bio_get(bio);
submit_bio(WRITE_FLUSH, bio); submit_bio(WRITE_FLUSH, bio);
if (test_bit(BLKDEV_WAIT, &flags)) {
wait_for_completion(&wait); wait_for_completion(&wait);
/* /*
* The driver must store the error location in ->bi_sector, if * The driver must store the error location in ->bi_sector, if
* it supports it. For non-stacked drivers, this should be * it supports it. For non-stacked drivers, this should be
...@@ -254,7 +252,6 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, ...@@ -254,7 +252,6 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
*/ */
if (error_sector) if (error_sector)
*error_sector = bio->bi_sector; *error_sector = bio->bi_sector;
}
if (!bio_flagged(bio, BIO_UPTODATE)) if (!bio_flagged(bio, BIO_UPTODATE))
ret = -EIO; ret = -EIO;
......
...@@ -61,7 +61,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, ...@@ -61,7 +61,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
max_discard_sectors &= ~(disc_sects - 1); max_discard_sectors &= ~(disc_sects - 1);
} }
if (flags & BLKDEV_IFL_SECURE) { if (flags & BLKDEV_DISCARD_SECURE) {
if (!blk_queue_secdiscard(q)) if (!blk_queue_secdiscard(q))
return -EOPNOTSUPP; return -EOPNOTSUPP;
type |= REQ_SECURE; type |= REQ_SECURE;
...@@ -77,7 +77,6 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, ...@@ -77,7 +77,6 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
bio->bi_sector = sector; bio->bi_sector = sector;
bio->bi_end_io = blkdev_discard_end_io; bio->bi_end_io = blkdev_discard_end_io;
bio->bi_bdev = bdev; bio->bi_bdev = bdev;
if (flags & BLKDEV_IFL_WAIT)
bio->bi_private = &wait; bio->bi_private = &wait;
if (nr_sects > max_discard_sectors) { if (nr_sects > max_discard_sectors) {
...@@ -92,7 +91,6 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, ...@@ -92,7 +91,6 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
bio_get(bio); bio_get(bio);
submit_bio(type, bio); submit_bio(type, bio);
if (flags & BLKDEV_IFL_WAIT)
wait_for_completion(&wait); wait_for_completion(&wait);
if (bio_flagged(bio, BIO_EOPNOTSUPP)) if (bio_flagged(bio, BIO_EOPNOTSUPP))
...@@ -139,7 +137,6 @@ static void bio_batch_end_io(struct bio *bio, int err) ...@@ -139,7 +137,6 @@ static void bio_batch_end_io(struct bio *bio, int err)
* @sector: start sector * @sector: start sector
* @nr_sects: number of sectors to write * @nr_sects: number of sectors to write
* @gfp_mask: memory allocation flags (for bio_alloc) * @gfp_mask: memory allocation flags (for bio_alloc)
* @flags: BLKDEV_IFL_* flags to control behaviour
* *
* Description: * Description:
* Generate and issue number of bios with zerofiled pages. * Generate and issue number of bios with zerofiled pages.
...@@ -148,7 +145,7 @@ static void bio_batch_end_io(struct bio *bio, int err) ...@@ -148,7 +145,7 @@ static void bio_batch_end_io(struct bio *bio, int err)
*/ */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, unsigned long flags) sector_t nr_sects, gfp_t gfp_mask)
{ {
int ret; int ret;
struct bio *bio; struct bio *bio;
...@@ -174,7 +171,6 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, ...@@ -174,7 +171,6 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
bio->bi_sector = sector; bio->bi_sector = sector;
bio->bi_bdev = bdev; bio->bi_bdev = bdev;
bio->bi_end_io = bio_batch_end_io; bio->bi_end_io = bio_batch_end_io;
if (flags & BLKDEV_IFL_WAIT)
bio->bi_private = &bb; bio->bi_private = &bb;
while (nr_sects != 0) { while (nr_sects != 0) {
...@@ -193,9 +189,8 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, ...@@ -193,9 +189,8 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
submit_bio(WRITE, bio); submit_bio(WRITE, bio);
} }
if (flags & BLKDEV_IFL_WAIT)
/* Wait for bios in-flight */ /* Wait for bios in-flight */
while ( issued != atomic_read(&bb.done)) while (issued != atomic_read(&bb.done))
wait_for_completion(&wait); wait_for_completion(&wait);
if (!test_bit(BIO_UPTODATE, &bb.flags)) if (!test_bit(BIO_UPTODATE, &bb.flags))
......
...@@ -116,7 +116,7 @@ static int blkdev_reread_part(struct block_device *bdev) ...@@ -116,7 +116,7 @@ static int blkdev_reread_part(struct block_device *bdev)
static int blk_ioctl_discard(struct block_device *bdev, uint64_t start, static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
uint64_t len, int secure) uint64_t len, int secure)
{ {
unsigned long flags = BLKDEV_IFL_WAIT; unsigned long flags = 0;
if (start & 511) if (start & 511)
return -EINVAL; return -EINVAL;
...@@ -128,7 +128,7 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start, ...@@ -128,7 +128,7 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
if (start + len > (bdev->bd_inode->i_size >> 9)) if (start + len > (bdev->bd_inode->i_size >> 9))
return -EINVAL; return -EINVAL;
if (secure) if (secure)
flags |= BLKDEV_IFL_SECURE; flags |= BLKDEV_DISCARD_SECURE;
return blkdev_issue_discard(bdev, start, len, GFP_KERNEL, flags); return blkdev_issue_discard(bdev, start, len, GFP_KERNEL, flags);
} }
......
...@@ -2321,8 +2321,7 @@ static inline void drbd_md_flush(struct drbd_conf *mdev) ...@@ -2321,8 +2321,7 @@ static inline void drbd_md_flush(struct drbd_conf *mdev)
if (test_bit(MD_NO_BARRIER, &mdev->flags)) if (test_bit(MD_NO_BARRIER, &mdev->flags))
return; return;
r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL, r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL);
BLKDEV_IFL_WAIT);
if (r) { if (r) {
set_bit(MD_NO_BARRIER, &mdev->flags); set_bit(MD_NO_BARRIER, &mdev->flags);
dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r); dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
......
...@@ -975,7 +975,7 @@ static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct d ...@@ -975,7 +975,7 @@ static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct d
if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) { if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL, rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
NULL, BLKDEV_IFL_WAIT); NULL);
if (rv) { if (rv) {
dev_err(DEV, "local disk flush failed with status %d\n", rv); dev_err(DEV, "local disk flush failed with status %d\n", rv);
/* would rather check on EOPNOTSUPP, but that is not reliable. /* would rather check on EOPNOTSUPP, but that is not reliable.
......
...@@ -370,7 +370,7 @@ int blkdev_fsync(struct file *filp, int datasync) ...@@ -370,7 +370,7 @@ int blkdev_fsync(struct file *filp, int datasync)
*/ */
mutex_unlock(&bd_inode->i_mutex); mutex_unlock(&bd_inode->i_mutex);
error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL, BLKDEV_IFL_WAIT); error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
if (error == -EOPNOTSUPP) if (error == -EOPNOTSUPP)
error = 0; error = 0;
......
...@@ -1695,8 +1695,7 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans, ...@@ -1695,8 +1695,7 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans,
static void btrfs_issue_discard(struct block_device *bdev, static void btrfs_issue_discard(struct block_device *bdev,
u64 start, u64 len) u64 start, u64 len)
{ {
blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL, blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL, 0);
BLKDEV_IFL_WAIT);
} }
static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr, static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
......
...@@ -90,7 +90,6 @@ int ext3_sync_file(struct file *file, int datasync) ...@@ -90,7 +90,6 @@ int ext3_sync_file(struct file *file, int datasync)
* storage * storage
*/ */
if (needs_barrier) if (needs_barrier)
blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL, blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
BLKDEV_IFL_WAIT);
return ret; return ret;
} }
...@@ -128,10 +128,9 @@ int ext4_sync_file(struct file *file, int datasync) ...@@ -128,10 +128,9 @@ int ext4_sync_file(struct file *file, int datasync)
(journal->j_fs_dev != journal->j_dev) && (journal->j_fs_dev != journal->j_dev) &&
(journal->j_flags & JBD2_BARRIER)) (journal->j_flags & JBD2_BARRIER))
blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL,
NULL, BLKDEV_IFL_WAIT); NULL);
ret = jbd2_log_wait_commit(journal, commit_tid); ret = jbd2_log_wait_commit(journal, commit_tid);
} else if (journal->j_flags & JBD2_BARRIER) } else if (journal->j_flags & JBD2_BARRIER)
blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL, blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
BLKDEV_IFL_WAIT);
return ret; return ret;
} }
...@@ -2566,8 +2566,7 @@ static inline void ext4_issue_discard(struct super_block *sb, ...@@ -2566,8 +2566,7 @@ static inline void ext4_issue_discard(struct super_block *sb,
discard_block = block + ext4_group_first_block_no(sb, block_group); discard_block = block + ext4_group_first_block_no(sb, block_group);
trace_ext4_discard_blocks(sb, trace_ext4_discard_blocks(sb,
(unsigned long long) discard_block, count); (unsigned long long) discard_block, count);
ret = sb_issue_discard(sb, discard_block, count, GFP_NOFS, ret = sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
BLKDEV_IFL_WAIT);
if (ret == EOPNOTSUPP) { if (ret == EOPNOTSUPP) {
ext4_warning(sb, "discard not supported, disabling"); ext4_warning(sb, "discard not supported, disabling");
clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD); clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD);
......
...@@ -578,8 +578,7 @@ int fat_free_clusters(struct inode *inode, int cluster) ...@@ -578,8 +578,7 @@ int fat_free_clusters(struct inode *inode, int cluster)
sb_issue_discard(sb, sb_issue_discard(sb,
fat_clus_to_blknr(sbi, first_cl), fat_clus_to_blknr(sbi, first_cl),
nr_clus * sbi->sec_per_clus, nr_clus * sbi->sec_per_clus,
GFP_NOFS, GFP_NOFS, 0);
BLKDEV_IFL_WAIT);
first_cl = cluster; first_cl = cluster;
} }
......
...@@ -854,7 +854,7 @@ static void gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset, ...@@ -854,7 +854,7 @@ static void gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
if ((start + nr_sects) != blk) { if ((start + nr_sects) != blk) {
rv = blkdev_issue_discard(bdev, start, rv = blkdev_issue_discard(bdev, start,
nr_sects, GFP_NOFS, nr_sects, GFP_NOFS,
BLKDEV_IFL_WAIT); 0);
if (rv) if (rv)
goto fail; goto fail;
nr_sects = 0; nr_sects = 0;
...@@ -868,8 +868,7 @@ static void gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset, ...@@ -868,8 +868,7 @@ static void gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
} }
} }
if (nr_sects) { if (nr_sects) {
rv = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS, rv = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS, 0);
BLKDEV_IFL_WAIT);
if (rv) if (rv)
goto fail; goto fail;
} }
......
...@@ -532,8 +532,7 @@ int jbd2_cleanup_journal_tail(journal_t *journal) ...@@ -532,8 +532,7 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
*/ */
if ((journal->j_fs_dev != journal->j_dev) && if ((journal->j_fs_dev != journal->j_dev) &&
(journal->j_flags & JBD2_BARRIER)) (journal->j_flags & JBD2_BARRIER))
blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL, blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
BLKDEV_IFL_WAIT);
if (!(journal->j_flags & JBD2_ABORT)) if (!(journal->j_flags & JBD2_ABORT))
jbd2_journal_update_superblock(journal, 1); jbd2_journal_update_superblock(journal, 1);
return 0; return 0;
......
...@@ -684,8 +684,7 @@ void jbd2_journal_commit_transaction(journal_t *journal) ...@@ -684,8 +684,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
if (commit_transaction->t_flushed_data_blocks && if (commit_transaction->t_flushed_data_blocks &&
(journal->j_fs_dev != journal->j_dev) && (journal->j_fs_dev != journal->j_dev) &&
(journal->j_flags & JBD2_BARRIER)) (journal->j_flags & JBD2_BARRIER))
blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL, blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
BLKDEV_IFL_WAIT);
/* Done it all: now write the commit record asynchronously. */ /* Done it all: now write the commit record asynchronously. */
if (JBD2_HAS_INCOMPAT_FEATURE(journal, if (JBD2_HAS_INCOMPAT_FEATURE(journal,
...@@ -810,8 +809,7 @@ void jbd2_journal_commit_transaction(journal_t *journal) ...@@ -810,8 +809,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
if (JBD2_HAS_INCOMPAT_FEATURE(journal, if (JBD2_HAS_INCOMPAT_FEATURE(journal,
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) && JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) &&
journal->j_flags & JBD2_BARRIER) { journal->j_flags & JBD2_BARRIER) {
blkdev_issue_flush(journal->j_dev, GFP_KERNEL, NULL, blkdev_issue_flush(journal->j_dev, GFP_KERNEL, NULL);
BLKDEV_IFL_WAIT);
} }
if (err) if (err)
......
...@@ -774,7 +774,7 @@ int nilfs_discard_segments(struct the_nilfs *nilfs, __u64 *segnump, ...@@ -774,7 +774,7 @@ int nilfs_discard_segments(struct the_nilfs *nilfs, __u64 *segnump,
ret = blkdev_issue_discard(nilfs->ns_bdev, ret = blkdev_issue_discard(nilfs->ns_bdev,
start * sects_per_block, start * sects_per_block,
nblocks * sects_per_block, nblocks * sects_per_block,
GFP_NOFS, BLKDEV_IFL_WAIT); GFP_NOFS, 0);
if (ret < 0) if (ret < 0)
return ret; return ret;
nblocks = 0; nblocks = 0;
...@@ -784,7 +784,7 @@ int nilfs_discard_segments(struct the_nilfs *nilfs, __u64 *segnump, ...@@ -784,7 +784,7 @@ int nilfs_discard_segments(struct the_nilfs *nilfs, __u64 *segnump,
ret = blkdev_issue_discard(nilfs->ns_bdev, ret = blkdev_issue_discard(nilfs->ns_bdev,
start * sects_per_block, start * sects_per_block,
nblocks * sects_per_block, nblocks * sects_per_block,
GFP_NOFS, BLKDEV_IFL_WAIT); GFP_NOFS, 0);
return ret; return ret;
} }
......
...@@ -152,8 +152,7 @@ static int reiserfs_sync_file(struct file *filp, int datasync) ...@@ -152,8 +152,7 @@ static int reiserfs_sync_file(struct file *filp, int datasync)
barrier_done = reiserfs_commit_for_inode(inode); barrier_done = reiserfs_commit_for_inode(inode);
reiserfs_write_unlock(inode->i_sb); reiserfs_write_unlock(inode->i_sb);
if (barrier_done != 1 && reiserfs_barrier_flush(inode->i_sb)) if (barrier_done != 1 && reiserfs_barrier_flush(inode->i_sb))
blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL, blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
BLKDEV_IFL_WAIT);
if (barrier_done < 0) if (barrier_done < 0)
return barrier_done; return barrier_done;
return (err < 0) ? -EIO : 0; return (err < 0) ? -EIO : 0;
......
...@@ -693,8 +693,7 @@ void ...@@ -693,8 +693,7 @@ void
xfs_blkdev_issue_flush( xfs_blkdev_issue_flush(
xfs_buftarg_t *buftarg) xfs_buftarg_t *buftarg)
{ {
blkdev_issue_flush(buftarg->bt_bdev, GFP_KERNEL, NULL, blkdev_issue_flush(buftarg->bt_bdev, GFP_KERNEL, NULL);
BLKDEV_IFL_WAIT);
} }
STATIC void STATIC void
......
...@@ -867,18 +867,14 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt, ...@@ -867,18 +867,14 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
return NULL; return NULL;
return bqt->tag_index[tag]; return bqt->tag_index[tag];
} }
enum{
BLKDEV_WAIT, /* wait for completion */ #define BLKDEV_DISCARD_SECURE 0x01 /* secure discard */
BLKDEV_SECURE, /* secure discard */
}; extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
#define BLKDEV_IFL_WAIT (1 << BLKDEV_WAIT)
#define BLKDEV_IFL_SECURE (1 << BLKDEV_SECURE)
extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *,
unsigned long);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); sector_t nr_sects, gfp_t gfp_mask);
static inline int sb_issue_discard(struct super_block *sb, sector_t block, static inline int sb_issue_discard(struct super_block *sb, sector_t block,
sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{ {
......
...@@ -141,7 +141,7 @@ static int discard_swap(struct swap_info_struct *si) ...@@ -141,7 +141,7 @@ static int discard_swap(struct swap_info_struct *si)
nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9); nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
if (nr_blocks) { if (nr_blocks) {
err = blkdev_issue_discard(si->bdev, start_block, err = blkdev_issue_discard(si->bdev, start_block,
nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT); nr_blocks, GFP_KERNEL, 0);
if (err) if (err)
return err; return err;
cond_resched(); cond_resched();
...@@ -152,7 +152,7 @@ static int discard_swap(struct swap_info_struct *si) ...@@ -152,7 +152,7 @@ static int discard_swap(struct swap_info_struct *si)
nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9); nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
err = blkdev_issue_discard(si->bdev, start_block, err = blkdev_issue_discard(si->bdev, start_block,
nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT); nr_blocks, GFP_KERNEL, 0);
if (err) if (err)
break; break;
...@@ -191,7 +191,7 @@ static void discard_swap_cluster(struct swap_info_struct *si, ...@@ -191,7 +191,7 @@ static void discard_swap_cluster(struct swap_info_struct *si,
start_block <<= PAGE_SHIFT - 9; start_block <<= PAGE_SHIFT - 9;
nr_blocks <<= PAGE_SHIFT - 9; nr_blocks <<= PAGE_SHIFT - 9;
if (blkdev_issue_discard(si->bdev, start_block, if (blkdev_issue_discard(si->bdev, start_block,
nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT)) nr_blocks, GFP_NOIO, 0))
break; break;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment