Commit 6f673763 authored by Omar Sandoval, committed by Al Viro

direct_IO: use iov_iter_rw() instead of rw everywhere

The rw parameter to direct_IO is redundant with iov_iter->type, and
treated slightly differently just about everywhere it's used: some users
do rw & WRITE, and others do rw == WRITE where they should be doing a
bitwise check. Simplify this with the new iov_iter_rw() helper, which
always returns either READ or WRITE.
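For reference, iov_iter_rw() derives the direction from iter->type: the
low bit of type is the READ/WRITE flag, and masking off everything else
yields exactly READ (0) or WRITE (1), so "== WRITE" and "& WRITE" tests
can no longer disagree. A minimal sketch of the idea (the in-tree
definition in include/linux/uio.h is a type-checked macro, so the
function form below is illustrative only):

	/* Reduce iter->type to its direction bit: READ (0) or WRITE (1).
	 * The remaining bits of type hold iterator-kind flags and must
	 * not leak into the comparison. */
	static inline int iov_iter_rw(const struct iov_iter *i)
	{
		return i->type & (READ | WRITE);
	}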
Signed-off-by: Omar Sandoval <osandov@osandov.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent a95cd631
@@ -399,7 +399,7 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
          *    size changing by concurrent truncates and writes.
          * 1. Need inode mutex to operate transient pages.
          */
-        if (rw == READ)
+        if (iov_iter_rw(iter) == READ)
                 mutex_lock(&inode->i_mutex);

         LASSERT(obj->cob_transient_pages == 0);
@@ -408,7 +408,7 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
                 size_t offs;

                 count = min_t(size_t, iov_iter_count(iter), size);
-                if (rw == READ) {
+                if (iov_iter_rw(iter) == READ) {
                         if (file_offset >= i_size_read(inode))
                                 break;
                         if (file_offset + count > i_size_read(inode))
@@ -418,11 +418,11 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
                 result = iov_iter_get_pages_alloc(iter, &pages, count, &offs);
                 if (likely(result > 0)) {
                         int n = DIV_ROUND_UP(result + offs, PAGE_SIZE);
-                        result = ll_direct_IO_26_seg(env, io, rw, inode,
-                                                     file->f_mapping,
-                                                     result, file_offset,
-                                                     pages, n);
-                        ll_free_user_pages(pages, n, rw==READ);
+                        result = ll_direct_IO_26_seg(env, io, iov_iter_rw(iter),
+                                                     inode, file->f_mapping,
+                                                     result, file_offset, pages,
+                                                     n);
+                        ll_free_user_pages(pages, n, iov_iter_rw(iter) == READ);
                 }
                 if (unlikely(result <= 0)) {
                         /* If we can't allocate a large enough buffer
@@ -449,11 +449,11 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
         }
 out:
         LASSERT(obj->cob_transient_pages == 0);
-        if (rw == READ)
+        if (iov_iter_rw(iter) == READ)
                 mutex_unlock(&inode->i_mutex);

         if (tot_bytes > 0) {
-                if (rw == WRITE) {
+                if (iov_iter_rw(iter) == WRITE) {
                         struct lov_stripe_md *lsm;

                         lsm = ccc_inode_lsm_get(inode);
...
@@ -253,7 +253,7 @@ v9fs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
         struct file *file = iocb->ki_filp;
         ssize_t n;
         int err = 0;
-        if (rw & WRITE) {
+        if (iov_iter_rw(iter) == WRITE) {
                 n = p9_client_write(file->private_data, pos, iter, &err);
                 if (n) {
                         struct inode *inode = file_inode(file);
...
@@ -398,7 +398,7 @@ affs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
         size_t count = iov_iter_count(iter);
         ssize_t ret;

-        if (rw == WRITE) {
+        if (iov_iter_rw(iter) == WRITE) {
                 loff_t size = offset + count;

                 if (AFFS_I(inode)->mmu_private < size)
@@ -406,7 +406,7 @@ affs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
         }

         ret = blockdev_direct_IO(iocb, inode, iter, offset, affs_get_block);
-        if (ret < 0 && (rw & WRITE))
+        if (ret < 0 && iov_iter_rw(iter) == WRITE)
                 affs_write_failed(mapping, offset + count);
         return ret;
 }
...
@@ -8081,7 +8081,7 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio,
         bio_endio(dio_bio, ret);
 }

-static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
+static ssize_t check_direct_IO(struct btrfs_root *root, struct kiocb *iocb,
                         const struct iov_iter *iter, loff_t offset)
 {
         int seg;
@@ -8096,7 +8096,7 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io
                 goto out;

         /* If this is a write we don't need to check anymore */
-        if (rw & WRITE)
+        if (iov_iter_rw(iter) == WRITE)
                 return 0;
         /*
          * Check to make sure we don't have duplicate iov_base's in this
@@ -8126,7 +8126,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
         bool relock = false;
         ssize_t ret;

-        if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iter, offset))
+        if (check_direct_IO(BTRFS_I(inode)->root, iocb, iter, offset))
                 return 0;

         atomic_inc(&inode->i_dio_count);
@@ -8144,7 +8144,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
                 filemap_fdatawrite_range(inode->i_mapping, offset,
                                          offset + count - 1);

-        if (rw & WRITE) {
+        if (iov_iter_rw(iter) == WRITE) {
                 /*
                  * If the write DIO is beyond the EOF, we need update
                  * the isize, but it is protected by i_mutex. So we can
@@ -8178,7 +8178,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
                         BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
                         iter, offset, btrfs_get_blocks_direct, NULL,
                         btrfs_submit_direct, flags);
-        if (rw & WRITE) {
+        if (iov_iter_rw(iter) == WRITE) {
                 current->journal_info = NULL;
                 if (ret < 0 && ret != -EIOCBQUEUED)
                         btrfs_delalloc_release_space(inode, count);
...
@@ -866,7 +866,7 @@ ext2_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
         else
                 ret = blockdev_direct_IO(iocb, inode, iter, offset,
                                          ext2_get_block);
-        if (ret < 0 && (rw & WRITE))
+        if (ret < 0 && iov_iter_rw(iter) == WRITE)
                 ext2_write_failed(mapping, offset + count);
         return ret;
 }
...
@@ -1832,9 +1832,9 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
         size_t count = iov_iter_count(iter);
         int retries = 0;

-        trace_ext3_direct_IO_enter(inode, offset, count, rw);
+        trace_ext3_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));

-        if (rw == WRITE) {
+        if (iov_iter_rw(iter) == WRITE) {
                 loff_t final_size = offset + count;

                 if (final_size > inode->i_size) {
@@ -1861,7 +1861,7 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
          * In case of error extending write may have instantiated a few
          * blocks outside i_size. Trim these off again.
          */
-        if (unlikely((rw & WRITE) && ret < 0)) {
+        if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
                 loff_t isize = i_size_read(inode);
                 loff_t end = offset + count;
@@ -1908,7 +1908,7 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
                         ret = err;
         }
 out:
-        trace_ext3_direct_IO_exit(inode, offset, count, rw, ret);
+        trace_ext3_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret);
         return ret;
 }
...
@@ -2152,8 +2152,8 @@ extern void ext4_da_update_reserve_space(struct inode *inode,
 /* indirect.c */
 extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
                                 struct ext4_map_blocks *map, int flags);
-extern ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
-                                  struct iov_iter *iter, loff_t offset);
+extern ssize_t ext4_ind_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+                                  loff_t offset);
 extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock);
 extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks);
 extern void ext4_ind_truncate(handle_t *, struct inode *inode);
...
@@ -642,8 +642,8 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
  * crashes then stale disk data _may_ be exposed inside the file. But current
  * VFS code falls back into buffered path in that case so we are safe.
  */
-ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
-                           struct iov_iter *iter, loff_t offset)
+ssize_t ext4_ind_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+                           loff_t offset)
 {
         struct file *file = iocb->ki_filp;
         struct inode *inode = file->f_mapping->host;
@@ -654,7 +654,7 @@ ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
         size_t count = iov_iter_count(iter);
         int retries = 0;

-        if (rw == WRITE) {
+        if (iov_iter_rw(iter) == WRITE) {
                 loff_t final_size = offset + count;

                 if (final_size > inode->i_size) {
@@ -676,7 +676,7 @@ ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
         }

 retry:
-        if (rw == READ && ext4_should_dioread_nolock(inode)) {
+        if (iov_iter_rw(iter) == READ && ext4_should_dioread_nolock(inode)) {
                 /*
                  * Nolock dioread optimization may be dynamically disabled
                  * via ext4_inode_block_unlocked_dio(). Check inode's state
@@ -707,7 +707,7 @@ ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
                 ret = blockdev_direct_IO(iocb, inode, iter, offset,
                                          ext4_get_block);

-        if (unlikely((rw & WRITE) && ret < 0)) {
+        if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
                 loff_t isize = i_size_read(inode);
                 loff_t end = offset + count;
...
@@ -2952,8 +2952,8 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
  * if the machine crashes during the write.
  *
  */
-static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
-                                  struct iov_iter *iter, loff_t offset)
+static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+                                  loff_t offset)
 {
         struct file *file = iocb->ki_filp;
         struct inode *inode = file->f_mapping->host;
@@ -2966,8 +2966,8 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
         ext4_io_end_t *io_end = NULL;

         /* Use the old path for reads and writes beyond i_size. */
-        if (rw != WRITE || final_size > inode->i_size)
-                return ext4_ind_direct_IO(rw, iocb, iter, offset);
+        if (iov_iter_rw(iter) != WRITE || final_size > inode->i_size)
+                return ext4_ind_direct_IO(iocb, iter, offset);

         BUG_ON(iocb->private == NULL);
@@ -2976,7 +2976,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
          * conversion. This also disallows race between truncate() and
          * overwrite DIO as i_dio_count needs to be incremented under i_mutex.
          */
-        if (rw == WRITE)
+        if (iov_iter_rw(iter) == WRITE)
                 atomic_inc(&inode->i_dio_count);

         /* If we do a overwrite dio, i_mutex locking can be released */
@@ -3078,7 +3078,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
         }

 retake_lock:
-        if (rw == WRITE)
+        if (iov_iter_rw(iter) == WRITE)
                 inode_dio_done(inode);
         /* take i_mutex locking again if we do a ovewrite dio */
         if (overwrite) {
@@ -3107,12 +3107,12 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
         if (ext4_has_inline_data(inode))
                 return 0;

-        trace_ext4_direct_IO_enter(inode, offset, count, rw);
+        trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
         if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
-                ret = ext4_ext_direct_IO(rw, iocb, iter, offset);
+                ret = ext4_ext_direct_IO(iocb, iter, offset);
         else
-                ret = ext4_ind_direct_IO(rw, iocb, iter, offset);
-        trace_ext4_direct_IO_exit(inode, offset, count, rw, ret);
+                ret = ext4_ind_direct_IO(iocb, iter, offset);
+        trace_ext4_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret);
         return ret;
 }
...
@@ -1118,12 +1118,12 @@ static int f2fs_write_end(struct file *file,
         return copied;
 }

-static int check_direct_IO(struct inode *inode, int rw,
-                struct iov_iter *iter, loff_t offset)
+static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
+                loff_t offset)
 {
         unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

-        if (rw == READ)
+        if (iov_iter_rw(iter) == READ)
                 return 0;

         if (offset & blocksize_mask)
@@ -1151,19 +1151,19 @@ static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
                         return err;
         }

-        if (check_direct_IO(inode, rw, iter, offset))
+        if (check_direct_IO(inode, iter, offset))
                 return 0;

-        trace_f2fs_direct_IO_enter(inode, offset, count, rw);
+        trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));

-        if (rw & WRITE)
+        if (iov_iter_rw(iter) == WRITE)
                 __allocate_data_blocks(inode, offset, count);

         err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block);
-        if (err < 0 && (rw & WRITE))
+        if (err < 0 && iov_iter_rw(iter) == WRITE)
                 f2fs_write_failed(mapping, offset + count);

-        trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
+        trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);
         return err;
 }
...
@@ -255,7 +255,7 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
         size_t count = iov_iter_count(iter);
         ssize_t ret;

-        if (rw == WRITE) {
+        if (iov_iter_rw(iter) == WRITE) {
                 /*
                  * FIXME: blockdev_direct_IO() doesn't use ->write_begin(),
                  * so we need to update the ->mmu_private to block boundary.
@@ -275,7 +275,7 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
          * condition of fat_get_block() and ->truncate().
          */
         ret = blockdev_direct_IO(iocb, inode, iter, offset, fat_get_block);
-        if (ret < 0 && (rw & WRITE))
+        if (ret < 0 && iov_iter_rw(iter) == WRITE)
                 fat_write_failed(mapping, offset + count);

         return ret;
...
@@ -2800,11 +2800,11 @@ fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
         inode = file->f_mapping->host;
         i_size = i_size_read(inode);

-        if ((rw == READ) && (offset > i_size))
+        if ((iov_iter_rw(iter) == READ) && (offset > i_size))
                 return 0;

         /* optimization for short read */
-        if (async_dio && rw != WRITE && offset + count > i_size) {
+        if (async_dio && iov_iter_rw(iter) != WRITE && offset + count > i_size) {
                 if (offset >= i_size)
                         return 0;
                 count = min_t(loff_t, count, fuse_round_up(i_size - offset));
@@ -2819,7 +2819,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
         io->bytes = -1;
         io->size = 0;
         io->offset = offset;
-        io->write = (rw == WRITE);
+        io->write = (iov_iter_rw(iter) == WRITE);
         io->err = 0;
         io->file = file;
         /*
@@ -2834,13 +2834,14 @@ fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
          * to wait on real async I/O requests, so we must submit this request
          * synchronously.
          */
-        if (!is_sync_kiocb(iocb) && (offset + count > i_size) && rw == WRITE)
+        if (!is_sync_kiocb(iocb) && (offset + count > i_size) &&
+            iov_iter_rw(iter) == WRITE)
                 io->async = false;

         if (io->async && is_sync_kiocb(iocb))
                 io->done = &wait;

-        if (rw == WRITE) {
+        if (iov_iter_rw(iter) == WRITE) {
                 ret = generic_write_checks(file, &pos, &count, 0);
                 if (!ret) {
                         iov_iter_truncate(iter, count);
@@ -2865,7 +2866,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
                 kfree(io);

-        if (rw == WRITE) {
+        if (iov_iter_rw(iter) == WRITE) {
                 if (ret > 0)
                         fuse_write_update_size(inode, pos);
                 else if (ret < 0 && offset + count > i_size)
...
@@ -1016,13 +1016,12 @@ static void gfs2_invalidatepage(struct page *page, unsigned int offset,
 /**
  * gfs2_ok_for_dio - check that dio is valid on this file
  * @ip: The inode
- * @rw: READ or WRITE
  * @offset: The offset at which we are reading or writing
  *
  * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
  *          1 (to accept the i/o request)
  */
-static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
+static int gfs2_ok_for_dio(struct gfs2_inode *ip, loff_t offset)
 {
         /*
          * Should we return an error here? I can't see that O_DIRECT for
@@ -1061,7 +1060,7 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
         rv = gfs2_glock_nq(&gh);
         if (rv)
                 return rv;
-        rv = gfs2_ok_for_dio(ip, rw, offset);
+        rv = gfs2_ok_for_dio(ip, offset);
         if (rv != 1)
                 goto out; /* dio not valid, fall back to buffered i/o */
@@ -1091,7 +1090,7 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
                 rv = filemap_write_and_wait_range(mapping, lstart, end);
                 if (rv)
                         goto out;
-                if (rw == WRITE)
+                if (iov_iter_rw(iter) == WRITE)
                         truncate_inode_pages_range(mapping, lstart, end);
         }
...
@@ -139,7 +139,7 @@ static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
          * In case of error extending write may have instantiated a few
          * blocks outside i_size. Trim these off again.
          */
-        if (unlikely((rw & WRITE) && ret < 0)) {
+        if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
                 loff_t isize = i_size_read(inode);
                 loff_t end = offset + count;
...
@@ -137,7 +137,7 @@ static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
          * In case of error extending write may have instantiated a few
          * blocks outside i_size. Trim these off again.
          */
-        if (unlikely((rw & WRITE) && ret < 0)) {
+        if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
                 loff_t isize = i_size_read(inode);
                 loff_t end = offset + count;
...
@@ -345,7 +345,7 @@ static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb,
          * In case of error extending write may have instantiated a few
          * blocks outside i_size. Trim these off again.
          */
-        if (unlikely((rw & WRITE) && ret < 0)) {
+        if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
                 loff_t isize = i_size_read(inode);
                 loff_t end = offset + count;
...
@@ -267,7 +267,7 @@ ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t
 #else
         VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);

-        if (rw == READ)
+        if (iov_iter_rw(iter) == READ)
                 return nfs_file_direct_read(iocb, iter, pos);
         return nfs_file_direct_write(iocb, iter, pos);
 #endif /* CONFIG_NFS_SWAP */
...
@@ -314,7 +314,7 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
         size_t count = iov_iter_count(iter);
         ssize_t size;

-        if (rw == WRITE)
+        if (iov_iter_rw(iter) == WRITE)
                 return 0;

         /* Needs synchronization with the cleaner */
@@ -324,7 +324,7 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
          * In case of error extending write may have instantiated a few
          * blocks outside i_size. Trim these off again.
          */
-        if (unlikely((rw & WRITE) && size < 0)) {
+        if (unlikely(iov_iter_rw(iter) == WRITE && size < 0)) {
                 loff_t isize = i_size_read(inode);
                 loff_t end = offset + count;
...
@@ -842,7 +842,7 @@ static ssize_t ocfs2_direct_IO(int rw,
         if (i_size_read(inode) <= offset && !full_coherency)
                 return 0;

-        if (rw == READ)
+        if (iov_iter_rw(iter) == READ)
                 return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
                                             iter, offset,
                                             ocfs2_direct_IO_get_blocks,
...
@@ -3293,7 +3293,7 @@ static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb,
          * In case of error extending write may have instantiated a few
          * blocks outside i_size. Trim these off again.
          */
-        if (unlikely((rw & WRITE) && ret < 0)) {
+        if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
                 loff_t isize = i_size_read(inode);
                 loff_t end = offset + count;
...
@@ -226,7 +226,7 @@ static ssize_t udf_direct_IO(int rw, struct kiocb *iocb,
         ssize_t ret;

         ret = blockdev_direct_IO(iocb, inode, iter, offset, udf_get_block);
-        if (unlikely(ret < 0 && (rw & WRITE)))
+        if (unlikely(ret < 0 && iov_iter_rw(iter) == WRITE))
                 udf_write_failed(mapping, offset + count);
         return ret;
 }
...
@@ -1503,7 +1503,7 @@ xfs_vm_direct_IO(
         struct inode            *inode = iocb->ki_filp->f_mapping->host;
         struct block_device     *bdev = xfs_find_bdev_for_inode(inode);

-        if (rw & WRITE) {
+        if (iov_iter_rw(iter) == WRITE) {
                 return __blockdev_direct_IO(iocb, inode, bdev, iter, offset,
                                             xfs_get_blocks_direct,
                                             xfs_end_io_direct_write, NULL,
...