Commit 4a165feb authored by Linus Torvalds

Merge tag 'dio_for_v5.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs

Pull direct-io fix from Jan Kara:
 "Fix for unaligned direct IO read past EOF in legacy DIO code"

* tag 'dio_for_v5.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs:
  direct-io: defer alignment check until after the EOF check
  direct-io: don't force writeback for reads beyond EOF
  direct-io: clean up error paths of do_blockdev_direct_IO
parents b77a69b8 41b21af3
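For context, a hedged userspace sketch of the case this series addresses (the file name, sizes and offsets below are invented for illustration and are not part of the kernel change): on a filesystem still served by the legacy do_blockdev_direct_IO() path, an O_DIRECT read at a misaligned offset that starts at or beyond EOF used to hit the alignment check first; per the reordering in the diff below, the EOF check now runs first, so such a read is expected to return 0 (end of file) and no longer forces writeback of the page-cache range.

        /*
         * Hedged illustration, not kernel code: exercises an O_DIRECT read
         * whose offset is both misaligned and past EOF.  "testfile" and the
         * 4096-byte buffer size are made up for the example.
         */
        #define _GNU_SOURCE
        #include <fcntl.h>
        #include <stdio.h>
        #include <stdlib.h>
        #include <sys/stat.h>
        #include <unistd.h>

        int main(void)
        {
                int fd = open("testfile", O_RDONLY | O_DIRECT);
                if (fd < 0) {
                        perror("open");
                        return 1;
                }

                struct stat st;
                fstat(fd, &st);

                void *buf;
                if (posix_memalign(&buf, 4096, 4096)) {
                        close(fd);
                        return 1;
                }

                /* Misaligned offset that starts past EOF. */
                off_t off = st.st_size + 3;
                ssize_t ret = pread(fd, buf, 4096, off);

                /* Expected after the fix: ret == 0 (EOF), not an alignment error. */
                printf("pread at %lld returned %zd\n", (long long)off, ret);

                free(buf);
                close(fd);
                return 0;
        }

Misaligned direct IO that starts inside the file is still rejected by the alignment check; only the past-EOF ordering changes.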
fs/direct-io.c
@@ -1146,22 +1146,13 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 	 * the early prefetch in the caller enough time.
 	 */
 
-	if (align & blocksize_mask) {
-		if (bdev)
-			blkbits = blksize_bits(bdev_logical_block_size(bdev));
-		blocksize_mask = (1 << blkbits) - 1;
-		if (align & blocksize_mask)
-			goto out;
-	}
-
 	/* watch out for a 0 len io from a tricksy fs */
 	if (iov_iter_rw(iter) == READ && !count)
 		return 0;
 
 	dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
-	retval = -ENOMEM;
 	if (!dio)
-		goto out;
+		return -ENOMEM;
 	/*
 	 * Believe it or not, zeroing out the page array caused a .5%
 	 * performance regression in a database benchmark.  So, we take
@@ -1170,32 +1161,32 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 	memset(dio, 0, offsetof(struct dio, pages));
 
 	dio->flags = flags;
-	if (dio->flags & DIO_LOCKING) {
-		if (iov_iter_rw(iter) == READ) {
-			struct address_space *mapping =
-					iocb->ki_filp->f_mapping;
-
-			/* will be released by direct_io_worker */
-			inode_lock(inode);
-
-			retval = filemap_write_and_wait_range(mapping, offset,
-							      end - 1);
-			if (retval) {
-				inode_unlock(inode);
-				kmem_cache_free(dio_cache, dio);
-				goto out;
-			}
-		}
+	if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) {
+		/* will be released by direct_io_worker */
+		inode_lock(inode);
 	}
 
 	/* Once we sampled i_size check for reads beyond EOF */
 	dio->i_size = i_size_read(inode);
 	if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {
-		if (dio->flags & DIO_LOCKING)
-			inode_unlock(inode);
-		kmem_cache_free(dio_cache, dio);
 		retval = 0;
-		goto out;
+		goto fail_dio;
 	}
 
+	if (align & blocksize_mask) {
+		if (bdev)
+			blkbits = blksize_bits(bdev_logical_block_size(bdev));
+		blocksize_mask = (1 << blkbits) - 1;
+		if (align & blocksize_mask)
+			goto fail_dio;
+	}
+
+	if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) {
+		struct address_space *mapping = iocb->ki_filp->f_mapping;
+
+		retval = filemap_write_and_wait_range(mapping, offset, end - 1);
+		if (retval)
+			goto fail_dio;
+	}
+
 	/*
@@ -1239,14 +1230,8 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 			 */
 			retval = sb_init_dio_done_wq(dio->inode->i_sb);
 		}
-		if (retval) {
-			/*
-			 * We grab i_mutex only for reads so we don't have
-			 * to release it here
-			 */
-			kmem_cache_free(dio_cache, dio);
-			goto out;
-		}
+		if (retval)
+			goto fail_dio;
 	}
 
 	/*
@@ -1349,7 +1334,13 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 	} else
 		BUG_ON(retval != -EIOCBQUEUED);
 
-out:
+	return retval;
+
+fail_dio:
+	if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ)
+		inode_unlock(inode);
+	kmem_cache_free(dio_cache, dio);
+
 	return retval;
 }
 