Commit f150b423 authored by Christoph Hellwig, committed by Darrick J. Wong

xfs: split the iomap ops for buffered vs direct writes

Instead of lots of magic conditionals in the main write_begin
handler this makes the intent very clear.  Things will become even
better once we support delayed allocations for extent size hints
and realtime allocations.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
parent a526c85c
...@@ -1113,7 +1113,8 @@ xfs_free_file_space( ...@@ -1113,7 +1113,8 @@ xfs_free_file_space(
return 0; return 0;
if (offset + len > XFS_ISIZE(ip)) if (offset + len > XFS_ISIZE(ip))
len = XFS_ISIZE(ip) - offset; len = XFS_ISIZE(ip) - offset;
error = iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops); error = iomap_zero_range(VFS_I(ip), offset, len, NULL,
&xfs_buffered_write_iomap_ops);
if (error) if (error)
return error; return error;
......
...@@ -352,7 +352,7 @@ xfs_file_aio_write_checks( ...@@ -352,7 +352,7 @@ xfs_file_aio_write_checks(
trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize); trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
error = iomap_zero_range(inode, isize, iocb->ki_pos - isize, error = iomap_zero_range(inode, isize, iocb->ki_pos - isize,
NULL, &xfs_iomap_ops); NULL, &xfs_buffered_write_iomap_ops);
if (error) if (error)
return error; return error;
} else } else
...@@ -552,7 +552,8 @@ xfs_file_dio_aio_write( ...@@ -552,7 +552,8 @@ xfs_file_dio_aio_write(
* If unaligned, this is the only IO in-flight. Wait on it before we * If unaligned, this is the only IO in-flight. Wait on it before we
* release the iolock to prevent subsequent overlapping IO. * release the iolock to prevent subsequent overlapping IO.
*/ */
ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, &xfs_dio_write_ops, ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
&xfs_dio_write_ops,
is_sync_kiocb(iocb) || unaligned_io); is_sync_kiocb(iocb) || unaligned_io);
out: out:
xfs_iunlock(ip, iolock); xfs_iunlock(ip, iolock);
...@@ -592,7 +593,7 @@ xfs_file_dax_write( ...@@ -592,7 +593,7 @@ xfs_file_dax_write(
count = iov_iter_count(from); count = iov_iter_count(from);
trace_xfs_file_dax_write(ip, count, pos); trace_xfs_file_dax_write(ip, count, pos);
ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops); ret = dax_iomap_rw(iocb, from, &xfs_direct_write_iomap_ops);
if (ret > 0 && iocb->ki_pos > i_size_read(inode)) { if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
i_size_write(inode, iocb->ki_pos); i_size_write(inode, iocb->ki_pos);
error = xfs_setfilesize(ip, pos, ret); error = xfs_setfilesize(ip, pos, ret);
...@@ -639,7 +640,8 @@ xfs_file_buffered_aio_write( ...@@ -639,7 +640,8 @@ xfs_file_buffered_aio_write(
current->backing_dev_info = inode_to_bdi(inode); current->backing_dev_info = inode_to_bdi(inode);
trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos); trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops); ret = iomap_file_buffered_write(iocb, from,
&xfs_buffered_write_iomap_ops);
if (likely(ret >= 0)) if (likely(ret >= 0))
iocb->ki_pos += ret; iocb->ki_pos += ret;
...@@ -1156,12 +1158,14 @@ __xfs_filemap_fault( ...@@ -1156,12 +1158,14 @@ __xfs_filemap_fault(
ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
(write_fault && !vmf->cow_page) ? (write_fault && !vmf->cow_page) ?
&xfs_iomap_ops : &xfs_read_iomap_ops); &xfs_direct_write_iomap_ops :
&xfs_read_iomap_ops);
if (ret & VM_FAULT_NEEDDSYNC) if (ret & VM_FAULT_NEEDDSYNC)
ret = dax_finish_sync_fault(vmf, pe_size, pfn); ret = dax_finish_sync_fault(vmf, pe_size, pfn);
} else { } else {
if (write_fault) if (write_fault)
ret = iomap_page_mkwrite(vmf, &xfs_iomap_ops); ret = iomap_page_mkwrite(vmf,
&xfs_buffered_write_iomap_ops);
else else
ret = filemap_fault(vmf); ret = filemap_fault(vmf);
} }
......
...@@ -719,16 +719,7 @@ xfs_ilock_for_iomap( ...@@ -719,16 +719,7 @@ xfs_ilock_for_iomap(
} }
static int static int
xfs_file_iomap_begin_delay( xfs_direct_write_iomap_begin(
struct inode *inode,
loff_t offset,
loff_t count,
unsigned flags,
struct iomap *iomap,
struct iomap *srcmap);
static int
xfs_file_iomap_begin(
struct inode *inode, struct inode *inode,
loff_t offset, loff_t offset,
loff_t length, loff_t length,
...@@ -751,13 +742,6 @@ xfs_file_iomap_begin( ...@@ -751,13 +742,6 @@ xfs_file_iomap_begin(
if (XFS_FORCED_SHUTDOWN(mp)) if (XFS_FORCED_SHUTDOWN(mp))
return -EIO; return -EIO;
if (!(flags & IOMAP_DIRECT) && !IS_DAX(inode) &&
!xfs_get_extsz_hint(ip)) {
/* Reserve delalloc blocks for regular writeback. */
return xfs_file_iomap_begin_delay(inode, offset, length, flags,
iomap, srcmap);
}
/* /*
* Lock the inode in the manner required for the specified operation and * Lock the inode in the manner required for the specified operation and
* check for as many conditions that would result in blocking as * check for as many conditions that would result in blocking as
...@@ -864,8 +848,12 @@ xfs_file_iomap_begin( ...@@ -864,8 +848,12 @@ xfs_file_iomap_begin(
return error; return error;
} }
const struct iomap_ops xfs_direct_write_iomap_ops = {
.iomap_begin = xfs_direct_write_iomap_begin,
};
static int static int
xfs_file_iomap_begin_delay( xfs_buffered_write_iomap_begin(
struct inode *inode, struct inode *inode,
loff_t offset, loff_t offset,
loff_t count, loff_t count,
...@@ -884,8 +872,12 @@ xfs_file_iomap_begin_delay( ...@@ -884,8 +872,12 @@ xfs_file_iomap_begin_delay(
int whichfork = XFS_DATA_FORK; int whichfork = XFS_DATA_FORK;
int error = 0; int error = 0;
/* we can't use delayed allocations when using extent size hints */
if (xfs_get_extsz_hint(ip))
return xfs_direct_write_iomap_begin(inode, offset, count,
flags, iomap, srcmap);
ASSERT(!XFS_IS_REALTIME_INODE(ip)); ASSERT(!XFS_IS_REALTIME_INODE(ip));
ASSERT(!xfs_get_extsz_hint(ip));
xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_ilock(ip, XFS_ILOCK_EXCL);
...@@ -1077,18 +1069,23 @@ xfs_file_iomap_begin_delay( ...@@ -1077,18 +1069,23 @@ xfs_file_iomap_begin_delay(
} }
static int static int
xfs_file_iomap_end_delalloc( xfs_buffered_write_iomap_end(
struct xfs_inode *ip, struct inode *inode,
loff_t offset, loff_t offset,
loff_t length, loff_t length,
ssize_t written, ssize_t written,
unsigned flags,
struct iomap *iomap) struct iomap *iomap)
{ {
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount; struct xfs_mount *mp = ip->i_mount;
xfs_fileoff_t start_fsb; xfs_fileoff_t start_fsb;
xfs_fileoff_t end_fsb; xfs_fileoff_t end_fsb;
int error = 0; int error = 0;
if (iomap->type != IOMAP_DELALLOC)
return 0;
/* /*
* Behave as if the write failed if drop writes is enabled. Set the NEW * Behave as if the write failed if drop writes is enabled. Set the NEW
* flag to force delalloc cleanup. * flag to force delalloc cleanup.
...@@ -1133,25 +1130,9 @@ xfs_file_iomap_end_delalloc( ...@@ -1133,25 +1130,9 @@ xfs_file_iomap_end_delalloc(
return 0; return 0;
} }
static int const struct iomap_ops xfs_buffered_write_iomap_ops = {
xfs_file_iomap_end( .iomap_begin = xfs_buffered_write_iomap_begin,
struct inode *inode, .iomap_end = xfs_buffered_write_iomap_end,
loff_t offset,
loff_t length,
ssize_t written,
unsigned flags,
struct iomap *iomap)
{
if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) &&
iomap->type == IOMAP_DELALLOC)
return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
length, written, iomap);
return 0;
}
const struct iomap_ops xfs_iomap_ops = {
.iomap_begin = xfs_file_iomap_begin,
.iomap_end = xfs_file_iomap_end,
}; };
static int static int
......
...@@ -39,7 +39,8 @@ xfs_aligned_fsb_count( ...@@ -39,7 +39,8 @@ xfs_aligned_fsb_count(
return count_fsb; return count_fsb;
} }
extern const struct iomap_ops xfs_iomap_ops; extern const struct iomap_ops xfs_buffered_write_iomap_ops;
extern const struct iomap_ops xfs_direct_write_iomap_ops;
extern const struct iomap_ops xfs_read_iomap_ops; extern const struct iomap_ops xfs_read_iomap_ops;
extern const struct iomap_ops xfs_seek_iomap_ops; extern const struct iomap_ops xfs_seek_iomap_ops;
extern const struct iomap_ops xfs_xattr_iomap_ops; extern const struct iomap_ops xfs_xattr_iomap_ops;
......
...@@ -883,10 +883,10 @@ xfs_setattr_size( ...@@ -883,10 +883,10 @@ xfs_setattr_size(
if (newsize > oldsize) { if (newsize > oldsize) {
trace_xfs_zero_eof(ip, oldsize, newsize - oldsize); trace_xfs_zero_eof(ip, oldsize, newsize - oldsize);
error = iomap_zero_range(inode, oldsize, newsize - oldsize, error = iomap_zero_range(inode, oldsize, newsize - oldsize,
&did_zeroing, &xfs_iomap_ops); &did_zeroing, &xfs_buffered_write_iomap_ops);
} else { } else {
error = iomap_truncate_page(inode, newsize, &did_zeroing, error = iomap_truncate_page(inode, newsize, &did_zeroing,
&xfs_iomap_ops); &xfs_buffered_write_iomap_ops);
} }
if (error) if (error)
......
...@@ -1270,7 +1270,7 @@ xfs_reflink_zero_posteof( ...@@ -1270,7 +1270,7 @@ xfs_reflink_zero_posteof(
trace_xfs_zero_eof(ip, isize, pos - isize); trace_xfs_zero_eof(ip, isize, pos - isize);
return iomap_zero_range(VFS_I(ip), isize, pos - isize, NULL, return iomap_zero_range(VFS_I(ip), isize, pos - isize, NULL,
&xfs_iomap_ops); &xfs_buffered_write_iomap_ops);
} }
/* /*
...@@ -1527,7 +1527,8 @@ xfs_reflink_unshare( ...@@ -1527,7 +1527,8 @@ xfs_reflink_unshare(
inode_dio_wait(inode); inode_dio_wait(inode);
error = iomap_file_unshare(inode, offset, len, &xfs_iomap_ops); error = iomap_file_unshare(inode, offset, len,
&xfs_buffered_write_iomap_ops);
if (error) if (error)
goto out; goto out;
error = filemap_write_and_wait(inode->i_mapping); error = filemap_write_and_wait(inode->i_mapping);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment