Commit f1ba5faf authored by Shiyang Ruan, committed by Dan Williams

xfs: add xfs_zero_range and xfs_truncate_page helpers

Add helpers to prepare for using different DAX operations.
Signed-off-by: Shiyang Ruan <ruansy.fnst@fujitsu.com>
[hch: split from a larger patch + slight cleanups]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Link: https://lore.kernel.org/r/20211129102203.2243509-16-hch@lst.de
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 60696eb2
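
In short, the patch folds every open-coded iomap_zero_range()/iomap_truncate_page() call that passed &xfs_buffered_write_iomap_ops into two small xfs_* wrappers, so the choice of iomap ops lives in one place. The before/after sketch below is purely illustrative: the surrounding function and variable names are made up, and only the helpers added in fs/xfs/xfs_iomap.c by this patch (and the iomap calls they wrap) are real.

/*
 * Hypothetical call site, illustrating the conversion this patch applies
 * across fs/xfs.  Only xfs_zero_range()/iomap_zero_range() and
 * xfs_buffered_write_iomap_ops are real; the rest is made up.
 */
static int
example_zero_partial_block(
        struct xfs_inode  *ip,
        loff_t            offset,
        loff_t            len)
{
        int               error;

        /* Before: every caller spelled out the iomap ops itself. */
        error = iomap_zero_range(VFS_I(ip), offset, len, NULL,
                        &xfs_buffered_write_iomap_ops);
        if (error)
                return error;

        /* After: callers go through the new helper instead. */
        return xfs_zero_range(ip, offset, len, NULL);
}
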
fs/xfs/xfs_bmap_util.c
@@ -1001,7 +1001,7 @@ xfs_free_file_space(
 	/*
 	 * Now that we've unmap all full blocks we'll have to zero out any
-	 * partial block at the beginning and/or end. iomap_zero_range is smart
+	 * partial block at the beginning and/or end. xfs_zero_range is smart
 	 * enough to skip any holes, including those we just created, but we
 	 * must take care not to zero beyond EOF and enlarge i_size.
 	 */
@@ -1009,15 +1009,14 @@ xfs_free_file_space(
 		return 0;
 	if (offset + len > XFS_ISIZE(ip))
 		len = XFS_ISIZE(ip) - offset;
-	error = iomap_zero_range(VFS_I(ip), offset, len, NULL,
-			&xfs_buffered_write_iomap_ops);
+	error = xfs_zero_range(ip, offset, len, NULL);
 	if (error)
 		return error;
 	/*
 	 * If we zeroed right up to EOF and EOF straddles a page boundary we
 	 * must make sure that the post-EOF area is also zeroed because the
-	 * page could be mmap'd and iomap_zero_range doesn't do that for us.
+	 * page could be mmap'd and xfs_zero_range doesn't do that for us.
 	 * Writeback of the eof page will do this, albeit clumsily.
 	 */
 	if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
...
fs/xfs/xfs_file.c
@@ -437,8 +437,7 @@ xfs_file_write_checks(
 		}
 		trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
-		error = iomap_zero_range(inode, isize, iocb->ki_pos - isize,
-				NULL, &xfs_buffered_write_iomap_ops);
+		error = xfs_zero_range(ip, isize, iocb->ki_pos - isize, NULL);
 		if (error)
 			return error;
 	} else
...
fs/xfs/xfs_iomap.c
@@ -1311,3 +1311,28 @@ xfs_xattr_iomap_begin(
 const struct iomap_ops xfs_xattr_iomap_ops = {
 	.iomap_begin		= xfs_xattr_iomap_begin,
 };
+
+int
+xfs_zero_range(
+	struct xfs_inode	*ip,
+	loff_t			pos,
+	loff_t			len,
+	bool			*did_zero)
+{
+	struct inode		*inode = VFS_I(ip);
+
+	return iomap_zero_range(inode, pos, len, did_zero,
+			&xfs_buffered_write_iomap_ops);
+}
+
+int
+xfs_truncate_page(
+	struct xfs_inode	*ip,
+	loff_t			pos,
+	bool			*did_zero)
+{
+	struct inode		*inode = VFS_I(ip);
+
+	return iomap_truncate_page(inode, pos, did_zero,
+			&xfs_buffered_write_iomap_ops);
+}
fs/xfs/xfs_iomap.h
@@ -20,6 +20,10 @@ xfs_fileoff_t xfs_iomap_eof_align_last_fsb(struct xfs_inode *ip,
 int xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *,
 		struct xfs_bmbt_irec *, u16);
+int xfs_zero_range(struct xfs_inode *ip, loff_t pos, loff_t len,
+		bool *did_zero);
+int xfs_truncate_page(struct xfs_inode *ip, loff_t pos, bool *did_zero);
+
 static inline xfs_filblks_t
 xfs_aligned_fsb_count(
 	xfs_fileoff_t		offset_fsb,
...
fs/xfs/xfs_iops.c
@@ -911,8 +911,8 @@ xfs_setattr_size(
 	 */
 	if (newsize > oldsize) {
 		trace_xfs_zero_eof(ip, oldsize, newsize - oldsize);
-		error = iomap_zero_range(inode, oldsize, newsize - oldsize,
-				&did_zeroing, &xfs_buffered_write_iomap_ops);
+		error = xfs_zero_range(ip, oldsize, newsize - oldsize,
+				&did_zeroing);
 	} else {
 		/*
 		 * iomap won't detect a dirty page over an unwritten block (or a
@@ -924,8 +924,7 @@ xfs_setattr_size(
 				newsize);
 		if (error)
 			return error;
-		error = iomap_truncate_page(inode, newsize, &did_zeroing,
-				&xfs_buffered_write_iomap_ops);
+		error = xfs_truncate_page(ip, newsize, &did_zeroing);
 	}
 	if (error)
...
fs/xfs/xfs_reflink.c
@@ -1269,8 +1269,7 @@ xfs_reflink_zero_posteof(
 		return 0;
 	trace_xfs_zero_eof(ip, isize, pos - isize);
-	return iomap_zero_range(VFS_I(ip), isize, pos - isize, NULL,
-			&xfs_buffered_write_iomap_ops);
+	return xfs_zero_range(ip, isize, pos - isize, NULL);
 }
 /*
...
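
The commit message's "prepare for using different DAX operations" points at where this is heading: with every caller funneled through xfs_zero_range()/xfs_truncate_page(), a later patch can switch on IS_DAX() inside the helpers without touching the call sites again. Below is a rough sketch of such a follow-up, not part of this commit; it assumes dax_zero_range() and xfs_direct_write_iomap_ops, which are introduced elsewhere in the series.

/*
 * Sketch only, not part of this patch: dispatch to a DAX-specific zeroing
 * path inside the helper.  Assumes dax_zero_range() and
 * xfs_direct_write_iomap_ops exist (introduced elsewhere in this series).
 */
int
xfs_zero_range(
        struct xfs_inode  *ip,
        loff_t            pos,
        loff_t            len,
        bool              *did_zero)
{
        struct inode      *inode = VFS_I(ip);

        if (IS_DAX(inode))
                return dax_zero_range(inode, pos, len, did_zero,
                                &xfs_direct_write_iomap_ops);
        return iomap_zero_range(inode, pos, len, did_zero,
                        &xfs_buffered_write_iomap_ops);
}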