Commit 5c5b6f75 authored by Christoph Hellwig, committed by Darrick J. Wong

xfs: cleanup xfs_direct_write_iomap_begin

Move more checks into the helpers that determine whether a COW
operation or an allocation is needed, and split the return path for the
case where an existing data mapping is found from the case where a new
allocation is required.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
parent 12dfb58a
...@@ -642,23 +642,42 @@ xfs_iomap_write_unwritten( ...@@ -642,23 +642,42 @@ xfs_iomap_write_unwritten(
static inline bool static inline bool
imap_needs_alloc( imap_needs_alloc(
struct inode *inode, struct inode *inode,
unsigned flags,
struct xfs_bmbt_irec *imap, struct xfs_bmbt_irec *imap,
int nimaps) int nimaps)
{ {
return !nimaps || /* don't allocate blocks when just zeroing */
if (flags & IOMAP_ZERO)
return false;
if (!nimaps ||
imap->br_startblock == HOLESTARTBLOCK || imap->br_startblock == HOLESTARTBLOCK ||
imap->br_startblock == DELAYSTARTBLOCK || imap->br_startblock == DELAYSTARTBLOCK)
(IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN); return true;
/* we convert unwritten extents before copying the data for DAX */
if (IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN)
return true;
return false;
} }
static inline bool static inline bool
needs_cow_for_zeroing( imap_needs_cow(
struct xfs_inode *ip,
unsigned int flags,
struct xfs_bmbt_irec *imap, struct xfs_bmbt_irec *imap,
int nimaps) int nimaps)
{ {
return nimaps && if (!xfs_is_cow_inode(ip))
imap->br_startblock != HOLESTARTBLOCK && return false;
imap->br_state != XFS_EXT_UNWRITTEN;
/* when zeroing we don't have to COW holes or unwritten extents */
if (flags & IOMAP_ZERO) {
if (!nimaps ||
imap->br_startblock == HOLESTARTBLOCK ||
imap->br_state == XFS_EXT_UNWRITTEN)
return false;
}
return true;
} }
static int static int
...@@ -742,6 +761,14 @@ xfs_direct_write_iomap_begin( ...@@ -742,6 +761,14 @@ xfs_direct_write_iomap_begin(
if (XFS_FORCED_SHUTDOWN(mp)) if (XFS_FORCED_SHUTDOWN(mp))
return -EIO; return -EIO;
/*
* Writes that span EOF might trigger an IO size update on completion,
* so consider them to be dirty for the purposes of O_DSYNC even if
* there is no other metadata changes pending or have been made here.
*/
if (offset + length > i_size_read(inode))
iomap_flags |= IOMAP_F_DIRTY;
/* /*
* Lock the inode in the manner required for the specified operation and * Lock the inode in the manner required for the specified operation and
* check for as many conditions that would result in blocking as * check for as many conditions that would result in blocking as
...@@ -761,12 +788,7 @@ xfs_direct_write_iomap_begin( ...@@ -761,12 +788,7 @@ xfs_direct_write_iomap_begin(
* Break shared extents if necessary. Checks for non-blocking IO have * Break shared extents if necessary. Checks for non-blocking IO have
* been done up front, so we don't need to do them here. * been done up front, so we don't need to do them here.
*/ */
if (xfs_is_cow_inode(ip)) { if (imap_needs_cow(ip, flags, &imap, nimaps)) {
/* if zeroing doesn't need COW allocation, then we are done. */
if ((flags & IOMAP_ZERO) &&
!needs_cow_for_zeroing(&imap, nimaps))
goto out_found;
/* may drop and re-acquire the ilock */ /* may drop and re-acquire the ilock */
error = xfs_reflink_allocate_cow(ip, &imap, &cmap, &shared, error = xfs_reflink_allocate_cow(ip, &imap, &cmap, &shared,
&lockmode, flags & IOMAP_DIRECT); &lockmode, flags & IOMAP_DIRECT);
...@@ -778,18 +800,17 @@ xfs_direct_write_iomap_begin( ...@@ -778,18 +800,17 @@ xfs_direct_write_iomap_begin(
length = XFS_FSB_TO_B(mp, end_fsb) - offset; length = XFS_FSB_TO_B(mp, end_fsb) - offset;
} }
/* Don't need to allocate over holes when doing zeroing operations. */ if (imap_needs_alloc(inode, flags, &imap, nimaps))
if (flags & IOMAP_ZERO) goto allocate_blocks;
goto out_found;
if (!imap_needs_alloc(inode, &imap, nimaps)) xfs_iunlock(ip, lockmode);
goto out_found; trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags);
/* If nowait is set bail since we are going to make allocations. */ allocate_blocks:
if (flags & IOMAP_NOWAIT) {
error = -EAGAIN; error = -EAGAIN;
if (flags & IOMAP_NOWAIT)
goto out_unlock; goto out_unlock;
}
/* /*
* We cap the maximum length we map to a sane size to keep the chunks * We cap the maximum length we map to a sane size to keep the chunks
...@@ -808,29 +829,12 @@ xfs_direct_write_iomap_begin( ...@@ -808,29 +829,12 @@ xfs_direct_write_iomap_begin(
*/ */
if (lockmode == XFS_ILOCK_EXCL) if (lockmode == XFS_ILOCK_EXCL)
xfs_ilock_demote(ip, lockmode); xfs_ilock_demote(ip, lockmode);
error = xfs_iomap_write_direct(ip, offset, length, &imap, error = xfs_iomap_write_direct(ip, offset, length, &imap, nimaps);
nimaps);
if (error) if (error)
return error; return error;
iomap_flags |= IOMAP_F_NEW;
trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap); trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);
return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags | IOMAP_F_NEW);
out_finish:
/*
* Writes that span EOF might trigger an IO size update on completion,
* so consider them to be dirty for the purposes of O_DSYNC even if
* there is no other metadata changes pending or have been made here.
*/
if (offset + length > i_size_read(inode))
iomap_flags |= IOMAP_F_DIRTY;
return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags);
out_found:
ASSERT(nimaps);
xfs_iunlock(ip, lockmode);
trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
goto out_finish;
out_found_cow: out_found_cow:
xfs_iunlock(ip, lockmode); xfs_iunlock(ip, lockmode);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment