Commit dfa03a5f authored by Dave Chinner's avatar Dave Chinner Committed by Darrick J. Wong

xfs: clean up locking in xfs_file_iomap_begin

Rather than checking what kind of locking is needed in a helper
function and then jumping through hoops to do the locking in line,
move the locking to the helper function that does all the checks
and rename it to xfs_ilock_for_iomap().

This also allows us to hoist all the nonblocking checks up into the
locking helper, further simplifying the code flow in
xfs_file_iomap_begin() and making it easier to understand.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Carlos Maiolino <cmaiolino@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
parent d0641780
...@@ -946,8 +946,11 @@ xfs_iomap_write_unwritten( ...@@ -946,8 +946,11 @@ xfs_iomap_write_unwritten(
return error; return error;
} }
static inline bool imap_needs_alloc(struct inode *inode, static inline bool
struct xfs_bmbt_irec *imap, int nimaps) imap_needs_alloc(
struct inode *inode,
struct xfs_bmbt_irec *imap,
int nimaps)
{ {
return !nimaps || return !nimaps ||
imap->br_startblock == HOLESTARTBLOCK || imap->br_startblock == HOLESTARTBLOCK ||
...@@ -955,31 +958,58 @@ static inline bool imap_needs_alloc(struct inode *inode, ...@@ -955,31 +958,58 @@ static inline bool imap_needs_alloc(struct inode *inode,
(IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN); (IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN);
} }
static inline bool needs_cow_for_zeroing(struct xfs_bmbt_irec *imap, int nimaps) static inline bool
needs_cow_for_zeroing(
struct xfs_bmbt_irec *imap,
int nimaps)
{ {
return nimaps && return nimaps &&
imap->br_startblock != HOLESTARTBLOCK && imap->br_startblock != HOLESTARTBLOCK &&
imap->br_state != XFS_EXT_UNWRITTEN; imap->br_state != XFS_EXT_UNWRITTEN;
} }
static inline bool need_excl_ilock(struct xfs_inode *ip, unsigned flags) static int
xfs_ilock_for_iomap(
struct xfs_inode *ip,
unsigned flags,
unsigned *lockmode)
{ {
unsigned mode = XFS_ILOCK_SHARED;
/* /*
* COW writes may allocate delalloc space or convert unwritten COW * COW writes may allocate delalloc space or convert unwritten COW
* extents, so we need to make sure to take the lock exclusively here. * extents, so we need to make sure to take the lock exclusively here.
*/ */
if (xfs_is_reflink_inode(ip) && (flags & (IOMAP_WRITE | IOMAP_ZERO))) if (xfs_is_reflink_inode(ip) && (flags & (IOMAP_WRITE | IOMAP_ZERO))) {
return true; /*
* FIXME: It could still overwrite on unshared extents and not
* need allocation.
*/
if (flags & IOMAP_NOWAIT)
return -EAGAIN;
mode = XFS_ILOCK_EXCL;
}
/* /*
* Extents not yet cached requires exclusive access, don't block. * Extents not yet cached requires exclusive access, don't block. This
* This is an opencoded xfs_ilock_data_map_shared() to cater for the * is an opencoded xfs_ilock_data_map_shared() call but with
* non-blocking behaviour. * non-blocking behaviour.
*/ */
if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE && if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
!(ip->i_df.if_flags & XFS_IFEXTENTS)) if (flags & IOMAP_NOWAIT)
return true; return -EAGAIN;
return false; mode = XFS_ILOCK_EXCL;
}
if (flags & IOMAP_NOWAIT) {
if (!xfs_ilock_nowait(ip, mode))
return -EAGAIN;
} else {
xfs_ilock(ip, mode);
}
*lockmode = mode;
return 0;
} }
static int static int
...@@ -1007,19 +1037,15 @@ xfs_file_iomap_begin( ...@@ -1007,19 +1037,15 @@ xfs_file_iomap_begin(
return xfs_file_iomap_begin_delay(inode, offset, length, iomap); return xfs_file_iomap_begin_delay(inode, offset, length, iomap);
} }
if (need_excl_ilock(ip, flags)) /*
lockmode = XFS_ILOCK_EXCL; * Lock the inode in the manner required for the specified operation and
else * check for as many conditions that would result in blocking as
lockmode = XFS_ILOCK_SHARED; * possible. This removes most of the non-blocking checks from the
* mapping code below.
if (flags & IOMAP_NOWAIT) { */
if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) error = xfs_ilock_for_iomap(ip, flags, &lockmode);
return -EAGAIN; if (error)
if (!xfs_ilock_nowait(ip, lockmode)) return error;
return -EAGAIN;
} else {
xfs_ilock(ip, lockmode);
}
ASSERT(offset <= mp->m_super->s_maxbytes); ASSERT(offset <= mp->m_super->s_maxbytes);
if (offset > mp->m_super->s_maxbytes - length) if (offset > mp->m_super->s_maxbytes - length)
...@@ -1044,19 +1070,17 @@ xfs_file_iomap_begin( ...@@ -1044,19 +1070,17 @@ xfs_file_iomap_begin(
if (!(flags & (IOMAP_WRITE | IOMAP_ZERO))) if (!(flags & (IOMAP_WRITE | IOMAP_ZERO)))
goto out_found; goto out_found;
if (xfs_is_reflink_inode(ip) && /*
((flags & IOMAP_WRITE) || * Break shared extents if necessary. Checks for non-blocking IO have
((flags & IOMAP_ZERO) && needs_cow_for_zeroing(&imap, nimaps)))) { * been done up front, so we don't need to do them here.
*/
if (xfs_is_reflink_inode(ip)) {
/* if zeroing doesn't need COW allocation, then we are done. */
if ((flags & IOMAP_ZERO) &&
!needs_cow_for_zeroing(&imap, nimaps))
goto out_found;
if (flags & IOMAP_DIRECT) { if (flags & IOMAP_DIRECT) {
/*
* A reflinked inode will result in CoW alloc.
* FIXME: It could still overwrite on unshared extents
* and not need allocation.
*/
if (flags & IOMAP_NOWAIT) {
error = -EAGAIN;
goto out_unlock;
}
/* may drop and re-acquire the ilock */ /* may drop and re-acquire the ilock */
error = xfs_reflink_allocate_cow(ip, &imap, &shared, error = xfs_reflink_allocate_cow(ip, &imap, &shared,
&lockmode); &lockmode);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment