Commit db46e604 authored by Christoph Hellwig, committed by Darrick J. Wong

xfs: merge COW handling into xfs_file_iomap_begin_delay

Besides simplifying the code a bit, this allows us to actually implement
the behavior of using COW preallocation for non-COW data that is mentioned
in the current comments.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
parent 12df89f2
...@@ -534,15 +534,16 @@ xfs_file_iomap_begin_delay( ...@@ -534,15 +534,16 @@ xfs_file_iomap_begin_delay(
{ {
struct xfs_inode *ip = XFS_I(inode); struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount; struct xfs_mount *mp = ip->i_mount;
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
xfs_fileoff_t maxbytes_fsb = xfs_fileoff_t maxbytes_fsb =
XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes); XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
xfs_fileoff_t end_fsb; xfs_fileoff_t end_fsb;
int error = 0, eof = 0; struct xfs_bmbt_irec imap, cmap;
struct xfs_bmbt_irec got; struct xfs_iext_cursor icur, ccur;
struct xfs_iext_cursor icur;
xfs_fsblock_t prealloc_blocks = 0; xfs_fsblock_t prealloc_blocks = 0;
bool eof = false, cow_eof = false, shared;
int whichfork = XFS_DATA_FORK;
int error = 0;
ASSERT(!XFS_IS_REALTIME_INODE(ip)); ASSERT(!XFS_IS_REALTIME_INODE(ip));
ASSERT(!xfs_get_extsz_hint(ip)); ASSERT(!xfs_get_extsz_hint(ip));
...@@ -560,7 +561,7 @@ xfs_file_iomap_begin_delay( ...@@ -560,7 +561,7 @@ xfs_file_iomap_begin_delay(
XFS_STATS_INC(mp, xs_blk_mapw); XFS_STATS_INC(mp, xs_blk_mapw);
if (!(ifp->if_flags & XFS_IFEXTENTS)) { if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK); error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
if (error) if (error)
goto out_unlock; goto out_unlock;
...@@ -568,51 +569,92 @@ xfs_file_iomap_begin_delay( ...@@ -568,51 +569,92 @@ xfs_file_iomap_begin_delay(
end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb); end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);
eof = !xfs_iext_lookup_extent(ip, ifp, offset_fsb, &icur, &got); /*
* Search the data fork fork first to look up our source mapping. We
* always need the data fork map, as we have to return it to the
* iomap code so that the higher level write code can read data in to
* perform read-modify-write cycles for unaligned writes.
*/
eof = !xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap);
if (eof) if (eof)
got.br_startoff = end_fsb; /* fake hole until the end */ imap.br_startoff = end_fsb; /* fake hole until the end */
/* We never need to allocate blocks for zeroing a hole. */
if ((flags & IOMAP_ZERO) && imap.br_startoff > offset_fsb) {
xfs_hole_to_iomap(ip, iomap, offset_fsb, imap.br_startoff);
goto out_unlock;
}
/*
* Search the COW fork extent list even if we did not find a data fork
* extent. This serves two purposes: first this implements the
* speculative preallocation using cowextsize, so that we also unshare
* block adjacent to shared blocks instead of just the shared blocks
* themselves. Second the lookup in the extent list is generally faster
* than going out to the shared extent tree.
*/
if (xfs_is_reflink_inode(ip)) {
cow_eof = !xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb,
&ccur, &cmap);
if (!cow_eof && cmap.br_startoff <= offset_fsb) {
trace_xfs_reflink_cow_found(ip, &cmap);
whichfork = XFS_COW_FORK;
goto done;
}
}
if (got.br_startoff <= offset_fsb) { if (imap.br_startoff <= offset_fsb) {
/* /*
* For reflink files we may need a delalloc reservation when * For reflink files we may need a delalloc reservation when
* overwriting shared extents. This includes zeroing of * overwriting shared extents. This includes zeroing of
* existing extents that contain data. * existing extents that contain data.
*/ */
if (xfs_is_reflink_inode(ip) && if (!xfs_is_reflink_inode(ip) ||
((flags & IOMAP_WRITE) || ((flags & IOMAP_ZERO) && imap.br_state != XFS_EXT_NORM)) {
got.br_state != XFS_EXT_UNWRITTEN)) { trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
xfs_trim_extent(&got, offset_fsb, end_fsb - offset_fsb); &imap);
error = xfs_reflink_reserve_cow(ip, &got); goto done;
if (error)
goto out_unlock;
} }
trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK, &got); xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);
goto done;
}
if (flags & IOMAP_ZERO) { /* Trim the mapping to the nearest shared extent boundary. */
xfs_hole_to_iomap(ip, iomap, offset_fsb, got.br_startoff); error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
goto out_unlock; if (error)
goto out_unlock;
/* Not shared? Just report the (potentially capped) extent. */
if (!shared) {
trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
&imap);
goto done;
}
/*
* Fork all the shared blocks from our write offset until the
* end of the extent.
*/
whichfork = XFS_COW_FORK;
end_fsb = imap.br_startoff + imap.br_blockcount;
} else {
/*
* We cap the maximum length we map here to MAX_WRITEBACK_PAGES
* pages to keep the chunks of work done where somewhat
* symmetric with the work writeback does. This is a completely
* arbitrary number pulled out of thin air.
*
* Note that the values needs to be less than 32-bits wide until
* the lower level functions are updated.
*/
count = min_t(loff_t, count, 1024 * PAGE_SIZE);
end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);
} }
error = xfs_qm_dqattach_locked(ip, false); error = xfs_qm_dqattach_locked(ip, false);
if (error) if (error)
goto out_unlock; goto out_unlock;
/* if (eof && whichfork == XFS_DATA_FORK) {
* We cap the maximum length we map here to MAX_WRITEBACK_PAGES pages
* to keep the chunks of work done where somewhat symmetric with the
* work writeback does. This is a completely arbitrary number pulled
* out of thin air as a best guess for initial testing.
*
* Note that the values needs to be less than 32-bits wide until
* the lower level functions are updated.
*/
count = min_t(loff_t, count, 1024 * PAGE_SIZE);
end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);
if (eof) {
prealloc_blocks = xfs_iomap_prealloc_size(ip, offset, count, prealloc_blocks = xfs_iomap_prealloc_size(ip, offset, count,
&icur); &icur);
if (prealloc_blocks) { if (prealloc_blocks) {
...@@ -635,9 +677,11 @@ xfs_file_iomap_begin_delay( ...@@ -635,9 +677,11 @@ xfs_file_iomap_begin_delay(
} }
retry: retry:
error = xfs_bmapi_reserve_delalloc(ip, XFS_DATA_FORK, offset_fsb, error = xfs_bmapi_reserve_delalloc(ip, whichfork, offset_fsb,
end_fsb - offset_fsb, prealloc_blocks, &got, &icur, end_fsb - offset_fsb, prealloc_blocks,
eof); whichfork == XFS_DATA_FORK ? &imap : &cmap,
whichfork == XFS_DATA_FORK ? &icur : &ccur,
whichfork == XFS_DATA_FORK ? eof : cow_eof);
switch (error) { switch (error) {
case 0: case 0:
break; break;
...@@ -659,9 +703,20 @@ xfs_file_iomap_begin_delay( ...@@ -659,9 +703,20 @@ xfs_file_iomap_begin_delay(
* them out if the write happens to fail. * them out if the write happens to fail.
*/ */
iomap->flags |= IOMAP_F_NEW; iomap->flags |= IOMAP_F_NEW;
trace_xfs_iomap_alloc(ip, offset, count, XFS_DATA_FORK, &got); trace_xfs_iomap_alloc(ip, offset, count, whichfork,
whichfork == XFS_DATA_FORK ? &imap : &cmap);
done: done:
error = xfs_bmbt_to_iomap(ip, iomap, &got, false); if (whichfork == XFS_COW_FORK) {
if (imap.br_startoff > offset_fsb) {
xfs_trim_extent(&cmap, offset_fsb,
imap.br_startoff - offset_fsb);
error = xfs_bmbt_to_iomap(ip, iomap, &cmap, false);
goto out_unlock;
}
/* ensure we only report blocks we have a reservation for */
xfs_trim_extent(&imap, cmap.br_startoff, cmap.br_blockcount);
}
error = xfs_bmbt_to_iomap(ip, iomap, &imap, false);
out_unlock: out_unlock:
xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error; return error;
......
...@@ -234,73 +234,6 @@ xfs_reflink_trim_around_shared( ...@@ -234,73 +234,6 @@ xfs_reflink_trim_around_shared(
} }
} }
/*
* Trim the passed in imap to the next shared/unshared extent boundary, and
* if imap->br_startoff points to a shared extent reserve space for it in the
* COW fork.
*
* Note that imap will always contain the block numbers for the existing blocks
* in the data fork, as the upper layers need them for read-modify-write
* operations.
*/
int
xfs_reflink_reserve_cow(
struct xfs_inode *ip,
struct xfs_bmbt_irec *imap)
{
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
struct xfs_bmbt_irec got;
int error = 0;
bool eof = false;
struct xfs_iext_cursor icur;
bool shared;
/*
* Search the COW fork extent list first. This serves two purposes:
* first this implement the speculative preallocation using cowextisze,
* so that we also unshared block adjacent to shared blocks instead
* of just the shared blocks themselves. Second the lookup in the
* extent list is generally faster than going out to the shared extent
* tree.
*/
if (!xfs_iext_lookup_extent(ip, ifp, imap->br_startoff, &icur, &got))
eof = true;
if (!eof && got.br_startoff <= imap->br_startoff) {
trace_xfs_reflink_cow_found(ip, imap);
xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
return 0;
}
/* Trim the mapping to the nearest shared extent boundary. */
error = xfs_reflink_trim_around_shared(ip, imap, &shared);
if (error)
return error;
/* Not shared? Just report the (potentially capped) extent. */
if (!shared)
return 0;
/*
* Fork all the shared blocks from our write offset until the end of
* the extent.
*/
error = xfs_qm_dqattach_locked(ip, false);
if (error)
return error;
error = xfs_bmapi_reserve_delalloc(ip, XFS_COW_FORK, imap->br_startoff,
imap->br_blockcount, 0, &got, &icur, eof);
if (error == -ENOSPC || error == -EDQUOT)
trace_xfs_reflink_cow_enospc(ip, imap);
if (error)
return error;
xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
trace_xfs_reflink_cow_alloc(ip, &got);
return 0;
}
/* Convert part of an unwritten CoW extent to a real one. */ /* Convert part of an unwritten CoW extent to a real one. */
STATIC int STATIC int
xfs_reflink_convert_cow_extent( xfs_reflink_convert_cow_extent(
......
...@@ -12,8 +12,6 @@ extern int xfs_reflink_find_shared(struct xfs_mount *mp, struct xfs_trans *tp, ...@@ -12,8 +12,6 @@ extern int xfs_reflink_find_shared(struct xfs_mount *mp, struct xfs_trans *tp,
extern int xfs_reflink_trim_around_shared(struct xfs_inode *ip, extern int xfs_reflink_trim_around_shared(struct xfs_inode *ip,
struct xfs_bmbt_irec *irec, bool *shared); struct xfs_bmbt_irec *irec, bool *shared);
extern int xfs_reflink_reserve_cow(struct xfs_inode *ip,
struct xfs_bmbt_irec *imap);
extern int xfs_reflink_allocate_cow(struct xfs_inode *ip, extern int xfs_reflink_allocate_cow(struct xfs_inode *ip,
struct xfs_bmbt_irec *imap, bool *shared, uint *lockmode, struct xfs_bmbt_irec *imap, bool *shared, uint *lockmode,
unsigned iomap_flags); unsigned iomap_flags);
......
...@@ -3196,13 +3196,10 @@ DEFINE_INODE_ERROR_EVENT(xfs_reflink_unshare_error); ...@@ -3196,13 +3196,10 @@ DEFINE_INODE_ERROR_EVENT(xfs_reflink_unshare_error);
/* copy on write */ /* copy on write */
DEFINE_INODE_IREC_EVENT(xfs_reflink_trim_around_shared); DEFINE_INODE_IREC_EVENT(xfs_reflink_trim_around_shared);
DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_alloc);
DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_found); DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_found);
DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_enospc); DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_enospc);
DEFINE_INODE_IREC_EVENT(xfs_reflink_convert_cow); DEFINE_INODE_IREC_EVENT(xfs_reflink_convert_cow);
DEFINE_RW_EVENT(xfs_reflink_reserve_cow);
DEFINE_SIMPLE_IO_EVENT(xfs_reflink_bounce_dio_write); DEFINE_SIMPLE_IO_EVENT(xfs_reflink_bounce_dio_write);
DEFINE_SIMPLE_IO_EVENT(xfs_reflink_cancel_cow_range); DEFINE_SIMPLE_IO_EVENT(xfs_reflink_cancel_cow_range);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment