Commit 760fea8b authored by Christoph Hellwig, committed by Darrick J. Wong

xfs: remove the fork fields in the writepage_ctx and ioend

In preparation for moving the writeback code to iomap.c, replace the
XFS-specific COW fork concept with the iomap IOMAP_F_SHARED flag.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
parent 5653017b
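
The whole patch boils down to one predicate change: code that used to ask "is this ioend writing back the COW fork?" (an XFS-internal notion) now asks "does the mapping carry IOMAP_F_SHARED?" (a generic iomap one). Below is a minimal standalone sketch of the before/after test; the struct layouts, helper names, and constant values are simplified illustrations, not the kernel definitions (those live in fs/xfs/xfs_aops.h, fs/xfs/libxfs/xfs_inode_fork.h, and include/linux/iomap.h):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define IOMAP_F_SHARED	0x04	/* illustrative value; see include/linux/iomap.h */
#define XFS_COW_FORK	2	/* illustrative value; see fs/xfs/libxfs/xfs_inode_fork.h */

struct old_ioend { int io_fork; };			/* before: records the fork */
struct new_ioend { uint16_t io_type, io_flags; };	/* after: generic iomap flags */

/* Before this commit: COW writeback identified by the XFS-specific fork field. */
static bool old_is_cow_writeback(const struct old_ioend *io)
{
	return io->io_fork == XFS_COW_FORK;
}

/* After this commit: identified by the filesystem-neutral IOMAP_F_SHARED flag. */
static bool new_is_cow_writeback(const struct new_ioend *io)
{
	return io->io_flags & IOMAP_F_SHARED;
}

int main(void)
{
	struct old_ioend o = { .io_fork = XFS_COW_FORK };
	struct new_ioend n = { .io_type = 0, .io_flags = IOMAP_F_SHARED };

	assert(old_is_cow_writeback(&o) && new_is_cow_writeback(&n));
	return 0;
}
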
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -23,7 +23,6 @@
  */
 struct xfs_writepage_ctx {
 	struct iomap		iomap;
-	int			fork;
 	unsigned int		data_seq;
 	unsigned int		cow_seq;
 	struct xfs_ioend	*ioend;
@@ -272,7 +271,7 @@ xfs_end_ioend(
 	 */
 	error = blk_status_to_errno(ioend->io_bio->bi_status);
 	if (unlikely(error)) {
-		if (ioend->io_fork == XFS_COW_FORK)
+		if (ioend->io_flags & IOMAP_F_SHARED)
 			xfs_reflink_cancel_cow_range(ip, offset, size, true);
 		goto done;
 	}
@@ -280,7 +279,7 @@ xfs_end_ioend(
 	/*
 	 * Success: commit the COW or unwritten blocks if needed.
 	 */
-	if (ioend->io_fork == XFS_COW_FORK)
+	if (ioend->io_flags & IOMAP_F_SHARED)
 		error = xfs_reflink_end_cow(ip, offset, size);
 	else if (ioend->io_type == IOMAP_UNWRITTEN)
 		error = xfs_iomap_write_unwritten(ip, offset, size, false);
@@ -304,7 +303,8 @@ xfs_ioend_can_merge(
 {
 	if (ioend->io_bio->bi_status != next->io_bio->bi_status)
 		return false;
-	if ((ioend->io_fork == XFS_COW_FORK) ^ (next->io_fork == XFS_COW_FORK))
+	if ((ioend->io_flags & IOMAP_F_SHARED) ^
+	    (next->io_flags & IOMAP_F_SHARED))
 		return false;
 	if ((ioend->io_type == IOMAP_UNWRITTEN) ^
 	    (next->io_type == IOMAP_UNWRITTEN))
@@ -404,6 +404,13 @@ xfs_end_io(
 	}
 }
 
+static inline bool xfs_ioend_needs_workqueue(struct xfs_ioend *ioend)
+{
+	return ioend->io_private ||
+		ioend->io_type == IOMAP_UNWRITTEN ||
+		(ioend->io_flags & IOMAP_F_SHARED);
+}
+
 STATIC void
 xfs_end_bio(
 	struct bio		*bio)
@@ -413,9 +420,7 @@ xfs_end_bio(
 	struct xfs_mount	*mp = ip->i_mount;
 	unsigned long		flags;
 
-	if (ioend->io_fork == XFS_COW_FORK ||
-	    ioend->io_type == IOMAP_UNWRITTEN ||
-	    ioend->io_private) {
+	if (xfs_ioend_needs_workqueue(ioend)) {
 		spin_lock_irqsave(&ip->i_ioend_lock, flags);
 		if (list_empty(&ip->i_ioend_list))
 			WARN_ON_ONCE(!queue_work(mp->m_unwritten_workqueue,
@@ -444,7 +449,7 @@ xfs_imap_valid(
 	 * covers the offset. Be careful to check this first because the caller
 	 * can revalidate a COW mapping without updating the data seqno.
 	 */
-	if (wpc->fork == XFS_COW_FORK)
+	if (wpc->iomap.flags & IOMAP_F_SHARED)
 		return true;
 
 	/*
@@ -474,6 +479,7 @@ static int
 xfs_convert_blocks(
 	struct xfs_writepage_ctx *wpc,
 	struct xfs_inode	*ip,
+	int			whichfork,
 	loff_t			offset)
 {
 	int			error;
@@ -485,8 +491,8 @@ xfs_convert_blocks(
 	 * delalloc extent if free space is sufficiently fragmented.
 	 */
 	do {
-		error = xfs_bmapi_convert_delalloc(ip, wpc->fork, offset,
-				&wpc->iomap, wpc->fork == XFS_COW_FORK ?
+		error = xfs_bmapi_convert_delalloc(ip, whichfork, offset,
+				&wpc->iomap, whichfork == XFS_COW_FORK ?
 				&wpc->cow_seq : &wpc->data_seq);
 		if (error)
 			return error;
@@ -507,6 +513,7 @@ xfs_map_blocks(
 	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
 	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
 	xfs_fileoff_t		cow_fsb = NULLFILEOFF;
+	int			whichfork = XFS_DATA_FORK;
 	struct xfs_bmbt_irec	imap;
 	struct xfs_iext_cursor	icur;
 	int			retries = 0;
@@ -555,7 +562,7 @@ xfs_map_blocks(
 		wpc->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
 		xfs_iunlock(ip, XFS_ILOCK_SHARED);
 
-		wpc->fork = XFS_COW_FORK;
+		whichfork = XFS_COW_FORK;
 		goto allocate_blocks;
 	}
 
@@ -578,8 +585,6 @@ xfs_map_blocks(
 	wpc->data_seq = READ_ONCE(ip->i_df.if_seq);
 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 
-	wpc->fork = XFS_DATA_FORK;
-
 	/* landed in a hole or beyond EOF? */
 	if (imap.br_startoff > offset_fsb) {
 		imap.br_blockcount = imap.br_startoff - offset_fsb;
@@ -604,10 +609,10 @@ xfs_map_blocks(
 		goto allocate_blocks;
 
 	xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0);
-	trace_xfs_map_blocks_found(ip, offset, count, wpc->fork, &imap);
+	trace_xfs_map_blocks_found(ip, offset, count, whichfork, &imap);
 	return 0;
 allocate_blocks:
-	error = xfs_convert_blocks(wpc, ip, offset);
+	error = xfs_convert_blocks(wpc, ip, whichfork, offset);
 	if (error) {
 		/*
 		 * If we failed to find the extent in the COW fork we might have
@@ -616,7 +621,7 @@ xfs_map_blocks(
 		 * the former case, but prevent additional retries to avoid
 		 * looping forever for the latter case.
 		 */
-		if (error == -EAGAIN && wpc->fork == XFS_COW_FORK && !retries++)
+		if (error == -EAGAIN && whichfork == XFS_COW_FORK && !retries++)
 			goto retry;
 		ASSERT(error != -EAGAIN);
 		return error;
@@ -627,7 +632,7 @@ xfs_map_blocks(
 	 * original delalloc one. Trim the return extent to the next COW
 	 * boundary again to force a re-lookup.
 	 */
-	if (wpc->fork != XFS_COW_FORK && cow_fsb != NULLFILEOFF) {
+	if (whichfork != XFS_COW_FORK && cow_fsb != NULLFILEOFF) {
 		loff_t		cow_offset = XFS_FSB_TO_B(mp, cow_fsb);
 
 		if (cow_offset < wpc->iomap.offset + wpc->iomap.length)
@@ -636,7 +641,7 @@ xfs_map_blocks(
 
 	ASSERT(wpc->iomap.offset <= offset);
 	ASSERT(wpc->iomap.offset + wpc->iomap.length > offset);
-	trace_xfs_map_blocks_alloc(ip, offset, count, wpc->fork, &imap);
+	trace_xfs_map_blocks_alloc(ip, offset, count, whichfork, &imap);
 	return 0;
 }
 
@@ -670,14 +675,14 @@ xfs_submit_ioend(
 	nofs_flag = memalloc_nofs_save();
 
 	/* Convert CoW extents to regular */
-	if (!status && ioend->io_fork == XFS_COW_FORK) {
+	if (!status && (ioend->io_flags & IOMAP_F_SHARED)) {
 		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
 				ioend->io_offset, ioend->io_size);
 	}
 
 	/* Reserve log space if we might write beyond the on-disk inode size. */
 	if (!status &&
-	    (ioend->io_fork == XFS_COW_FORK ||
+	    ((ioend->io_flags & IOMAP_F_SHARED) ||
 	     ioend->io_type != IOMAP_UNWRITTEN) &&
 	    xfs_ioend_is_append(ioend) &&
 	    !ioend->io_private)
@@ -724,8 +729,8 @@ xfs_alloc_ioend(
 	ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
 
 	INIT_LIST_HEAD(&ioend->io_list);
-	ioend->io_fork = wpc->fork;
 	ioend->io_type = wpc->iomap.type;
+	ioend->io_flags = wpc->iomap.flags;
 	ioend->io_inode = inode;
 	ioend->io_size = 0;
 	ioend->io_offset = offset;
@@ -759,6 +764,24 @@ xfs_chain_bio(
 	return new;
 }
 
+static bool
+xfs_can_add_to_ioend(
+	struct xfs_writepage_ctx *wpc,
+	xfs_off_t		offset,
+	sector_t		sector)
+{
+	if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
+	    (wpc->ioend->io_flags & IOMAP_F_SHARED))
+		return false;
+	if (wpc->iomap.type != wpc->ioend->io_type)
+		return false;
+	if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
+		return false;
+	if (sector != bio_end_sector(wpc->ioend->io_bio))
+		return false;
+	return true;
+}
+
 /*
  * Test to see if we have an existing ioend structure that we could append to
  * first, otherwise finish off the current ioend and start another.
@@ -778,11 +801,7 @@ xfs_add_to_ioend(
 	unsigned		poff = offset & (PAGE_SIZE - 1);
 	bool			merged, same_page = false;
 
-	if (!wpc->ioend ||
-	    wpc->fork != wpc->ioend->io_fork ||
-	    wpc->iomap.type != wpc->ioend->io_type ||
-	    sector != bio_end_sector(wpc->ioend->io_bio) ||
-	    offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
+	if (!wpc->ioend || !xfs_can_add_to_ioend(wpc, offset, sector)) {
 		if (wpc->ioend)
 			list_add(&wpc->ioend->io_list, iolist);
 		wpc->ioend = xfs_alloc_ioend(inode, wpc, offset, sector, wbc);
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -13,8 +13,8 @@ extern struct bio_set xfs_ioend_bioset;
  */
 struct xfs_ioend {
 	struct list_head	io_list;	/* next ioend in chain */
-	int			io_fork;	/* inode fork written back */
 	u16			io_type;
+	u16			io_flags;	/* IOMAP_F_* */
 	struct inode		*io_inode;	/* file being written to */
 	size_t			io_size;	/* size of the extent */
 	xfs_off_t		io_offset;	/* offset in the file */
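
One subtlety in the diff above: xfs_ioend_can_merge() compares the flag bits with ^ while the new xfs_can_add_to_ioend() helper uses !=. Because both operands are masked with IOMAP_F_SHARED before the comparison, the two spellings test the same condition: ioends may be merged or appended to only when both sides agree on whether they are COW (shared) writeback. A hypothetical standalone check of that equivalence (demo code, not from the kernel; the flag value is illustrative):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define IOMAP_F_SHARED	0x04	/* illustrative value; see include/linux/iomap.h */

/* Masked first, so "^ yields nonzero" and "!=" agree for this test. */
static bool shared_mismatch_xor(uint16_t a, uint16_t b)
{
	return (a & IOMAP_F_SHARED) ^ (b & IOMAP_F_SHARED);
}

static bool shared_mismatch_ne(uint16_t a, uint16_t b)
{
	return (a & IOMAP_F_SHARED) != (b & IOMAP_F_SHARED);
}

int main(void)
{
	uint16_t flags[] = { 0, IOMAP_F_SHARED, IOMAP_F_SHARED | 0x01 };

	for (int i = 0; i < 3; i++)
		for (int j = 0; j < 3; j++)
			assert(shared_mismatch_xor(flags[i], flags[j]) ==
			       shared_mismatch_ne(flags[i], flags[j]));
	return 0;
}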