Commit e8aaba9a authored by Dave Chinner, committed by Dave Chinner

xfs: xfs_buf_ioend and xfs_buf_iodone_work duplicate functionality

We do some work in xfs_buf_ioend, and some work in
xfs_buf_iodone_work, but much of that functionality is the same.
This work can all be done in a single function, leaving
xfs_buf_iodone just a wrapper to determine if we should execute it
by workqueue or directly. Hence rename xfs_buf_iodone_work to
xfs_buf_ioend(), and add a new xfs_buf_ioend_async() for places that
need async processing.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dave Chinner <david@fromorbit.com>
parent e11bb805
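
For orientation before the hunks: the patch ends up with a single completion
function, xfs_buf_ioend(), plus a deferral wrapper, xfs_buf_ioend_async(),
and each call site picks one. The dispatch rule from the xfs_buf_iorequest()
hunk below can be sketched in plain userspace C; the struct, field names and
helpers here are illustrative stand-ins, not the kernel's types:

	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative stand-ins for the buffer state driving the choice. */
	struct buf {
		int	error;	/* like bp->b_error */
		bool	async;	/* like bp->b_flags & XBF_ASYNC */
	};

	static void ioend(struct buf *bp)		/* like xfs_buf_ioend() */
	{
		printf("completion run inline, error=%d\n", bp->error);
	}

	static void ioend_async(struct buf *bp)	/* like xfs_buf_ioend_async() */
	{
		printf("completion queued to a workqueue, error=%d\n", bp->error);
	}

	/*
	 * Errors and synchronous I/O complete inline so the caller never
	 * returns with completion pending; async I/O defers to a workqueue.
	 */
	static void dispatch(struct buf *bp)
	{
		if (bp->error || !bp->async)
			ioend(bp);
		else
			ioend_async(bp);
	}

	int main(void)
	{
		struct buf sync_buf = { 0, false }, async_buf = { 0, true };

		dispatch(&sync_buf);	/* completes inline */
		dispatch(&async_buf);	/* defers to the workqueue */
		return 0;
	}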

--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -998,26 +998,30 @@ xfs_buf_wait_unpin(
  * Buffer Utility Routines
  */
 
-STATIC void
-xfs_buf_iodone_work(
-	struct work_struct	*work)
+void
+xfs_buf_ioend(
+	struct xfs_buf	*bp)
 {
-	struct xfs_buf		*bp =
-		container_of(work, xfs_buf_t, b_iodone_work);
-	bool			read = !!(bp->b_flags & XBF_READ);
+	bool		read = bp->b_flags & XBF_READ;
+
+	trace_xfs_buf_iodone(bp, _RET_IP_);
 
 	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
 
-	/* only validate buffers that were read without errors */
-	if (read && bp->b_ops && !bp->b_error && (bp->b_flags & XBF_DONE))
+	/* Only validate buffers that were read without errors */
+	if (read && !bp->b_error && bp->b_ops) {
+		ASSERT(!bp->b_iodone);
 		bp->b_ops->verify_read(bp);
+	}
+
+	if (!bp->b_error)
+		bp->b_flags |= XBF_DONE;
 
 	if (bp->b_iodone)
 		(*(bp->b_iodone))(bp);
 	else if (bp->b_flags & XBF_ASYNC)
 		xfs_buf_relse(bp);
 	else {
+		ASSERT(read && bp->b_ops);
 		complete(&bp->b_iowait);
 
 		/* release the !XBF_ASYNC ref now we are done. */
@@ -1025,30 +1029,22 @@ xfs_buf_iodone_work(
 	}
 }
 
-void
-xfs_buf_ioend(
-	struct xfs_buf	*bp,
-	int		schedule)
+static void
+xfs_buf_ioend_work(
+	struct work_struct	*work)
 {
-	bool		read = !!(bp->b_flags & XBF_READ);
-
-	trace_xfs_buf_iodone(bp, _RET_IP_);
+	struct xfs_buf		*bp =
+		container_of(work, xfs_buf_t, b_iodone_work);
 
-	if (bp->b_error == 0)
-		bp->b_flags |= XBF_DONE;
+	xfs_buf_ioend(bp);
+}
 
-	if (bp->b_iodone || (read && bp->b_ops) || (bp->b_flags & XBF_ASYNC)) {
-		if (schedule) {
-			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
-			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
-		} else {
-			xfs_buf_iodone_work(&bp->b_iodone_work);
-		}
-	} else {
-		bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
-		complete(&bp->b_iowait);
-		xfs_buf_rele(bp);
-	}
+void
+xfs_buf_ioend_async(
+	struct xfs_buf	*bp)
+{
+	INIT_WORK(&bp->b_iodone_work, xfs_buf_ioend_work);
+	queue_work(xfslogd_workqueue, &bp->b_iodone_work);
 }
 
 void
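
The new xfs_buf_ioend_work() leans on the kernel's container_of() to step
from the embedded work_struct back to the owning buffer before calling the
common xfs_buf_ioend(). A minimal userspace rendering of that idiom, with
demo types standing in for the kernel's (the real macro lives in the kernel
headers and is more carefully typed):

	#include <stddef.h>
	#include <stdio.h>

	/* Recover the enclosing structure from a pointer to one of its
	 * members, as the kernel's container_of() does. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct work_struct { int pending; };	/* demo stand-in */

	struct demo_buf {
		int			b_error;
		struct work_struct	b_iodone_work;	/* embedded member */
	};

	static void ioend_work(struct work_struct *work)
	{
		/* The same step xfs_buf_ioend_work() performs. */
		struct demo_buf *bp =
			container_of(work, struct demo_buf, b_iodone_work);

		printf("completing buffer, b_error=%d\n", bp->b_error);
	}

	int main(void)
	{
		struct demo_buf buf = { .b_error = 0 };

		/* A workqueue would invoke the handler with this pointer. */
		ioend_work(&buf.b_iodone_work);
		return 0;
	}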
@@ -1099,7 +1095,7 @@ xfs_bioerror(
 	XFS_BUF_UNDONE(bp);
 	xfs_buf_stale(bp);
-	xfs_buf_ioend(bp, 0);
+	xfs_buf_ioend(bp);
 
 	return -EIO;
 }
@@ -1185,15 +1181,6 @@ xfs_bwrite(
 	return error;
 }
 
-STATIC void
-_xfs_buf_ioend(
-	xfs_buf_t		*bp,
-	int			schedule)
-{
-	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
-		xfs_buf_ioend(bp, schedule);
-}
-
 STATIC void
 xfs_buf_bio_end_io(
 	struct bio		*bio,
@@ -1211,7 +1198,8 @@ xfs_buf_bio_end_io(
 	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
 		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
 
-	_xfs_buf_ioend(bp, 1);
+	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
+		xfs_buf_ioend_async(bp);
 	bio_put(bio);
 }
@@ -1423,15 +1411,17 @@ xfs_buf_iorequest(
 	/*
 	 * If _xfs_buf_ioapply failed or we are doing synchronous IO that
 	 * completes extremely quickly, we can get back here with only the IO
-	 * reference we took above. _xfs_buf_ioend will drop it to zero. Run
-	 * completion processing synchronously so that we don't return to the
-	 * caller with completion still pending. This avoids unnecessary context
-	 * switches associated with the end_io workqueue.
+	 * reference we took above. If we drop it to zero, run completion
+	 * processing synchronously so that we don't return to the caller with
+	 * completion still pending. This avoids unnecessary context switches
+	 * associated with the end_io workqueue.
 	 */
-	if (bp->b_error || !(bp->b_flags & XBF_ASYNC))
-		_xfs_buf_ioend(bp, 0);
-	else
-		_xfs_buf_ioend(bp, 1);
+	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
+		if (bp->b_error || !(bp->b_flags & XBF_ASYNC))
+			xfs_buf_ioend(bp);
+		else
+			xfs_buf_ioend_async(bp);
+	}
 
 	xfs_buf_rele(bp);
 }
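
The open-coded atomic_dec_and_test() above (formerly hidden inside
_xfs_buf_ioend()) is the usual "last reference runs completion" idiom:
submission holds its own b_io_remaining count alongside each in-flight bio,
so whichever side drops the count to zero, bio completion or the submitter,
runs ioend exactly once. A compilable C11 sketch of the same idea, with
illustrative names:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in for bp->b_io_remaining: one count for an in-flight bio
	 * plus the submitter's own reference taken before issuing I/O. */
	static atomic_int io_remaining = 2;

	/* Like the kernel's atomic_dec_and_test(): true only for the caller
	 * that drops the count to zero (fetch_sub returns the old value). */
	static bool dec_and_test(atomic_int *v)
	{
		return atomic_fetch_sub(v, 1) == 1;
	}

	int main(void)
	{
		if (dec_and_test(&io_remaining))	/* bio end_io path */
			puts("bio end_io: last ref, queue async completion");

		if (dec_and_test(&io_remaining))	/* submitter's drop */
			puts("submitter: last ref, run completion inline");
		return 0;
	}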

--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -286,7 +286,7 @@ extern void xfs_buf_unlock(xfs_buf_t *);
 
 /* Buffer Read and Write Routines */
 extern int xfs_bwrite(struct xfs_buf *bp);
-extern void xfs_buf_ioend(xfs_buf_t *, int);
+extern void xfs_buf_ioend(struct xfs_buf *bp);
 extern void xfs_buf_ioerror(xfs_buf_t *, int);
 extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
 extern void xfs_buf_iorequest(xfs_buf_t *);

--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -491,7 +491,7 @@ xfs_buf_item_unpin(
 		xfs_buf_ioerror(bp, -EIO);
 		XFS_BUF_UNDONE(bp);
 		xfs_buf_stale(bp);
-		xfs_buf_ioend(bp, 0);
+		xfs_buf_ioend(bp);
 	}
 }
@@ -1115,7 +1115,7 @@ xfs_buf_iodone_callbacks(
 	xfs_buf_do_callbacks(bp);
 	bp->b_fspriv = NULL;
 	bp->b_iodone = NULL;
-	xfs_buf_ioend(bp, 0);
+	xfs_buf_ioend(bp);
 }
 
 /*

--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -3056,7 +3056,7 @@ xfs_iflush_cluster(
 		XFS_BUF_UNDONE(bp);
 		xfs_buf_stale(bp);
 		xfs_buf_ioerror(bp, -EIO);
-		xfs_buf_ioend(bp, 0);
+		xfs_buf_ioend(bp);
 	} else {
 		xfs_buf_stale(bp);
 		xfs_buf_relse(bp);

--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1678,7 +1678,7 @@ xlog_bdstrat(
 	if (iclog->ic_state & XLOG_STATE_IOERROR) {
 		xfs_buf_ioerror(bp, -EIO);
 		xfs_buf_stale(bp);
-		xfs_buf_ioend(bp, 0);
+		xfs_buf_ioend(bp);
 		/*
 		 * It would seem logical to return EIO here, but we rely on
 		 * the log state machine to propagate I/O errors instead of

--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -383,7 +383,7 @@ xlog_recover_iodone(
 			SHUTDOWN_META_IO_ERROR);
 	}
 	bp->b_iodone = NULL;
-	xfs_buf_ioend(bp, 0);
+	xfs_buf_ioend(bp);
 }
 
 /*