Commit 45eddb41 authored by Dave Chinner, committed by Darrick J. Wong

xfs: factor out forced iclog flushes

We force iclogs in several places - we need them all to have the
same cache flush semantics, so start by factoring out the iclog
force into a common helper.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
parent 0dc8f7f1
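
For orientation before reading the hunks, here is a minimal user-space sketch of the refactoring pattern this commit applies: a repeated grab-reference / switch-if-active / release sequence collapsed into one helper so that cache flush semantics can later be changed in a single place. Every name in the sketch (fake_iclog, switch_out, release, force_iclog) is an illustrative stand-in, not kernel code; the real helper, xlog_force_iclog(), and its call sites are in the diff below.

#include <stdio.h>

/* Illustrative stand-ins only -- not the kernel's types or functions. */
struct fake_iclog {
        int refcnt;     /* models iclog->ic_refcnt */
        int active;     /* models iclog->ic_state == XLOG_STATE_ACTIVE */
};

static void switch_out(struct fake_iclog *ic)   /* models xlog_state_switch_iclogs() */
{
        ic->active = 0;
}

static int release(struct fake_iclog *ic)       /* models xlog_state_release_iclog() */
{
        ic->refcnt--;
        return 0;
}

/* The factoring: one helper now owns the grab/switch/release sequence. */
static int force_iclog(struct fake_iclog *ic)
{
        ic->refcnt++;
        if (ic->active)
                switch_out(ic);
        return release(ic);
}

int main(void)
{
        struct fake_iclog ic = { .refcnt = 0, .active = 1 };

        /* Call sites that used to open-code the three steps now make one call. */
        if (force_iclog(&ic))
                fprintf(stderr, "force failed\n");
        printf("refcnt=%d active=%d\n", ic.refcnt, ic.active);
        return 0;
}
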
@@ -778,6 +778,20 @@ xfs_log_mount_cancel(
 	xfs_log_unmount(mp);
 }
 
+/*
+ * Flush out the iclog to disk ensuring that device caches are flushed and
+ * the iclog hits stable storage before any completion waiters are woken.
+ */
+static inline int
+xlog_force_iclog(
+	struct xlog_in_core	*iclog)
+{
+	atomic_inc(&iclog->ic_refcnt);
+	if (iclog->ic_state == XLOG_STATE_ACTIVE)
+		xlog_state_switch_iclogs(iclog->ic_log, iclog, 0);
+	return xlog_state_release_iclog(iclog->ic_log, iclog, 0);
+}
+
 /*
  * Wait for the iclog and all prior iclogs to be written disk as required by the
  * log force state machine. Waiting on ic_force_wait ensures iclog completions
@@ -863,18 +877,8 @@ xlog_unmount_write(
 
 	spin_lock(&log->l_icloglock);
 	iclog = log->l_iclog;
-	atomic_inc(&iclog->ic_refcnt);
-	if (iclog->ic_state == XLOG_STATE_ACTIVE)
-		xlog_state_switch_iclogs(log, iclog, 0);
-	else
-		ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC ||
-		       iclog->ic_state == XLOG_STATE_IOERROR);
-	/*
-	 * Ensure the journal is fully flushed and on stable storage once the
-	 * iclog containing the unmount record is written.
-	 */
-	iclog->ic_flags |= (XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);
-	error = xlog_state_release_iclog(log, iclog, 0);
+	error = xlog_force_iclog(iclog);
 	xlog_wait_on_iclog(iclog);
 
 	if (tic) {
@@ -3201,17 +3205,9 @@ xfs_log_force(
 		iclog = iclog->ic_prev;
 	} else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
 		if (atomic_read(&iclog->ic_refcnt) == 0) {
-			/*
-			 * We are the only one with access to this iclog.
-			 *
-			 * Flush it out now. There should be a roundoff of zero
-			 * to show that someone has already taken care of the
-			 * roundoff from the previous sync.
-			 */
-			atomic_inc(&iclog->ic_refcnt);
+			/* We have exclusive access to this iclog. */
 			lsn = be64_to_cpu(iclog->ic_header.h_lsn);
-			xlog_state_switch_iclogs(log, iclog, 0);
-			if (xlog_state_release_iclog(log, iclog, 0))
+			if (xlog_force_iclog(iclog))
 				goto out_error;
 
 			if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn)
@@ -3289,9 +3285,7 @@ xlog_force_lsn(
 					&log->l_icloglock);
 			return -EAGAIN;
 		}
-		atomic_inc(&iclog->ic_refcnt);
-		xlog_state_switch_iclogs(log, iclog, 0);
-		if (xlog_state_release_iclog(log, iclog, 0))
+		if (xlog_force_iclog(iclog))
 			goto out_error;
 		if (log_flushed)
 			*log_flushed = 1;