Commit 57e80956 authored by Matthew Wilcox, committed by Darrick J. Wong

xfs: Rename xa_ elements to ail_

This is a simple rename, except that xa_ail becomes ail_head.
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
parent ae23395d
@@ -460,7 +460,7 @@ xfs_buf_item_unpin(
 list_del_init(&bp->b_li_list);
 bp->b_iodone = NULL;
 } else {
-spin_lock(&ailp->xa_lock);
+spin_lock(&ailp->ail_lock);
 xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR);
 xfs_buf_item_relse(bp);
 ASSERT(bp->b_log_item == NULL);
@@ -1057,12 +1057,12 @@ xfs_buf_do_callbacks_fail(
 lip = list_first_entry(&bp->b_li_list, struct xfs_log_item,
 li_bio_list);
 ailp = lip->li_ailp;
-spin_lock(&ailp->xa_lock);
+spin_lock(&ailp->ail_lock);
 list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
 if (lip->li_ops->iop_error)
 lip->li_ops->iop_error(lip, bp);
 }
-spin_unlock(&ailp->xa_lock);
+spin_unlock(&ailp->ail_lock);
 }
 static bool
@@ -1226,7 +1226,7 @@ xfs_buf_iodone(
 *
 * Either way, AIL is useless if we're forcing a shutdown.
 */
-spin_lock(&ailp->xa_lock);
+spin_lock(&ailp->ail_lock);
 xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
 xfs_buf_item_free(BUF_ITEM(lip));
 }
@@ -1246,7 +1246,7 @@ xfs_buf_resubmit_failed_buffers(
 /*
 * Clear XFS_LI_FAILED flag from all items before resubmit
 *
-* XFS_LI_FAILED set/clear is protected by xa_lock, caller this
+* XFS_LI_FAILED set/clear is protected by ail_lock, caller this
 * function already have it acquired
 */
 list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
...
@@ -918,7 +918,7 @@ xfs_qm_dqflush_done(
 (lip->li_flags & XFS_LI_FAILED))) {
 /* xfs_trans_ail_delete() drops the AIL lock. */
-spin_lock(&ailp->xa_lock);
+spin_lock(&ailp->ail_lock);
 if (lip->li_lsn == qip->qli_flush_lsn) {
 xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
 } else {
@@ -928,7 +928,7 @@ xfs_qm_dqflush_done(
 */
 if (lip->li_flags & XFS_LI_FAILED)
 xfs_clear_li_failed(lip);
-spin_unlock(&ailp->xa_lock);
+spin_unlock(&ailp->ail_lock);
 }
 }
...
@@ -157,8 +157,9 @@ xfs_dquot_item_error(
 STATIC uint
 xfs_qm_dquot_logitem_push(
 struct xfs_log_item *lip,
-struct list_head *buffer_list) __releases(&lip->li_ailp->xa_lock)
-__acquires(&lip->li_ailp->xa_lock)
+struct list_head *buffer_list)
+__releases(&lip->li_ailp->ail_lock)
+__acquires(&lip->li_ailp->ail_lock)
 {
 struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot;
 struct xfs_buf *bp = lip->li_buf;
@@ -205,7 +206,7 @@ xfs_qm_dquot_logitem_push(
 goto out_unlock;
 }
-spin_unlock(&lip->li_ailp->xa_lock);
+spin_unlock(&lip->li_ailp->ail_lock);
 error = xfs_qm_dqflush(dqp, &bp);
 if (error) {
@@ -217,7 +218,7 @@ xfs_qm_dquot_logitem_push(
 xfs_buf_relse(bp);
 }
-spin_lock(&lip->li_ailp->xa_lock);
+spin_lock(&lip->li_ailp->ail_lock);
 out_unlock:
 xfs_dqunlock(dqp);
 return rval;
@@ -400,7 +401,7 @@ xfs_qm_qoffend_logitem_committed(
 * Delete the qoff-start logitem from the AIL.
 * xfs_trans_ail_delete() drops the AIL lock.
 */
-spin_lock(&ailp->xa_lock);
+spin_lock(&ailp->ail_lock);
 xfs_trans_ail_delete(ailp, &qfs->qql_item, SHUTDOWN_LOG_IO_ERROR);
 kmem_free(qfs->qql_item.li_lv_shadow);
...
@@ -502,8 +502,8 @@ STATIC uint
 xfs_inode_item_push(
 struct xfs_log_item *lip,
 struct list_head *buffer_list)
-__releases(&lip->li_ailp->xa_lock)
-__acquires(&lip->li_ailp->xa_lock)
+__releases(&lip->li_ailp->ail_lock)
+__acquires(&lip->li_ailp->ail_lock)
 {
 struct xfs_inode_log_item *iip = INODE_ITEM(lip);
 struct xfs_inode *ip = iip->ili_inode;
@@ -562,7 +562,7 @@ xfs_inode_item_push(
 ASSERT(iip->ili_fields != 0 || XFS_FORCED_SHUTDOWN(ip->i_mount));
 ASSERT(iip->ili_logged == 0 || XFS_FORCED_SHUTDOWN(ip->i_mount));
-spin_unlock(&lip->li_ailp->xa_lock);
+spin_unlock(&lip->li_ailp->ail_lock);
 error = xfs_iflush(ip, &bp);
 if (!error) {
@@ -571,7 +571,7 @@ xfs_inode_item_push(
 xfs_buf_relse(bp);
 }
-spin_lock(&lip->li_ailp->xa_lock);
+spin_lock(&lip->li_ailp->ail_lock);
 out_unlock:
 xfs_iunlock(ip, XFS_ILOCK_SHARED);
 return rval;
@@ -759,7 +759,7 @@ xfs_iflush_done(
 bool mlip_changed = false;
 /* this is an opencoded batch version of xfs_trans_ail_delete */
-spin_lock(&ailp->xa_lock);
+spin_lock(&ailp->ail_lock);
 list_for_each_entry(blip, &tmp, li_bio_list) {
 if (INODE_ITEM(blip)->ili_logged &&
 blip->li_lsn == INODE_ITEM(blip)->ili_flush_lsn)
@@ -770,15 +770,15 @@ xfs_iflush_done(
 }
 if (mlip_changed) {
-if (!XFS_FORCED_SHUTDOWN(ailp->xa_mount))
-xlog_assign_tail_lsn_locked(ailp->xa_mount);
-if (list_empty(&ailp->xa_ail))
-wake_up_all(&ailp->xa_empty);
+if (!XFS_FORCED_SHUTDOWN(ailp->ail_mount))
+xlog_assign_tail_lsn_locked(ailp->ail_mount);
+if (list_empty(&ailp->ail_head))
+wake_up_all(&ailp->ail_empty);
 }
-spin_unlock(&ailp->xa_lock);
+spin_unlock(&ailp->ail_lock);
 if (mlip_changed)
-xfs_log_space_wake(ailp->xa_mount);
+xfs_log_space_wake(ailp->ail_mount);
 }
 /*
...
@@ -1149,7 +1149,7 @@ xlog_assign_tail_lsn_locked(
 struct xfs_log_item *lip;
 xfs_lsn_t tail_lsn;
-assert_spin_locked(&mp->m_ail->xa_lock);
+assert_spin_locked(&mp->m_ail->ail_lock);
 /*
 * To make sure we always have a valid LSN for the log tail we keep
@@ -1172,9 +1172,9 @@ xlog_assign_tail_lsn(
 {
 xfs_lsn_t tail_lsn;
-spin_lock(&mp->m_ail->xa_lock);
+spin_lock(&mp->m_ail->ail_lock);
 tail_lsn = xlog_assign_tail_lsn_locked(mp);
-spin_unlock(&mp->m_ail->xa_lock);
+spin_unlock(&mp->m_ail->ail_lock);
 return tail_lsn;
 }
...
@@ -3427,7 +3427,7 @@ xlog_recover_efi_pass2(
 }
 atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
-spin_lock(&log->l_ailp->xa_lock);
+spin_lock(&log->l_ailp->ail_lock);
 /*
 * The EFI has two references. One for the EFD and one for EFI to ensure
 * it makes it into the AIL. Insert the EFI into the AIL directly and
@@ -3470,7 +3470,7 @@ xlog_recover_efd_pass2(
 * Search for the EFI with the id in the EFD format structure in the
 * AIL.
 */
-spin_lock(&ailp->xa_lock);
+spin_lock(&ailp->ail_lock);
 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
 while (lip != NULL) {
 if (lip->li_type == XFS_LI_EFI) {
@@ -3480,9 +3480,9 @@ xlog_recover_efd_pass2(
 * Drop the EFD reference to the EFI. This
 * removes the EFI from the AIL and frees it.
 */
-spin_unlock(&ailp->xa_lock);
+spin_unlock(&ailp->ail_lock);
 xfs_efi_release(efip);
-spin_lock(&ailp->xa_lock);
+spin_lock(&ailp->ail_lock);
 break;
 }
 }
@@ -3490,7 +3490,7 @@ xlog_recover_efd_pass2(
 }
 xfs_trans_ail_cursor_done(&cur);
-spin_unlock(&ailp->xa_lock);
+spin_unlock(&ailp->ail_lock);
 return 0;
 }
@@ -3523,7 +3523,7 @@ xlog_recover_rui_pass2(
 }
 atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);
-spin_lock(&log->l_ailp->xa_lock);
+spin_lock(&log->l_ailp->ail_lock);
 /*
 * The RUI has two references. One for the RUD and one for RUI to ensure
 * it makes it into the AIL. Insert the RUI into the AIL directly and
@@ -3563,7 +3563,7 @@ xlog_recover_rud_pass2(
 * Search for the RUI with the id in the RUD format structure in the
 * AIL.
 */
-spin_lock(&ailp->xa_lock);
+spin_lock(&ailp->ail_lock);
 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
 while (lip != NULL) {
 if (lip->li_type == XFS_LI_RUI) {
@@ -3573,9 +3573,9 @@ xlog_recover_rud_pass2(
 * Drop the RUD reference to the RUI. This
 * removes the RUI from the AIL and frees it.
 */
-spin_unlock(&ailp->xa_lock);
+spin_unlock(&ailp->ail_lock);
 xfs_rui_release(ruip);
-spin_lock(&ailp->xa_lock);
+spin_lock(&ailp->ail_lock);
 break;
 }
 }
@@ -3583,7 +3583,7 @@ xlog_recover_rud_pass2(
 }
 xfs_trans_ail_cursor_done(&cur);
-spin_unlock(&ailp->xa_lock);
+spin_unlock(&ailp->ail_lock);
 return 0;
 }
@@ -3639,7 +3639,7 @@ xlog_recover_cui_pass2(
 }
 atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
-spin_lock(&log->l_ailp->xa_lock);
+spin_lock(&log->l_ailp->ail_lock);
 /*
 * The CUI has two references. One for the CUD and one for CUI to ensure
 * it makes it into the AIL. Insert the CUI into the AIL directly and
@@ -3680,7 +3680,7 @@ xlog_recover_cud_pass2(
 * Search for the CUI with the id in the CUD format structure in the
 * AIL.
 */
-spin_lock(&ailp->xa_lock);
+spin_lock(&ailp->ail_lock);
 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
 while (lip != NULL) {
 if (lip->li_type == XFS_LI_CUI) {
@@ -3690,9 +3690,9 @@ xlog_recover_cud_pass2(
 * Drop the CUD reference to the CUI. This
 * removes the CUI from the AIL and frees it.
 */
-spin_unlock(&ailp->xa_lock);
+spin_unlock(&ailp->ail_lock);
 xfs_cui_release(cuip);
-spin_lock(&ailp->xa_lock);
+spin_lock(&ailp->ail_lock);
 break;
 }
 }
@@ -3700,7 +3700,7 @@ xlog_recover_cud_pass2(
 }
 xfs_trans_ail_cursor_done(&cur);
-spin_unlock(&ailp->xa_lock);
+spin_unlock(&ailp->ail_lock);
 return 0;
 }
@@ -3758,7 +3758,7 @@ xlog_recover_bui_pass2(
 }
 atomic_set(&buip->bui_next_extent, bui_formatp->bui_nextents);
-spin_lock(&log->l_ailp->xa_lock);
+spin_lock(&log->l_ailp->ail_lock);
 /*
 * The RUI has two references. One for the RUD and one for RUI to ensure
 * it makes it into the AIL. Insert the RUI into the AIL directly and
@@ -3799,7 +3799,7 @@ xlog_recover_bud_pass2(
 * Search for the BUI with the id in the BUD format structure in the
 * AIL.
 */
-spin_lock(&ailp->xa_lock);
+spin_lock(&ailp->ail_lock);
 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
 while (lip != NULL) {
 if (lip->li_type == XFS_LI_BUI) {
@@ -3809,9 +3809,9 @@ xlog_recover_bud_pass2(
 * Drop the BUD reference to the BUI. This
 * removes the BUI from the AIL and frees it.
 */
-spin_unlock(&ailp->xa_lock);
+spin_unlock(&ailp->ail_lock);
 xfs_bui_release(buip);
-spin_lock(&ailp->xa_lock);
+spin_lock(&ailp->ail_lock);
 break;
 }
 }
@@ -3819,7 +3819,7 @@ xlog_recover_bud_pass2(
 }
 xfs_trans_ail_cursor_done(&cur);
-spin_unlock(&ailp->xa_lock);
+spin_unlock(&ailp->ail_lock);
 return 0;
 }
@@ -4652,9 +4652,9 @@ xlog_recover_process_efi(
 if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags))
 return 0;
-spin_unlock(&ailp->xa_lock);
+spin_unlock(&ailp->ail_lock);
 error = xfs_efi_recover(mp, efip);
-spin_lock(&ailp->xa_lock);
+spin_lock(&ailp->ail_lock);
 return error;
 }
@@ -4670,9 +4670,9 @@ xlog_recover_cancel_efi(
 efip = container_of(lip, struct xfs_efi_log_item, efi_item);
-spin_unlock(&ailp->xa_lock);
+spin_unlock(&ailp->ail_lock);
 xfs_efi_release(efip);
-spin_lock(&ailp->xa_lock);
+spin_lock(&ailp->ail_lock);
 }
 /* Recover the RUI if necessary. */
@@ -4692,9 +4692,9 @@ xlog_recover_process_rui(
 if (test_bit(XFS_RUI_RECOVERED, &ruip->rui_flags))
 return 0;
-spin_unlock(&ailp->xa_lock);
+spin_unlock(&ailp->ail_lock);
 error = xfs_rui_recover(mp, ruip);
-spin_lock(&ailp->xa_lock);
+spin_lock(&ailp->ail_lock);
 return error;
 }
@@ -4710,9 +4710,9 @@ xlog_recover_cancel_rui(
 ruip = container_of(lip, struct xfs_rui_log_item, rui_item);
-spin_unlock(&ailp->xa_lock);
+spin_unlock(&ailp->ail_lock);
 xfs_rui_release(ruip);
-spin_lock(&ailp->xa_lock);
+spin_lock(&ailp->ail_lock);
 }
 /* Recover the CUI if necessary. */
@@ -4733,9 +4733,9 @@ xlog_recover_process_cui(
 if (test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags))
 return 0;
-spin_unlock(&ailp->xa_lock);
+spin_unlock(&ailp->ail_lock);
 error = xfs_cui_recover(mp, cuip, dfops);
-spin_lock(&ailp->xa_lock);
+spin_lock(&ailp->ail_lock);
 return error;
 }
@@ -4751,9 +4751,9 @@ xlog_recover_cancel_cui(
 cuip = container_of(lip, struct xfs_cui_log_item, cui_item);
-spin_unlock(&ailp->xa_lock);
+spin_unlock(&ailp->ail_lock);
 xfs_cui_release(cuip);
-spin_lock(&ailp->xa_lock);
+spin_lock(&ailp->ail_lock);
 }
 /* Recover the BUI if necessary. */
@@ -4774,9 +4774,9 @@ xlog_recover_process_bui(
 if (test_bit(XFS_BUI_RECOVERED, &buip->bui_flags))
 return 0;
-spin_unlock(&ailp->xa_lock);
+spin_unlock(&ailp->ail_lock);
 error = xfs_bui_recover(mp, buip, dfops);
-spin_lock(&ailp->xa_lock);
+spin_lock(&ailp->ail_lock);
 return error;
 }
@@ -4792,9 +4792,9 @@ xlog_recover_cancel_bui(
 buip = container_of(lip, struct xfs_bui_log_item, bui_item);
-spin_unlock(&ailp->xa_lock);
+spin_unlock(&ailp->ail_lock);
 xfs_bui_release(buip);
-spin_lock(&ailp->xa_lock);
+spin_lock(&ailp->ail_lock);
 }
 /* Is this log item a deferred action intent? */
@@ -4882,7 +4882,7 @@ xlog_recover_process_intents(
 #endif
 ailp = log->l_ailp;
-spin_lock(&ailp->xa_lock);
+spin_lock(&ailp->ail_lock);
 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
 #if defined(DEBUG) || defined(XFS_WARN)
 last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
@@ -4936,7 +4936,7 @@ xlog_recover_process_intents(
 }
 out:
 xfs_trans_ail_cursor_done(&cur);
-spin_unlock(&ailp->xa_lock);
+spin_unlock(&ailp->ail_lock);
 if (error)
 xfs_defer_cancel(&dfops);
 else
@@ -4959,7 +4959,7 @@ xlog_recover_cancel_intents(
 struct xfs_ail *ailp;
 ailp = log->l_ailp;
-spin_lock(&ailp->xa_lock);
+spin_lock(&ailp->ail_lock);
 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
 while (lip != NULL) {
 /*
@@ -4993,7 +4993,7 @@ xlog_recover_cancel_intents(
 }
 xfs_trans_ail_cursor_done(&cur);
-spin_unlock(&ailp->xa_lock);
+spin_unlock(&ailp->ail_lock);
 return error;
 }
...
@@ -803,8 +803,8 @@ xfs_log_item_batch_insert(
 {
 int i;
-spin_lock(&ailp->xa_lock);
-/* xfs_trans_ail_update_bulk drops ailp->xa_lock */
+spin_lock(&ailp->ail_lock);
+/* xfs_trans_ail_update_bulk drops ailp->ail_lock */
 xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);
 for (i = 0; i < nr_items; i++) {
@@ -847,9 +847,9 @@ xfs_trans_committed_bulk(
 struct xfs_ail_cursor cur;
 int i = 0;
-spin_lock(&ailp->xa_lock);
+spin_lock(&ailp->ail_lock);
 xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
-spin_unlock(&ailp->xa_lock);
+spin_unlock(&ailp->ail_lock);
 /* unpin all the log items */
 for (lv = log_vector; lv; lv = lv->lv_next ) {
@@ -869,7 +869,7 @@ xfs_trans_committed_bulk(
 * object into the AIL as we are in a shutdown situation.
 */
 if (aborted) {
-ASSERT(XFS_FORCED_SHUTDOWN(ailp->xa_mount));
+ASSERT(XFS_FORCED_SHUTDOWN(ailp->ail_mount));
 lip->li_ops->iop_unpin(lip, 1);
 continue;
 }
@@ -883,11 +883,11 @@ xfs_trans_committed_bulk(
 * not affect the AIL cursor the bulk insert path is
 * using.
 */
-spin_lock(&ailp->xa_lock);
+spin_lock(&ailp->ail_lock);
 if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
 xfs_trans_ail_update(ailp, lip, item_lsn);
 else
-spin_unlock(&ailp->xa_lock);
+spin_unlock(&ailp->ail_lock);
 lip->li_ops->iop_unpin(lip, 0);
 continue;
 }
@@ -905,9 +905,9 @@ xfs_trans_committed_bulk(
 if (i)
 xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);
-spin_lock(&ailp->xa_lock);
+spin_lock(&ailp->ail_lock);
 xfs_trans_ail_cursor_done(&cur);
-spin_unlock(&ailp->xa_lock);
+spin_unlock(&ailp->ail_lock);
 }
 /*
...
This diff is collapsed.
@@ -431,8 +431,8 @@ xfs_trans_brelse(
 * If the fs has shutdown and we dropped the last reference, it may fall
 * on us to release a (possibly dirty) bli if it never made it to the
 * AIL (e.g., the aborted unpin already happened and didn't release it
-* due to our reference). Since we're already shutdown and need xa_lock,
-* just force remove from the AIL and release the bli here.
+* due to our reference). Since we're already shutdown and need
+* ail_lock, just force remove from the AIL and release the bli here.
 */
 if (XFS_FORCED_SHUTDOWN(tp->t_mountp) && freed) {
 xfs_trans_ail_remove(&bip->bli_item, SHUTDOWN_LOG_IO_ERROR);
...
@@ -65,17 +65,17 @@ struct xfs_ail_cursor {
 * Eventually we need to drive the locking in here as well.
 */
 struct xfs_ail {
-struct xfs_mount *xa_mount;
-struct task_struct *xa_task;
-struct list_head xa_ail;
-xfs_lsn_t xa_target;
-xfs_lsn_t xa_target_prev;
-struct list_head xa_cursors;
-spinlock_t xa_lock;
-xfs_lsn_t xa_last_pushed_lsn;
-int xa_log_flush;
-struct list_head xa_buf_list;
-wait_queue_head_t xa_empty;
+struct xfs_mount *ail_mount;
+struct task_struct *ail_task;
+struct list_head ail_head;
+xfs_lsn_t ail_target;
+xfs_lsn_t ail_target_prev;
+struct list_head ail_cursors;
+spinlock_t ail_lock;
+xfs_lsn_t ail_last_pushed_lsn;
+int ail_log_flush;
+struct list_head ail_buf_list;
+wait_queue_head_t ail_empty;
 };
 /*
@@ -84,7 +84,7 @@ struct xfs_ail {
 void xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
 struct xfs_ail_cursor *cur,
 struct xfs_log_item **log_items, int nr_items,
-xfs_lsn_t lsn) __releases(ailp->xa_lock);
+xfs_lsn_t lsn) __releases(ailp->ail_lock);
 /*
 * Return a pointer to the first item in the AIL. If the AIL is empty, then
 * return NULL.
@@ -93,7 +93,7 @@ static inline struct xfs_log_item *
 xfs_ail_min(
 struct xfs_ail *ailp)
 {
-return list_first_entry_or_null(&ailp->xa_ail, struct xfs_log_item,
+return list_first_entry_or_null(&ailp->ail_head, struct xfs_log_item,
 li_ail);
 }
@@ -101,14 +101,14 @@ static inline void
 xfs_trans_ail_update(
 struct xfs_ail *ailp,
 struct xfs_log_item *lip,
-xfs_lsn_t lsn) __releases(ailp->xa_lock)
+xfs_lsn_t lsn) __releases(ailp->ail_lock)
 {
 xfs_trans_ail_update_bulk(ailp, NULL, &lip, 1, lsn);
 }
 bool xfs_ail_delete_one(struct xfs_ail *ailp, struct xfs_log_item *lip);
 void xfs_trans_ail_delete(struct xfs_ail *ailp, struct xfs_log_item *lip,
-int shutdown_type) __releases(ailp->xa_lock);
+int shutdown_type) __releases(ailp->ail_lock);
 static inline void
 xfs_trans_ail_remove(
@@ -117,12 +117,12 @@ xfs_trans_ail_remove(
 {
 struct xfs_ail *ailp = lip->li_ailp;
-spin_lock(&ailp->xa_lock);
+spin_lock(&ailp->ail_lock);
 /* xfs_trans_ail_delete() drops the AIL lock */
 if (lip->li_flags & XFS_LI_IN_AIL)
 xfs_trans_ail_delete(ailp, lip, shutdown_type);
 else
-spin_unlock(&ailp->xa_lock);
+spin_unlock(&ailp->ail_lock);
 }
 void xfs_ail_push(struct xfs_ail *, xfs_lsn_t);
@@ -149,9 +149,9 @@ xfs_trans_ail_copy_lsn(
 xfs_lsn_t *src)
 {
 ASSERT(sizeof(xfs_lsn_t) == 8); /* don't lock if it shrinks */
-spin_lock(&ailp->xa_lock);
+spin_lock(&ailp->ail_lock);
 *dst = *src;
-spin_unlock(&ailp->xa_lock);
+spin_unlock(&ailp->ail_lock);
 }
 #else
 static inline void
@@ -172,7 +172,7 @@ xfs_clear_li_failed(
 struct xfs_buf *bp = lip->li_buf;
 ASSERT(lip->li_flags & XFS_LI_IN_AIL);
-lockdep_assert_held(&lip->li_ailp->xa_lock);
+lockdep_assert_held(&lip->li_ailp->ail_lock);
 if (lip->li_flags & XFS_LI_FAILED) {
 lip->li_flags &= ~XFS_LI_FAILED;
@@ -186,7 +186,7 @@ xfs_set_li_failed(
 struct xfs_log_item *lip,
 struct xfs_buf *bp)
 {
-lockdep_assert_held(&lip->li_ailp->xa_lock);
+lockdep_assert_held(&lip->li_ailp->ail_lock);
 if (!(lip->li_flags & XFS_LI_FAILED)) {
 xfs_buf_hold(bp);
...