Commit 04755d2e authored by Dave Chinner; committed by Dave Chinner

xfs: refactor xlog_recover_process_iunlinks()

For upcoming changes to the way inode unlinked list processing is
done, the structure of recovery needs to change slightly. We also
really need to untangle the messy error handling in list recovery
so that actions like emptying the bucket on inode lookup failure
are associated with the bucket list walk failing, not failing
to look up the inode.

Refactor the recovery code now to keep the re-organisation separate
to the algorithm changes.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
parent 4fcc94d6
...@@ -2667,41 +2667,35 @@ xlog_recover_clear_agi_bucket( ...@@ -2667,41 +2667,35 @@ xlog_recover_clear_agi_bucket(
return; return;
} }
STATIC xfs_agino_t static int
xlog_recover_process_one_iunlink( xlog_recover_iunlink_bucket(
struct xfs_perag *pag, struct xfs_perag *pag,
xfs_agino_t agino, struct xfs_agi *agi,
int bucket) int bucket)
{ {
struct xfs_inode *ip; struct xfs_mount *mp = pag->pag_mount;
xfs_ino_t ino; struct xfs_inode *ip;
int error; xfs_agino_t agino;
ino = XFS_AGINO_TO_INO(pag->pag_mount, pag->pag_agno, agino); agino = be32_to_cpu(agi->agi_unlinked[bucket]);
error = xfs_iget(pag->pag_mount, NULL, ino, 0, 0, &ip); while (agino != NULLAGINO) {
if (error) int error;
goto fail;
xfs_iflags_clear(ip, XFS_IRECOVERY); error = xfs_iget(mp, NULL,
ASSERT(VFS_I(ip)->i_nlink == 0); XFS_AGINO_TO_INO(mp, pag->pag_agno, agino),
ASSERT(VFS_I(ip)->i_mode != 0); 0, 0, &ip);
if (error)
return error;;
agino = ip->i_next_unlinked; ASSERT(VFS_I(ip)->i_nlink == 0);
xfs_irele(ip); ASSERT(VFS_I(ip)->i_mode != 0);
return agino; xfs_iflags_clear(ip, XFS_IRECOVERY);
agino = ip->i_next_unlinked;
fail: xfs_irele(ip);
/* cond_resched();
* We can't read in the inode this bucket points to, or this inode }
* is messed up. Just ditch this bucket of inodes. We will lose return 0;
* some inodes and space, but at least we won't hang.
*
* Call xlog_recover_clear_agi_bucket() to perform a transaction to
* clear the inode pointer in the bucket.
*/
xfs_inodegc_flush(pag->pag_mount);
xlog_recover_clear_agi_bucket(pag, bucket);
return NULLAGINO;
} }
/* /*
...@@ -2727,59 +2721,70 @@ xlog_recover_process_one_iunlink( ...@@ -2727,59 +2721,70 @@ xlog_recover_process_one_iunlink(
* scheduled on this CPU to ensure other scheduled work can run without undue * scheduled on this CPU to ensure other scheduled work can run without undue
* latency. * latency.
*/ */
STATIC void static void
xlog_recover_process_iunlinks( xlog_recover_iunlink_ag(
struct xlog *log) struct xfs_perag *pag)
{ {
struct xfs_mount *mp = log->l_mp;
struct xfs_perag *pag;
xfs_agnumber_t agno;
struct xfs_agi *agi; struct xfs_agi *agi;
struct xfs_buf *agibp; struct xfs_buf *agibp;
xfs_agino_t agino;
int bucket; int bucket;
int error; int error;
for_each_perag(mp, agno, pag) { error = xfs_read_agi(pag, NULL, &agibp);
error = xfs_read_agi(pag, NULL, &agibp); if (error) {
/*
* AGI is b0rked. Don't process it.
*
* We should probably mark the filesystem as corrupt after we've
* recovered all the ag's we can....
*/
return;
}
/*
* Unlock the buffer so that it can be acquired in the normal course of
* the transaction to truncate and free each inode. Because we are not
* racing with anyone else here for the AGI buffer, we don't even need
* to hold it locked to read the initial unlinked bucket entries out of
* the buffer. We keep buffer reference though, so that it stays pinned
* in memory while we need the buffer.
*/
agi = agibp->b_addr;
xfs_buf_unlock(agibp);
for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
error = xlog_recover_iunlink_bucket(pag, agi, bucket);
if (error) { if (error) {
/* /*
* AGI is b0rked. Don't process it. * Bucket is unrecoverable, so only a repair scan can
* * free the remaining unlinked inodes. Just empty the
* We should probably mark the filesystem as corrupt * bucket and remaining inodes on it unreferenced and
* after we've recovered all the ag's we can.... * unfreeable.
*/ */
continue; xfs_inodegc_flush(pag->pag_mount);
} xlog_recover_clear_agi_bucket(pag, bucket);
/*
* Unlock the buffer so that it can be acquired in the normal
* course of the transaction to truncate and free each inode.
* Because we are not racing with anyone else here for the AGI
* buffer, we don't even need to hold it locked to read the
* initial unlinked bucket entries out of the buffer. We keep
* buffer reference though, so that it stays pinned in memory
* while we need the buffer.
*/
agi = agibp->b_addr;
xfs_buf_unlock(agibp);
for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
agino = be32_to_cpu(agi->agi_unlinked[bucket]);
while (agino != NULLAGINO) {
agino = xlog_recover_process_one_iunlink(pag,
agino, bucket);
cond_resched();
}
} }
xfs_buf_rele(agibp);
} }
xfs_buf_rele(agibp);
}
/*
 * Process the unlinked-inode lists for every AG in the filesystem after log
 * recovery, then flush pending inode inactivations.
 *
 * @log: the log being recovered; the mount is reached via log->l_mp.
 */
static void
xlog_recover_process_iunlinks(
	struct xlog	*log)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	for_each_perag(log->l_mp, agno, pag)
		xlog_recover_iunlink_ag(pag);

	/*
	 * Flush the pending unlinked inodes to ensure that the inactivations
	 * are fully completed on disk and the incore inodes can be reclaimed
	 * before we signal that recovery is complete.
	 */
	xfs_inodegc_flush(log->l_mp);
}
STATIC void STATIC void
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment