Commit 8f6d7f4f authored by Josef Bacik

Btrfs: break out of orphan cleanup if we can't make progress

I noticed while running xfstests 83 that if we didn't have enough space to
delete our inode, the orphan cleanup would just loop.  This is because it keeps
finding the same orphan item and trying to kill it, but it can't, since iput
doesn't return an error when deleting the inode fails.  So keep track of the
last orphan we tried to kill; if it's the same as the one we're currently
trying to kill, we know we're not making progress and can just error out.  I
don't have a way to test this, so look hard and make sure it's right.  Thanks,
Signed-off-by: Josef Bacik <josef@redhat.com>
parent 726c35fa
@@ -2230,6 +2230,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
 	struct btrfs_key key, found_key;
 	struct btrfs_trans_handle *trans;
 	struct inode *inode;
+	u64 last_objectid = 0;
 	int ret = 0, nr_unlink = 0, nr_truncate = 0;
 
 	if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
@@ -2281,6 +2282,16 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
 		 * crossing root thing. we store the inode number in the
 		 * offset of the orphan item.
 		 */
+
+		if (found_key.offset == last_objectid) {
+			printk(KERN_ERR "btrfs: Error removing orphan entry, "
+			       "stopping orphan cleanup\n");
+			ret = -EINVAL;
+			goto out;
+		}
+
+		last_objectid = found_key.offset;
+
 		found_key.objectid = found_key.offset;
 		found_key.type = BTRFS_INODE_ITEM_KEY;
 		found_key.offset = 0;
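
Stripped of the btrfs specifics, the fix is a generic progress check: remember
the last key a scan handed you, and if the same key comes back on the next
pass, the preceding removal silently failed and the loop must bail rather than
spin forever.  The sketch below is a minimal user-space illustration of that
pattern, not the kernel code; the helpers scan_next_orphan() and try_delete()
are invented for the example, and the real logic lives in
btrfs_orphan_cleanup() in fs/btrfs/inode.c.

/* progress_check.c -- minimal sketch of the progress check this commit
 * adds.  Not kernel code; all names here are invented for the example. */
#include <stdio.h>
#include <stdint.h>

/* Simulated orphan list.  The entry with id 3 can never be deleted; it
 * stands in for an inode whose deletion fails without reporting an
 * error, the way iput() does in the bug this commit fixes. */
static uint64_t orphans[] = { 1, 2, 3, 4 };
static int deleted[4];

/* Return the id of the first orphan still present, or 0 if none. */
static uint64_t scan_next_orphan(void)
{
	for (size_t i = 0; i < sizeof(orphans) / sizeof(orphans[0]); i++)
		if (!deleted[i])
			return orphans[i];
	return 0;
}

/* Try to delete an orphan.  Deleting id 3 silently fails: like iput(),
 * this gives the caller no error to act on. */
static void try_delete(uint64_t id)
{
	for (size_t i = 0; i < sizeof(orphans) / sizeof(orphans[0]); i++)
		if (orphans[i] == id && id != 3)
			deleted[i] = 1;
}

int main(void)
{
	uint64_t last_objectid = 0;	/* mirrors the new variable in the patch */
	uint64_t id;

	while ((id = scan_next_orphan()) != 0) {
		/* The fix: seeing the same id twice in a row means the
		 * previous delete made no progress, so error out rather
		 * than loop forever.  (The kernel returns -EINVAL.) */
		if (id == last_objectid) {
			fprintf(stderr,
			        "error removing orphan %llu, stopping cleanup\n",
			        (unsigned long long)id);
			return 1;
		}
		last_objectid = id;
		try_delete(id);
	}
	printf("orphan cleanup finished\n");
	return 0;
}

The design point the sketch makes is that the failure is invisible at the call
site, since iput() returns nothing, so the only reliable signal is the scan
position failing to advance.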