Commit e562a8bd authored by Qu Wenruo, committed by David Sterba

btrfs: introduce BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN

Introduce a new runtime flag, BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN,
which informs a running qgroup rescan that it should cancel its work
asynchronously.

This addresses the window where an operation makes qgroup numbers
inconsistent (such as qgroup inheritance) while a qgroup rescan is
running.

In that case the qgroup inconsistent flag would be cleared when the
qgroup rescan finishes. But the ownership of some extents has already
changed, so the rescan result is meaningless and the qgroup inconsistent
flag should not be cleared.

With the new flag, each time we set the INCONSISTENT flag we also set
this new flag to inform any running qgroup rescan to exit immediately,
leaving the INCONSISTENT flag in place.

The new runtime flag can only be cleared when a new rescan is started.
Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent e71564c0
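
Before the diff, here is a minimal stand-alone sketch of the intended flag
interplay. It is illustrative only: the helpers model qgroup_mark_inconsistent(),
rescan_should_stop() and qgroup_rescan_init() from the patch below without
locking or the btrfs_fs_info structure; the status flag bit values follow the
existing on-disk definitions, and the CANCEL_RESCAN bit is the one added here.

#include <stdio.h>

/* On-disk qgroup status flags (existing definitions, simplified names). */
#define QGROUP_STATUS_FLAG_RESCAN		(1UL << 1)
#define QGROUP_STATUS_FLAG_INCONSISTENT		(1UL << 2)
/* New in-memory runtime flag introduced by this patch. */
#define QGROUP_RUNTIME_FLAG_CANCEL_RESCAN	(1UL << 3)

static unsigned long qgroup_flags;

/* Mark the numbers inconsistent and ask any running rescan to bail out. */
static void mark_inconsistent(void)
{
	qgroup_flags |= (QGROUP_STATUS_FLAG_INCONSISTENT |
			 QGROUP_RUNTIME_FLAG_CANCEL_RESCAN);
}

/* Polled by the rescan loop; true once cancellation was requested. */
static int rescan_should_stop(void)
{
	return (qgroup_flags & QGROUP_RUNTIME_FLAG_CANCEL_RESCAN) != 0;
}

/* Starting a fresh rescan clears the runtime flag again. */
static void rescan_init(void)
{
	qgroup_flags &= ~QGROUP_RUNTIME_FLAG_CANCEL_RESCAN;
	qgroup_flags |= QGROUP_STATUS_FLAG_RESCAN;
}

int main(void)
{
	rescan_init();
	mark_inconsistent();	/* e.g. snapshot created with qgroup inheritance */
	printf("rescan should stop: %d\n", rescan_should_stop());	/* 1 */
	printf("inconsistent bit kept: %d\n",
	       (qgroup_flags & QGROUP_STATUS_FLAG_INCONSISTENT) != 0);	/* 1 */
	return 0;
}

The actual changes in fs/btrfs/qgroup.c and fs/btrfs/qgroup.h follow.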
fs/btrfs/qgroup.c
@@ -333,6 +333,12 @@ int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
 }
 #endif
 
+static void qgroup_mark_inconsistent(struct btrfs_fs_info *fs_info)
+{
+	fs_info->qgroup_flags |= (BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT |
+				  BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN);
+}
+
 /*
  * The full config is read in one go, only called from open_ctree()
  * It doesn't use any locking, as at this point we're still single-threaded
@@ -401,7 +407,7 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
 			}
 			if (btrfs_qgroup_status_generation(l, ptr) !=
 			    fs_info->generation) {
-				flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+				qgroup_mark_inconsistent(fs_info);
 				btrfs_err(fs_info,
 	"qgroup generation mismatch, marked as inconsistent");
 			}
@@ -419,7 +425,7 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
 		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
 		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
 			btrfs_err(fs_info, "inconsistent qgroup config");
-			flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+			qgroup_mark_inconsistent(fs_info);
 		}
 		if (!qgroup) {
 			qgroup = add_qgroup_rb(fs_info, found_key.offset);
@@ -1734,7 +1740,7 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
 
 	ret = update_qgroup_limit_item(trans, qgroup);
 	if (ret) {
-		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+		qgroup_mark_inconsistent(fs_info);
 		btrfs_info(fs_info, "unable to update quota limit for %llu",
 			   qgroupid);
 	}
@@ -1810,7 +1816,7 @@ int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
 	ret = btrfs_find_all_roots(NULL, trans->fs_info, bytenr, 0, &old_root,
 				   true);
 	if (ret < 0) {
-		trans->fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+		qgroup_mark_inconsistent(trans->fs_info);
 		btrfs_warn(trans->fs_info,
 "error accounting new delayed refs extent (err code: %d), quota inconsistent",
 			ret);
@@ -2286,7 +2292,7 @@ static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
 out:
 	btrfs_free_path(dst_path);
 	if (ret < 0)
-		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+		qgroup_mark_inconsistent(fs_info);
 	return ret;
 }
 
@@ -2790,12 +2796,10 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
 		spin_unlock(&fs_info->qgroup_lock);
 		ret = update_qgroup_info_item(trans, qgroup);
 		if (ret)
-			fs_info->qgroup_flags |=
-					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+			qgroup_mark_inconsistent(fs_info);
 		ret = update_qgroup_limit_item(trans, qgroup);
 		if (ret)
-			fs_info->qgroup_flags |=
-					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+			qgroup_mark_inconsistent(fs_info);
 		spin_lock(&fs_info->qgroup_lock);
 	}
 	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
@@ -2806,7 +2810,7 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
 
 	ret = update_qgroup_status_item(trans);
 	if (ret)
-		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+		qgroup_mark_inconsistent(fs_info);
 
 	return ret;
 }
@@ -2924,7 +2928,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
 
 		ret = update_qgroup_limit_item(trans, dstgroup);
 		if (ret) {
-			fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+			qgroup_mark_inconsistent(fs_info);
 			btrfs_info(fs_info,
 				   "unable to update quota limit for %llu",
 				   dstgroup->qgroupid);
@@ -3030,7 +3034,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
 	if (!committing)
 		mutex_unlock(&fs_info->qgroup_ioctl_lock);
 	if (need_rescan)
-		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+		qgroup_mark_inconsistent(fs_info);
 	return ret;
 }
 
@@ -3303,7 +3307,8 @@ static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
 {
 	return btrfs_fs_closing(fs_info) ||
 		test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state) ||
-		!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
+		!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
+		fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN;
 }
 
 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
@@ -3368,7 +3373,8 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
 	}
 
 	mutex_lock(&fs_info->qgroup_rescan_lock);
-	if (!stopped)
+	if (!stopped ||
+	    fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN)
 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
 	if (trans) {
 		ret = update_qgroup_status_item(trans);
@@ -3379,6 +3385,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
 		}
 	}
 	fs_info->qgroup_rescan_running = false;
+	fs_info->qgroup_flags &= ~BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN;
 	complete_all(&fs_info->qgroup_rescan_completion);
 	mutex_unlock(&fs_info->qgroup_rescan_lock);
 
@@ -3389,6 +3396,8 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
 
 	if (stopped) {
 		btrfs_info(fs_info, "qgroup scan paused");
+	} else if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN) {
+		btrfs_info(fs_info, "qgroup scan cancelled");
 	} else if (err >= 0) {
 		btrfs_info(fs_info, "qgroup scan completed%s",
 			   err > 0 ? " (inconsistency flag cleared)" : "");
@@ -3451,6 +3460,7 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
 
 	memset(&fs_info->qgroup_rescan_progress, 0,
 		sizeof(fs_info->qgroup_rescan_progress));
+	fs_info->qgroup_flags &= ~BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN;
 	fs_info->qgroup_rescan_progress.objectid = progress_objectid;
 	init_completion(&fs_info->qgroup_rescan_completion);
 	mutex_unlock(&fs_info->qgroup_rescan_lock);
@@ -4248,8 +4258,7 @@ int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
 	spin_unlock(&blocks->lock);
 out:
 	if (ret < 0)
-		fs_info->qgroup_flags |=
-			BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+		qgroup_mark_inconsistent(fs_info);
 	return ret;
 }
 
@@ -4336,7 +4345,7 @@ int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
 		btrfs_err_rl(fs_info,
 			     "failed to account subtree at bytenr %llu: %d",
 			     subvol_eb->start, ret);
-		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+		qgroup_mark_inconsistent(fs_info);
 	}
 	return ret;
 }
fs/btrfs/qgroup.h
@@ -100,6 +100,8 @@
  * subtree rescan for them.
  */
 
+#define BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN	(1UL << 3)
+
 /*
  * Record a dirty extent, and info qgroup to update quota on it
  * TODO: Use kmem cache to alloc it.
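
As a closing note, the conversion applied at the call sites above boils down to
one before/after pattern; the sketch below uses a representative error-path
condition rather than code from one specific function (a couple of sites in
btrfs_read_qgroup_config() previously set a local flags variable instead of
fs_info->qgroup_flags; those now call the helper directly as well):

	/* Before: only the on-disk status flag was raised. */
	if (ret < 0)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;

	/*
	 * After: the helper also raises the runtime CANCEL_RESCAN flag, so a
	 * rescan running at the same time notices it in rescan_should_stop()
	 * and exits without clearing the inconsistent state.
	 */
	if (ret < 0)
		qgroup_mark_inconsistent(fs_info);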