Commit 3c471b65 authored by Kent Overstreet

bcachefs: convert bch_fs_flags to x-macro

Now we can print out filesystem flags in sysfs, useful for debugging
various "what's my filesystem doing" issues.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 9e243d3c
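
Before the diff, a note on the idiom for readers who haven't seen it: an x-macro defines the flag list once, then expands it twice, once into the enum and once into a parallel string table, so the two can never drift apart. The sketch below is a minimal standalone illustration of that pattern, not code from this patch; the DEMO_* names are made up, while the real list is BCH_FS_FLAGS() and the real table is bch2_fs_flag_strs[].

#include <stdio.h>

/* The list is written once... */
#define DEMO_FLAGS()	\
	x(started)	\
	x(rw)		\
	x(stopping)

/* ...expanded once into enum constants (DEMO_started = 0, ...)... */
enum demo_flags {
#define x(n)	DEMO_##n,
	DEMO_FLAGS()
#undef x
};

/* ...and once into a matching name table. */
static const char * const demo_flag_strs[] = {
#define x(n)	#n,
	DEMO_FLAGS()
#undef x
	NULL
};

int main(void)
{
	unsigned long flags = (1UL << DEMO_started) | (1UL << DEMO_stopping);
	const char *sep = "";

	/* Print the name of each set bit, comma separated - roughly
	 * what prt_bitflags() does for the sysfs file added below. */
	for (unsigned i = 0; demo_flag_strs[i]; i++)
		if (flags & (1UL << i)) {
			printf("%s%s", sep, demo_flag_strs[i]);
			sep = ",";
		}
	printf("\n");	/* -> started,stopping */
	return 0;
}
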
@@ -566,32 +566,38 @@ struct bch_dev {
struct io_count __percpu *io_done;
};
- enum {
- /* startup: */
- BCH_FS_STARTED,
- BCH_FS_MAY_GO_RW,
- BCH_FS_RW,
- BCH_FS_WAS_RW,
- /* shutdown: */
- BCH_FS_STOPPING,
- BCH_FS_EMERGENCY_RO,
- BCH_FS_GOING_RO,
- BCH_FS_WRITE_DISABLE_COMPLETE,
- BCH_FS_CLEAN_SHUTDOWN,
- /* fsck passes: */
- BCH_FS_FSCK_DONE,
- BCH_FS_INITIAL_GC_UNFIXED, /* kill when we enumerate fsck errors */
- BCH_FS_NEED_ANOTHER_GC,
- BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS,
- /* errors: */
- BCH_FS_ERROR,
- BCH_FS_TOPOLOGY_ERROR,
- BCH_FS_ERRORS_FIXED,
- BCH_FS_ERRORS_NOT_FIXED,
+ /*
+  * fsck_done - kill?
+  *
+  * replace with something more general from enumerated fsck passes/errors:
+  * initial_gc_unfixed
+  * error
+  * topology error
+  */
+ #define BCH_FS_FLAGS() \
+ x(started) \
+ x(may_go_rw) \
+ x(rw) \
+ x(was_rw) \
+ x(stopping) \
+ x(emergency_ro) \
+ x(going_ro) \
+ x(write_disable_complete) \
+ x(clean_shutdown) \
+ x(fsck_done) \
+ x(initial_gc_unfixed) \
+ x(need_another_gc) \
+ x(need_delete_dead_snapshots) \
+ x(error) \
+ x(topology_error) \
+ x(errors_fixed) \
+ x(errors_not_fixed)
+ enum bch_fs_flags {
+ #define x(n) BCH_FS_##n,
+ BCH_FS_FLAGS()
+ #undef x
};
struct btree_debug {
@@ -1070,7 +1076,7 @@ static inline void bch2_write_ref_get(struct bch_fs *c, enum bch_write_ref ref)
static inline bool bch2_write_ref_tryget(struct bch_fs *c, enum bch_write_ref ref)
{
#ifdef BCH_WRITE_REF_DEBUG
- return !test_bit(BCH_FS_GOING_RO, &c->flags) &&
+ return !test_bit(BCH_FS_going_ro, &c->flags) &&
atomic_long_inc_not_zero(&c->writes[ref]);
#else
return percpu_ref_tryget_live(&c->writes);
@@ -1089,7 +1095,7 @@ static inline void bch2_write_ref_put(struct bch_fs *c, enum bch_write_ref ref)
if (atomic_long_read(&c->writes[i]))
return;
- set_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);
+ set_bit(BCH_FS_write_disable_complete, &c->flags);
wake_up(&bch2_read_only_wait);
#else
percpu_ref_put(&c->writes);
......
@@ -108,7 +108,7 @@ static int bch2_gc_check_topology(struct bch_fs *c,
ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
goto err;
} else {
- set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags);
+ set_bit(BCH_FS_initial_gc_unfixed, &c->flags);
}
}
}
@@ -134,7 +134,7 @@ static int bch2_gc_check_topology(struct bch_fs *c,
ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
goto err;
} else {
- set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags);
+ set_bit(BCH_FS_initial_gc_unfixed, &c->flags);
}
}
@@ -619,7 +619,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
g->data_type = 0;
g->dirty_sectors = 0;
g->cached_sectors = 0;
- set_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
+ set_bit(BCH_FS_need_another_gc, &c->flags);
} else {
do_update = true;
}
@@ -664,7 +664,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
bch2_bkey_val_to_text(&buf, c, *k), buf.buf))) {
if (data_type == BCH_DATA_btree) {
g->data_type = data_type;
- set_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
+ set_bit(BCH_FS_need_another_gc, &c->flags);
} else {
do_update = true;
}
@@ -996,7 +996,7 @@ static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b
/* Continue marking when opted to not
* fix the error: */
ret = 0;
- set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags);
+ set_bit(BCH_FS_initial_gc_unfixed, &c->flags);
continue;
}
} else if (ret) {
@@ -1845,7 +1845,7 @@ int bch2_gc(struct bch_fs *c, bool initial, bool metadata_only)
#endif
c->gc_count++;
- if (test_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags) ||
+ if (test_bit(BCH_FS_need_another_gc, &c->flags) ||
(!iter && bch2_test_restart_gc)) {
if (iter++ > 2) {
bch_info(c, "Unable to fix bucket gens, looping");
@@ -1857,7 +1857,7 @@ int bch2_gc(struct bch_fs *c, bool initial, bool metadata_only)
* XXX: make sure gens we fixed got saved
*/
bch_info(c, "Second GC pass needed, restarting:");
- clear_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
+ clear_bit(BCH_FS_need_another_gc, &c->flags);
__gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));
bch2_gc_stripes_reset(c, metadata_only);
......
@@ -781,7 +781,7 @@ static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *pat
struct btree_node_iter node_iter = l->iter;
struct bkey_packed *k;
struct bkey_buf tmp;
- unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
+ unsigned nr = test_bit(BCH_FS_started, &c->flags)
? (path->level > 1 ? 0 : 2)
: (path->level > 1 ? 1 : 16);
bool was_locked = btree_node_locked(path, path->level);
@@ -816,7 +816,7 @@ static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *p
struct bch_fs *c = trans->c;
struct bkey_s_c k;
struct bkey_buf tmp;
- unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
+ unsigned nr = test_bit(BCH_FS_started, &c->flags)
? (path->level > 1 ? 0 : 2)
: (path->level > 1 ? 1 : 16);
bool was_locked = btree_node_locked(path, path->level);
......
@@ -177,7 +177,7 @@ int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
struct journal_keys *keys = &c->journal_keys;
size_t idx = bch2_journal_key_search(keys, id, level, k->k.p);
- BUG_ON(test_bit(BCH_FS_RW, &c->flags));
+ BUG_ON(test_bit(BCH_FS_rw, &c->flags));
if (idx < keys->size &&
journal_key_cmp(&n, &keys->d[idx]) == 0) {
......
@@ -778,7 +778,7 @@ bool bch2_btree_insert_key_cached(struct btree_trans *trans,
ck->valid = true;
if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
- EBUG_ON(test_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags));
+ EBUG_ON(test_bit(BCH_FS_clean_shutdown, &c->flags));
set_bit(BKEY_CACHED_DIRTY, &ck->flags);
atomic_long_inc(&c->btree_key_cache.nr_dirty);
@@ -1005,7 +1005,7 @@ void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
if (atomic_long_read(&bc->nr_dirty) &&
!bch2_journal_error(&c->journal) &&
- test_bit(BCH_FS_WAS_RW, &c->flags))
+ test_bit(BCH_FS_was_rw, &c->flags))
panic("btree key cache shutdown error: nr_dirty nonzero (%li)\n",
atomic_long_read(&bc->nr_dirty));
......
@@ -287,7 +287,7 @@ inline void bch2_btree_insert_key_leaf(struct btree_trans *trans,
bch2_btree_add_journal_pin(c, b, journal_seq);
if (unlikely(!btree_node_dirty(b))) {
- EBUG_ON(test_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags));
+ EBUG_ON(test_bit(BCH_FS_clean_shutdown, &c->flags));
set_btree_node_dirty_acct(c, b);
}
@@ -995,7 +995,7 @@ bch2_trans_commit_get_rw_cold(struct btree_trans *trans, unsigned flags)
int ret;
if (likely(!(flags & BCH_TRANS_COMMIT_lazy_rw)) ||
- test_bit(BCH_FS_STARTED, &c->flags))
+ test_bit(BCH_FS_started, &c->flags))
return -BCH_ERR_erofs_trans_commit;
ret = drop_locks_do(trans, bch2_fs_read_write_early(c));
@@ -1060,7 +1060,7 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
return ret;
}
- if (unlikely(!test_bit(BCH_FS_MAY_GO_RW, &c->flags))) {
+ if (unlikely(!test_bit(BCH_FS_may_go_rw, &c->flags))) {
ret = do_bch2_trans_commit_to_journal_replay(trans);
goto out_reset;
}
@@ -1086,7 +1086,7 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
goto out;
}
- EBUG_ON(test_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags));
+ EBUG_ON(test_bit(BCH_FS_clean_shutdown, &c->flags));
trans->journal_u64s = trans->extra_journal_entries.nr;
trans->journal_transaction_names = READ_ONCE(c->opts.journal_transaction_names);
......
@@ -2080,7 +2080,7 @@ void bch2_btree_node_rewrite_async(struct bch_fs *c, struct btree *b)
a->seq = b->data->keys.seq;
INIT_WORK(&a->work, async_btree_node_rewrite_work);
- if (unlikely(!test_bit(BCH_FS_MAY_GO_RW, &c->flags))) {
+ if (unlikely(!test_bit(BCH_FS_may_go_rw, &c->flags))) {
mutex_lock(&c->pending_node_rewrites_lock);
list_add(&a->list, &c->pending_node_rewrites);
mutex_unlock(&c->pending_node_rewrites_lock);
@@ -2088,7 +2088,7 @@ void bch2_btree_node_rewrite_async(struct bch_fs *c, struct btree *b)
}
if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_node_rewrite)) {
- if (test_bit(BCH_FS_STARTED, &c->flags)) {
+ if (test_bit(BCH_FS_started, &c->flags)) {
bch_err(c, "%s: error getting c->writes ref", __func__);
kfree(a);
return;
......
@@ -418,7 +418,7 @@ static long bch2_ioctl_fs_usage(struct bch_fs *c,
unsigned i;
int ret = 0;
- if (!test_bit(BCH_FS_STARTED, &c->flags))
+ if (!test_bit(BCH_FS_started, &c->flags))
return -EINVAL;
if (get_user(replica_entries_bytes, &user_arg->replica_entries_bytes))
@@ -492,7 +492,7 @@ static long bch2_ioctl_dev_usage(struct bch_fs *c,
struct bch_dev *ca;
unsigned i;
- if (!test_bit(BCH_FS_STARTED, &c->flags))
+ if (!test_bit(BCH_FS_started, &c->flags))
return -EINVAL;
if (copy_from_user(&arg, user_arg, sizeof(arg)))
@@ -533,7 +533,7 @@ static long bch2_ioctl_dev_usage_v2(struct bch_fs *c,
struct bch_dev *ca;
int ret = 0;
- if (!test_bit(BCH_FS_STARTED, &c->flags))
+ if (!test_bit(BCH_FS_started, &c->flags))
return -EINVAL;
if (copy_from_user(&arg, user_arg, sizeof(arg)))
@@ -725,7 +725,7 @@ long bch2_fs_ioctl(struct bch_fs *c, unsigned cmd, void __user *arg)
BCH_IOCTL(disk_get_idx, struct bch_ioctl_disk_get_idx);
}
- if (!test_bit(BCH_FS_STARTED, &c->flags))
+ if (!test_bit(BCH_FS_started, &c->flags))
return -EINVAL;
switch (cmd) {
......
@@ -1415,7 +1415,7 @@ __bch2_ec_stripe_head_get(struct btree_trans *trans,
if (ret)
return ERR_PTR(ret);
- if (test_bit(BCH_FS_GOING_RO, &c->flags)) {
+ if (test_bit(BCH_FS_going_ro, &c->flags)) {
h = ERR_PTR(-BCH_ERR_erofs_no_writes);
goto found;
}
......
@@ -7,7 +7,7 @@
bool bch2_inconsistent_error(struct bch_fs *c)
{
- set_bit(BCH_FS_ERROR, &c->flags);
+ set_bit(BCH_FS_error, &c->flags);
switch (c->opts.errors) {
case BCH_ON_ERROR_continue:
@@ -26,8 +26,8 @@ bool bch2_inconsistent_error(struct bch_fs *c)
void bch2_topology_error(struct bch_fs *c)
{
- set_bit(BCH_FS_TOPOLOGY_ERROR, &c->flags);
- if (test_bit(BCH_FS_FSCK_DONE, &c->flags))
+ set_bit(BCH_FS_topology_error, &c->flags);
+ if (test_bit(BCH_FS_fsck_done, &c->flags))
bch2_inconsistent_error(c);
}
@@ -114,7 +114,7 @@ static struct fsck_err_state *fsck_err_get(struct bch_fs *c, const char *fmt)
{
struct fsck_err_state *s;
- if (test_bit(BCH_FS_FSCK_DONE, &c->flags))
+ if (test_bit(BCH_FS_fsck_done, &c->flags))
return NULL;
list_for_each_entry(s, &c->fsck_error_msgs, list)
@@ -196,7 +196,7 @@ int bch2_fsck_err(struct bch_fs *c,
prt_printf(out, bch2_log_msg(c, ""));
#endif
- if (test_bit(BCH_FS_FSCK_DONE, &c->flags)) {
+ if (test_bit(BCH_FS_fsck_done, &c->flags)) {
if (c->opts.errors != BCH_ON_ERROR_continue ||
!(flags & (FSCK_CAN_FIX|FSCK_CAN_IGNORE))) {
prt_str(out, ", shutting down");
@@ -256,7 +256,7 @@ int bch2_fsck_err(struct bch_fs *c,
if (print)
bch2_print_string_as_lines(KERN_ERR, out->buf);
- if (!test_bit(BCH_FS_FSCK_DONE, &c->flags) &&
+ if (!test_bit(BCH_FS_fsck_done, &c->flags) &&
(ret != -BCH_ERR_fsck_fix &&
ret != -BCH_ERR_fsck_ignore))
bch_err(c, "Unable to continue, halting");
@@ -274,10 +274,10 @@ int bch2_fsck_err(struct bch_fs *c,
bch2_inconsistent_error(c);
if (ret == -BCH_ERR_fsck_fix) {
- set_bit(BCH_FS_ERRORS_FIXED, &c->flags);
+ set_bit(BCH_FS_errors_fixed, &c->flags);
} else {
- set_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags);
- set_bit(BCH_FS_ERROR, &c->flags);
+ set_bit(BCH_FS_errors_not_fixed, &c->flags);
+ set_bit(BCH_FS_error, &c->flags);
}
return ret;
......
@@ -638,7 +638,7 @@ static int __bch2_writepage(struct folio *folio,
/* Check for writing past i_size: */
WARN_ONCE((bio_end_sector(&w->io->op.wbio.bio) << 9) >
round_up(i_size, block_bytes(c)) &&
- !test_bit(BCH_FS_EMERGENCY_RO, &c->flags),
+ !test_bit(BCH_FS_emergency_ro, &c->flags),
"writing past i_size: %llu > %llu (unrounded %llu)\n",
bio_end_sector(&w->io->op.wbio.bio) << 9,
round_up(i_size, block_bytes(c)),
......
@@ -1770,7 +1770,7 @@ static int bch2_unfreeze(struct super_block *sb)
struct bch_fs *c = sb->s_fs_info;
int ret;
- if (test_bit(BCH_FS_EMERGENCY_RO, &c->flags))
+ if (test_bit(BCH_FS_emergency_ro, &c->flags))
return 0;
down_write(&c->state_lock);
......
@@ -448,7 +448,7 @@ static int snapshots_seen_update(struct bch_fs *c, struct snapshots_seen *s,
bch2_btree_id_str(btree_id),
pos.inode, pos.offset,
i->id, n.id, n.equiv);
- set_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags);
+ set_bit(BCH_FS_need_delete_dead_snapshots, &c->flags);
return bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_delete_dead_snapshots);
}
}
......
@@ -1182,7 +1182,7 @@ int bch2_delete_dead_inodes(struct bch_fs *c)
break;
if (ret) {
- if (!test_bit(BCH_FS_RW, &c->flags)) {
+ if (!test_bit(BCH_FS_rw, &c->flags)) {
bch2_trans_unlock(trans);
bch2_fs_lazy_rw(c);
}
......
@@ -267,7 +267,7 @@ void bch2_blacklist_entries_gc(struct work_struct *work)
while (!(ret = PTR_ERR_OR_ZERO(b)) &&
b &&
- !test_bit(BCH_FS_STOPPING, &c->flags))
+ !test_bit(BCH_FS_stopping, &c->flags))
b = bch2_btree_iter_next_node(&iter);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
......
@@ -533,7 +533,7 @@ static int bch2_set_may_go_rw(struct bch_fs *c)
move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
keys->gap = keys->nr;
- set_bit(BCH_FS_MAY_GO_RW, &c->flags);
+ set_bit(BCH_FS_may_go_rw, &c->flags);
if (keys->nr)
return bch2_fs_read_write_early(c);
return 0;
@@ -961,13 +961,13 @@ int bch2_fs_recovery(struct bch_fs *c)
/* If we fixed errors, verify that fs is actually clean now: */
if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
- test_bit(BCH_FS_ERRORS_FIXED, &c->flags) &&
- !test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags) &&
- !test_bit(BCH_FS_ERROR, &c->flags)) {
+ test_bit(BCH_FS_errors_fixed, &c->flags) &&
+ !test_bit(BCH_FS_errors_not_fixed, &c->flags) &&
+ !test_bit(BCH_FS_error, &c->flags)) {
bch2_flush_fsck_errs(c);
bch_info(c, "Fixed errors, running fsck a second time to verify fs is clean");
- clear_bit(BCH_FS_ERRORS_FIXED, &c->flags);
+ clear_bit(BCH_FS_errors_fixed, &c->flags);
c->curr_recovery_pass = BCH_RECOVERY_PASS_check_alloc_info;
@@ -975,13 +975,13 @@ int bch2_fs_recovery(struct bch_fs *c)
if (ret)
goto err;
- if (test_bit(BCH_FS_ERRORS_FIXED, &c->flags) ||
- test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags)) {
+ if (test_bit(BCH_FS_errors_fixed, &c->flags) ||
+ test_bit(BCH_FS_errors_not_fixed, &c->flags)) {
bch_err(c, "Second fsck run was not clean");
- set_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags);
+ set_bit(BCH_FS_errors_not_fixed, &c->flags);
}
- set_bit(BCH_FS_ERRORS_FIXED, &c->flags);
+ set_bit(BCH_FS_errors_fixed, &c->flags);
}
if (enabled_qtypes(c)) {
@@ -1000,13 +1000,13 @@ int bch2_fs_recovery(struct bch_fs *c)
write_sb = true;
}
- if (!test_bit(BCH_FS_ERROR, &c->flags) &&
+ if (!test_bit(BCH_FS_error, &c->flags) &&
!(c->disk_sb.sb->compat[0] & cpu_to_le64(1ULL << BCH_COMPAT_alloc_info))) {
c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info);
write_sb = true;
}
- if (!test_bit(BCH_FS_ERROR, &c->flags)) {
+ if (!test_bit(BCH_FS_error, &c->flags)) {
struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
if (ext &&
(!bch2_is_zero(ext->recovery_passes_required, sizeof(ext->recovery_passes_required)) ||
@@ -1018,8 +1018,8 @@ int bch2_fs_recovery(struct bch_fs *c)
}
if (c->opts.fsck &&
- !test_bit(BCH_FS_ERROR, &c->flags) &&
- !test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags)) {
+ !test_bit(BCH_FS_error, &c->flags) &&
+ !test_bit(BCH_FS_errors_not_fixed, &c->flags)) {
SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0);
SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 0);
write_sb = true;
@@ -1053,7 +1053,7 @@ int bch2_fs_recovery(struct bch_fs *c)
ret = 0;
out:
- set_bit(BCH_FS_FSCK_DONE, &c->flags);
+ set_bit(BCH_FS_fsck_done, &c->flags);
bch2_flush_fsck_errs(c);
if (!c->opts.keep_journal &&
@@ -1061,7 +1061,7 @@ int bch2_fs_recovery(struct bch_fs *c)
bch2_journal_keys_put_initial(c);
kfree(clean);
- if (!ret && test_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags)) {
+ if (!ret && test_bit(BCH_FS_need_delete_dead_snapshots, &c->flags)) {
bch2_fs_read_write_early(c);
bch2_delete_dead_snapshots_async(c);
}
@@ -1100,8 +1100,8 @@ int bch2_fs_initialize(struct bch_fs *c)
mutex_unlock(&c->sb_lock);
c->curr_recovery_pass = ARRAY_SIZE(recovery_pass_fns);
- set_bit(BCH_FS_MAY_GO_RW, &c->flags);
- set_bit(BCH_FS_FSCK_DONE, &c->flags);
+ set_bit(BCH_FS_may_go_rw, &c->flags);
+ set_bit(BCH_FS_fsck_done, &c->flags);
for (i = 0; i < BTREE_ID_NR; i++)
bch2_btree_root_alloc(c, i);
......
@@ -318,7 +318,7 @@ int bch2_mark_snapshot(struct btree_trans *trans,
__set_is_ancestor_bitmap(c, id);
if (BCH_SNAPSHOT_DELETED(s.v)) {
- set_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags);
+ set_bit(BCH_FS_need_delete_dead_snapshots, &c->flags);
if (c->curr_recovery_pass > BCH_RECOVERY_PASS_delete_dead_snapshots)
bch2_delete_dead_snapshots_async(c);
}
@@ -1376,10 +1376,10 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
u32 *i, id;
int ret = 0;
- if (!test_and_clear_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags))
+ if (!test_and_clear_bit(BCH_FS_need_delete_dead_snapshots, &c->flags))
return 0;
- if (!test_bit(BCH_FS_STARTED, &c->flags)) {
+ if (!test_bit(BCH_FS_started, &c->flags)) {
ret = bch2_fs_read_write_early(c);
if (ret) {
bch_err_msg(c, ret, "deleting dead snapshots: error going rw");
@@ -1680,7 +1680,7 @@ static int bch2_check_snapshot_needs_deletion(struct btree_trans *trans, struct
if (BCH_SNAPSHOT_DELETED(snap.v) ||
bch2_snapshot_equiv(c, k.k->p.offset) != k.k->p.offset ||
(ret = bch2_snapshot_needs_delete(trans, k)) > 0) {
- set_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags);
+ set_bit(BCH_FS_need_delete_dead_snapshots, &c->flags);
return 0;
}
......
@@ -950,9 +950,9 @@ int bch2_write_super(struct bch_fs *c)
le64_add_cpu(&c->disk_sb.sb->seq, 1);
- if (test_bit(BCH_FS_ERROR, &c->flags))
+ if (test_bit(BCH_FS_error, &c->flags))
SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 1);
- if (test_bit(BCH_FS_TOPOLOGY_ERROR, &c->flags))
+ if (test_bit(BCH_FS_topology_error, &c->flags))
SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 1);
SET_BCH_SB_BIG_ENDIAN(c->disk_sb.sb, CPU_BIG_ENDIAN);
......
@@ -79,6 +79,13 @@ MODULE_SOFTDEP("pre: chacha20");
MODULE_SOFTDEP("pre: poly1305");
MODULE_SOFTDEP("pre: xxhash");
+ const char * const bch2_fs_flag_strs[] = {
+ #define x(n) #n,
+ BCH_FS_FLAGS()
+ #undef x
+ NULL
+ };
#define KTYPE(type) \
static const struct attribute_group type ## _group = { \
.attrs = type ## _files \
@@ -246,8 +253,8 @@ static void __bch2_fs_read_only(struct bch_fs *c)
journal_cur_seq(&c->journal));
if (test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags) &&
- !test_bit(BCH_FS_EMERGENCY_RO, &c->flags))
- set_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags);
+ !test_bit(BCH_FS_emergency_ro, &c->flags))
+ set_bit(BCH_FS_clean_shutdown, &c->flags);
bch2_fs_journal_stop(&c->journal);
/*
@@ -262,19 +269,19 @@ static void bch2_writes_disabled(struct percpu_ref *writes)
{
struct bch_fs *c = container_of(writes, struct bch_fs, writes);
- set_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);
+ set_bit(BCH_FS_write_disable_complete, &c->flags);
wake_up(&bch2_read_only_wait);
}
#endif
void bch2_fs_read_only(struct bch_fs *c)
{
- if (!test_bit(BCH_FS_RW, &c->flags)) {
+ if (!test_bit(BCH_FS_rw, &c->flags)) {
bch2_journal_reclaim_stop(&c->journal);
return;
}
- BUG_ON(test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags));
+ BUG_ON(test_bit(BCH_FS_write_disable_complete, &c->flags));
bch_verbose(c, "going read-only");
@@ -282,7 +289,7 @@ void bch2_fs_read_only(struct bch_fs *c)
* Block new foreground-end write operations from starting - any new
* writes will return -EROFS:
*/
- set_bit(BCH_FS_GOING_RO, &c->flags);
+ set_bit(BCH_FS_going_ro, &c->flags);
#ifndef BCH_WRITE_REF_DEBUG
percpu_ref_kill(&c->writes);
#else
@@ -302,30 +309,30 @@ void bch2_fs_read_only(struct bch_fs *c)
* that going RO is complete:
*/
wait_event(bch2_read_only_wait,
- test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags) ||
- test_bit(BCH_FS_EMERGENCY_RO, &c->flags));
+ test_bit(BCH_FS_write_disable_complete, &c->flags) ||
+ test_bit(BCH_FS_emergency_ro, &c->flags));
- bool writes_disabled = test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);
+ bool writes_disabled = test_bit(BCH_FS_write_disable_complete, &c->flags);
if (writes_disabled)
bch_verbose(c, "finished waiting for writes to stop");
__bch2_fs_read_only(c);
wait_event(bch2_read_only_wait,
- test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags));
+ test_bit(BCH_FS_write_disable_complete, &c->flags));
if (!writes_disabled)
bch_verbose(c, "finished waiting for writes to stop");
- clear_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);
- clear_bit(BCH_FS_GOING_RO, &c->flags);
- clear_bit(BCH_FS_RW, &c->flags);
+ clear_bit(BCH_FS_write_disable_complete, &c->flags);
+ clear_bit(BCH_FS_going_ro, &c->flags);
+ clear_bit(BCH_FS_rw, &c->flags);
if (!bch2_journal_error(&c->journal) &&
- !test_bit(BCH_FS_ERROR, &c->flags) &&
- !test_bit(BCH_FS_EMERGENCY_RO, &c->flags) &&
- test_bit(BCH_FS_STARTED, &c->flags) &&
- test_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags) &&
+ !test_bit(BCH_FS_error, &c->flags) &&
+ !test_bit(BCH_FS_emergency_ro, &c->flags) &&
+ test_bit(BCH_FS_started, &c->flags) &&
+ test_bit(BCH_FS_clean_shutdown, &c->flags) &&
!c->opts.norecovery) {
BUG_ON(c->journal.last_empty_seq != journal_cur_seq(&c->journal));
BUG_ON(atomic_read(&c->btree_cache.dirty));
@@ -356,7 +363,7 @@ static void bch2_fs_read_only_async(struct bch_fs *c)
bool bch2_fs_emergency_read_only(struct bch_fs *c)
{
- bool ret = !test_and_set_bit(BCH_FS_EMERGENCY_RO, &c->flags);
+ bool ret = !test_and_set_bit(BCH_FS_emergency_ro, &c->flags);
bch2_journal_halt(&c->journal);
bch2_fs_read_only_async(c);
@@ -397,12 +404,12 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
unsigned i;
int ret;
- if (test_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags)) {
+ if (test_bit(BCH_FS_initial_gc_unfixed, &c->flags)) {
bch_err(c, "cannot go rw, unfixed btree errors");
return -BCH_ERR_erofs_unfixed_errors;
}
- if (test_bit(BCH_FS_RW, &c->flags))
+ if (test_bit(BCH_FS_rw, &c->flags))
return 0;
if (c->opts.norecovery)
@@ -425,7 +432,7 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
if (ret)
goto err;
- clear_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags);
+ clear_bit(BCH_FS_clean_shutdown, &c->flags);
/*
* First journal write must be a flush write: after a clean shutdown we
@@ -439,8 +446,8 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
bch2_dev_allocator_add(c, ca);
bch2_recalc_capacity(c);
- set_bit(BCH_FS_RW, &c->flags);
- set_bit(BCH_FS_WAS_RW, &c->flags);
+ set_bit(BCH_FS_rw, &c->flags);
+ set_bit(BCH_FS_was_rw, &c->flags);
#ifndef BCH_WRITE_REF_DEBUG
percpu_ref_reinit(&c->writes);
@@ -473,7 +480,7 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
bch2_do_pending_node_rewrites(c);
return 0;
err:
- if (test_bit(BCH_FS_RW, &c->flags))
+ if (test_bit(BCH_FS_rw, &c->flags))
bch2_fs_read_only(c);
else
__bch2_fs_read_only(c);
@@ -573,7 +580,7 @@ void __bch2_fs_stop(struct bch_fs *c)
bch_verbose(c, "shutting down");
- set_bit(BCH_FS_STOPPING, &c->flags);
+ set_bit(BCH_FS_stopping, &c->flags);
cancel_work_sync(&c->journal_seq_blacklist_gc_work);
@@ -966,7 +973,7 @@ int bch2_fs_start(struct bch_fs *c)
down_write(&c->state_lock);
- BUG_ON(test_bit(BCH_FS_STARTED, &c->flags));
+ BUG_ON(test_bit(BCH_FS_started, &c->flags));
mutex_lock(&c->sb_lock);
@@ -1001,12 +1008,12 @@ int bch2_fs_start(struct bch_fs *c)
goto err;
}
- set_bit(BCH_FS_STARTED, &c->flags);
+ set_bit(BCH_FS_started, &c->flags);
if (c->opts.read_only || c->opts.nochanges) {
bch2_fs_read_only(c);
} else {
- ret = !test_bit(BCH_FS_RW, &c->flags)
+ ret = !test_bit(BCH_FS_rw, &c->flags)
? bch2_fs_read_write(c)
: bch2_fs_read_write_late(c);
if (ret)
......
@@ -8,6 +8,8 @@
#include <linux/math64.h>
+ extern const char * const bch2_fs_flag_strs[];
struct bch_fs *bch2_dev_to_fs(dev_t);
struct bch_fs *bch2_uuid_to_fs(__uuid_t);
@@ -37,8 +39,8 @@ int bch2_fs_read_write_early(struct bch_fs *);
*/
static inline void bch2_fs_lazy_rw(struct bch_fs *c)
{
- if (!test_bit(BCH_FS_RW, &c->flags) &&
- !test_bit(BCH_FS_WAS_RW, &c->flags))
+ if (!test_bit(BCH_FS_rw, &c->flags) &&
+ !test_bit(BCH_FS_was_rw, &c->flags))
bch2_fs_read_write_early(c);
}
......
@@ -145,6 +145,7 @@ rw_attribute(gc_gens_pos);
read_attribute(uuid);
read_attribute(minor);
+ read_attribute(flags);
read_attribute(bucket_size);
read_attribute(first_bucket);
read_attribute(nbuckets);
@@ -268,7 +269,7 @@ static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c
memset(s, 0, sizeof(s));
- if (!test_bit(BCH_FS_STARTED, &c->flags))
+ if (!test_bit(BCH_FS_started, &c->flags))
return -EPERM;
trans = bch2_trans_get(c);
@@ -384,6 +385,9 @@ SHOW(bch2_fs)
sysfs_print(minor, c->minor);
sysfs_printf(internal_uuid, "%pU", c->sb.uuid.b);
+ if (attr == &sysfs_flags)
+ prt_bitflags(out, bch2_fs_flag_strs, c->flags);
sysfs_hprint(btree_cache_size, bch2_btree_cache_size(c));
if (attr == &sysfs_btree_write_stats)
@@ -497,12 +501,12 @@ STORE(bch2_fs)
/* Debugging: */
- if (!test_bit(BCH_FS_STARTED, &c->flags))
+ if (!test_bit(BCH_FS_started, &c->flags))
return -EPERM;
/* Debugging: */
- if (!test_bit(BCH_FS_RW, &c->flags))
+ if (!test_bit(BCH_FS_rw, &c->flags))
return -EROFS;
if (attr == &sysfs_prune_cache) {
@@ -634,6 +638,7 @@ STORE(bch2_fs_internal)
SYSFS_OPS(bch2_fs_internal);
struct attribute *bch2_fs_internal_files[] = {
+ &sysfs_flags,
&sysfs_journal_debug,
&sysfs_btree_updates,
&sysfs_btree_cache,
......
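
With the patch applied, the current flag state becomes readable at runtime from the new internal sysfs attribute. Assuming a typical sysfs layout (the exact path and output below are illustrative, not taken from the patch):

cat /sys/fs/bcachefs/<fs-uuid>/internal/flags
started,may_go_rw,rw,was_rw,fsck_done

prt_bitflags() walks bch2_fs_flag_strs[] and prints the name of each set bit, so the output is a comma-separated list of whichever flags are currently set; the values above are just one plausible state for a mounted, cleanly recovered filesystem.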