Commit a9310ab0 authored by Kent Overstreet's avatar Kent Overstreet Committed by Kent Overstreet

bcachefs: Fixes for startup on very full filesystems

 - Always pass BTREE_INSERT_USE_RESERVE when writing alloc btree keys
 - Don't strand buckets on the copygc freelist until after recovery is
   done and we're starting copygc.
Signed-off-by: default avatarKent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: default avatarKent Overstreet <kent.overstreet@linux.dev>
parent d9b59a57
...@@ -315,7 +315,9 @@ static int bch2_alloc_write_key(struct btree_trans *trans, ...@@ -315,7 +315,9 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
bch2_trans_update(trans, iter, &a->k_i, bch2_trans_update(trans, iter, &a->k_i,
BTREE_TRIGGER_NORUN); BTREE_TRIGGER_NORUN);
ret = bch2_trans_commit(trans, NULL, NULL, ret = bch2_trans_commit(trans, NULL, NULL,
BTREE_INSERT_NOFAIL|flags); BTREE_INSERT_NOFAIL|
BTREE_INSERT_USE_RESERVE|
flags);
err: err:
if (ret == -EINTR) if (ret == -EINTR)
goto retry; goto retry;
...@@ -1033,7 +1035,16 @@ static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, size_t ...@@ -1033,7 +1035,16 @@ static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, size_t
set_current_state(TASK_INTERRUPTIBLE); set_current_state(TASK_INTERRUPTIBLE);
spin_lock(&c->freelist_lock); spin_lock(&c->freelist_lock);
for (i = 0; i < RESERVE_NR; i++) for (i = 0; i < RESERVE_NR; i++) {
/*
* Don't strand buckets on the copygc freelist until
* after recovery is finished:
*/
if (!test_bit(BCH_FS_STARTED, &c->flags) &&
i == RESERVE_MOVINGGC)
continue;
if (fifo_push(&ca->free[i], bucket)) { if (fifo_push(&ca->free[i], bucket)) {
fifo_pop(&ca->free_inc, bucket); fifo_pop(&ca->free_inc, bucket);
...@@ -1043,6 +1054,7 @@ static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, size_t ...@@ -1043,6 +1054,7 @@ static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, size_t
spin_unlock(&c->freelist_lock); spin_unlock(&c->freelist_lock);
goto out; goto out;
} }
}
if (ca->allocator_state != ALLOCATOR_BLOCKED_FULL) { if (ca->allocator_state != ALLOCATOR_BLOCKED_FULL) {
ca->allocator_state = ALLOCATOR_BLOCKED_FULL; ca->allocator_state = ALLOCATOR_BLOCKED_FULL;
......
...@@ -860,6 +860,8 @@ int bch2_fs_start(struct bch_fs *c) ...@@ -860,6 +860,8 @@ int bch2_fs_start(struct bch_fs *c)
if (bch2_fs_init_fault("fs_start")) if (bch2_fs_init_fault("fs_start"))
goto err; goto err;
set_bit(BCH_FS_STARTED, &c->flags);
if (c->opts.read_only || c->opts.nochanges) { if (c->opts.read_only || c->opts.nochanges) {
bch2_fs_read_only(c); bch2_fs_read_only(c);
} else { } else {
...@@ -871,7 +873,6 @@ int bch2_fs_start(struct bch_fs *c) ...@@ -871,7 +873,6 @@ int bch2_fs_start(struct bch_fs *c)
goto err; goto err;
} }
set_bit(BCH_FS_STARTED, &c->flags);
print_mount_opts(c); print_mount_opts(c);
ret = 0; ret = 0;
out: out:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment