Commit 3c5d0b72 authored by Kent Overstreet

bcachefs: fix failure to relock in bch2_btree_node_mem_alloc()

We weren't always so strict about trans->locked state - but now we are,
and new assertions are shaking some bugs out.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 1dceae4c
...@@ -159,6 +159,16 @@ struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c) ...@@ -159,6 +159,16 @@ struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
return b; return b;
} }
/*
 * Return a node to the btree cache's freeable list and release it.
 *
 * The caller holds the node's six lock in write (and intent) mode; the node
 * is moved onto c->btree_cache.freeable under btree_cache.lock, and only
 * then are the write and intent locks dropped — so no other thread can
 * observe the node before it is safely on the freelist.
 */
void bch2_btree_node_to_freelist(struct bch_fs *c, struct btree *b)
{
	/* freeable-list membership is protected by btree_cache.lock */
	mutex_lock(&c->btree_cache.lock);
	list_move(&b->list, &c->btree_cache.freeable);
	mutex_unlock(&c->btree_cache.lock);
	/* drop write before intent: intent is the outer level of the six lock */
	six_unlock_write(&b->c.lock);
	six_unlock_intent(&b->c.lock);
}
/* Btree in memory cache - hash table */ /* Btree in memory cache - hash table */
void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b) void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
...@@ -736,6 +746,13 @@ struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_rea ...@@ -736,6 +746,13 @@ struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_rea
start_time); start_time);
memalloc_nofs_restore(flags); memalloc_nofs_restore(flags);
int ret = bch2_trans_relock(trans);
if (unlikely(ret)) {
bch2_btree_node_to_freelist(c, b);
return ERR_PTR(ret);
}
return b; return b;
err: err:
mutex_lock(&bc->lock); mutex_lock(&bc->lock);
......
...@@ -12,6 +12,8 @@ struct btree_iter; ...@@ -12,6 +12,8 @@ struct btree_iter;
void bch2_recalc_btree_reserve(struct bch_fs *); void bch2_recalc_btree_reserve(struct bch_fs *);
void bch2_btree_node_to_freelist(struct bch_fs *, struct btree *);
void bch2_btree_node_hash_remove(struct btree_cache *, struct btree *); void bch2_btree_node_hash_remove(struct btree_cache *, struct btree *);
int __bch2_btree_node_hash_insert(struct btree_cache *, struct btree *); int __bch2_btree_node_hash_insert(struct btree_cache *, struct btree *);
int bch2_btree_node_hash_insert(struct btree_cache *, struct btree *, int bch2_btree_node_hash_insert(struct btree_cache *, struct btree *,
......
...@@ -317,6 +317,12 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans, ...@@ -317,6 +317,12 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
: 0; : 0;
int ret; int ret;
b = bch2_btree_node_mem_alloc(trans, interior_node);
if (IS_ERR(b))
return b;
BUG_ON(b->ob.nr);
mutex_lock(&c->btree_reserve_cache_lock); mutex_lock(&c->btree_reserve_cache_lock);
if (c->btree_reserve_cache_nr > nr_reserve) { if (c->btree_reserve_cache_nr > nr_reserve) {
struct btree_alloc *a = struct btree_alloc *a =
...@@ -325,10 +331,9 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans, ...@@ -325,10 +331,9 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
obs = a->ob; obs = a->ob;
bkey_copy(&tmp.k, &a->k); bkey_copy(&tmp.k, &a->k);
mutex_unlock(&c->btree_reserve_cache_lock); mutex_unlock(&c->btree_reserve_cache_lock);
goto mem_alloc; goto out;
} }
mutex_unlock(&c->btree_reserve_cache_lock); mutex_unlock(&c->btree_reserve_cache_lock);
retry: retry:
ret = bch2_alloc_sectors_start_trans(trans, ret = bch2_alloc_sectors_start_trans(trans,
c->opts.metadata_target ?: c->opts.metadata_target ?:
...@@ -341,7 +346,7 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans, ...@@ -341,7 +346,7 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
c->opts.metadata_replicas_required), c->opts.metadata_replicas_required),
watermark, 0, cl, &wp); watermark, 0, cl, &wp);
if (unlikely(ret)) if (unlikely(ret))
return ERR_PTR(ret); goto err;
if (wp->sectors_free < btree_sectors(c)) { if (wp->sectors_free < btree_sectors(c)) {
struct open_bucket *ob; struct open_bucket *ob;
...@@ -360,19 +365,16 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans, ...@@ -360,19 +365,16 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
bch2_open_bucket_get(c, wp, &obs); bch2_open_bucket_get(c, wp, &obs);
bch2_alloc_sectors_done(c, wp); bch2_alloc_sectors_done(c, wp);
mem_alloc: out:
b = bch2_btree_node_mem_alloc(trans, interior_node);
six_unlock_write(&b->c.lock);
six_unlock_intent(&b->c.lock);
/* we hold cannibalize_lock: */
BUG_ON(IS_ERR(b));
BUG_ON(b->ob.nr);
bkey_copy(&b->key, &tmp.k); bkey_copy(&b->key, &tmp.k);
b->ob = obs; b->ob = obs;
six_unlock_write(&b->c.lock);
six_unlock_intent(&b->c.lock);
return b; return b;
err:
bch2_btree_node_to_freelist(c, b);
return ERR_PTR(ret);
} }
static struct btree *bch2_btree_node_alloc(struct btree_update *as, static struct btree *bch2_btree_node_alloc(struct btree_update *as,
...@@ -2439,6 +2441,9 @@ int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *ite ...@@ -2439,6 +2441,9 @@ int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *ite
} }
new_hash = bch2_btree_node_mem_alloc(trans, false); new_hash = bch2_btree_node_mem_alloc(trans, false);
ret = PTR_ERR_OR_ZERO(new_hash);
if (ret)
goto err;
} }
path->intent_ref++; path->intent_ref++;
...@@ -2446,14 +2451,9 @@ int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *ite ...@@ -2446,14 +2451,9 @@ int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *ite
commit_flags, skip_triggers); commit_flags, skip_triggers);
--path->intent_ref; --path->intent_ref;
if (new_hash) { if (new_hash)
mutex_lock(&c->btree_cache.lock); bch2_btree_node_to_freelist(c, new_hash);
list_move(&new_hash->list, &c->btree_cache.freeable); err:
mutex_unlock(&c->btree_cache.lock);
six_unlock_write(&new_hash->c.lock);
six_unlock_intent(&new_hash->c.lock);
}
closure_sync(&cl); closure_sync(&cl);
bch2_btree_cache_cannibalize_unlock(trans); bch2_btree_cache_cannibalize_unlock(trans);
return ret; return ret;
...@@ -2522,6 +2522,10 @@ int bch2_btree_root_alloc_fake_trans(struct btree_trans *trans, enum btree_id id ...@@ -2522,6 +2522,10 @@ int bch2_btree_root_alloc_fake_trans(struct btree_trans *trans, enum btree_id id
b = bch2_btree_node_mem_alloc(trans, false); b = bch2_btree_node_mem_alloc(trans, false);
bch2_btree_cache_cannibalize_unlock(trans); bch2_btree_cache_cannibalize_unlock(trans);
ret = PTR_ERR_OR_ZERO(b);
if (ret)
return ret;
set_btree_node_fake(b); set_btree_node_fake(b);
set_btree_node_need_rewrite(b); set_btree_node_need_rewrite(b);
b->c.level = level; b->c.level = level;
...@@ -2553,7 +2557,7 @@ int bch2_btree_root_alloc_fake_trans(struct btree_trans *trans, enum btree_id id ...@@ -2553,7 +2557,7 @@ int bch2_btree_root_alloc_fake_trans(struct btree_trans *trans, enum btree_id id
void bch2_btree_root_alloc_fake(struct bch_fs *c, enum btree_id id, unsigned level) void bch2_btree_root_alloc_fake(struct bch_fs *c, enum btree_id id, unsigned level)
{ {
bch2_trans_run(c, bch2_btree_root_alloc_fake_trans(trans, id, level)); bch2_trans_run(c, lockrestart_do(trans, bch2_btree_root_alloc_fake_trans(trans, id, level)));
} }
static void bch2_btree_update_to_text(struct printbuf *out, struct btree_update *as) static void bch2_btree_update_to_text(struct printbuf *out, struct btree_update *as)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment