Commit 39fbc5a4 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: gc lock no longer needed for disk reservations

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 76f4c7b0
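
Note: the scheme this commit relies on is that, while gc runs, every mark/usage update is mirrored into gc's shadow copy (usage[1] and the gc bucket arrays) under mark_lock, so disk reservations no longer need gc_lock to stay coherent with a concurrent gc; the hunks below remove that plumbing. A minimal standalone sketch of the mirroring idea, with toy names standing in for the real bcachefs structures:

#include <stdbool.h>
#include <stdio.h>

struct toy_usage { long long sectors_used; };

struct toy_fs {
	struct toy_usage usage[2];	/* [0] = live, [1] = gc's shadow copy */
	bool gc_running;
};

static void toy_mark(struct toy_fs *c, long long sectors, bool gc)
{
	c->usage[gc].sectors_used += sectors;
}

/* every marking path updates the live copy, and mirrors into gc's copy
 * while gc is running -- so gc's answer stays coherent without writers
 * ever taking a global gc lock */
static void toy_mark_all(struct toy_fs *c, long long sectors)
{
	toy_mark(c, sectors, false);
	if (c->gc_running)
		toy_mark(c, sectors, true);
}

int main(void)
{
	struct toy_fs c = { .gc_running = true };

	toy_mark_all(&c, 128);
	printf("live %lld, gc %lld\n",
	       c.usage[0].sectors_used, c.usage[1].sectors_used);
	return 0;
}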
@@ -1474,7 +1474,6 @@ static int __bch2_fs_allocator_start(struct bch_fs *c)
 						&journal_seq);
 
 			fifo_push(&ca->free[RESERVE_BTREE], bu);
-			bucket_set_dirty(ca, bu);
 		}
 	}
@@ -141,24 +141,23 @@ static int bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
 	bkey_for_each_ptr(ptrs, ptr) {
 		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
-		size_t b = PTR_BUCKET_NR(ca, ptr);
-		struct bucket *g = PTR_BUCKET(ca, ptr);
+		struct bucket *g = PTR_BUCKET(ca, ptr, true);
 
 		if (mustfix_fsck_err_on(!g->gen_valid, c,
 				"found ptr with missing gen in alloc btree,\n"
 				"type %u gen %u",
 				k.k->type, ptr->gen)) {
-			g->_mark.gen = ptr->gen;
-			g->gen_valid = 1;
-			bucket_set_dirty(ca, b);
+			g->_mark.gen = ptr->gen;
+			g->_mark.dirty = true;
+			g->gen_valid = 1;
 		}
 
 		if (mustfix_fsck_err_on(gen_cmp(ptr->gen, g->mark.gen) > 0, c,
 				"%u ptr gen in the future: %u > %u",
 				k.k->type, ptr->gen, g->mark.gen)) {
-			g->_mark.gen = ptr->gen;
-			g->gen_valid = 1;
-			bucket_set_dirty(ca, b);
+			g->_mark.gen = ptr->gen;
+			g->_mark.dirty = true;
+			g->gen_valid = 1;
 			set_bit(BCH_FS_FIXED_GENS, &c->flags);
 		}
 	}
@@ -166,8 +165,7 @@ static int bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
 	bkey_for_each_ptr(ptrs, ptr) {
 		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
-		size_t b = PTR_BUCKET_NR(ca, ptr);
-		struct bucket *g = __bucket(ca, b, true);
+		struct bucket *g = PTR_BUCKET(ca, ptr, true);
 
 		if (gen_after(g->oldest_gen, ptr->gen))
 			g->oldest_gen = ptr->gen;
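
Both loops above now look buckets up through PTR_BUCKET(ca, ptr, true), selecting gc's copy of the bucket array (the accessor gains its gc parameter in the buckets.h hunk further down). A compilable sketch of that two-array lookup, with illustrative toy types rather than the real bcachefs ones:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_bucket { unsigned gen; bool gen_valid; };

struct toy_dev {
	/* [0] = live bucket array, [1] = gc's copy */
	struct toy_bucket *buckets[2];
	size_t nbuckets;
};

/* analogous to __bucket()/PTR_BUCKET(..., gc): one accessor, the gc flag
 * picks which array the caller sees */
static struct toy_bucket *toy_bucket(struct toy_dev *ca, size_t b, bool gc)
{
	return &ca->buckets[gc][b];
}

int main(void)
{
	struct toy_dev ca = { .nbuckets = 8 };

	ca.buckets[0] = calloc(ca.nbuckets, sizeof(struct toy_bucket));
	ca.buckets[1] = calloc(ca.nbuckets, sizeof(struct toy_bucket));

	/* gc fixing up a gen in its own copy, as in the fsck_err paths above */
	struct toy_bucket *g = toy_bucket(&ca, 3, true);
	g->gen = 7;
	g->gen_valid = true;

	printf("live gen %u, gc gen %u\n",
	       toy_bucket(&ca, 3, false)->gen, toy_bucket(&ca, 3, true)->gen);

	free(ca.buckets[0]);
	free(ca.buckets[1]);
	return 0;
}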
@@ -646,13 +644,14 @@ static int bch2_gc_start(struct bch_fs *c)
 	struct bch_dev *ca;
 	unsigned i;
 
-	percpu_down_write(&c->mark_lock);
 	/*
 	 * indicate to stripe code that we need to allocate for the gc stripes
 	 * radix tree, too
 	 */
 	gc_pos_set(c, gc_phase(GC_PHASE_START));
 
+	percpu_down_write(&c->mark_lock);
+
 	BUG_ON(c->usage[1]);
 
 	c->usage[1] = __alloc_percpu_gfp(sizeof(struct bch_fs_usage) +
@@ -490,7 +490,6 @@ enum btree_insert_ret {
 	/* leaf node needs to be split */
 	BTREE_INSERT_BTREE_NODE_FULL,
 	BTREE_INSERT_ENOSPC,
-	BTREE_INSERT_NEED_GC_LOCK,
 	BTREE_INSERT_NEED_MARK_REPLICAS,
 };
@@ -484,7 +484,7 @@ static struct btree_reserve *bch2_btree_reserve_get(struct bch_fs *c,
 	struct btree *b;
 	struct disk_reservation disk_res = { 0, 0 };
 	unsigned sectors = nr_nodes * c->opts.btree_node_size;
-	int ret, disk_res_flags = BCH_DISK_RESERVATION_GC_LOCK_HELD;
+	int ret, disk_res_flags = 0;
 
 	if (flags & BTREE_INSERT_NOFAIL)
 		disk_res_flags |= BCH_DISK_RESERVATION_NOFAIL;
@@ -1947,8 +1947,7 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
 	ret = bch2_disk_reservation_add(c, &as->reserve->disk_res,
 			c->opts.btree_node_size *
 			bch2_bkey_nr_ptrs(bkey_i_to_s_c(&new_key->k_i)),
-			BCH_DISK_RESERVATION_NOFAIL|
-			BCH_DISK_RESERVATION_GC_LOCK_HELD);
+			BCH_DISK_RESERVATION_NOFAIL);
 	BUG_ON(ret);
 
 	parent = btree_node_parent(iter, b);
@@ -719,18 +719,6 @@ int __bch2_btree_insert_at(struct btree_insert *trans)
 			ret = -EINTR;
 		}
 		break;
-	case BTREE_INSERT_NEED_GC_LOCK:
-		ret = -EINTR;
-
-		if (!down_read_trylock(&c->gc_lock)) {
-			if (flags & BTREE_INSERT_NOUNLOCK)
-				goto out;
-
-			bch2_btree_iter_unlock(trans->entries[0].iter);
-			down_read(&c->gc_lock);
-		}
-		up_read(&c->gc_lock);
-		break;
 	case BTREE_INSERT_ENOSPC:
 		ret = -ENOSPC;
 		break;
@@ -407,14 +407,14 @@ static inline void update_cached_sectors(struct bch_fs *c,
 }
 
 static void __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
-				     size_t b, struct bucket_mark *old,
+				     size_t b, struct bucket_mark *ret,
 				     bool gc)
 {
 	struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage[gc]);
 	struct bucket *g = __bucket(ca, b, gc);
-	struct bucket_mark new;
+	struct bucket_mark old, new;
 
-	*old = bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({
+	old = bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({
 		BUG_ON(!is_available_bucket(new));
 
 		new.owned_by_allocator	= true;
@@ -425,9 +425,12 @@ static void __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
 		new.gen++;
 	}));
 
-	if (old->cached_sectors)
+	if (old.cached_sectors)
 		update_cached_sectors(c, fs_usage, ca->dev_idx,
-				      -old->cached_sectors);
+				      -old.cached_sectors);
+
+	if (ret)
+		*ret = old;
 }
 
 void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
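
__bch2_invalidate_bucket now computes the old mark into a local and copies it out only when the caller supplied somewhere to put it, which is what lets the gc mirror call added in the next hunk pass NULL. A small standalone sketch of that optional out-parameter pattern (toy types, not the kernel helpers):

#include <stddef.h>
#include <stdio.h>

struct toy_mark { int cached_sectors; };

/* work on a local copy of the old state; 'ret' is an optional output */
static void toy_invalidate(struct toy_mark *cur, struct toy_mark *ret)
{
	struct toy_mark old = *cur;

	cur->cached_sectors = 0;

	if (old.cached_sectors)
		printf("releasing %d cached sectors\n", old.cached_sectors);

	if (ret)
		*ret = old;
}

int main(void)
{
	struct toy_mark live = { .cached_sectors = 16 };
	struct toy_mark gc_copy = { .cached_sectors = 16 };
	struct toy_mark old;

	toy_invalidate(&live, &old);	/* caller wants the old mark back */
	toy_invalidate(&gc_copy, NULL);	/* the gc mirror call doesn't */

	printf("old mark had %d cached sectors\n", old.cached_sectors);
	return 0;
}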
@@ -437,6 +440,9 @@ void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
 	__bch2_invalidate_bucket(c, ca, b, old, false);
 
+	if (gc_visited(c, gc_phase(GC_PHASE_START)))
+		__bch2_invalidate_bucket(c, ca, b, NULL, true);
+
 	if (!old->owned_by_allocator && old->cached_sectors)
 		trace_invalidate(ca, bucket_to_sector(ca, b),
 				 old->cached_sectors);
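
The gc_visited() check added above mirrors the update into gc's copy only when gc's walk has already passed this position; anything gc hasn't reached yet will be picked up when gc marks it itself. A toy model of that position test, with plain integers standing in for struct gc_pos:

#include <stdbool.h>
#include <stdio.h>

/* gc walks positions in increasing order; cur is how far it has got */
struct toy_gc { int cur; bool running; };

static bool toy_gc_visited(const struct toy_gc *gc, int pos)
{
	return gc->running && pos < gc->cur;
}

static void toy_update_bucket(struct toy_gc *gc, int pos)
{
	/* always update the live copy ... */
	printf("update live state at pos %d\n", pos);

	/* ... and mirror into gc's copy only behind gc's position */
	if (toy_gc_visited(gc, pos))
		printf("mirror into gc copy at pos %d\n", pos);
}

int main(void)
{
	struct toy_gc gc = { .cur = 10, .running = true };

	toy_update_bucket(&gc, 5);	/* behind gc: mirrored */
	toy_update_bucket(&gc, 15);	/* ahead of gc: gc will see it itself */
	return 0;
}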
@@ -1091,24 +1097,8 @@ int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
 	return 0;
 
 recalculate:
-	/*
-	 * GC recalculates sectors_available when it starts, so that hopefully
-	 * we don't normally end up blocking here:
-	 */
-
-	/*
-	 * Piss fuck, we can be called from extent_insert_fixup() with btree
-	 * locks held:
-	 */
-
-	if (!(flags & BCH_DISK_RESERVATION_GC_LOCK_HELD)) {
-		if (!(flags & BCH_DISK_RESERVATION_BTREE_LOCKS_HELD))
-			down_read(&c->gc_lock);
-		else if (!down_read_trylock(&c->gc_lock))
-			return -EINTR;
-	}
-
 	percpu_down_write(&c->mark_lock);
 	sectors_available = bch2_recalc_sectors_available(c);
 
 	if (sectors <= sectors_available ||
@@ -1125,9 +1115,6 @@ int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
 	percpu_up_write(&c->mark_lock);
 
-	if (!(flags & BCH_DISK_RESERVATION_GC_LOCK_HELD))
-		up_read(&c->gc_lock);
-
 	return ret;
 }
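
With these two hunks, the bch2_disk_reservation_add slowpath recalculates sectors_available under mark_lock alone: since gc now maintains its own shadow counters, there is no gc state left to exclude. A standalone sketch of the fastpath/slowpath shape (a single-threaded toy that ignores the percpu counters and locking of the real code):

#include <stdbool.h>
#include <stdio.h>

struct toy_fs {
	long long capacity;
	long long used;
	long long sectors_available;	/* cached running estimate */
};

static long long toy_recalc_sectors_available(struct toy_fs *c)
{
	return c->capacity - c->used;
}

static int toy_reservation_add(struct toy_fs *c, long long sectors, bool nofail)
{
	/* fastpath: spend from the cached estimate */
	if (sectors <= c->sectors_available)
		goto got;

	/* slowpath ("recalculate:"): rebuild the estimate from the
	 * authoritative counters -- done under mark_lock in the real code */
	c->sectors_available = toy_recalc_sectors_available(c);

	/* NOFAIL reservations are allowed to overshoot */
	if (sectors > c->sectors_available && !nofail)
		return -1;	/* -ENOSPC */
got:
	c->sectors_available -= sectors;
	c->used += sectors;
	return 0;
}

int main(void)
{
	struct toy_fs c = { .capacity = 1000 };

	printf("reserve 100: %d\n", toy_reservation_add(&c, 100, false));
	printf("reserve 2000: %d\n", toy_reservation_add(&c, 2000, false));
	return 0;
}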
@@ -57,18 +57,6 @@ static inline struct bucket *bucket(struct bch_dev *ca, size_t b)
 	return __bucket(ca, b, false);
 }
 
-static inline void bucket_set_dirty(struct bch_dev *ca, size_t b)
-{
-	struct bucket *g;
-	struct bucket_mark m;
-
-	rcu_read_lock();
-	g = bucket(ca, b);
-	bucket_cmpxchg(g, m, m.dirty = true);
-	rcu_read_unlock();
-}
-
 static inline void bucket_io_clock_reset(struct bch_fs *c, struct bch_dev *ca,
 					 size_t b, int rw)
 {
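
bucket_set_dirty() can go away because the dirty flag lives in the bucket mark itself and is now set together with the other fields (g->_mark.dirty = true in the gc hunk above) instead of via a separate cmpxchg pass. A sketch of that fold-the-flag-into-one-atomic-word pattern using C11 atomics; the types are toys, though the real bucket_mark is likewise a union of bitfields over a word updated with cmpxchg:

#include <stdatomic.h>
#include <stdio.h>

/* toy stand-in for struct bucket_mark: bitfields overlaid on one word */
union toy_mark {
	struct {
		unsigned gen   : 8;
		unsigned dirty : 1;
	};
	unsigned v;
};

struct toy_bucket { _Atomic unsigned mark; };

/* update gen and dirty in a single compare-exchange loop, instead of
 * fixing gen first and then running a separate bucket_set_dirty() */
static void toy_fix_gen(struct toy_bucket *g, unsigned gen)
{
	unsigned old = atomic_load(&g->mark);
	union toy_mark new;

	do {
		new.v = old;
		new.gen = gen;
		new.dirty = 1;
	} while (!atomic_compare_exchange_weak(&g->mark, &old, new.v));
}

int main(void)
{
	struct toy_bucket g = { .mark = 0 };
	union toy_mark m;

	toy_fix_gen(&g, 42);
	m.v = atomic_load(&g.mark);
	printf("gen %u dirty %u\n", m.gen, (unsigned)m.dirty);
	return 0;
}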
@@ -99,7 +87,8 @@ static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
 }
 
 static inline struct bucket *PTR_BUCKET(struct bch_dev *ca,
-					const struct bch_extent_ptr *ptr)
+					const struct bch_extent_ptr *ptr,
+					bool gc)
 {
-	return bucket(ca, PTR_BUCKET_NR(ca, ptr));
+	return __bucket(ca, PTR_BUCKET_NR(ca, ptr), gc);
 }
@@ -285,8 +274,6 @@ static inline void bch2_disk_reservation_put(struct bch_fs *c,
 }
 
 #define BCH_DISK_RESERVATION_NOFAIL		(1 << 0)
-#define BCH_DISK_RESERVATION_GC_LOCK_HELD	(1 << 1)
-#define BCH_DISK_RESERVATION_BTREE_LOCKS_HELD	(1 << 2)
 
 int bch2_disk_reservation_add(struct bch_fs *,
 			      struct disk_reservation *,
@@ -979,10 +979,8 @@ bch2_extent_can_insert(struct btree_insert *trans,
 	if (overlap == BCH_EXTENT_OVERLAP_MIDDLE &&
 	    (sectors = bch2_extent_is_compressed(k))) {
-		int flags = BCH_DISK_RESERVATION_BTREE_LOCKS_HELD;
-
-		if (trans->flags & BTREE_INSERT_NOFAIL)
-			flags |= BCH_DISK_RESERVATION_NOFAIL;
+		int flags = trans->flags & BTREE_INSERT_NOFAIL
+			? BCH_DISK_RESERVATION_NOFAIL : 0;
 
 		switch (bch2_disk_reservation_add(trans->c,
 						  trans->disk_res,
@@ -991,8 +989,6 @@ bch2_extent_can_insert(struct btree_insert *trans,
 			break;
 		case -ENOSPC:
 			return BTREE_INSERT_ENOSPC;
-		case -EINTR:
-			return BTREE_INSERT_NEED_GC_LOCK;
 		default:
 			BUG();
 		}