Commit 39fbc5a4 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: gc lock no longer needed for disk reservations

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 76f4c7b0
...@@ -1474,7 +1474,6 @@ static int __bch2_fs_allocator_start(struct bch_fs *c) ...@@ -1474,7 +1474,6 @@ static int __bch2_fs_allocator_start(struct bch_fs *c)
&journal_seq); &journal_seq);
fifo_push(&ca->free[RESERVE_BTREE], bu); fifo_push(&ca->free[RESERVE_BTREE], bu);
bucket_set_dirty(ca, bu);
} }
} }
......
...@@ -141,24 +141,23 @@ static int bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k, ...@@ -141,24 +141,23 @@ static int bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
bkey_for_each_ptr(ptrs, ptr) { bkey_for_each_ptr(ptrs, ptr) {
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev); struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
size_t b = PTR_BUCKET_NR(ca, ptr); struct bucket *g = PTR_BUCKET(ca, ptr, true);
struct bucket *g = PTR_BUCKET(ca, ptr);
if (mustfix_fsck_err_on(!g->gen_valid, c, if (mustfix_fsck_err_on(!g->gen_valid, c,
"found ptr with missing gen in alloc btree,\n" "found ptr with missing gen in alloc btree,\n"
"type %u gen %u", "type %u gen %u",
k.k->type, ptr->gen)) { k.k->type, ptr->gen)) {
g->_mark.gen = ptr->gen; g->_mark.gen = ptr->gen;
g->gen_valid = 1; g->_mark.dirty = true;
bucket_set_dirty(ca, b); g->gen_valid = 1;
} }
if (mustfix_fsck_err_on(gen_cmp(ptr->gen, g->mark.gen) > 0, c, if (mustfix_fsck_err_on(gen_cmp(ptr->gen, g->mark.gen) > 0, c,
"%u ptr gen in the future: %u > %u", "%u ptr gen in the future: %u > %u",
k.k->type, ptr->gen, g->mark.gen)) { k.k->type, ptr->gen, g->mark.gen)) {
g->_mark.gen = ptr->gen; g->_mark.gen = ptr->gen;
g->gen_valid = 1; g->_mark.dirty = true;
bucket_set_dirty(ca, b); g->gen_valid = 1;
set_bit(BCH_FS_FIXED_GENS, &c->flags); set_bit(BCH_FS_FIXED_GENS, &c->flags);
} }
} }
...@@ -166,8 +165,7 @@ static int bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k, ...@@ -166,8 +165,7 @@ static int bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
bkey_for_each_ptr(ptrs, ptr) { bkey_for_each_ptr(ptrs, ptr) {
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev); struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
size_t b = PTR_BUCKET_NR(ca, ptr); struct bucket *g = PTR_BUCKET(ca, ptr, true);
struct bucket *g = __bucket(ca, b, true);
if (gen_after(g->oldest_gen, ptr->gen)) if (gen_after(g->oldest_gen, ptr->gen))
g->oldest_gen = ptr->gen; g->oldest_gen = ptr->gen;
...@@ -646,13 +644,14 @@ static int bch2_gc_start(struct bch_fs *c) ...@@ -646,13 +644,14 @@ static int bch2_gc_start(struct bch_fs *c)
struct bch_dev *ca; struct bch_dev *ca;
unsigned i; unsigned i;
percpu_down_write(&c->mark_lock);
/* /*
* indicate to stripe code that we need to allocate for the gc stripes * indicate to stripe code that we need to allocate for the gc stripes
* radix tree, too * radix tree, too
*/ */
gc_pos_set(c, gc_phase(GC_PHASE_START)); gc_pos_set(c, gc_phase(GC_PHASE_START));
percpu_down_write(&c->mark_lock);
BUG_ON(c->usage[1]); BUG_ON(c->usage[1]);
c->usage[1] = __alloc_percpu_gfp(sizeof(struct bch_fs_usage) + c->usage[1] = __alloc_percpu_gfp(sizeof(struct bch_fs_usage) +
......
...@@ -490,7 +490,6 @@ enum btree_insert_ret { ...@@ -490,7 +490,6 @@ enum btree_insert_ret {
/* leaf node needs to be split */ /* leaf node needs to be split */
BTREE_INSERT_BTREE_NODE_FULL, BTREE_INSERT_BTREE_NODE_FULL,
BTREE_INSERT_ENOSPC, BTREE_INSERT_ENOSPC,
BTREE_INSERT_NEED_GC_LOCK,
BTREE_INSERT_NEED_MARK_REPLICAS, BTREE_INSERT_NEED_MARK_REPLICAS,
}; };
......
...@@ -484,7 +484,7 @@ static struct btree_reserve *bch2_btree_reserve_get(struct bch_fs *c, ...@@ -484,7 +484,7 @@ static struct btree_reserve *bch2_btree_reserve_get(struct bch_fs *c,
struct btree *b; struct btree *b;
struct disk_reservation disk_res = { 0, 0 }; struct disk_reservation disk_res = { 0, 0 };
unsigned sectors = nr_nodes * c->opts.btree_node_size; unsigned sectors = nr_nodes * c->opts.btree_node_size;
int ret, disk_res_flags = BCH_DISK_RESERVATION_GC_LOCK_HELD; int ret, disk_res_flags = 0;
if (flags & BTREE_INSERT_NOFAIL) if (flags & BTREE_INSERT_NOFAIL)
disk_res_flags |= BCH_DISK_RESERVATION_NOFAIL; disk_res_flags |= BCH_DISK_RESERVATION_NOFAIL;
...@@ -1947,8 +1947,7 @@ static void __bch2_btree_node_update_key(struct bch_fs *c, ...@@ -1947,8 +1947,7 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
ret = bch2_disk_reservation_add(c, &as->reserve->disk_res, ret = bch2_disk_reservation_add(c, &as->reserve->disk_res,
c->opts.btree_node_size * c->opts.btree_node_size *
bch2_bkey_nr_ptrs(bkey_i_to_s_c(&new_key->k_i)), bch2_bkey_nr_ptrs(bkey_i_to_s_c(&new_key->k_i)),
BCH_DISK_RESERVATION_NOFAIL| BCH_DISK_RESERVATION_NOFAIL);
BCH_DISK_RESERVATION_GC_LOCK_HELD);
BUG_ON(ret); BUG_ON(ret);
parent = btree_node_parent(iter, b); parent = btree_node_parent(iter, b);
......
...@@ -719,18 +719,6 @@ int __bch2_btree_insert_at(struct btree_insert *trans) ...@@ -719,18 +719,6 @@ int __bch2_btree_insert_at(struct btree_insert *trans)
ret = -EINTR; ret = -EINTR;
} }
break; break;
case BTREE_INSERT_NEED_GC_LOCK:
ret = -EINTR;
if (!down_read_trylock(&c->gc_lock)) {
if (flags & BTREE_INSERT_NOUNLOCK)
goto out;
bch2_btree_iter_unlock(trans->entries[0].iter);
down_read(&c->gc_lock);
}
up_read(&c->gc_lock);
break;
case BTREE_INSERT_ENOSPC: case BTREE_INSERT_ENOSPC:
ret = -ENOSPC; ret = -ENOSPC;
break; break;
......
...@@ -407,14 +407,14 @@ static inline void update_cached_sectors(struct bch_fs *c, ...@@ -407,14 +407,14 @@ static inline void update_cached_sectors(struct bch_fs *c,
} }
static void __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca, static void __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
size_t b, struct bucket_mark *old, size_t b, struct bucket_mark *ret,
bool gc) bool gc)
{ {
struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage[gc]); struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage[gc]);
struct bucket *g = __bucket(ca, b, gc); struct bucket *g = __bucket(ca, b, gc);
struct bucket_mark new; struct bucket_mark old, new;
*old = bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({ old = bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({
BUG_ON(!is_available_bucket(new)); BUG_ON(!is_available_bucket(new));
new.owned_by_allocator = true; new.owned_by_allocator = true;
...@@ -425,9 +425,12 @@ static void __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca, ...@@ -425,9 +425,12 @@ static void __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
new.gen++; new.gen++;
})); }));
if (old->cached_sectors) if (old.cached_sectors)
update_cached_sectors(c, fs_usage, ca->dev_idx, update_cached_sectors(c, fs_usage, ca->dev_idx,
-old->cached_sectors); -old.cached_sectors);
if (ret)
*ret = old;
} }
void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca, void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
...@@ -437,6 +440,9 @@ void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca, ...@@ -437,6 +440,9 @@ void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
__bch2_invalidate_bucket(c, ca, b, old, false); __bch2_invalidate_bucket(c, ca, b, old, false);
if (gc_visited(c, gc_phase(GC_PHASE_START)))
__bch2_invalidate_bucket(c, ca, b, NULL, true);
if (!old->owned_by_allocator && old->cached_sectors) if (!old->owned_by_allocator && old->cached_sectors)
trace_invalidate(ca, bucket_to_sector(ca, b), trace_invalidate(ca, bucket_to_sector(ca, b),
old->cached_sectors); old->cached_sectors);
...@@ -1091,24 +1097,8 @@ int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res, ...@@ -1091,24 +1097,8 @@ int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
return 0; return 0;
recalculate: recalculate:
/*
* GC recalculates sectors_available when it starts, so that hopefully
* we don't normally end up blocking here:
*/
/*
* Piss fuck, we can be called from extent_insert_fixup() with btree
* locks held:
*/
if (!(flags & BCH_DISK_RESERVATION_GC_LOCK_HELD)) {
if (!(flags & BCH_DISK_RESERVATION_BTREE_LOCKS_HELD))
down_read(&c->gc_lock);
else if (!down_read_trylock(&c->gc_lock))
return -EINTR;
}
percpu_down_write(&c->mark_lock); percpu_down_write(&c->mark_lock);
sectors_available = bch2_recalc_sectors_available(c); sectors_available = bch2_recalc_sectors_available(c);
if (sectors <= sectors_available || if (sectors <= sectors_available ||
...@@ -1125,9 +1115,6 @@ int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res, ...@@ -1125,9 +1115,6 @@ int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
percpu_up_write(&c->mark_lock); percpu_up_write(&c->mark_lock);
if (!(flags & BCH_DISK_RESERVATION_GC_LOCK_HELD))
up_read(&c->gc_lock);
return ret; return ret;
} }
......
...@@ -57,18 +57,6 @@ static inline struct bucket *bucket(struct bch_dev *ca, size_t b) ...@@ -57,18 +57,6 @@ static inline struct bucket *bucket(struct bch_dev *ca, size_t b)
return __bucket(ca, b, false); return __bucket(ca, b, false);
} }
static inline void bucket_set_dirty(struct bch_dev *ca, size_t b)
{
struct bucket *g;
struct bucket_mark m;
rcu_read_lock();
g = bucket(ca, b);
bucket_cmpxchg(g, m, m.dirty = true);
rcu_read_unlock();
}
static inline void bucket_io_clock_reset(struct bch_fs *c, struct bch_dev *ca, static inline void bucket_io_clock_reset(struct bch_fs *c, struct bch_dev *ca,
size_t b, int rw) size_t b, int rw)
{ {
...@@ -99,7 +87,8 @@ static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca, ...@@ -99,7 +87,8 @@ static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
} }
static inline struct bucket *PTR_BUCKET(struct bch_dev *ca, static inline struct bucket *PTR_BUCKET(struct bch_dev *ca,
const struct bch_extent_ptr *ptr) const struct bch_extent_ptr *ptr,
bool gc)
{ {
return bucket(ca, PTR_BUCKET_NR(ca, ptr)); return bucket(ca, PTR_BUCKET_NR(ca, ptr));
} }
...@@ -285,8 +274,6 @@ static inline void bch2_disk_reservation_put(struct bch_fs *c, ...@@ -285,8 +274,6 @@ static inline void bch2_disk_reservation_put(struct bch_fs *c,
} }
#define BCH_DISK_RESERVATION_NOFAIL (1 << 0) #define BCH_DISK_RESERVATION_NOFAIL (1 << 0)
#define BCH_DISK_RESERVATION_GC_LOCK_HELD (1 << 1)
#define BCH_DISK_RESERVATION_BTREE_LOCKS_HELD (1 << 2)
int bch2_disk_reservation_add(struct bch_fs *, int bch2_disk_reservation_add(struct bch_fs *,
struct disk_reservation *, struct disk_reservation *,
......
...@@ -979,10 +979,8 @@ bch2_extent_can_insert(struct btree_insert *trans, ...@@ -979,10 +979,8 @@ bch2_extent_can_insert(struct btree_insert *trans,
if (overlap == BCH_EXTENT_OVERLAP_MIDDLE && if (overlap == BCH_EXTENT_OVERLAP_MIDDLE &&
(sectors = bch2_extent_is_compressed(k))) { (sectors = bch2_extent_is_compressed(k))) {
int flags = BCH_DISK_RESERVATION_BTREE_LOCKS_HELD; int flags = trans->flags & BTREE_INSERT_NOFAIL
? BCH_DISK_RESERVATION_NOFAIL : 0;
if (trans->flags & BTREE_INSERT_NOFAIL)
flags |= BCH_DISK_RESERVATION_NOFAIL;
switch (bch2_disk_reservation_add(trans->c, switch (bch2_disk_reservation_add(trans->c,
trans->disk_res, trans->disk_res,
...@@ -991,8 +989,6 @@ bch2_extent_can_insert(struct btree_insert *trans, ...@@ -991,8 +989,6 @@ bch2_extent_can_insert(struct btree_insert *trans,
break; break;
case -ENOSPC: case -ENOSPC:
return BTREE_INSERT_ENOSPC; return BTREE_INSERT_ENOSPC;
case -EINTR:
return BTREE_INSERT_NEED_GC_LOCK;
default: default:
BUG(); BUG();
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment