Commit 103e2127 authored by Kent Overstreet; committed by Kent Overstreet

bcachefs: replicas: prep work for stripes

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 7a920560
...@@ -155,10 +155,10 @@ static int bch2_btree_mark_ptrs_initial(struct bch_fs *c, enum bkey_type type, ...@@ -155,10 +155,10 @@ static int bch2_btree_mark_ptrs_initial(struct bch_fs *c, enum bkey_type type,
k.k->version.lo > journal_cur_seq(&c->journal)); k.k->version.lo > journal_cur_seq(&c->journal));
if (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) || if (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
fsck_err_on(!bch2_bkey_replicas_marked(c, data_type, k), c, fsck_err_on(!bch2_bkey_replicas_marked(c, type, k), c,
"superblock not marked as containing replicas (type %u)", "superblock not marked as containing replicas (type %u)",
data_type)) { data_type)) {
ret = bch2_mark_bkey_replicas(c, data_type, k); ret = bch2_mark_bkey_replicas(c, type, k);
if (ret) if (ret)
return ret; return ret;
} }
......
...@@ -551,7 +551,7 @@ static struct btree_reserve *bch2_btree_reserve_get(struct bch_fs *c, ...@@ -551,7 +551,7 @@ static struct btree_reserve *bch2_btree_reserve_get(struct bch_fs *c,
goto err_free; goto err_free;
} }
ret = bch2_mark_bkey_replicas(c, BCH_DATA_BTREE, ret = bch2_mark_bkey_replicas(c, BKEY_TYPE_BTREE,
bkey_i_to_s_c(&b->key)); bkey_i_to_s_c(&b->key));
if (ret) if (ret)
goto err_free; goto err_free;
...@@ -2063,7 +2063,7 @@ int bch2_btree_node_update_key(struct bch_fs *c, struct btree_iter *iter, ...@@ -2063,7 +2063,7 @@ int bch2_btree_node_update_key(struct bch_fs *c, struct btree_iter *iter,
goto err; goto err;
} }
ret = bch2_mark_bkey_replicas(c, BCH_DATA_BTREE, ret = bch2_mark_bkey_replicas(c, BKEY_TYPE_BTREE,
extent_i_to_s_c(new_key).s_c); extent_i_to_s_c(new_key).s_c);
if (ret) if (ret)
goto err_free_update; goto err_free_update;
......
...@@ -648,7 +648,7 @@ void bch2_btree_ptr_debugcheck(struct bch_fs *c, struct btree *b, ...@@ -648,7 +648,7 @@ void bch2_btree_ptr_debugcheck(struct bch_fs *c, struct btree *b,
goto err; goto err;
} }
if (!bch2_bkey_replicas_marked(c, BCH_DATA_BTREE, e.s_c)) { if (!bch2_bkey_replicas_marked(c, btree_node_type(b), e.s_c)) {
bch2_bkey_val_to_text(c, btree_node_type(b), bch2_bkey_val_to_text(c, btree_node_type(b),
buf, sizeof(buf), k); buf, sizeof(buf), k);
bch2_fs_bug(c, bch2_fs_bug(c,
...@@ -1681,8 +1681,7 @@ static void bch2_extent_debugcheck_extent(struct bch_fs *c, struct btree *b, ...@@ -1681,8 +1681,7 @@ static void bch2_extent_debugcheck_extent(struct bch_fs *c, struct btree *b,
return; return;
} }
if (!bkey_extent_is_cached(e.k) && if (!bch2_bkey_replicas_marked(c, btree_node_type(b), e.s_c)) {
!bch2_bkey_replicas_marked(c, BCH_DATA_USER, e.s_c)) {
bch2_bkey_val_to_text(c, btree_node_type(b), bch2_bkey_val_to_text(c, btree_node_type(b),
buf, sizeof(buf), e.s_c); buf, sizeof(buf), e.s_c);
bch2_fs_bug(c, bch2_fs_bug(c,
......
...@@ -337,7 +337,8 @@ static void __bch2_write_index(struct bch_write_op *op) ...@@ -337,7 +337,8 @@ static void __bch2_write_index(struct bch_write_op *op)
} }
if (!(op->flags & BCH_WRITE_NOMARK_REPLICAS)) { if (!(op->flags & BCH_WRITE_NOMARK_REPLICAS)) {
ret = bch2_mark_bkey_replicas(c, BCH_DATA_USER, e.s_c); ret = bch2_mark_bkey_replicas(c, BKEY_TYPE_EXTENTS,
e.s_c);
if (ret) if (ret)
goto err; goto err;
} }
......
...@@ -51,7 +51,7 @@ static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags) ...@@ -51,7 +51,7 @@ static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
!(ret = btree_iter_err(k))) { !(ret = btree_iter_err(k))) {
if (!bkey_extent_is_data(k.k) || if (!bkey_extent_is_data(k.k) ||
!bch2_extent_has_device(bkey_s_c_to_extent(k), dev_idx)) { !bch2_extent_has_device(bkey_s_c_to_extent(k), dev_idx)) {
ret = bch2_mark_bkey_replicas(c, BCH_DATA_USER, k); ret = bch2_mark_bkey_replicas(c, BKEY_TYPE_EXTENTS, k);
if (ret) if (ret)
break; break;
bch2_btree_iter_next(&iter); bch2_btree_iter_next(&iter);
...@@ -72,7 +72,7 @@ static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags) ...@@ -72,7 +72,7 @@ static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
*/ */
bch2_extent_normalize(c, e.s); bch2_extent_normalize(c, e.s);
ret = bch2_mark_bkey_replicas(c, BCH_DATA_USER, ret = bch2_mark_bkey_replicas(c, BKEY_TYPE_EXTENTS,
bkey_i_to_s_c(&tmp.key)); bkey_i_to_s_c(&tmp.key));
if (ret) if (ret)
break; break;
...@@ -135,7 +135,7 @@ static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags) ...@@ -135,7 +135,7 @@ static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
*/ */
bch2_btree_iter_downgrade(&iter); bch2_btree_iter_downgrade(&iter);
ret = bch2_mark_bkey_replicas(c, BCH_DATA_BTREE, ret = bch2_mark_bkey_replicas(c, BKEY_TYPE_BTREE,
bkey_i_to_s_c(&b->key)); bkey_i_to_s_c(&b->key));
if (ret) if (ret)
goto err; goto err;
......
...@@ -149,7 +149,7 @@ static int bch2_migrate_index_update(struct bch_write_op *op) ...@@ -149,7 +149,7 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
goto next; goto next;
} }
ret = bch2_mark_bkey_replicas(c, BCH_DATA_USER, ret = bch2_mark_bkey_replicas(c, BKEY_TYPE_EXTENTS,
extent_i_to_s_c(insert).s_c); extent_i_to_s_c(insert).s_c);
if (ret) if (ret)
break; break;
...@@ -600,7 +600,7 @@ static int bch2_gc_data_replicas(struct bch_fs *c) ...@@ -600,7 +600,7 @@ static int bch2_gc_data_replicas(struct bch_fs *c)
for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS_MIN, for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS_MIN,
BTREE_ITER_PREFETCH, k) { BTREE_ITER_PREFETCH, k) {
ret = bch2_mark_bkey_replicas(c, BCH_DATA_USER, k); ret = bch2_mark_bkey_replicas(c, BKEY_TYPE_EXTENTS, k);
if (ret) if (ret)
break; break;
} }
...@@ -624,7 +624,7 @@ static int bch2_gc_btree_replicas(struct bch_fs *c) ...@@ -624,7 +624,7 @@ static int bch2_gc_btree_replicas(struct bch_fs *c)
for (id = 0; id < BTREE_ID_NR; id++) { for (id = 0; id < BTREE_ID_NR; id++) {
for_each_btree_node(&iter, c, id, POS_MIN, BTREE_ITER_PREFETCH, b) { for_each_btree_node(&iter, c, id, POS_MIN, BTREE_ITER_PREFETCH, b) {
ret = bch2_mark_bkey_replicas(c, BCH_DATA_BTREE, ret = bch2_mark_bkey_replicas(c, BKEY_TYPE_BTREE,
bkey_i_to_s_c(&b->key)); bkey_i_to_s_c(&b->key));
bch2_btree_iter_cond_resched(&iter); bch2_btree_iter_cond_resched(&iter);
......
...@@ -74,6 +74,42 @@ int bch2_cpu_replicas_to_text(struct bch_replicas_cpu *r, ...@@ -74,6 +74,42 @@ int bch2_cpu_replicas_to_text(struct bch_replicas_cpu *r,
return out - buf; return out - buf;
} }
/*
 * Append the devices holding dirty (non-cached) pointers of an extent key
 * to the replicas entry @r, advancing r->nr_devs for each one.
 *
 * Non-data keys are ignored; cached pointers are deliberately skipped,
 * since cached copies are tracked separately as BCH_DATA_CACHED.
 * NOTE(review): assumes the caller reset r->nr_devs beforehand — confirm
 * against callers (bkey_to_replicas does).
 */
static void extent_to_replicas(struct bkey_s_c k,
			       struct bch_replicas_entry *r)
{
	struct bkey_s_c_extent e;
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	if (!bkey_extent_is_data(k.k))
		return;

	e = bkey_s_c_to_extent(k);

	extent_for_each_ptr_decode(e, p, entry)
		if (!p.ptr.cached)
			r->devs[r->nr_devs++] = p.ptr.dev;
}
/*
 * Build a sorted replicas entry @e describing the dirty devices of key @k.
 *
 * The bkey type selects the data type recorded in the entry: btree node
 * keys are marked BCH_DATA_BTREE, extent keys BCH_DATA_USER.  Any other
 * key type yields an empty entry (nr_devs == 0), which callers treat as
 * "nothing to mark".
 */
static void bkey_to_replicas(enum bkey_type type,
			     struct bkey_s_c k,
			     struct bch_replicas_entry *e)
{
	e->nr_devs = 0;

	if (type == BKEY_TYPE_BTREE) {
		e->data_type = BCH_DATA_BTREE;
		extent_to_replicas(k, e);
	} else if (type == BKEY_TYPE_EXTENTS) {
		e->data_type = BCH_DATA_USER;
		extent_to_replicas(k, e);
	}

	replicas_entry_sort(e);
}
static inline void devlist_to_replicas(struct bch_devs_list devs, static inline void devlist_to_replicas(struct bch_devs_list devs,
enum bch_data_type data_type, enum bch_data_type data_type,
struct bch_replicas_entry *e) struct bch_replicas_entry *e)
...@@ -189,13 +225,28 @@ static int bch2_mark_replicas_slowpath(struct bch_fs *c, ...@@ -189,13 +225,28 @@ static int bch2_mark_replicas_slowpath(struct bch_fs *c,
return ret; return ret;
} }
/*
 * Ensure @devs is present in the filesystem's replicas tables.
 *
 * Fast path: under rcu_read_lock(), check the current replicas table and
 * (if a GC pass is in flight, i.e. c->replicas_gc is non-NULL) the GC
 * table as well.  If the entry is already present in both, nothing to do.
 * Otherwise fall back to bch2_mark_replicas_slowpath() to insert it.
 *
 * Returns 0 on success or an error from the slowpath.
 */
static int __bch2_mark_replicas(struct bch_fs *c,
				struct bch_replicas_entry *devs)
{
	struct bch_replicas_cpu *cur, *gc;
	bool found;

	rcu_read_lock();
	cur	= rcu_dereference(c->replicas);
	gc	= rcu_dereference(c->replicas_gc);
	found	= replicas_has_entry(cur, devs) &&
		  (!likely(gc) || replicas_has_entry(gc, devs));
	rcu_read_unlock();

	if (likely(found))
		return 0;

	return bch2_mark_replicas_slowpath(c, devs);
}
int bch2_mark_replicas(struct bch_fs *c, int bch2_mark_replicas(struct bch_fs *c,
enum bch_data_type data_type, enum bch_data_type data_type,
struct bch_devs_list devs) struct bch_devs_list devs)
{ {
struct bch_replicas_entry_padded search; struct bch_replicas_entry_padded search;
struct bch_replicas_cpu *r, *gc_r;
bool marked;
if (!devs.nr) if (!devs.nr)
return 0; return 0;
...@@ -206,31 +257,31 @@ int bch2_mark_replicas(struct bch_fs *c, ...@@ -206,31 +257,31 @@ int bch2_mark_replicas(struct bch_fs *c,
devlist_to_replicas(devs, data_type, &search.e); devlist_to_replicas(devs, data_type, &search.e);
rcu_read_lock(); return __bch2_mark_replicas(c, &search.e);
r = rcu_dereference(c->replicas);
gc_r = rcu_dereference(c->replicas_gc);
marked = replicas_has_entry(r, &search.e) &&
(!likely(gc_r) || replicas_has_entry(gc_r, &search.e));
rcu_read_unlock();
return likely(marked) ? 0
: bch2_mark_replicas_slowpath(c, &search.e);
} }
int bch2_mark_bkey_replicas(struct bch_fs *c, int bch2_mark_bkey_replicas(struct bch_fs *c,
enum bch_data_type data_type, enum bkey_type type,
struct bkey_s_c k) struct bkey_s_c k)
{ {
struct bch_devs_list cached = bch2_bkey_cached_devs(k); struct bch_replicas_entry_padded search;
unsigned i;
int ret; int ret;
for (i = 0; i < cached.nr; i++) if (type == BKEY_TYPE_EXTENTS) {
if ((ret = bch2_mark_replicas(c, BCH_DATA_CACHED, struct bch_devs_list cached = bch2_bkey_cached_devs(k);
bch2_dev_list_single(cached.devs[i])))) unsigned i;
return ret;
for (i = 0; i < cached.nr; i++)
if ((ret = bch2_mark_replicas(c, BCH_DATA_CACHED,
bch2_dev_list_single(cached.devs[i]))))
return ret;
}
bkey_to_replicas(type, k, &search.e);
return bch2_mark_replicas(c, data_type, bch2_bkey_dirty_devs(k)); return search.e.nr_devs
? __bch2_mark_replicas(c, &search.e)
: 0;
} }
int bch2_replicas_gc_end(struct bch_fs *c, int ret) int bch2_replicas_gc_end(struct bch_fs *c, int ret)
...@@ -507,18 +558,32 @@ bool bch2_replicas_marked(struct bch_fs *c, ...@@ -507,18 +558,32 @@ bool bch2_replicas_marked(struct bch_fs *c,
} }
bool bch2_bkey_replicas_marked(struct bch_fs *c, bool bch2_bkey_replicas_marked(struct bch_fs *c,
enum bch_data_type data_type, enum bkey_type type,
struct bkey_s_c k) struct bkey_s_c k)
{ {
struct bch_devs_list cached = bch2_bkey_cached_devs(k); struct bch_replicas_entry_padded search;
unsigned i; bool ret;
if (type == BKEY_TYPE_EXTENTS) {
struct bch_devs_list cached = bch2_bkey_cached_devs(k);
unsigned i;
for (i = 0; i < cached.nr; i++)
if (!bch2_replicas_marked(c, BCH_DATA_CACHED,
bch2_dev_list_single(cached.devs[i])))
return false;
}
bkey_to_replicas(type, k, &search.e);
for (i = 0; i < cached.nr; i++) if (!search.e.nr_devs)
if (!bch2_replicas_marked(c, BCH_DATA_CACHED, return true;
bch2_dev_list_single(cached.devs[i])))
return false; rcu_read_lock();
ret = replicas_has_entry(rcu_dereference(c->replicas), &search.e);
rcu_read_unlock();
return bch2_replicas_marked(c, data_type, bch2_bkey_dirty_devs(k)); return ret;
} }
struct replicas_status __bch2_replicas_status(struct bch_fs *c, struct replicas_status __bch2_replicas_status(struct bch_fs *c,
......
...@@ -6,11 +6,11 @@ ...@@ -6,11 +6,11 @@
bool bch2_replicas_marked(struct bch_fs *, enum bch_data_type, bool bch2_replicas_marked(struct bch_fs *, enum bch_data_type,
struct bch_devs_list); struct bch_devs_list);
bool bch2_bkey_replicas_marked(struct bch_fs *, enum bch_data_type, bool bch2_bkey_replicas_marked(struct bch_fs *, enum bkey_type,
struct bkey_s_c); struct bkey_s_c);
int bch2_mark_replicas(struct bch_fs *, enum bch_data_type, int bch2_mark_replicas(struct bch_fs *, enum bch_data_type,
struct bch_devs_list); struct bch_devs_list);
int bch2_mark_bkey_replicas(struct bch_fs *, enum bch_data_type, int bch2_mark_bkey_replicas(struct bch_fs *, enum bkey_type,
struct bkey_s_c); struct bkey_s_c);
int bch2_cpu_replicas_to_text(struct bch_replicas_cpu *, char *, size_t); int bch2_cpu_replicas_to_text(struct bch_replicas_cpu *, char *, size_t);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment