Commit 3577df5f authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: serialize persistent_reserved
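
Usage counters were previously journalled as a single BCH_JSET_ENTRY_usage entry type, with the FS_USAGE_* discriminator stored in the payload next to an embedded replicas entry. Move the discriminator into the entry header's btree_id field, serialize each persistent_reserved[] counter as its own usage entry (with the header's level field as the index), and split per-replicas data usage out into a new BCH_JSET_ENTRY_data_usage entry type.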

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 3e0745e2
@@ -1363,7 +1363,8 @@ static inline __u64 __bset_magic(struct bch_sb *sb)
 	x(prio_ptrs,	2)		\
 	x(blacklist,	3)		\
 	x(blacklist_v2,	4)		\
-	x(usage,	5)
+	x(usage,	5)		\
+	x(data_usage,	6)
 
 enum {
 #define x(f, nr)	BCH_JSET_ENTRY_##f	= nr,
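
The x() list above is an x-macro: every jset entry type is declared once, and the enum that follows is generated by redefining x(). A minimal userspace sketch of the same idiom; the ENTRY_TYPES/ENTRY_* names here are illustrative stand-ins, not the kernel's:

#include <stdio.h>

/* declare each (name, number) pair exactly once */
#define ENTRY_TYPES()			\
	x(usage,	5)		\
	x(data_usage,	6)

/* expand the list into enum constants, as the header does */
enum {
#define x(f, nr) ENTRY_##f = nr,
	ENTRY_TYPES()
#undef x
};

int main(void)
{
	printf("%d %d\n", ENTRY_usage, ENTRY_data_usage);	/* prints "5 6" */
	return 0;
}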
@@ -1394,7 +1395,7 @@ struct jset_entry_blacklist_v2 {
 };
 
 enum {
-	FS_USAGE_REPLICAS	= 0,
+	FS_USAGE_RESERVED	= 0,
 	FS_USAGE_INODES		= 1,
 	FS_USAGE_KEY_VERSION	= 2,
 	FS_USAGE_NR		= 3
@@ -1402,8 +1403,12 @@ enum {
 
 struct jset_entry_usage {
 	struct jset_entry	entry;
-	__le64			sectors;
-	__u8			type;
+	__le64			v;
+} __attribute__((packed));
+
+struct jset_entry_data_usage {
+	struct jset_entry	entry;
+	__le64			v;
 	struct bch_replicas_entry r;
 } __attribute__((packed));
...
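
The usage payload is now a bare __le64: the FS_USAGE_* discriminator rides in the entry header's btree_id byte, the header's level byte is reused as an array index, and the variable-length replicas payload moves to the new jset_entry_data_usage. A standalone sketch of the sizing convention implied by the DIV_ROUND_UP(sizeof(*u), sizeof(u64)) - 1 expressions later in the diff, assuming an 8-byte entry header whose u64s field counts the 64-bit words that follow it; the struct layouts are simplified stand-ins:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct jhdr {			/* stand-in for struct jset_entry */
	uint16_t u64s;		/* 64-bit words of payload after this header */
	uint8_t  btree_id;	/* reused as the FS_USAGE_* type */
	uint8_t  level;		/* reused as the persistent_reserved index */
	uint8_t  type;		/* BCH_JSET_ENTRY_* */
	uint8_t  pad[3];
};

struct usage_entry {		/* stand-in for struct jset_entry_usage */
	struct jhdr entry;
	uint64_t v;
} __attribute__((packed));

int main(void)
{
	struct usage_entry u;

	memset(&u, 0, sizeof(u));
	/* 16 bytes total = 2 u64s; the stored count excludes the header word */
	u.entry.u64s = DIV_ROUND_UP(sizeof(u), sizeof(uint64_t)) - 1;
	printf("u64s = %u\n", (unsigned) u.entry.u64s);	/* prints "u64s = 1" */
	return 0;
}

Packing the discriminator into otherwise-unused header bytes keeps the payload a single word, so every scalar counter serializes to the same two-u64 entry.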
@@ -123,6 +123,9 @@ void bch2_fs_usage_initialize(struct bch_fs *c)
 	nr = sizeof(struct bch_fs_usage) / sizeof(u64) + c->replicas.nr;
 	usage = (void *) bch2_acc_percpu_u64s((void *) c->usage[0], nr);
 
+	for (i = 0; i < BCH_REPLICAS_MAX; i++)
+		usage->s.reserved += usage->persistent_reserved[i];
+
 	for (i = 0; i < c->replicas.nr; i++) {
 		struct bch_replicas_entry *e =
 			cpu_replicas_entry(&c->replicas, i);
...
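
bch2_fs_usage_initialize() now folds each persistent_reserved level into the summary reserved total after the percpu counters are accumulated. A toy version of just that step; the types are simplified stand-ins and REPLICAS_MAX is an assumed value for BCH_REPLICAS_MAX:

#include <stdint.h>
#include <stdio.h>

#define REPLICAS_MAX 4	/* assumed BCH_REPLICAS_MAX */

struct fs_usage_summary {
	uint64_t reserved;
	uint64_t persistent_reserved[REPLICAS_MAX];
};

int main(void)
{
	struct fs_usage_summary u = {
		.persistent_reserved = { 0, 128, 512, 0 },
	};

	/* the step added above: per-level reservations feed the total */
	for (unsigned i = 0; i < REPLICAS_MAX; i++)
		u.reserved += u.persistent_reserved[i];

	printf("reserved = %llu\n", (unsigned long long) u.reserved);	/* 640 */
	return 0;
}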
@@ -309,6 +309,27 @@ static int journal_entry_validate_usage(struct bch_fs *c,
 	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
 	int ret = 0;
 
+	if (journal_entry_err_on(bytes < sizeof(*u),
+				 c,
+				 "invalid journal entry usage: bad size")) {
+		journal_entry_null_range(entry, vstruct_next(entry));
+		return ret;
+	}
+
+fsck_err:
+	return ret;
+}
+
+static int journal_entry_validate_data_usage(struct bch_fs *c,
+					struct jset *jset,
+					struct jset_entry *entry,
+					int write)
+{
+	struct jset_entry_data_usage *u =
+		container_of(entry, struct jset_entry_data_usage, entry);
+	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
+	int ret = 0;
+
 	if (journal_entry_err_on(bytes < sizeof(*u) ||
 				 bytes < sizeof(*u) + u->r.nr_devs,
 				 c,
...
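
The new validator checks the entry's self-declared size in two stages: the fixed-size struct must fit before u->r.nr_devs can be read at all, and only then can the trailing device list be bounds-checked. A hedged sketch of that logic with simplified stand-in types and an assumed fixed size:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct replicas_entry {		/* stand-in for struct bch_replicas_entry */
	uint8_t data_type;
	uint8_t nr_devs;
	uint8_t devs[];		/* nr_devs device indexes follow */
};

/* bytes is the size the journal entry claims for itself (jset_u64s() * 8) */
static bool data_usage_size_ok(unsigned bytes, unsigned fixed_size,
			       const struct replicas_entry *r)
{
	if (bytes < fixed_size)			/* too small to read nr_devs safely */
		return false;
	if (bytes < fixed_size + r->nr_devs)	/* device list would be truncated */
		return false;
	return true;
}

int main(void)
{
	uint8_t buf[8];
	struct replicas_entry *r = (struct replicas_entry *) buf;

	memset(buf, 0, sizeof(buf));
	r->nr_devs = 3;

	printf("%d %d\n",
	       data_usage_size_ok(24, 18, r),	/* 1: 18 + 3 <= 24 */
	       data_usage_size_ok(18, 18, r));	/* 0: device list truncated */
	return 0;
}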
@@ -75,23 +75,32 @@ static int journal_replay_entry_early(struct bch_fs *c,
 		struct jset_entry_usage *u =
 			container_of(entry, struct jset_entry_usage, entry);
 
-		switch (u->type) {
-		case FS_USAGE_REPLICAS:
-			ret = bch2_replicas_set_usage(c, &u->r,
-						      le64_to_cpu(u->sectors));
+		switch (entry->btree_id) {
+		case FS_USAGE_RESERVED:
+			if (entry->level < BCH_REPLICAS_MAX)
+				percpu_u64_set(&c->usage[0]->
+					       persistent_reserved[entry->level],
+					       le64_to_cpu(u->v));
 			break;
 		case FS_USAGE_INODES:
 			percpu_u64_set(&c->usage[0]->s.nr_inodes,
-				       le64_to_cpu(u->sectors));
+				       le64_to_cpu(u->v));
 			break;
 		case FS_USAGE_KEY_VERSION:
 			atomic64_set(&c->key_version,
-				     le64_to_cpu(u->sectors));
+				     le64_to_cpu(u->v));
 			break;
 		}
 
 		break;
 	}
+	case BCH_JSET_ENTRY_data_usage: {
+		struct jset_entry_data_usage *u =
+			container_of(entry, struct jset_entry_data_usage, entry);
+
+		ret = bch2_replicas_set_usage(c, &u->r,
+					      le64_to_cpu(u->v));
+		break;
+	}
 	}
 
 	return ret;
@@ -156,7 +165,8 @@ static bool journal_empty(struct list_head *journal)
 	list_for_each_entry(i, journal, list) {
 		vstruct_for_each(&i->j, entry) {
 			if (entry->type == BCH_JSET_ENTRY_btree_root ||
-			    entry->type == BCH_JSET_ENTRY_usage)
+			    entry->type == BCH_JSET_ENTRY_usage ||
+			    entry->type == BCH_JSET_ENTRY_data_usage)
 				continue;
 
 			if (entry->type == BCH_JSET_ENTRY_btree_keys &&
...
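
On replay, the discriminator comes back out of entry->btree_id and entry->level indexes persistent_reserved[], with out-of-range levels skipped; bounds-checking the level means a journal written with a larger BCH_REPLICAS_MAX can't corrupt memory on replay. A simplified stand-in of that dispatch (REPLICAS_MAX again an assumed value):

#include <stdint.h>
#include <stdio.h>

#define REPLICAS_MAX 4	/* assumed BCH_REPLICAS_MAX */

enum { FS_USAGE_RESERVED = 0, FS_USAGE_INODES = 1, FS_USAGE_KEY_VERSION = 2 };

struct usage_state {
	uint64_t persistent_reserved[REPLICAS_MAX];
	uint64_t nr_inodes;
	uint64_t key_version;
};

/* type/level mirror entry->btree_id / entry->level in the diff above */
static void replay_usage(struct usage_state *s,
			 uint8_t type, uint8_t level, uint64_t v)
{
	switch (type) {
	case FS_USAGE_RESERVED:
		if (level < REPLICAS_MAX)	/* ignore unknown levels */
			s->persistent_reserved[level] = v;
		break;
	case FS_USAGE_INODES:
		s->nr_inodes = v;
		break;
	case FS_USAGE_KEY_VERSION:
		s->key_version = v;
		break;
	}
}

int main(void)
{
	struct usage_state s = { 0 };

	replay_usage(&s, FS_USAGE_RESERVED, 1, 128);
	replay_usage(&s, FS_USAGE_INODES, 0, 42);
	printf("%llu %llu\n",
	       (unsigned long long) s.persistent_reserved[1],
	       (unsigned long long) s.nr_inodes);	/* prints "128 42" */
	return 0;
}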
@@ -312,9 +312,14 @@ static unsigned reserve_journal_replicas(struct bch_fs *c,
 	journal_res_u64s +=
 		DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));
 
+	/* persistent_reserved: */
+	journal_res_u64s +=
+		DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64)) *
+		BCH_REPLICAS_MAX;
+
 	for_each_cpu_replicas_entry(r, e)
 		journal_res_u64s +=
-			DIV_ROUND_UP(sizeof(struct jset_entry_usage) +
+			DIV_ROUND_UP(sizeof(struct jset_entry_data_usage) +
 				     e->nr_devs, sizeof(u64));
 
 	return journal_res_u64s;
 }
...
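
The reservation above budgets, in u64s, one fixed-size usage entry per scalar counter, BCH_REPLICAS_MAX more for the persistent_reserved levels, and one variable-length data_usage entry per replicas entry. A worked example of the arithmetic; the struct sizes and counts are illustrative assumptions, not the kernel's exact values:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned usage_sz = 16;		/* assumed sizeof(struct jset_entry_usage) */
	unsigned data_usage_sz = 18;	/* assumed sizeof(struct jset_entry_data_usage) */
	unsigned replicas_max = 4;	/* assumed BCH_REPLICAS_MAX */
	unsigned u64s = 0;

	/* nr_inodes and key_version entries */
	u64s += 2 * DIV_ROUND_UP(usage_sz, 8);
	/* one usage entry per possible persistent_reserved level */
	u64s += replicas_max * DIV_ROUND_UP(usage_sz, 8);
	/* a single replicas entry with a 3-device list appended */
	u64s += DIV_ROUND_UP(data_usage_sz + 3, 8);

	printf("journal_res_u64s = %u\n", u64s);	/* 4 + 8 + 3 = 15 */
	return 0;
}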
@@ -900,7 +900,6 @@ bch2_journal_super_entries_add_common(struct bch_fs *c,
 					struct jset_entry *entry,
 					u64 journal_seq)
 {
-	struct jset_entry_usage *u;
 	struct btree_root *r;
 	unsigned i;
 
@@ -929,24 +928,45 @@ bch2_journal_super_entries_add_common(struct bch_fs *c,
 	{
 		u64 nr_inodes = percpu_u64_get(&c->usage[0]->s.nr_inodes);
+		struct jset_entry_usage *u =
+			container_of(entry, struct jset_entry_usage, entry);
 
-		u = container_of(entry, struct jset_entry_usage, entry);
 		memset(u, 0, sizeof(*u));
 		u->entry.u64s	= DIV_ROUND_UP(sizeof(*u), sizeof(u64)) - 1;
 		u->entry.type	= BCH_JSET_ENTRY_usage;
-		u->sectors	= cpu_to_le64(nr_inodes);
-		u->type		= FS_USAGE_INODES;
+		u->entry.btree_id = FS_USAGE_INODES;
+		u->v		= cpu_to_le64(nr_inodes);
 
 		entry = vstruct_next(entry);
 	}
 
 	{
-		u = container_of(entry, struct jset_entry_usage, entry);
+		struct jset_entry_usage *u =
+			container_of(entry, struct jset_entry_usage, entry);
+
 		memset(u, 0, sizeof(*u));
 		u->entry.u64s	= DIV_ROUND_UP(sizeof(*u), sizeof(u64)) - 1;
 		u->entry.type	= BCH_JSET_ENTRY_usage;
-		u->sectors	= cpu_to_le64(atomic64_read(&c->key_version));
-		u->type		= FS_USAGE_KEY_VERSION;
+		u->entry.btree_id = FS_USAGE_KEY_VERSION;
+		u->v		= cpu_to_le64(atomic64_read(&c->key_version));
+
+		entry = vstruct_next(entry);
+	}
+
+	for (i = 0; i < BCH_REPLICAS_MAX; i++) {
+		struct jset_entry_usage *u =
+			container_of(entry, struct jset_entry_usage, entry);
+		u64 sectors = percpu_u64_get(&c->usage[0]->persistent_reserved[i]);
+
+		if (!sectors)
+			continue;
+
+		memset(u, 0, sizeof(*u));
+		u->entry.u64s	= DIV_ROUND_UP(sizeof(*u), sizeof(u64)) - 1;
+		u->entry.type	= BCH_JSET_ENTRY_usage;
+		u->entry.btree_id = FS_USAGE_RESERVED;
+		u->entry.level	= i;
+		u->v		= sectors;
 
 		entry = vstruct_next(entry);
 	}
@@ -955,13 +975,14 @@ bch2_journal_super_entries_add_common(struct bch_fs *c,
 		struct bch_replicas_entry *e =
 			cpu_replicas_entry(&c->replicas, i);
 		u64 sectors = percpu_u64_get(&c->usage[0]->data[i]);
+		struct jset_entry_data_usage *u =
+			container_of(entry, struct jset_entry_data_usage, entry);
 
-		u = container_of(entry, struct jset_entry_usage, entry);
+		memset(u, 0, sizeof(*u));
 		u->entry.u64s	= DIV_ROUND_UP(sizeof(*u) + e->nr_devs,
 					       sizeof(u64)) - 1;
-		u->entry.type	= BCH_JSET_ENTRY_usage;
-		u->sectors	= cpu_to_le64(sectors);
-		u->type		= FS_USAGE_REPLICAS;
+		u->entry.type	= BCH_JSET_ENTRY_data_usage;
+		u->v		= cpu_to_le64(sectors);
 		unsafe_memcpy(&u->r, e, replicas_entry_bytes(e),
 			      "embedded variable length struct");
...
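
Each block in the writer fills one entry in place and then steps to the next free slot with vstruct_next(), which advances by the entry's self-declared length. A rough standalone approximation of that advance, under the same assumed 8-byte header layout as the earlier sketches:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct jhdr {			/* stand-in for struct jset_entry */
	uint16_t u64s;		/* payload words after the 8-byte header */
	uint8_t  btree_id, level, type, pad[3];
};

/* rough equivalent of vstruct_next(): skip the header plus u64s words */
static struct jhdr *entry_next(struct jhdr *e)
{
	return (struct jhdr *) ((uint64_t *) e + 1 + e->u64s);
}

int main(void)
{
	uint64_t buf[8];
	struct jhdr *e = (struct jhdr *) buf;

	memset(buf, 0, sizeof(buf));
	e->u64s = 1;				/* one u64 of payload */
	*(uint64_t *) (e + 1) = 42;		/* the counter value */

	struct jhdr *next = entry_next(e);
	printf("advanced %zu bytes\n",
	       (size_t) ((char *) next - (char *) e));	/* prints "16" */
	return 0;
}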