Commit 63332394 authored by Kent Overstreet

bcachefs: Move snapshot table size to struct snapshot_table

We need to add bounds checking for snapshot table accesses - it turns
out there are cases where we do need to use the snapshots table before
fsck checks have completed (and indeed, fsck may not have been run).
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent aa6e130e
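
For context: with bounds checking in place, looking up a snapshot ID that is
not yet in the table returns NULL instead of reading out of bounds, so callers
are expected to handle a NULL result. A minimal caller sketch, not from this
commit, assuming the post-change snapshot_t() helper and the parent field of
struct snapshot_t (example_snapshot_parent is a hypothetical name):

#include "snapshot.h"	/* bcachefs-internal: snapshot_t(), struct snapshot_t */

/* Returns the parent snapshot ID, or 0 if id is not in the table yet. */
static u32 example_snapshot_parent(struct bch_fs *c, u32 id)
{
	const struct snapshot_t *s;
	u32 parent = 0;

	rcu_read_lock();
	s = snapshot_t(c, id);
	if (s)			/* NULL when U32_MAX - id >= table->nr */
		parent = s->parent;
	rcu_read_unlock();

	return parent;
}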
@@ -810,7 +810,6 @@ struct bch_fs {
 	/* snapshot.c: */
 	struct snapshot_table __rcu *snapshots;
-	size_t			snapshot_table_size;
 	struct mutex		snapshot_table_lock;
 	struct rw_semaphore	snapshot_create_lock;
@@ -151,36 +151,39 @@ bool __bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
 static noinline struct snapshot_t *__snapshot_t_mut(struct bch_fs *c, u32 id)
 {
 	size_t idx = U32_MAX - id;
-	size_t new_size;
 	struct snapshot_table *new, *old;
 
-	new_size = max(16UL, roundup_pow_of_two(idx + 1));
+	size_t new_bytes = kmalloc_size_roundup(struct_size(new, s, idx + 1));
+	size_t new_size = (new_bytes - sizeof(*new)) / sizeof(new->s[0]);
 
-	new = kvzalloc(struct_size(new, s, new_size), GFP_KERNEL);
+	new = kvzalloc(new_bytes, GFP_KERNEL);
 	if (!new)
 		return NULL;
 
+	new->nr = new_size;
+
 	old = rcu_dereference_protected(c->snapshots, true);
 	if (old)
-		memcpy(new->s,
-		       rcu_dereference_protected(c->snapshots, true)->s,
-		       sizeof(new->s[0]) * c->snapshot_table_size);
+		memcpy(new->s, old->s, sizeof(old->s[0]) * old->nr);
 
 	rcu_assign_pointer(c->snapshots, new);
-	c->snapshot_table_size = new_size;
-	kvfree_rcu_mightsleep(old);
+	kvfree_rcu(old, rcu);
 
-	return &rcu_dereference_protected(c->snapshots, true)->s[idx];
+	return &rcu_dereference_protected(c->snapshots,
+				lockdep_is_held(&c->snapshot_table_lock))->s[idx];
 }
 
 static inline struct snapshot_t *snapshot_t_mut(struct bch_fs *c, u32 id)
 {
 	size_t idx = U32_MAX - id;
+	struct snapshot_table *table =
+		rcu_dereference_protected(c->snapshots,
+				lockdep_is_held(&c->snapshot_table_lock));
 
 	lockdep_assert_held(&c->snapshot_table_lock);
 
-	if (likely(idx < c->snapshot_table_size))
-		return &rcu_dereference_protected(c->snapshots, true)->s[idx];
+	if (likely(table && idx < table->nr))
+		return &table->s[idx];
 
 	return __snapshot_t_mut(c, id);
 }
@@ -33,7 +33,11 @@ int bch2_mark_snapshot(struct btree_trans *, enum btree_id, unsigned,
 static inline struct snapshot_t *__snapshot_t(struct snapshot_table *t, u32 id)
 {
-	return &t->s[U32_MAX - id];
+	u32 idx = U32_MAX - id;
+
+	return likely(t && idx < t->nr)
+		? &t->s[idx]
+		: NULL;
 }
 
 static inline const struct snapshot_t *snapshot_t(struct bch_fs *c, u32 id)
 {
@@ -20,6 +20,8 @@ struct snapshot_t {
 };
 
 struct snapshot_table {
+	struct rcu_head		rcu;
+	size_t			nr;
 #ifndef RUST_BINDGEN
 	DECLARE_FLEX_ARRAY(struct snapshot_t, s);
 #else
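
The two new members serve the resize path above: nr lets every lookup
bounds-check against the table itself rather than a separate field in
struct bch_fs, and the embedded rcu_head is what allows kvfree_rcu(old, rcu)
to free the old table after a grace period. A standalone sketch of the same
grow-and-publish pattern, with hypothetical names (my_table, my_table_grow):

#include <linux/overflow.h>	/* struct_size() */
#include <linux/rcupdate.h>	/* RCU primitives, kvfree_rcu() */
#include <linux/slab.h>		/* kvzalloc(), kmalloc_size_roundup() */

struct my_table {
	struct rcu_head	rcu;	/* lets kvfree_rcu() defer the free */
	size_t		nr;	/* capacity, carried by the table itself */
	int		vals[];
};

static int my_table_grow(struct my_table __rcu **slot, size_t min_nr)
{
	struct my_table *old = rcu_dereference_protected(*slot, true);
	size_t bytes = kmalloc_size_roundup(struct_size(old, vals, min_nr));
	struct my_table *new = kvzalloc(bytes, GFP_KERNEL);

	if (!new)
		return -ENOMEM;

	/* Use the whole allocation, not just what was asked for. */
	new->nr = (bytes - sizeof(*new)) / sizeof(new->vals[0]);
	if (old)
		memcpy(new->vals, old->vals, sizeof(old->vals[0]) * old->nr);

	rcu_assign_pointer(*slot, new);	/* publish before freeing old */
	kvfree_rcu(old, rcu);		/* NULL-safe; frees after grace period */
	return 0;
}

Keeping the size inside the RCU-protected object means a reader always sees a
pointer and capacity that match, which is what makes the bounds check in
__snapshot_t() sound.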