Commit 35d5aff2 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Kill bch2_fs_usage_scratch_get()

This is an important cleanup, eliminating an unnecessary copy in the
transaction commit path.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 9c2e6242
......@@ -12,6 +12,7 @@
#include "error.h"
#include "extents.h"
#include "journal.h"
#include "replicas.h"
#include "trace.h"
#include <linux/prefetch.h>
......
......@@ -437,10 +437,6 @@ static int bch2_btree_reserve_get(struct btree_update *as, unsigned nr_nodes,
goto err_free;
}
ret = bch2_mark_bkey_replicas(c, bkey_i_to_s_c(&b->key));
if (ret)
goto err_free;
as->prealloc_nodes[as->nr_prealloc_nodes++] = b;
}
......
......@@ -376,7 +376,6 @@ bch2_trans_commit_write_locked(struct btree_trans *trans,
struct btree_insert_entry **stopped_at)
{
struct bch_fs *c = trans->c;
struct bch_fs_usage_online *fs_usage = NULL;
struct btree_insert_entry *i;
struct btree_trans_commit_hook *h;
unsigned u64s = 0;
......@@ -424,13 +423,11 @@ bch2_trans_commit_write_locked(struct btree_trans *trans,
if (marking) {
percpu_down_read(&c->mark_lock);
fs_usage = bch2_fs_usage_scratch_get(c);
}
/* Must be called under mark_lock: */
if (marking && trans->fs_usage_deltas &&
bch2_replicas_delta_list_apply(c, &fs_usage->u,
trans->fs_usage_deltas)) {
!bch2_replicas_delta_list_marked(c, trans->fs_usage_deltas)) {
ret = BTREE_INSERT_NEED_MARK_REPLICAS;
goto err;
}
......@@ -474,10 +471,10 @@ bch2_trans_commit_write_locked(struct btree_trans *trans,
trans_for_each_update(trans, i)
if (BTREE_NODE_TYPE_HAS_MEM_TRIGGERS & (1U << i->bkey_type))
bch2_mark_update(trans, i->iter, i->k,
&fs_usage->u, i->trigger_flags);
NULL, i->trigger_flags);
if (marking)
bch2_trans_fs_usage_apply(trans, fs_usage);
if (marking && trans->fs_usage_deltas)
bch2_trans_fs_usage_apply(trans, trans->fs_usage_deltas);
if (unlikely(c->gc_pos.phase))
bch2_trans_mark_gc(trans);
......@@ -486,7 +483,6 @@ bch2_trans_commit_write_locked(struct btree_trans *trans,
do_btree_insert_one(trans, i->iter, i->k);
err:
if (marking) {
bch2_fs_usage_scratch_put(c, fs_usage);
percpu_up_read(&c->mark_lock);
}
......
......@@ -167,37 +167,6 @@ void bch2_fs_usage_initialize(struct bch_fs *c)
percpu_up_write(&c->mark_lock);
}
/*
 * Release a usage buffer obtained from bch2_fs_usage_scratch_get():
 * the shared per-fs scratch buffer is returned by dropping its lock,
 * a heap-allocated fallback is simply freed.
 */
void bch2_fs_usage_scratch_put(struct bch_fs *c, struct bch_fs_usage_online *fs_usage)
{
	if (fs_usage != c->usage_scratch) {
		kfree(fs_usage);
		return;
	}

	mutex_unlock(&c->usage_scratch_lock);
}
/*
 * Get a zeroed struct bch_fs_usage_online sized for the current number
 * of replicas entries.
 *
 * Allocation strategy, in order:
 *  1. nonblocking kzalloc (GFP_NOWAIT) - fast path,
 *  2. trylock of the shared per-fs scratch buffer,
 *  3. blocking kzalloc (GFP_NOFS),
 *  4. blocking acquisition of the scratch buffer lock.
 *
 * NOTE(review): there is no error return - the final fallback blocks
 * until the scratch buffer is free, so this never returns NULL.
 * Pair with bch2_fs_usage_scratch_put().
 */
struct bch_fs_usage_online *bch2_fs_usage_scratch_get(struct bch_fs *c)
{
	struct bch_fs_usage_online *ret;
	/* fixed header plus one u64 counter per replicas entry */
	unsigned bytes = sizeof(struct bch_fs_usage_online) + sizeof(u64) *
		READ_ONCE(c->replicas.nr);

	ret = kzalloc(bytes, GFP_NOWAIT|__GFP_NOWARN);
	if (ret)
		return ret;

	if (mutex_trylock(&c->usage_scratch_lock))
		goto out_pool;

	ret = kzalloc(bytes, GFP_NOFS);
	if (ret)
		return ret;

	mutex_lock(&c->usage_scratch_lock);
out_pool:
	/* shared scratch buffer: caller holds usage_scratch_lock until put */
	ret = c->usage_scratch;
	memset(ret, 0, bytes);
	return ret;
}
static inline struct bch_dev_usage *dev_usage_ptr(struct bch_dev *ca,
unsigned journal_seq,
bool gc)
......@@ -459,6 +428,8 @@ static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
percpu_rwsem_assert_held(&c->mark_lock);
preempt_disable();
if (!fs_usage)
fs_usage = fs_usage_ptr(c, journal_seq, gc);
u = dev_usage_ptr(ca, journal_seq, gc);
if (bucket_type(old))
......@@ -486,22 +457,17 @@ static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
bch2_wake_allocator(ca);
}
static inline int update_replicas(struct bch_fs *c,
struct bch_fs_usage *fs_usage,
struct bch_replicas_entry *r,
s64 sectors)
static inline void update_replicas(struct bch_fs *c,
struct bch_fs_usage *fs_usage,
struct bch_replicas_entry *r,
s64 sectors)
{
int idx = bch2_replicas_entry_idx(c, r);
if (idx < 0)
return -1;
if (!fs_usage)
return 0;
BUG_ON(idx < 0);
fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
fs_usage->replicas[idx] += sectors;
return 0;
}
static inline void update_cached_sectors(struct bch_fs *c,
......@@ -579,55 +545,6 @@ static inline void update_cached_sectors_list(struct btree_trans *trans,
update_replicas_list(trans, &r.e, sectors);
}
/*
 * Advance to the next entry in a replicas_delta_list; entries are
 * variable length (fixed delta field + flexible-length replicas entry).
 * NOTE(review): the "+ 8" appears to account for the s64 delta field
 * preceding the packed replicas entry - confirm it equals
 * sizeof(d->delta).
 */
static inline struct replicas_delta *
replicas_delta_next(struct replicas_delta *d)
{
	return (void *) d + replicas_entry_bytes(&d->r) + 8;
}
/*
 * Apply every delta in @r to @fs_usage.
 *
 * Returns 0 on success; -1 if some replicas entry in the list is not
 * present in the filesystem's replicas table, in which case every delta
 * applied so far is applied again negated, leaving @fs_usage unchanged.
 */
int bch2_replicas_delta_list_apply(struct bch_fs *c,
				   struct bch_fs_usage *fs_usage,
				   struct replicas_delta_list *r)
{
	struct replicas_delta *d = r->d;
	struct replicas_delta *top = (void *) r->d + r->used;
	unsigned i;

	for (d = r->d; d != top; d = replicas_delta_next(d))
		if (update_replicas(c, fs_usage, &d->r, d->delta)) {
			/* missing entry: unwind only what was applied */
			top = d;
			goto unwind;
		}

	if (!fs_usage)
		return 0;

	fs_usage->nr_inodes += r->nr_inodes;

	for (i = 0; i < BCH_REPLICAS_MAX; i++) {
		fs_usage->reserved += r->persistent_reserved[i];
		fs_usage->persistent_reserved[i] += r->persistent_reserved[i];
	}

	return 0;
unwind:
	for (d = r->d; d != top; d = replicas_delta_next(d))
		update_replicas(c, fs_usage, &d->r, -d->delta);
	return -1;
}
/*
 * Ensure every replicas entry referenced by @r is present in the
 * filesystem's replicas table, adding entries as needed.
 * Returns 0 on success or the first error from bch2_mark_replicas().
 */
int bch2_replicas_delta_list_mark(struct bch_fs *c,
				  struct replicas_delta_list *r)
{
	struct replicas_delta *d = r->d;
	struct replicas_delta *top = (void *) r->d + r->used;
	int ret = 0;

	while (d != top) {
		ret = bch2_mark_replicas(c, &d->r);
		if (ret)
			break;
		d = replicas_delta_next(d);
	}

	return ret;
}
#define do_mark_fn(fn, c, pos, flags, ...) \
({ \
int gc, ret = 0; \
......@@ -1400,62 +1317,15 @@ int bch2_mark_update(struct btree_trans *trans,
return ret;
}
/*
 * Fold the accumulated usage deltas in @src into the filesystem usage
 * counters, consuming sectors from the caller's disk reservation.
 *
 * Must be called with c->mark_lock held (asserted below).
 *
 * Returns 0 normally; -1 (after a one-time WARN and after clawing the
 * excess back out of sectors_available) if usage grew by more than the
 * reservation covered.
 */
static int bch2_fs_usage_apply(struct bch_fs *c,
			       struct bch_fs_usage_online *src,
			       struct disk_reservation *disk_res,
			       unsigned journal_seq)
{
	struct bch_fs_usage *dst;
	s64 added = src->u.data + src->u.reserved;
	s64 should_not_have_added;
	int ret = 0;

	percpu_rwsem_assert_held(&c->mark_lock);

	/*
	 * Not allowed to reduce sectors_available except by getting a
	 * reservation:
	 */
	should_not_have_added = added - (s64) (disk_res ? disk_res->sectors : 0);
	if (WARN_ONCE(should_not_have_added > 0,
		      "disk usage increased by %lli more than reservation of %llu",
		      added, disk_res ? disk_res->sectors : 0)) {
		/* claw the unreserved growth back so accounting stays sane */
		atomic64_sub(should_not_have_added, &c->sectors_available);
		added -= should_not_have_added;
		ret = -1;
	}

	if (added > 0) {
		/* NOTE(review): deref of disk_res looks safe only because
		 * added > 0 with disk_res == NULL always takes the WARN
		 * branch above, which zeroes added - confirm */
		disk_res->sectors -= added;
		src->online_reserved -= added;
	}

	this_cpu_add(*c->online_reserved, src->online_reserved);

	/* fs_usage_ptr() returns a percpu pointer; keep preemption off
	 * while accumulating into it */
	preempt_disable();
	dst = fs_usage_ptr(c, journal_seq, false);
	acc_u64s((u64 *) dst, (u64 *) &src->u, fs_usage_u64s(c));
	preempt_enable();

	return ret;
}
void bch2_trans_fs_usage_apply(struct btree_trans *trans,
struct bch_fs_usage_online *fs_usage)
static noinline __cold
void fs_usage_apply_warn(struct btree_trans *trans,
unsigned disk_res_sectors)
{
struct bch_fs *c = trans->c;
struct btree_insert_entry *i;
static int warned_disk_usage = 0;
u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
char buf[200];
if (!bch2_fs_usage_apply(c, fs_usage, trans->disk_res,
trans->journal_res.seq) ||
warned_disk_usage ||
xchg(&warned_disk_usage, 1))
return;
bch_err(c, "disk usage increased more than %llu sectors reserved",
bch_err(c, "disk usage increased more than %u sectors reserved",
disk_res_sectors);
trans_for_each_update(trans, i) {
......@@ -1490,6 +1360,65 @@ void bch2_trans_fs_usage_apply(struct btree_trans *trans,
}
}
/*
 * Apply the transaction's accumulated replicas deltas directly to the
 * in-memory filesystem usage counters, consuming the transaction's
 * disk reservation.
 *
 * Caller must hold c->mark_lock (asserted below), and every replicas
 * entry in @deltas must already be marked (see
 * bch2_replicas_delta_list_marked()) - update_replicas() has no error
 * path for a missing entry.
 */
void bch2_trans_fs_usage_apply(struct btree_trans *trans,
			       struct replicas_delta_list *deltas)
{
	struct bch_fs *c = trans->c;
	static int warned_disk_usage = 0; /* warn at most once per boot */
	bool warn = false;
	unsigned disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
	struct replicas_delta *d = deltas->d;
	struct replicas_delta *top = (void *) deltas->d + deltas->used;
	struct bch_fs_usage *dst;
	s64 added = 0, should_not_have_added;
	unsigned i;

	percpu_rwsem_assert_held(&c->mark_lock);

	/* fs_usage_ptr() returns a percpu pointer; keep preemption off
	 * while accumulating into it */
	preempt_disable();
	dst = fs_usage_ptr(c, trans->journal_res.seq, false);

	for (d = deltas->d; d != top; d = replicas_delta_next(d)) {
		switch (d->r.data_type) {
		case BCH_DATA_btree:
		case BCH_DATA_user:
		case BCH_DATA_parity:
			/* only these data types count against the reservation */
			added += d->delta;
		}

		update_replicas(c, dst, &d->r, d->delta);
	}

	dst->nr_inodes += deltas->nr_inodes;

	for (i = 0; i < BCH_REPLICAS_MAX; i++) {
		added += deltas->persistent_reserved[i];
		dst->reserved += deltas->persistent_reserved[i];
		dst->persistent_reserved[i] += deltas->persistent_reserved[i];
	}

	/*
	 * Not allowed to reduce sectors_available except by getting a
	 * reservation:
	 */
	should_not_have_added = added - (s64) disk_res_sectors;
	if (unlikely(should_not_have_added > 0)) {
		/* claw back the unreserved growth; defer the warning until
		 * after preemption is re-enabled */
		atomic64_sub(should_not_have_added, &c->sectors_available);
		added -= should_not_have_added;
		warn = true;
	}

	if (added > 0) {
		/* NOTE(review): trans->disk_res deref looks safe only
		 * because added > 0 with no reservation always takes the
		 * branch above, which zeroes added - confirm */
		trans->disk_res->sectors -= added;
		this_cpu_sub(*c->online_reserved, added);
	}

	preempt_enable();

	if (unlikely(warn) && !xchg(&warned_disk_usage, 1))
		fs_usage_apply_warn(trans, disk_res_sectors);
}
/* trans_mark: */
static struct btree_iter *trans_get_update(struct btree_trans *trans,
......
......@@ -216,9 +216,6 @@ static inline unsigned dev_usage_u64s(void)
return sizeof(struct bch_dev_usage) / sizeof(u64);
}
void bch2_fs_usage_scratch_put(struct bch_fs *, struct bch_fs_usage_online *);
struct bch_fs_usage_online *bch2_fs_usage_scratch_get(struct bch_fs *);
u64 bch2_fs_usage_read_one(struct bch_fs *, u64 *);
struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *);
......@@ -250,16 +247,11 @@ int bch2_mark_key(struct bch_fs *, struct bkey_s_c, unsigned,
int bch2_mark_update(struct btree_trans *, struct btree_iter *,
struct bkey_i *, struct bch_fs_usage *, unsigned);
int bch2_replicas_delta_list_apply(struct bch_fs *,
struct bch_fs_usage *,
struct replicas_delta_list *);
int bch2_replicas_delta_list_mark(struct bch_fs *,
struct replicas_delta_list *);
int bch2_trans_mark_key(struct btree_trans *, struct bkey_s_c, struct bkey_s_c,
unsigned, s64, unsigned);
int bch2_trans_mark_update(struct btree_trans *, struct btree_iter *iter,
struct bkey_i *insert, unsigned);
void bch2_trans_fs_usage_apply(struct btree_trans *, struct bch_fs_usage_online *);
void bch2_trans_fs_usage_apply(struct btree_trans *, struct replicas_delta_list *);
int bch2_trans_mark_metadata_bucket(struct btree_trans *,
struct disk_reservation *, struct bch_dev *,
......
......@@ -96,22 +96,6 @@ struct bch_fs_usage_short {
u64 nr_inodes;
};
/* One accumulated usage change for a single replicas entry. */
struct replicas_delta {
	s64 delta;			/* change in sector count */
	struct bch_replicas_entry r;	/* variable length */
} __packed;

/*
 * Variable-length list of replicas_delta entries accumulated by a
 * btree transaction, plus a few whole-fs counters. The span between
 * memset_start and memset_end can be cleared with a single memset()
 * to reset the list.
 */
struct replicas_delta_list {
	unsigned size;		/* presumably bytes allocated for d[] - confirm */
	unsigned used;		/* bytes of d[] in use (see list iteration) */

	struct {} memset_start;
	u64 nr_inodes;
	u64 persistent_reserved[BCH_REPLICAS_MAX];
	struct {} memset_end;
	struct replicas_delta d[0];
};
/*
* A reservation for space on disk:
*/
......
......@@ -471,6 +471,36 @@ static int __bch2_mark_bkey_replicas(struct bch_fs *c, struct bkey_s_c k,
return 0;
}
/* replicas delta list: */
/*
 * Check whether every replicas entry referenced by @r already exists
 * in the filesystem's replicas table. Caller must hold c->mark_lock.
 */
bool bch2_replicas_delta_list_marked(struct bch_fs *c,
				     struct replicas_delta_list *r)
{
	struct replicas_delta *end = (void *) r->d + r->used;
	struct replicas_delta *d;
	bool marked = true;

	percpu_rwsem_assert_held(&c->mark_lock);

	for (d = r->d; marked && d != end; d = replicas_delta_next(d))
		marked = bch2_replicas_entry_idx(c, &d->r) >= 0;

	return marked;
}
/*
 * Add every replicas entry referenced by @r to the filesystem's
 * replicas table. Returns 0 on success, otherwise the first error
 * from bch2_mark_replicas() (remaining entries are not processed).
 */
int bch2_replicas_delta_list_mark(struct bch_fs *c,
				  struct replicas_delta_list *r)
{
	struct replicas_delta *end = (void *) r->d + r->used;
	struct replicas_delta *d;

	for (d = r->d; d != end; d = replicas_delta_next(d)) {
		int ret = bch2_mark_replicas(c, &d->r);
		if (ret)
			return ret;
	}

	return 0;
}
/* bkey replicas: */
bool bch2_bkey_replicas_marked(struct bch_fs *c,
struct bkey_s_c k)
{
......@@ -482,6 +512,11 @@ int bch2_mark_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
return __bch2_mark_bkey_replicas(c, k, false);
}
/*
* Old replicas_gc mechanism: only used for journal replicas entries now, should
* die at some point:
*/
int bch2_replicas_gc_end(struct bch_fs *c, int ret)
{
unsigned i;
......@@ -575,6 +610,8 @@ int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
return 0;
}
/* New much simpler mechanism for clearing out unneeded replicas entries: */
int bch2_replicas_gc2(struct bch_fs *c)
{
struct bch_replicas_cpu new = { 0 };
......
......@@ -26,6 +26,31 @@ bool bch2_replicas_marked(struct bch_fs *, struct bch_replicas_entry *);
int bch2_mark_replicas(struct bch_fs *,
struct bch_replicas_entry *);
/* One accumulated usage change for a single replicas entry. */
struct replicas_delta {
	s64 delta;			/* change in sector count */
	struct bch_replicas_entry r;	/* variable length */
} __packed;

/*
 * Variable-length list of replicas_delta entries accumulated by a
 * btree transaction, plus a few whole-fs counters. The span between
 * memset_start and memset_end can be cleared with a single memset()
 * to reset the list.
 */
struct replicas_delta_list {
	unsigned size;		/* presumably bytes allocated for d[] - confirm */
	unsigned used;		/* bytes of d[] in use (see list iteration) */

	struct {} memset_start;
	u64 nr_inodes;
	u64 persistent_reserved[BCH_REPLICAS_MAX];
	struct {} memset_end;
	struct replicas_delta d[0];
};
/*
 * Advance to the next entry in a replicas_delta_list; entries are
 * variable length (fixed delta field + flexible-length replicas entry).
 * NOTE(review): the "+ 8" appears to account for the s64 delta field
 * preceding the packed replicas entry - confirm it equals
 * sizeof(d->delta).
 */
static inline struct replicas_delta *
replicas_delta_next(struct replicas_delta *d)
{
	return (void *) d + replicas_entry_bytes(&d->r) + 8;
}
bool bch2_replicas_delta_list_marked(struct bch_fs *, struct replicas_delta_list *);
int bch2_replicas_delta_list_mark(struct bch_fs *, struct replicas_delta_list *);
void bch2_bkey_to_replicas(struct bch_replicas_entry *, struct bkey_s_c);
bool bch2_bkey_replicas_marked(struct bch_fs *, struct bkey_s_c);
int bch2_mark_bkey_replicas(struct bch_fs *, struct bkey_s_c);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment