Commit e53a961c authored by Kent Overstreet

bcachefs: Rename enum alloc_reserve -> bch_watermark

This is prep work for consolidating with JOURNAL_WATERMARK.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent e9d01723
@@ -220,7 +220,7 @@ static inline u64 should_invalidate_buckets(struct bch_dev *ca,
 	u64 free = max_t(s64, 0,
 			   u.d[BCH_DATA_free].buckets
 			 + u.d[BCH_DATA_need_discard].buckets
-			 - bch2_dev_buckets_reserved(ca, RESERVE_stripe));
+			 - bch2_dev_buckets_reserved(ca, BCH_WATERMARK_stripe));
 
 	return clamp_t(s64, want_free - free, 0, u.d[BCH_DATA_cached].buckets);
 }
...
@@ -44,9 +44,9 @@ static void bch2_trans_mutex_lock_norelock(struct btree_trans *trans,
 	}
 }
 
-const char * const bch2_alloc_reserves[] = {
+const char * const bch2_watermarks[] = {
 #define x(t) #t,
-	BCH_ALLOC_RESERVES()
+	BCH_WATERMARKS()
 #undef x
 	NULL
 };
@@ -188,13 +188,13 @@ long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
 	return -1;
 }
 
-static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
+static inline unsigned open_buckets_reserved(enum bch_watermark watermark)
 {
-	switch (reserve) {
-	case RESERVE_btree:
-	case RESERVE_btree_movinggc:
+	switch (watermark) {
+	case BCH_WATERMARK_btree:
+	case BCH_WATERMARK_btree_copygc:
 		return 0;
-	case RESERVE_movinggc:
+	case BCH_WATERMARK_copygc:
 		return OPEN_BUCKETS_COUNT / 4;
 	default:
 		return OPEN_BUCKETS_COUNT / 2;
@@ -203,7 +203,7 @@ static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
 static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
 					      u64 bucket,
-					      enum alloc_reserve reserve,
+					      enum bch_watermark watermark,
 					      const struct bch_alloc_v4 *a,
 					      struct bucket_alloc_state *s,
 					      struct closure *cl)
@@ -233,7 +233,7 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *
 	spin_lock(&c->freelist_lock);
 
-	if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
+	if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(watermark))) {
 		if (cl)
 			closure_wait(&c->open_buckets_wait, cl);
@@ -284,7 +284,7 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *
 }
 
 static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
-					    enum alloc_reserve reserve, u64 free_entry,
+					    enum bch_watermark watermark, u64 free_entry,
 					    struct bucket_alloc_state *s,
 					    struct bkey_s_c freespace_k,
 					    struct closure *cl)
@@ -374,7 +374,7 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
 		}
 	}
 
-	ob = __try_alloc_bucket(c, ca, b, reserve, a, s, cl);
+	ob = __try_alloc_bucket(c, ca, b, watermark, a, s, cl);
 	if (!ob)
 		iter.path->preserve = false;
 err:
@@ -394,7 +394,7 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
 static noinline struct open_bucket *
 bch2_bucket_alloc_early(struct btree_trans *trans,
 			struct bch_dev *ca,
-			enum alloc_reserve reserve,
+			enum bch_watermark watermark,
 			struct bucket_alloc_state *s,
 			struct closure *cl)
 {
@@ -424,7 +424,7 @@ bch2_bucket_alloc_early(struct btree_trans *trans,
 		s->buckets_seen++;
 
-		ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, reserve, a, s, cl);
+		ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, watermark, a, s, cl);
 		if (ob)
 			break;
 	}
@@ -445,7 +445,7 @@ bch2_bucket_alloc_early(struct btree_trans *trans,
 static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
 						      struct bch_dev *ca,
-						      enum alloc_reserve reserve,
+						      enum bch_watermark watermark,
 						      struct bucket_alloc_state *s,
 						      struct closure *cl)
 {
@@ -474,7 +474,7 @@ static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
 			s->buckets_seen++;
 
-			ob = try_alloc_bucket(trans, ca, reserve,
+			ob = try_alloc_bucket(trans, ca, watermark,
 					      alloc_cursor, s, k, cl);
 			if (ob) {
 				iter.path->preserve = false;
@@ -507,7 +507,7 @@ static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
  */
 static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
 				      struct bch_dev *ca,
-				      enum alloc_reserve reserve,
+				      enum bch_watermark watermark,
 				      struct closure *cl,
 				      struct bch_dev_usage *usage)
 {
@@ -519,7 +519,7 @@ static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
 	bool waiting = false;
 again:
 	bch2_dev_usage_read_fast(ca, usage);
-	avail = dev_buckets_free(ca, *usage, reserve);
+	avail = dev_buckets_free(ca, *usage, watermark);
 
 	if (usage->d[BCH_DATA_need_discard].buckets > avail)
 		bch2_do_discards(c);
@@ -548,8 +548,8 @@ static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
 		closure_wake_up(&c->freelist_wait);
 alloc:
 	ob = likely(freespace)
-		? bch2_bucket_alloc_freelist(trans, ca, reserve, &s, cl)
-		: bch2_bucket_alloc_early(trans, ca, reserve, &s, cl);
+		? bch2_bucket_alloc_freelist(trans, ca, watermark, &s, cl)
+		: bch2_bucket_alloc_early(trans, ca, watermark, &s, cl);
 
 	if (s.skipped_need_journal_commit * 2 > avail)
 		bch2_journal_flush_async(&c->journal, NULL);
@@ -564,7 +564,7 @@ static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
 	if (!IS_ERR(ob))
 		trace_and_count(c, bucket_alloc, ca,
-				bch2_alloc_reserves[reserve],
+				bch2_watermarks[watermark],
 				ob->bucket,
 				usage->d[BCH_DATA_free].buckets,
 				avail,
@@ -575,7 +575,7 @@ static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
 				"");
 	else if (!bch2_err_matches(PTR_ERR(ob), BCH_ERR_transaction_restart))
 		trace_and_count(c, bucket_alloc_fail, ca,
-				bch2_alloc_reserves[reserve],
+				bch2_watermarks[watermark],
 				0,
 				usage->d[BCH_DATA_free].buckets,
 				avail,
@@ -589,14 +589,14 @@ static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
 }
 
 struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
-				      enum alloc_reserve reserve,
+				      enum bch_watermark watermark,
 				      struct closure *cl)
 {
 	struct bch_dev_usage usage;
 	struct open_bucket *ob;
 
 	bch2_trans_do(c, NULL, NULL, 0,
-		      PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(&trans, ca, reserve,
+		      PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(&trans, ca, watermark,
 								   cl, &usage)));
 	return ob;
 }
@@ -629,7 +629,7 @@ static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca,
 			       struct bch_dev_usage *usage)
 {
 	u64 *v = stripe->next_alloc + ca->dev_idx;
-	u64 free_space = dev_buckets_available(ca, RESERVE_none);
+	u64 free_space = dev_buckets_available(ca, BCH_WATERMARK_normal);
 	u64 free_space_inv = free_space
 		? div64_u64(1ULL << 48, free_space)
 		: 1ULL << 48;
@@ -692,7 +692,7 @@ int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
 		      bool *have_cache,
 		      unsigned flags,
 		      enum bch_data_type data_type,
-		      enum alloc_reserve reserve,
+		      enum bch_watermark watermark,
 		      struct closure *cl)
 {
 	struct bch_fs *c = trans->c;
@@ -725,7 +725,7 @@ int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
 			continue;
 		}
 
-		ob = bch2_bucket_alloc_trans(trans, ca, reserve, cl, &usage);
+		ob = bch2_bucket_alloc_trans(trans, ca, watermark, cl, &usage);
 		if (!IS_ERR(ob))
 			bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
 		percpu_ref_put(&ca->ref);
@@ -766,7 +766,7 @@ static int bucket_alloc_from_stripe(struct btree_trans *trans,
 			 unsigned nr_replicas,
 			 unsigned *nr_effective,
 			 bool *have_cache,
-			 enum alloc_reserve reserve,
+			 enum bch_watermark watermark,
 			 unsigned flags,
 			 struct closure *cl)
 {
@@ -784,7 +784,7 @@ static int bucket_alloc_from_stripe(struct btree_trans *trans,
 	if (ec_open_bucket(c, ptrs))
 		return 0;
 
-	h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, reserve, cl);
+	h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, watermark, cl);
 	if (IS_ERR(h))
 		return PTR_ERR(h);
 	if (!h)
@@ -879,7 +879,7 @@ static int bucket_alloc_set_partial(struct bch_fs *c,
 				    unsigned nr_replicas,
 				    unsigned *nr_effective,
 				    bool *have_cache, bool ec,
-				    enum alloc_reserve reserve,
+				    enum bch_watermark watermark,
 				    unsigned flags)
 {
 	int i, ret = 0;
@@ -901,7 +901,7 @@ static int bucket_alloc_set_partial(struct bch_fs *c,
 			u64 avail;
 
 			bch2_dev_usage_read_fast(ca, &usage);
-			avail = dev_buckets_free(ca, usage, reserve);
+			avail = dev_buckets_free(ca, usage, watermark);
 			if (!avail)
 				continue;
@@ -931,7 +931,7 @@ static int __open_bucket_add_buckets(struct btree_trans *trans,
 			unsigned nr_replicas,
 			unsigned *nr_effective,
 			bool *have_cache,
-			enum alloc_reserve reserve,
+			enum bch_watermark watermark,
 			unsigned flags,
 			struct closure *_cl)
 {
@@ -962,7 +962,7 @@ static int __open_bucket_add_buckets(struct btree_trans *trans,
 	ret = bucket_alloc_set_partial(c, ptrs, wp, &devs,
 				       nr_replicas, nr_effective,
-				       have_cache, erasure_code, reserve, flags);
+				       have_cache, erasure_code, watermark, flags);
 	if (ret)
 		return ret;
@@ -971,7 +971,7 @@ static int __open_bucket_add_buckets(struct btree_trans *trans,
 					      target,
 					      nr_replicas, nr_effective,
 					      have_cache,
-					      reserve, flags, _cl);
+					      watermark, flags, _cl);
 	} else {
 retry_blocking:
 		/*
@@ -980,7 +980,7 @@ static int __open_bucket_add_buckets(struct btree_trans *trans,
 		 */
 		ret = bch2_bucket_alloc_set_trans(trans, ptrs, &wp->stripe, &devs,
 					nr_replicas, nr_effective, have_cache,
-					flags, wp->data_type, reserve, cl);
+					flags, wp->data_type, watermark, cl);
 		if (ret &&
 		    !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
 		    !bch2_err_matches(ret, BCH_ERR_insufficient_devices) &&
@@ -1003,7 +1003,7 @@ static int open_bucket_add_buckets(struct btree_trans *trans,
 			unsigned nr_replicas,
 			unsigned *nr_effective,
 			bool *have_cache,
-			enum alloc_reserve reserve,
+			enum bch_watermark watermark,
 			unsigned flags,
 			struct closure *cl)
 {
@@ -1013,7 +1013,7 @@ static int open_bucket_add_buckets(struct btree_trans *trans,
 		ret = __open_bucket_add_buckets(trans, ptrs, wp,
 				devs_have, target, erasure_code,
 				nr_replicas, nr_effective, have_cache,
-				reserve, flags, cl);
+				watermark, flags, cl);
 		if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
 		    bch2_err_matches(ret, BCH_ERR_operation_blocked) ||
 		    bch2_err_matches(ret, BCH_ERR_freelist_empty) ||
@@ -1026,7 +1026,7 @@ static int open_bucket_add_buckets(struct btree_trans *trans,
 	ret = __open_bucket_add_buckets(trans, ptrs, wp,
 			devs_have, target, false,
 			nr_replicas, nr_effective, have_cache,
-			reserve, flags, cl);
+			watermark, flags, cl);
 
 	return ret < 0 ? ret : 0;
 }
@@ -1263,7 +1263,7 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
 			     struct bch_devs_list *devs_have,
 			     unsigned nr_replicas,
 			     unsigned nr_replicas_required,
-			     enum alloc_reserve reserve,
+			     enum bch_watermark watermark,
 			     unsigned flags,
 			     struct closure *cl,
 			     struct write_point **wp_ret)
@@ -1296,7 +1296,7 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
 	ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
 				      target, erasure_code,
 				      nr_replicas, &nr_effective,
-				      &have_cache, reserve,
+				      &have_cache, watermark,
 				      flags, NULL);
 	if (!ret ||
 	    bch2_err_matches(ret, BCH_ERR_transaction_restart))
@@ -1315,14 +1315,14 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
 			ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
 					      0, erasure_code,
 					      nr_replicas, &nr_effective,
-					      &have_cache, reserve,
+					      &have_cache, watermark,
 					      flags, cl);
 		} else {
 allocate_blocking:
 			ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
 					      target, erasure_code,
 					      nr_replicas, &nr_effective,
-					      &have_cache, reserve,
+					      &have_cache, watermark,
 					      flags, cl);
 		}
 alloc_done:
...
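Aside: open_buckets_reserved(), renamed above, is the open-bucket half of the reservation scheme. It holds back a share of the fixed OPEN_BUCKETS_COUNT pool (1024, as defined in the header hunk further down) from less-critical watermarks, so btree allocations can always obtain an open bucket. A minimal standalone sketch of those thresholds follows; apart from the constant and ratios shown in the diff, every name in it is hypothetical:

```c
#include <stdio.h>

#define OPEN_BUCKETS_COUNT 1024	/* fixed pool size, from the header hunk below */

/* Hypothetical re-derivation of open_buckets_reserved(): how many of the
 * pool's open buckets each watermark must leave untouched. Btree allocations
 * may drain the pool completely; copygc keeps a quarter back; ordinary
 * writes keep half back. */
enum watermark { WM_btree_copygc, WM_btree, WM_copygc, WM_normal, WM_stripe };

static unsigned open_buckets_held_back(enum watermark wm)
{
	switch (wm) {
	case WM_btree:
	case WM_btree_copygc:
		return 0;
	case WM_copygc:
		return OPEN_BUCKETS_COUNT / 4;
	default:
		return OPEN_BUCKETS_COUNT / 2;
	}
}

int main(void)
{
	/* an allocation blocks or fails once the free count drops to the
	 * held-back amount for its watermark, mirroring the check in
	 * __try_alloc_bucket() above */
	printf("normal writes blocked below %u free open buckets\n",
	       open_buckets_held_back(WM_normal));
	printf("copygc blocked below %u\n", open_buckets_held_back(WM_copygc));
	return 0;
}
```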
@@ -14,7 +14,7 @@ struct bch_dev;
 struct bch_fs;
 struct bch_devs_List;
 
-extern const char * const bch2_alloc_reserves[];
+extern const char * const bch2_watermarks[];
 
 void bch2_reset_alloc_cursors(struct bch_fs *);
@@ -31,7 +31,7 @@ void bch2_dev_stripe_increment(struct bch_dev *, struct dev_stripe_state *);
 long bch2_bucket_alloc_new_fs(struct bch_dev *);
 
 struct open_bucket *bch2_bucket_alloc(struct bch_fs *, struct bch_dev *,
-				      enum alloc_reserve, struct closure *);
+				      enum bch_watermark, struct closure *);
 
 static inline void ob_push(struct bch_fs *c, struct open_buckets *obs,
 			   struct open_bucket *ob)
@@ -152,7 +152,7 @@ static inline bool bch2_bucket_is_open_safe(struct bch_fs *c, unsigned dev, u64
 int bch2_bucket_alloc_set_trans(struct btree_trans *, struct open_buckets *,
 			struct dev_stripe_state *, struct bch_devs_mask *,
 			unsigned, unsigned *, bool *, unsigned,
-			enum bch_data_type, enum alloc_reserve,
+			enum bch_data_type, enum bch_watermark,
 			struct closure *);
 
 int bch2_alloc_sectors_start_trans(struct btree_trans *,
@@ -160,7 +160,7 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *,
 			struct write_point_specifier,
 			struct bch_devs_list *,
 			unsigned, unsigned,
-			enum alloc_reserve,
+			enum bch_watermark,
 			unsigned,
 			struct closure *,
 			struct write_point **);
...
@@ -16,20 +16,18 @@ struct bucket_alloc_state {
 	u64 skipped_nouse;
 };
 
-struct ec_bucket_buf;
-
-#define BCH_ALLOC_RESERVES()	\
-	x(btree_movinggc)	\
+#define BCH_WATERMARKS()	\
+	x(btree_copygc)		\
 	x(btree)		\
-	x(movinggc)		\
-	x(none)			\
+	x(copygc)		\
+	x(normal)		\
 	x(stripe)
 
-enum alloc_reserve {
-#define x(name) RESERVE_##name,
-	BCH_ALLOC_RESERVES()
+enum bch_watermark {
+#define x(name) BCH_WATERMARK_##name,
+	BCH_WATERMARKS()
 #undef x
-	RESERVE_NR,
+	BCH_WATERMARK_NR,
 };
 
 #define OPEN_BUCKETS_COUNT 1024
...
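Aside: BCH_WATERMARKS() is an x-macro. It is expanded once with x(name) to generate the enum in the hunk above, and once with x(t) to generate the matching name strings in the bch2_watermarks[] hunk earlier, so the enum and the strings used by tracepoints and sysfs cannot drift apart. A compilable sketch assembled from those two hunks; the main() harness is illustrative and not part of the patch:

```c
#include <stdio.h>

#define BCH_WATERMARKS()	\
	x(btree_copygc)		\
	x(btree)		\
	x(copygc)		\
	x(normal)		\
	x(stripe)

/* first expansion: enum constants */
enum bch_watermark {
#define x(name) BCH_WATERMARK_##name,
	BCH_WATERMARKS()
#undef x
	BCH_WATERMARK_NR,
};

/* second expansion: the matching name table */
const char * const bch2_watermarks[] = {
#define x(t) #t,
	BCH_WATERMARKS()
#undef x
	NULL
};

int main(void)
{
	for (int i = 0; i < BCH_WATERMARK_NR; i++)
		printf("%d: %s\n", i, bch2_watermarks[i]);
	return 0;
}
```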
@@ -247,15 +247,15 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
 	struct open_buckets ob = { .nr = 0 };
 	struct bch_devs_list devs_have = (struct bch_devs_list) { 0 };
 	unsigned nr_reserve;
-	enum alloc_reserve alloc_reserve;
+	enum bch_watermark alloc_reserve;
 	int ret;
 
 	if (flags & BTREE_INSERT_USE_RESERVE) {
 		nr_reserve = 0;
-		alloc_reserve = RESERVE_btree_movinggc;
+		alloc_reserve = BCH_WATERMARK_btree_copygc;
 	} else {
 		nr_reserve = BTREE_NODE_RESERVE;
-		alloc_reserve = RESERVE_btree;
+		alloc_reserve = BCH_WATERMARK_btree;
 	}
 
 	mutex_lock(&c->btree_reserve_cache_lock);
...
@@ -150,26 +150,26 @@ static inline struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
 void bch2_dev_usage_init(struct bch_dev *);
 
-static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum alloc_reserve reserve)
+static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_watermark watermark)
 {
 	s64 reserved = 0;
 
-	switch (reserve) {
-	case RESERVE_NR:
+	switch (watermark) {
+	case BCH_WATERMARK_NR:
 		unreachable();
-	case RESERVE_stripe:
+	case BCH_WATERMARK_stripe:
 		reserved += ca->mi.nbuckets >> 6;
 		fallthrough;
-	case RESERVE_none:
+	case BCH_WATERMARK_normal:
 		reserved += ca->mi.nbuckets >> 6;
 		fallthrough;
-	case RESERVE_movinggc:
+	case BCH_WATERMARK_copygc:
 		reserved += ca->nr_btree_reserve;
 		fallthrough;
-	case RESERVE_btree:
+	case BCH_WATERMARK_btree:
 		reserved += ca->nr_btree_reserve;
 		fallthrough;
-	case RESERVE_btree_movinggc:
+	case BCH_WATERMARK_btree_copygc:
 		break;
 	}
@@ -178,17 +178,17 @@ static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum alloc_reser
 static inline u64 dev_buckets_free(struct bch_dev *ca,
 				   struct bch_dev_usage usage,
-				   enum alloc_reserve reserve)
+				   enum bch_watermark watermark)
 {
 	return max_t(s64, 0,
 		     usage.d[BCH_DATA_free].buckets -
 		     ca->nr_open_buckets -
-		     bch2_dev_buckets_reserved(ca, reserve));
+		     bch2_dev_buckets_reserved(ca, watermark));
 }
 
 static inline u64 __dev_buckets_available(struct bch_dev *ca,
 					  struct bch_dev_usage usage,
-					  enum alloc_reserve reserve)
+					  enum bch_watermark watermark)
 {
 	return max_t(s64, 0,
 		       usage.d[BCH_DATA_free].buckets
@@ -196,13 +196,13 @@ static inline u64 __dev_buckets_available(struct bch_dev *ca,
 		     + usage.d[BCH_DATA_need_gc_gens].buckets
 		     + usage.d[BCH_DATA_need_discard].buckets
 		     - ca->nr_open_buckets
-		     - bch2_dev_buckets_reserved(ca, reserve));
+		     - bch2_dev_buckets_reserved(ca, watermark));
 }
 
 static inline u64 dev_buckets_available(struct bch_dev *ca,
-					enum alloc_reserve reserve)
+					enum bch_watermark watermark)
 {
-	return __dev_buckets_available(ca, bch2_dev_usage_read(ca), reserve);
+	return __dev_buckets_available(ca, bch2_dev_usage_read(ca), watermark);
 }
 
 /* Filesystem usage: */
...
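Aside: the fallthrough cascade in bch2_dev_buckets_reserved() above makes each watermark's reservation a superset of the stricter ones: BCH_WATERMARK_btree_copygc reserves nothing, btree adds ca->nr_btree_reserve, copygc adds it again, and normal and stripe each add ca->mi.nbuckets >> 6 on top of that. A standalone re-derivation with hypothetical names but the same arithmetic:

```c
#include <stdio.h>
#include <stdint.h>

/* Hypothetical standalone copy of the cascade above; nb stands in for
 * ca->nr_btree_reserve, nbuckets for ca->mi.nbuckets. */
enum watermark { WM_btree_copygc, WM_btree, WM_copygc, WM_normal, WM_stripe };

static const char * const wm_names[] = {
	"btree_copygc", "btree", "copygc", "normal", "stripe",
};

static uint64_t buckets_reserved(enum watermark wm, uint64_t nb, uint64_t nbuckets)
{
	uint64_t reserved = 0;

	switch (wm) {
	case WM_stripe:
		reserved += nbuckets >> 6;
		/* fallthrough */
	case WM_normal:
		reserved += nbuckets >> 6;
		/* fallthrough */
	case WM_copygc:
		reserved += nb;
		/* fallthrough */
	case WM_btree:
		reserved += nb;
		/* fallthrough */
	case WM_btree_copygc:
		break;
	}
	return reserved;
}

int main(void)
{
	/* e.g. a 1M-bucket device with a 512-bucket btree reserve:
	 * prints 0, 512, 1024, 17408, 33792 */
	for (int wm = WM_btree_copygc; wm <= WM_stripe; wm++)
		printf("%-12s reserves %8llu buckets\n", wm_names[wm],
		       (unsigned long long)buckets_reserved(wm, 512, 1 << 20));
	return 0;
}
```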
@@ -381,7 +381,7 @@ void bch2_update_unwritten_extent(struct btree_trans *trans,
 				&update->op.devs_have,
 				update->op.nr_replicas,
 				update->op.nr_replicas,
-				update->op.alloc_reserve,
+				update->op.watermark,
 				0, &cl, &wp);
 		if (bch2_err_matches(ret, BCH_ERR_operation_blocked)) {
 			bch2_trans_unlock(trans);
@@ -459,7 +459,7 @@ int bch2_data_update_init(struct btree_trans *trans,
 		bch2_compression_opt_to_type[io_opts.background_compression ?:
 					     io_opts.compression];
 	if (m->data_opts.btree_insert_flags & BTREE_INSERT_USE_RESERVE)
-		m->op.alloc_reserve = RESERVE_movinggc;
+		m->op.watermark = BCH_WATERMARK_copygc;
 
 	bkey_for_each_ptr(ptrs, ptr)
 		percpu_ref_get(&bch_dev_bkey_exists(c, ptr->dev)->ref);
...
@@ -1333,7 +1333,7 @@ static int ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
 static struct ec_stripe_head *
 ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
 			 unsigned algo, unsigned redundancy,
-			 enum alloc_reserve reserve)
+			 enum bch_watermark watermark)
 {
 	struct ec_stripe_head *h;
 	struct bch_dev *ca;
@@ -1349,7 +1349,7 @@ ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
 	h->target = target;
 	h->algo = algo;
 	h->redundancy = redundancy;
-	h->reserve = reserve;
+	h->watermark = watermark;
 
 	rcu_read_lock();
 	h->devs = target_rw_devs(c, BCH_DATA_user, target);
@@ -1384,7 +1384,7 @@ struct ec_stripe_head *__bch2_ec_stripe_head_get(struct btree_trans *trans,
 			unsigned target,
 			unsigned algo,
 			unsigned redundancy,
-			enum alloc_reserve reserve)
+			enum bch_watermark watermark)
 {
 	struct bch_fs *c = trans->c;
 	struct ec_stripe_head *h;
@@ -1406,21 +1406,21 @@ struct ec_stripe_head *__bch2_ec_stripe_head_get(struct btree_trans *trans,
 		if (h->target == target &&
 		    h->algo == algo &&
 		    h->redundancy == redundancy &&
-		    h->reserve == reserve) {
+		    h->watermark == watermark) {
 			ret = bch2_trans_mutex_lock(trans, &h->lock);
 			if (ret)
 				h = ERR_PTR(ret);
 			goto found;
 		}
 
-	h = ec_new_stripe_head_alloc(c, target, algo, redundancy, reserve);
+	h = ec_new_stripe_head_alloc(c, target, algo, redundancy, watermark);
 found:
 	mutex_unlock(&c->ec_stripe_head_lock);
 	return h;
 }
 
 static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_head *h,
-				    enum alloc_reserve reserve, struct closure *cl)
+				    enum bch_watermark watermark, struct closure *cl)
 {
 	struct bch_fs *c = trans->c;
 	struct bch_devs_mask devs = h->devs;
@@ -1453,7 +1453,7 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_
 					    &nr_have_parity,
 					    &have_cache, 0,
 					    BCH_DATA_parity,
-					    reserve,
+					    watermark,
 					    cl);
 
 		open_bucket_for_each(c, &buckets, ob, i) {
@@ -1480,7 +1480,7 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_
 					    &nr_have_data,
 					    &have_cache, 0,
 					    BCH_DATA_user,
-					    reserve,
+					    watermark,
 					    cl);
 
 		open_bucket_for_each(c, &buckets, ob, i) {
@@ -1658,7 +1658,7 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
 			unsigned target,
 			unsigned algo,
 			unsigned redundancy,
-			enum alloc_reserve reserve,
+			enum bch_watermark watermark,
 			struct closure *cl)
 {
 	struct bch_fs *c = trans->c;
@@ -1666,7 +1666,7 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
 	bool waiting = false;
 	int ret;
 
-	h = __bch2_ec_stripe_head_get(trans, target, algo, redundancy, reserve);
+	h = __bch2_ec_stripe_head_get(trans, target, algo, redundancy, watermark);
 	if (!h)
 		bch_err(c, "no stripe head");
 	if (IS_ERR_OR_NULL(h))
@@ -1687,7 +1687,7 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
 		goto alloc_existing;
 
 	/* First, try to allocate a full stripe: */
-	ret = new_stripe_alloc_buckets(trans, h, RESERVE_stripe, NULL) ?:
+	ret = new_stripe_alloc_buckets(trans, h, BCH_WATERMARK_stripe, NULL) ?:
 		__bch2_ec_stripe_head_reserve(trans, h);
 	if (!ret)
 		goto allocate_buf;
@@ -1706,8 +1706,8 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
 	if (waiting || !cl || ret != -BCH_ERR_stripe_alloc_blocked)
 		goto err;
 
-	if (reserve == RESERVE_movinggc) {
-		ret = new_stripe_alloc_buckets(trans, h, reserve, NULL) ?:
+	if (watermark == BCH_WATERMARK_copygc) {
+		ret = new_stripe_alloc_buckets(trans, h, watermark, NULL) ?:
 			__bch2_ec_stripe_head_reserve(trans, h);
 		if (ret)
 			goto err;
@@ -1723,10 +1723,10 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
 	closure_wake_up(&c->freelist_wait);
 alloc_existing:
 	/*
-	 * Retry allocating buckets, with the reserve watermark for this
+	 * Retry allocating buckets, with the watermark for this
 	 * particular write:
 	 */
-	ret = new_stripe_alloc_buckets(trans, h, reserve, cl);
+	ret = new_stripe_alloc_buckets(trans, h, watermark, cl);
 	if (ret)
 		goto err;
@@ -1880,7 +1880,7 @@ void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
 	list_for_each_entry(h, &c->ec_stripe_head_list, list) {
 		prt_printf(out, "target %u algo %u redundancy %u %s:\n",
 		       h->target, h->algo, h->redundancy,
-		       bch2_alloc_reserves[h->reserve]);
+		       bch2_watermarks[h->watermark]);
 
 		if (h->s)
 			prt_printf(out, "\tidx %llu blocks %u+%u allocated %u\n",
@@ -1898,7 +1898,7 @@ void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
 			   s->idx, s->nr_data, s->nr_parity,
 			   atomic_read(&s->ref[STRIPE_REF_io]),
 			   atomic_read(&s->ref[STRIPE_REF_stripe]),
-			   bch2_alloc_reserves[s->h->reserve]);
+			   bch2_watermarks[s->h->watermark]);
 	}
 
 	mutex_unlock(&c->ec_stripe_new_lock);
 }
...
@@ -187,7 +187,7 @@ struct ec_stripe_head {
 	unsigned target;
 	unsigned algo;
 	unsigned redundancy;
-	enum alloc_reserve reserve;
+	enum bch_watermark watermark;
 	struct bch_devs_mask devs;
 	unsigned nr_active_devs;
@@ -211,7 +211,7 @@ int bch2_ec_stripe_new_alloc(struct bch_fs *, struct ec_stripe_head *);
 void bch2_ec_stripe_head_put(struct bch_fs *, struct ec_stripe_head *);
 
 struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *,
 			unsigned, unsigned, unsigned,
-			enum alloc_reserve, struct closure *);
+			enum bch_watermark, struct closure *);
 
 void bch2_stripes_heap_update(struct bch_fs *, struct stripe *, size_t);
 void bch2_stripes_heap_del(struct bch_fs *, struct stripe *, size_t);
...
@@ -451,7 +451,7 @@ int bch2_extent_fallocate(struct btree_trans *trans,
 				&devs_have,
 				opts.data_replicas,
 				opts.data_replicas,
-				RESERVE_none, 0, &cl, &wp);
+				BCH_WATERMARK_normal, 0, &cl, &wp);
 		if (ret) {
 			bch2_trans_unlock(trans);
 			closure_sync(&cl);
@@ -1696,7 +1696,7 @@ static void __bch2_write(struct bch_write_op *op)
 				&op->devs_have,
 				op->nr_replicas,
 				op->nr_replicas_required,
-				op->alloc_reserve,
+				op->watermark,
 				op->flags,
 				(op->flags & (BCH_WRITE_ALLOC_NOWAIT|
 					      BCH_WRITE_ONLY_SPECIFIED_DEVS))
...
@@ -59,7 +59,7 @@ enum bch_write_flags {
 static inline struct workqueue_struct *index_update_wq(struct bch_write_op *op)
 {
-	return op->alloc_reserve == RESERVE_movinggc
+	return op->watermark == BCH_WATERMARK_copygc
 		? op->c->copygc_wq
 		: op->c->btree_update_wq;
 }
@@ -89,7 +89,7 @@ static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c,
 	op->compression_type = bch2_compression_opt_to_type[opts.compression];
 	op->nr_replicas = 0;
 	op->nr_replicas_required = c->opts.data_replicas_required;
-	op->alloc_reserve = RESERVE_none;
+	op->watermark = BCH_WATERMARK_normal;
 	op->incompressible = 0;
 	op->open_buckets.nr = 0;
 	op->devs_have.nr = 0;
...
@@ -119,7 +119,7 @@ struct bch_write_op {
 	unsigned compression_type:4;
 	unsigned nr_replicas:4;
 	unsigned nr_replicas_required:4;
-	unsigned alloc_reserve:3;
+	unsigned watermark:3;
 	unsigned incompressible:1;
 	unsigned stripe_waited:1;
...
@@ -828,7 +828,7 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
 				break;
 			}
 		} else {
-			ob[nr_got] = bch2_bucket_alloc(c, ca, RESERVE_none, cl);
+			ob[nr_got] = bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal, cl);
 			ret = PTR_ERR_OR_ZERO(ob[nr_got]);
 			if (ret)
 				break;
...
@@ -271,7 +271,7 @@ unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
 	for_each_rw_member(ca, c, dev_idx) {
 		struct bch_dev_usage usage = bch2_dev_usage_read(ca);
 
-		fragmented_allowed = ((__dev_buckets_available(ca, usage, RESERVE_stripe) *
+		fragmented_allowed = ((__dev_buckets_available(ca, usage, BCH_WATERMARK_stripe) *
 				       ca->mi.bucket_size) >> 1);
 		fragmented = 0;
...
@@ -850,8 +850,8 @@ static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
 	prt_printf(out, "reserves:");
 	prt_newline(out);
-	for (i = 0; i < RESERVE_NR; i++) {
-		prt_str(out, bch2_alloc_reserves[i]);
+	for (i = 0; i < BCH_WATERMARK_NR; i++) {
+		prt_str(out, bch2_watermarks[i]);
 		prt_tab(out);
 		prt_u64(out, bch2_dev_buckets_reserved(ca, i));
 		prt_tab_rjust(out);
...