Commit 7635e1a6 authored by Kent Overstreet

bcachefs: Rework open bucket partial list allocation

Now, any open_bucket can go on the partial list: allocating from the
partial list has been moved to its own dedicated function,
open_bucket_add_buckets() -> bucket_alloc_set_partial().

In particular, this means that erasure coded buckets can safely go on
the partial list; the new location works with the "allocate an ec bucket
first, then the rest" logic.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent e53d03fe
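
The following is a minimal, self-contained sketch of the control flow described in the commit message, not the real implementation: the types are simplified stand-ins, locking is omitted, and every helper is a placeholder modelled loosely on open_bucket_add_buckets(), bucket_alloc_set_partial() and bch2_bucket_alloc_set_trans() from the (collapsed) alloc_foreground.c diff.

/*
 * Minimal model of the reworked flow (illustrative only; real bcachefs
 * types and locking are omitted).  bucket_alloc_set_partial() is now a
 * dedicated step inside open_bucket_add_buckets(), so both ec and
 * non-ec buckets can be reused from the partial list.
 */
#include <stdbool.h>
#include <stdio.h>

struct open_bucket {
	bool	ec;		/* bucket belongs to an erasure coded stripe */
	bool	on_partial_list;
};

/* Placeholder partial list: one slot, pre-populated for the demo. */
static struct open_bucket partial_slot = { .ec = true, .on_partial_list = true };

/* Stand-in for bucket_alloc_set_partial(): reuse a matching partial bucket. */
static struct open_bucket *bucket_alloc_set_partial(bool want_ec)
{
	if (partial_slot.on_partial_list && partial_slot.ec == want_ec) {
		partial_slot.on_partial_list = false;
		printf("reused bucket from partial list (ec=%d)\n", want_ec);
		return &partial_slot;
	}
	return NULL;
}

/* Stand-in for a fresh allocation (bch2_bucket_alloc_set_trans() in the real code). */
static struct open_bucket *bucket_alloc_fresh(bool want_ec)
{
	static struct open_bucket fresh;

	fresh.ec = want_ec;
	printf("allocated fresh bucket (ec=%d)\n", want_ec);
	return &fresh;
}

/*
 * Sketch of open_bucket_add_buckets(): the ec decision has already been
 * made by the caller ("allocate an ec bucket first, then the rest"), so
 * the partial list can be consulted for either kind of bucket.
 */
static struct open_bucket *open_bucket_add_buckets(bool want_ec)
{
	struct open_bucket *ob = bucket_alloc_set_partial(want_ec);

	return ob ? ob : bucket_alloc_fresh(want_ec);
}

int main(void)
{
	open_bucket_add_buckets(true);	/* ec bucket: may now come from the partial list */
	open_bucket_add_buckets(false);	/* remaining replicas: fresh allocation here */
	return 0;
}

Because the partial-list check is its own step rather than a flag on bch2_bucket_alloc(), the caller's ec-first ordering decides what kind of bucket is wanted before the partial list is consulted, which is what makes ec buckets safe to keep there.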
@@ -31,8 +31,7 @@ void bch2_dev_stripe_increment(struct bch_dev *, struct dev_stripe_state *);
 long bch2_bucket_alloc_new_fs(struct bch_dev *);
 struct open_bucket *bch2_bucket_alloc(struct bch_fs *, struct bch_dev *,
-				      enum alloc_reserve, bool,
-				      struct closure *);
+				      enum alloc_reserve, struct closure *);
 static inline void ob_push(struct bch_fs *c, struct open_buckets *obs,
 			   struct open_bucket *ob)
@@ -152,8 +151,9 @@ static inline bool bch2_bucket_is_open_safe(struct bch_fs *c, unsigned dev, u64
 int bch2_bucket_alloc_set_trans(struct btree_trans *, struct open_buckets *,
 		      struct dev_stripe_state *, struct bch_devs_mask *,
-		      unsigned, unsigned *, bool *, enum alloc_reserve,
-		      unsigned, struct closure *);
+		      unsigned, unsigned *, bool *, unsigned,
+		      enum bch_data_type, enum alloc_reserve,
+		      struct closure *);
 int bch2_alloc_sectors_start_trans(struct btree_trans *,
 			unsigned, unsigned,
@@ -53,10 +53,9 @@ struct open_bucket {
 	 * the block in the stripe this open_bucket corresponds to:
 	 */
 	u8			ec_idx;
-	enum bch_data_type	data_type:8;
+	enum bch_data_type	data_type:6;
 	unsigned		valid:1;
 	unsigned		on_partial_list:1;
-	unsigned		alloc_reserve:3;
 	u8			dev;
 	u8			gen;
@@ -1451,9 +1451,9 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_
 					    &devs,
 					    h->s->nr_parity,
 					    &nr_have_parity,
-					    &have_cache,
+					    &have_cache, 0,
+					    BCH_DATA_parity,
 					    reserve,
-					    0,
 					    cl);
 		open_bucket_for_each(c, &buckets, ob, i) {
@@ -1478,9 +1478,9 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_
 					    &devs,
 					    h->s->nr_data,
 					    &nr_have_data,
-					    &have_cache,
+					    &have_cache, 0,
+					    BCH_DATA_user,
 					    reserve,
-					    0,
 					    cl);
 		open_bucket_for_each(c, &buckets, ob, i) {
@@ -780,8 +780,7 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
 				break;
 			}
 		} else {
-			ob[nr_got] = bch2_bucket_alloc(c, ca, RESERVE_none,
-						       false, cl);
+			ob[nr_got] = bch2_bucket_alloc(c, ca, RESERVE_none, cl);
 			ret = PTR_ERR_OR_ZERO(ob[nr_got]);
 			if (ret)
 				break;
@@ -516,7 +516,6 @@ DEFINE_EVENT(bch_fs, gc_gens_end,
 DECLARE_EVENT_CLASS(bucket_alloc,
 	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
-		 bool user,
 		 u64 bucket,
 		 u64 free,
 		 u64 avail,
@@ -525,14 +524,13 @@ DECLARE_EVENT_CLASS(bucket_alloc,
 		 struct bucket_alloc_state *s,
 		 bool nonblocking,
 		 const char *err),
-	TP_ARGS(ca, alloc_reserve, user, bucket, free, avail,
+	TP_ARGS(ca, alloc_reserve, bucket, free, avail,
 		copygc_wait_amount, copygc_waiting_for,
 		s, nonblocking, err),
 	TP_STRUCT__entry(
 		__field(u8, dev)
 		__array(char, reserve, 16)
-		__field(bool, user)
 		__field(u64, bucket)
 		__field(u64, free)
 		__field(u64, avail)
@@ -550,7 +548,6 @@ DECLARE_EVENT_CLASS(bucket_alloc,
 	TP_fast_assign(
 		__entry->dev = ca->dev_idx;
 		strscpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
-		__entry->user = user;
 		__entry->bucket = bucket;
 		__entry->free = free;
 		__entry->avail = avail;
@@ -565,9 +562,8 @@ DECLARE_EVENT_CLASS(bucket_alloc,
 		strscpy(__entry->err, err, sizeof(__entry->err));
 	),
-	TP_printk("reserve %s user %u bucket %u:%llu free %llu avail %llu copygc_wait %llu/%lli seen %llu open %llu need_journal_commit %llu nouse %llu nocow %llu nonblocking %u err %s",
+	TP_printk("reserve %s bucket %u:%llu free %llu avail %llu copygc_wait %llu/%lli seen %llu open %llu need_journal_commit %llu nouse %llu nocow %llu nonblocking %u err %s",
 		  __entry->reserve,
-		  __entry->user,
 		  __entry->dev,
 		  __entry->bucket,
 		  __entry->free,
@@ -585,7 +581,6 @@ DECLARE_EVENT_CLASS(bucket_alloc,
 DEFINE_EVENT(bucket_alloc, bucket_alloc,
 	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
-		 bool user,
 		 u64 bucket,
 		 u64 free,
 		 u64 avail,
@@ -594,14 +589,13 @@ DEFINE_EVENT(bucket_alloc, bucket_alloc,
 		 struct bucket_alloc_state *s,
 		 bool nonblocking,
 		 const char *err),
-	TP_ARGS(ca, alloc_reserve, user, bucket, free, avail,
+	TP_ARGS(ca, alloc_reserve, bucket, free, avail,
 		copygc_wait_amount, copygc_waiting_for,
 		s, nonblocking, err)
 );
 DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
 	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
-		 bool user,
 		 u64 bucket,
 		 u64 free,
 		 u64 avail,
@@ -610,7 +604,7 @@ DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
 		 struct bucket_alloc_state *s,
 		 bool nonblocking,
 		 const char *err),
-	TP_ARGS(ca, alloc_reserve, user, bucket, free, avail,
+	TP_ARGS(ca, alloc_reserve, bucket, free, avail,
 		copygc_wait_amount, copygc_waiting_for,
 		s, nonblocking, err)
 );