Commit c93cead0 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Always use bch2_extent_trim_atomic()

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent db8a5f0a
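
The one-line commit message leaves the rationale implicit; the summary below is inferred from the diff itself. Previously an extent insert could span multiple leaf nodes, in which case the insert path returned BTREE_INSERT_NEED_TRAVERSE and the caller had to traverse to the next node and retry. This commit instead requires callers to trim their keys to the current leaf with bch2_extent_trim_atomic() before inserting, asserts that invariant via the new bch2_extent_is_atomic() helper, and deletes the BTREE_INSERT_NEED_TRAVERSE path entirely (bch2_btree_insert_list_at() goes away too; its use in bch2_write_index_default() is open-coded). A minimal sketch of the caller-side contract; the variables are hypothetical, only the two bcachefs calls appear in the diff:

	/* clamp the extent to the leaf node the iterator points at... */
	bch2_extent_trim_atomic(k, iter);

	/* ...so the insert is atomic and can no longer span leaf nodes: */
	ret = bch2_btree_insert_at(c, disk_res, journal_seq, flags,
				   BTREE_INSERT_ENTRY(iter, k));
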
@@ -490,8 +490,6 @@ struct btree_root {
 enum btree_insert_ret {
 	BTREE_INSERT_OK,
-	/* extent spanned multiple leaf nodes: have to traverse to next node: */
-	BTREE_INSERT_NEED_TRAVERSE,
 	/* leaf node needs to be split */
 	BTREE_INSERT_BTREE_NODE_FULL,
 	BTREE_INSERT_ENOSPC,
...
@@ -128,9 +128,6 @@ enum {
 int bch2_btree_delete_at(struct btree_iter *, unsigned);
 
-int bch2_btree_insert_list_at(struct btree_iter *, struct keylist *,
-			      struct disk_reservation *, u64 *, unsigned);
-
 int bch2_btree_insert(struct bch_fs *, enum btree_id, struct bkey_i *,
 		      struct disk_reservation *, u64 *, int flags);
...
@@ -185,9 +185,8 @@ void bch2_btree_journal_key(struct btree_insert *trans,
 	set_btree_node_dirty(b);
 }
 
-static enum btree_insert_ret
-bch2_insert_fixup_key(struct btree_insert *trans,
-		      struct btree_insert_entry *insert)
+static void bch2_insert_fixup_key(struct btree_insert *trans,
+				  struct btree_insert_entry *insert)
 {
 	struct btree_iter *iter = insert->iter;
 	struct btree_iter_level *l = &iter->l[0];
@@ -199,30 +198,27 @@ bch2_insert_fixup_key(struct btree_insert *trans,
 	if (bch2_btree_bset_insert_key(iter, l->b, &l->iter,
 				       insert->k))
 		bch2_btree_journal_key(trans, iter, insert->k);
-
-	return BTREE_INSERT_OK;
 }
 
 /**
  * btree_insert_key - insert a key one key into a leaf node
  */
-static enum btree_insert_ret
-btree_insert_key_leaf(struct btree_insert *trans,
-		      struct btree_insert_entry *insert)
+static void btree_insert_key_leaf(struct btree_insert *trans,
+				  struct btree_insert_entry *insert)
 {
 	struct bch_fs *c = trans->c;
 	struct btree_iter *iter = insert->iter;
 	struct btree *b = iter->l[0].b;
-	enum btree_insert_ret ret;
 	int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s);
 	int old_live_u64s = b->nr.live_u64s;
 	int live_u64s_added, u64s_added;
 
 	bch2_mark_update(trans, insert);
 
-	ret = !btree_node_is_extents(b)
-		? bch2_insert_fixup_key(trans, insert)
-		: bch2_insert_fixup_extent(trans, insert);
+	if (!btree_node_is_extents(b))
+		bch2_insert_fixup_key(trans, insert);
+	else
+		bch2_insert_fixup_extent(trans, insert);
 
 	live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
 	u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s;
@@ -237,7 +233,6 @@ btree_insert_key_leaf(struct btree_insert *trans,
 		bch2_btree_iter_reinit_node(iter, b);
 
 	trace_btree_insert_key(c, b, insert->k);
-	return ret;
 }
 
 /* Deferred btree updates: */
@@ -291,9 +286,8 @@ static void deferred_update_flush(struct journal *j,
 	kfree(k);
 }
 
-static enum btree_insert_ret
-btree_insert_key_deferred(struct btree_insert *trans,
-			  struct btree_insert_entry *insert)
+static void btree_insert_key_deferred(struct btree_insert *trans,
+				      struct btree_insert_entry *insert)
 {
 	struct bch_fs *c = trans->c;
 	struct journal *j = &c->journal;
@@ -321,8 +315,6 @@ btree_insert_key_deferred(struct btree_insert *trans,
 	bch2_journal_pin_update(j, trans->journal_res.seq, &d->journal,
 				deferred_update_flush);
 	spin_unlock(&d->lock);
-
-	return BTREE_INSERT_OK;
 }
 
 void bch2_deferred_update_free(struct bch_fs *c,
@@ -485,13 +477,13 @@ btree_key_can_insert(struct btree_insert *trans,
 	return BTREE_INSERT_OK;
 }
 
-static inline enum btree_insert_ret
-do_btree_insert_one(struct btree_insert *trans,
-		    struct btree_insert_entry *insert)
+static inline void do_btree_insert_one(struct btree_insert *trans,
+				       struct btree_insert_entry *insert)
 {
-	return likely(!insert->deferred)
-		? btree_insert_key_leaf(trans, insert)
-		: btree_insert_key_deferred(trans, insert);
+	if (likely(!insert->deferred))
+		btree_insert_key_leaf(trans, insert);
+	else
+		btree_insert_key_deferred(trans, insert);
 }
 
 /*
@@ -595,19 +587,8 @@ static inline int do_btree_insert_at(struct btree_insert *trans,
 	}
 
 	trans->did_work = true;
-	trans_for_each_entry(trans, i) {
-		switch (do_btree_insert_one(trans, i)) {
-		case BTREE_INSERT_OK:
-			break;
-		case BTREE_INSERT_NEED_TRAVERSE:
-			BUG_ON((trans->flags &
-				(BTREE_INSERT_ATOMIC|BTREE_INSERT_NOUNLOCK)));
-			ret = -EINTR;
-			goto out;
-		default:
-			BUG();
-		}
-	}
+	trans_for_each_entry(trans, i)
+		do_btree_insert_one(trans, i);
 out:
 	BUG_ON(ret &&
 	       (trans->flags & BTREE_INSERT_JOURNAL_RESERVED) &&
@@ -629,6 +610,8 @@ static inline void btree_insert_entry_checks(struct bch_fs *c,
 	if (!i->deferred) {
 		BUG_ON(i->iter->level);
 		BUG_ON(bkey_cmp(bkey_start_pos(&i->k->k), i->iter->pos));
+		EBUG_ON((i->iter->flags & BTREE_ITER_IS_EXTENTS) &&
+			!bch2_extent_is_atomic(i->k, i->iter));
 
 		bch2_btree_iter_verify_locks(i->iter);
 	}
@@ -875,28 +858,6 @@ int bch2_btree_delete_at(struct btree_iter *iter, unsigned flags)
 				    BTREE_INSERT_ENTRY(iter, &k));
 }
 
-int bch2_btree_insert_list_at(struct btree_iter *iter,
-			      struct keylist *keys,
-			      struct disk_reservation *disk_res,
-			      u64 *journal_seq, unsigned flags)
-{
-	BUG_ON(flags & BTREE_INSERT_ATOMIC);
-	BUG_ON(bch2_keylist_empty(keys));
-	bch2_verify_keylist_sorted(keys);
-
-	while (!bch2_keylist_empty(keys)) {
-		int ret = bch2_btree_insert_at(iter->c, disk_res,
-				journal_seq, flags,
-				BTREE_INSERT_ENTRY(iter, bch2_keylist_front(keys)));
-		if (ret)
-			return ret;
-
-		bch2_keylist_pop_front(keys);
-	}
-
-	return 0;
-}
-
 /**
  * bch_btree_insert - insert keys into the extent btree
  * @c:			pointer to struct bch_fs
@@ -962,6 +923,7 @@ int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
 			/* create the biggest key we can */
 			bch2_key_resize(&delete.k, max_sectors);
 			bch2_cut_back(end, &delete.k);
+			bch2_extent_trim_atomic(&delete, &iter);
 		}
 
 		ret = bch2_btree_insert_at(c, NULL, journal_seq,
...
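
bch2_btree_delete_range() builds the largest whiteout key it can, which may extend past the current leaf; the added trim keeps each insert atomic. A worked example with hypothetical positions:

	/*
	 * Suppose the leaf ends at 1024 and the range being deleted is
	 * [512, 2048). bch2_key_resize() + bch2_cut_back() yield a
	 * whiteout spanning [512, 2048), which bch2_extent_trim_atomic()
	 * clamps to [512, 1024); the enclosing loop then continues from
	 * iter.pos on the next leaf until it reaches `end`.
	 */
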
@@ -928,15 +928,25 @@ static void extent_insert_committed(struct extent_insert_state *s)
 	insert->k.needs_whiteout	= false;
 }
 
-void bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter)
+static inline struct bpos
+bch2_extent_atomic_end(struct bkey_i *k, struct btree_iter *iter)
 {
 	struct btree *b = iter->l[0].b;
 
 	BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);
+	BUG_ON(bkey_cmp(bkey_start_pos(&k->k), b->data->min_key) < 0);
+
+	return bpos_min(k->k.p, b->key.k.p);
+}
 
-	bch2_cut_back(b->key.k.p, &k->k);
-	BUG_ON(bkey_cmp(bkey_start_pos(&k->k), b->data->min_key) < 0);
+void bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter)
+{
+	bch2_cut_back(bch2_extent_atomic_end(k, iter), &k->k);
+}
+
+bool bch2_extent_is_atomic(struct bkey_i *k, struct btree_iter *iter)
+{
+	return !bkey_cmp(bch2_extent_atomic_end(k, iter), k->k.p);
 }
 
 enum btree_insert_ret
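
Both bch2_extent_trim_atomic() and bch2_extent_is_atomic() are now defined in terms of a single bch2_extent_atomic_end(), so trimming and checking cannot disagree. A minimal sketch of the resulting invariant (k and iter are hypothetical; the calls are those added above):

	bch2_extent_trim_atomic(k, iter);	/* k->k.p = bpos_min(k->k.p, b->key.k.p) */
	BUG_ON(!bch2_extent_is_atomic(k, iter));	/* holds by construction after the trim */
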
@@ -952,9 +962,6 @@ bch2_extent_can_insert(struct btree_insert *trans,
 	struct bkey_s_c k;
 	int sectors;
 
-	BUG_ON(trans->flags & BTREE_INSERT_ATOMIC &&
-	       !bch2_extent_is_atomic(&insert->k->k, insert->iter));
-
 	/*
 	 * We avoid creating whiteouts whenever possible when deleting, but
 	 * those optimizations mean we may potentially insert two whiteouts
@@ -1216,12 +1223,10 @@ static void __bch2_insert_fixup_extent(struct extent_insert_state *s)
  * If the end of iter->pos is not the same as the end of insert, then
  * key insertion needs to continue/be retried.
  */
-enum btree_insert_ret
-bch2_insert_fixup_extent(struct btree_insert *trans,
-			 struct btree_insert_entry *insert)
+void bch2_insert_fixup_extent(struct btree_insert *trans,
+			      struct btree_insert_entry *insert)
 {
 	struct btree_iter *iter	= insert->iter;
-	struct btree *b		= iter->l[0].b;
 	struct extent_insert_state s = {
 		.trans		= trans,
 		.insert		= insert,
@@ -1248,16 +1253,9 @@ bch2_insert_fixup_extent(struct btree_insert *trans,
 	extent_insert_committed(&s);
 
+	BUG_ON(insert->k->k.size);
 	EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
 	EBUG_ON(bkey_cmp(iter->pos, s.committed));
-
-	if (insert->k->k.size) {
-		/* got to the end of this leaf node */
-		BUG_ON(bkey_cmp(iter->pos, b->key.k.p));
-		return BTREE_INSERT_NEED_TRAVERSE;
-	}
-
-	return BTREE_INSERT_OK;
 }
 
 const char *bch2_extent_invalid(const struct bch_fs *c, struct bkey_s_c k)
...
@@ -407,21 +407,13 @@ enum merge_result bch2_reservation_merge(struct bch_fs *,
 }
 
 void bch2_extent_trim_atomic(struct bkey_i *, struct btree_iter *);
-
-static inline bool bch2_extent_is_atomic(struct bkey *k,
-					 struct btree_iter *iter)
-{
-	struct btree *b = iter->l[0].b;
-
-	return bkey_cmp(k->p, b->key.k.p) <= 0 &&
-		bkey_cmp(bkey_start_pos(k), b->data->min_key) >= 0;
-}
+bool bch2_extent_is_atomic(struct bkey_i *, struct btree_iter *);
 
 enum btree_insert_ret
 bch2_extent_can_insert(struct btree_insert *, struct btree_insert_entry *,
 		       unsigned *);
-enum btree_insert_ret
-bch2_insert_fixup_extent(struct btree_insert *, struct btree_insert_entry *);
+void bch2_insert_fixup_extent(struct btree_insert *,
+			      struct btree_insert_entry *);
 
 void bch2_extent_mark_replicas_cached(struct bch_fs *, struct bkey_s_extent,
 				      unsigned, unsigned);
...
@@ -293,18 +293,36 @@ static void bch2_write_done(struct closure *cl)
 int bch2_write_index_default(struct bch_write_op *op)
 {
+	struct bch_fs *c = op->c;
 	struct keylist *keys = &op->insert_keys;
 	struct btree_iter iter;
 	int ret;
 
-	bch2_btree_iter_init(&iter, op->c, BTREE_ID_EXTENTS,
+	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS,
 			     bkey_start_pos(&bch2_keylist_front(keys)->k),
 			     BTREE_ITER_INTENT);
 
-	ret = bch2_btree_insert_list_at(&iter, keys, &op->res,
-					op_journal_seq(op),
-					BTREE_INSERT_NOFAIL|
-					BTREE_INSERT_USE_RESERVE);
+	do {
+		BKEY_PADDED(k) split;
+
+		bkey_copy(&split.k, bch2_keylist_front(keys));
+
+		bch2_extent_trim_atomic(&split.k, &iter);
+
+		ret = bch2_btree_insert_at(c, &op->res,
+					   op_journal_seq(op),
+					   BTREE_INSERT_NOFAIL|
+					   BTREE_INSERT_USE_RESERVE,
+					   BTREE_INSERT_ENTRY(&iter, &split.k));
+		if (ret)
+			break;
+
+		if (bkey_cmp(iter.pos, bch2_keylist_front(keys)->k.p) < 0)
+			bch2_cut_front(iter.pos, bch2_keylist_front(keys));
+		else
+			bch2_keylist_pop_front(keys);
+	} while (!bch2_keylist_empty(keys));
+
 	bch2_btree_iter_unlock(&iter);
 
 	return ret;
...
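
The open-coded loop replaces bch2_btree_insert_list_at(): each pass inserts a trimmed copy of the front key, then either advances past the inserted portion or pops the key. A worked trace with hypothetical sector numbers:

	/*
	 * Front key covers [0, 256); the current leaf ends at 128.
	 *
	 * Pass 1: split.k is trimmed to [0, 128) and inserted;
	 *         iter.pos == 128 < 256, so bch2_cut_front() shrinks
	 *         the front key to [128, 256).
	 * Pass 2: with the iterator now past the old leaf, split.k is
	 *         [128, 256) and is inserted whole; iter.pos == 256 ==
	 *         k.p, so the key is popped and the loop terminates.
	 */
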