Commit cf4f0432 authored by Josef Bacik, committed by David Sterba

btrfs: move ->parent and ->ref_root into btrfs_delayed_ref_node

These two members are shared by both the tree refs and data refs, so
move them into btrfs_delayed_ref_node proper.  This allows us to greatly
simplify the comparison code, as the shared refs always sort only on
parent, the non-shared refs always sort first on ref_root, and then
only data refs sort on their specific fields.
Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 12390e42
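To illustrate the refactor the commit message describes, the following is a standalone sketch of the before/after layout in miniature. It is not kernel code: sketch_ref_node, old_tree_ref and old_data_ref are invented, simplified stand-ins for btrfs_delayed_ref_node and the per-type ref structs, written as ordinary userspace C.

/*
 * Standalone sketch (not kernel code): simplified stand-ins showing the
 * shared members being hoisted into the common node structure.
 */
#include <stdint.h>
#include <stdio.h>

/* Before: each flavour carried its own copy of root and parent. */
struct old_tree_ref { uint64_t root; uint64_t parent; int level; };
struct old_data_ref { uint64_t root; uint64_t parent; uint64_t objectid; uint64_t offset; };

/* After: the shared members live once in the common node; only the
 * type-specific leftovers stay per flavour. */
struct sketch_ref_node {
	uint64_t ref_root;	/* owning root (used when parent == 0) */
	uint64_t parent;	/* parent bytenr, set only for shared backrefs */
	int type;		/* stand-in for the BTRFS_*_REF_KEY item type */
	union {
		struct { int level; } tree_ref;
		struct { uint64_t objectid; uint64_t offset; } data_ref;
	};
};

int main(void)
{
	struct sketch_ref_node node = { 0 };

	node.ref_root = 5;	/* hypothetical root id */
	node.parent = 0;	/* not a shared backref */
	node.tree_ref.level = 2;

	printf("ref_root=%llu parent=%llu level=%d\n",
	       (unsigned long long)node.ref_root,
	       (unsigned long long)node.parent,
	       node.tree_ref.level);
	return 0;
}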
@@ -928,7 +928,7 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
             }

             ref = btrfs_delayed_node_to_tree_ref(node);
-            ret = add_indirect_ref(fs_info, preftrees, ref->root,
+            ret = add_indirect_ref(fs_info, preftrees, node->ref_root,
                                    key_ptr, ref->level + 1,
                                    node->bytenr, count, sc,
                                    GFP_ATOMIC);
@@ -941,7 +941,7 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
             ref = btrfs_delayed_node_to_tree_ref(node);

             ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
-                                 ref->parent, node->bytenr, count,
+                                 node->parent, node->bytenr, count,
                                  sc, GFP_ATOMIC);
             break;
         }
@@ -972,18 +972,14 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
             if (sc && count < 0)
                 sc->have_delayed_delete_refs = true;

-            ret = add_indirect_ref(fs_info, preftrees, ref->root,
+            ret = add_indirect_ref(fs_info, preftrees, node->ref_root,
                                    &key, 0, node->bytenr, count, sc,
                                    GFP_ATOMIC);
             break;
         }
         case BTRFS_SHARED_DATA_REF_KEY: {
             /* SHARED DIRECT FULL backref */
-            struct btrfs_delayed_data_ref *ref;
-
-            ref = btrfs_delayed_node_to_data_ref(node);
-            ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
+            ret = add_direct_ref(fs_info, preftrees, 0, node->parent,
                                  node->bytenr, count, sc,
                                  GFP_ATOMIC);
             break;
@@ -303,55 +303,20 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
     return 0;
 }

-/*
- * compare two delayed tree backrefs with same bytenr and type
- */
-static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
-                          struct btrfs_delayed_tree_ref *ref2)
-{
-    struct btrfs_delayed_ref_node *node = btrfs_delayed_tree_ref_to_node(ref1);
-
-    if (node->type == BTRFS_TREE_BLOCK_REF_KEY) {
-        if (ref1->root < ref2->root)
-            return -1;
-        if (ref1->root > ref2->root)
-            return 1;
-    } else {
-        if (ref1->parent < ref2->parent)
-            return -1;
-        if (ref1->parent > ref2->parent)
-            return 1;
-    }
-    return 0;
-}
-
 /*
  * compare two delayed data backrefs with same bytenr and type
  */
-static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
-                          struct btrfs_delayed_data_ref *ref2)
+static int comp_data_refs(struct btrfs_delayed_ref_node *ref1,
+                          struct btrfs_delayed_ref_node *ref2)
 {
-    struct btrfs_delayed_ref_node *node = btrfs_delayed_data_ref_to_node(ref1);
-
-    if (node->type == BTRFS_EXTENT_DATA_REF_KEY) {
-        if (ref1->root < ref2->root)
-            return -1;
-        if (ref1->root > ref2->root)
-            return 1;
-        if (ref1->objectid < ref2->objectid)
-            return -1;
-        if (ref1->objectid > ref2->objectid)
-            return 1;
-        if (ref1->offset < ref2->offset)
-            return -1;
-        if (ref1->offset > ref2->offset)
-            return 1;
-    } else {
-        if (ref1->parent < ref2->parent)
-            return -1;
-        if (ref1->parent > ref2->parent)
-            return 1;
-    }
+    if (ref1->data_ref.objectid < ref2->data_ref.objectid)
+        return -1;
+    if (ref1->data_ref.objectid > ref2->data_ref.objectid)
+        return 1;
+    if (ref1->data_ref.offset < ref2->data_ref.offset)
+        return -1;
+    if (ref1->data_ref.offset > ref2->data_ref.offset)
+        return 1;
     return 0;
 }
@@ -365,13 +330,20 @@ static int comp_refs(struct btrfs_delayed_ref_node *ref1,
         return -1;
     if (ref1->type > ref2->type)
         return 1;
-    if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
-        ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
-        ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
-                             btrfs_delayed_node_to_tree_ref(ref2));
-    else
-        ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
-                             btrfs_delayed_node_to_data_ref(ref2));
+    if (ref1->type == BTRFS_SHARED_BLOCK_REF_KEY ||
+        ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
+        if (ref1->parent < ref2->parent)
+            return -1;
+        if (ref1->parent > ref2->parent)
+            return 1;
+    } else {
+        if (ref1->ref_root < ref2->ref_root)
+            return -1;
+        if (ref1->ref_root > ref2->ref_root)
+            return -1;
+        if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY)
+            ret = comp_data_refs(ref1, ref2);
+    }
     if (ret)
         return ret;

     if (check_seq) {
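As a companion to the comp_refs()/comp_data_refs() rework above, here is a minimal userspace sketch of the resulting comparison order for two delayed refs with the same bytenr: shared ref types compare on parent only, everything else compares on ref_root, and plain data refs additionally compare on objectid and offset. The names (sketch_ref, comp_refs_sketch, cmp_u64) and the enum values are invented for illustration, and the sketch follows the usual strict three-way convention of returning -1, 0, or 1.

/*
 * Userspace sketch (not kernel code) of the simplified comparison
 * order: parent for shared refs, ref_root (then data fields) otherwise.
 */
#include <stdint.h>
#include <stdio.h>

enum sketch_ref_type {		/* stand-ins for the BTRFS_*_REF_KEY types */
	SKETCH_TREE_BLOCK_REF,
	SKETCH_SHARED_BLOCK_REF,
	SKETCH_EXTENT_DATA_REF,
	SKETCH_SHARED_DATA_REF,
};

struct sketch_ref {
	enum sketch_ref_type type;
	uint64_t ref_root;
	uint64_t parent;
	uint64_t objectid;	/* data refs only */
	uint64_t offset;	/* data refs only */
};

static int cmp_u64(uint64_t a, uint64_t b)
{
	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

static int comp_refs_sketch(const struct sketch_ref *a, const struct sketch_ref *b)
{
	int ret = cmp_u64(a->type, b->type);

	if (ret)
		return ret;
	/* Shared (full) backrefs only sort on the parent bytenr. */
	if (a->type == SKETCH_SHARED_BLOCK_REF || a->type == SKETCH_SHARED_DATA_REF)
		return cmp_u64(a->parent, b->parent);

	/* Non-shared refs: owning root first, then data-specific fields. */
	ret = cmp_u64(a->ref_root, b->ref_root);
	if (ret || a->type != SKETCH_EXTENT_DATA_REF)
		return ret;
	ret = cmp_u64(a->objectid, b->objectid);
	if (ret)
		return ret;
	return cmp_u64(a->offset, b->offset);
}

int main(void)
{
	struct sketch_ref a = { .type = SKETCH_EXTENT_DATA_REF, .ref_root = 5,
				.objectid = 257, .offset = 0 };
	struct sketch_ref b = { .type = SKETCH_EXTENT_DATA_REF, .ref_root = 5,
				.objectid = 257, .offset = 4096 };

	printf("comp_refs_sketch(a, b) = %d\n", comp_refs_sketch(&a, &b));
	return 0;
}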
@@ -1005,17 +977,15 @@ static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
     ref->action = action;
     ref->seq = seq;
     ref->type = btrfs_ref_type(generic_ref);
+    ref->ref_root = generic_ref->ref_root;
+    ref->parent = generic_ref->parent;
     RB_CLEAR_NODE(&ref->ref_node);
     INIT_LIST_HEAD(&ref->add_list);

     if (generic_ref->type == BTRFS_REF_DATA) {
-        ref->data_ref.root = generic_ref->ref_root;
-        ref->data_ref.parent = generic_ref->parent;
         ref->data_ref.objectid = generic_ref->data_ref.ino;
         ref->data_ref.offset = generic_ref->data_ref.offset;
     } else {
-        ref->tree_ref.root = generic_ref->ref_root;
-        ref->tree_ref.parent = generic_ref->parent;
         ref->tree_ref.level = generic_ref->tree_ref.level;
     }
 }
@@ -31,14 +31,10 @@ enum btrfs_delayed_ref_action {
 } __packed;

 struct btrfs_delayed_tree_ref {
-    u64 root;
-    u64 parent;
     int level;
 };

 struct btrfs_delayed_data_ref {
-    u64 root;
-    u64 parent;
     u64 objectid;
     u64 offset;
 };
@@ -61,6 +57,15 @@ struct btrfs_delayed_ref_node {
     /* seq number to keep track of insertion order */
     u64 seq;

+    /* The ref_root for this ref */
+    u64 ref_root;
+
+    /*
+     * The parent for this ref, if this isn't set the ref_root is the
+     * reference owner.
+     */
+    u64 parent;
+
     /* ref count on this data structure */
     refcount_t refs;
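The new comment on ->parent states the convention the rest of the patch relies on: a non-zero parent marks a shared (full) backref keyed on the parent block's bytenr, otherwise ref_root identifies the owning root, which is the pattern the later run_delayed_*_ref() and alloc_reserved_tree_block() hunks follow. Below is a tiny standalone sketch of that rule, with invented names (sketch_node, sketch_backref_offset) and the parent/ref_root semantics above taken as an assumption.

/*
 * Sketch (not kernel code): pick the backref key from parent when the
 * ref is shared, from ref_root otherwise.
 */
#include <stdint.h>
#include <stdio.h>

struct sketch_node {
	uint64_t ref_root;	/* owning root id */
	uint64_t parent;	/* parent block bytenr, 0 if not shared */
};

/* Return the value the backref would be keyed on. */
static uint64_t sketch_backref_offset(const struct sketch_node *node)
{
	return node->parent ? node->parent : node->ref_root;
}

int main(void)
{
	struct sketch_node keyed_on_root = { .ref_root = 5, .parent = 0 };
	struct sketch_node shared = { .ref_root = 5, .parent = 1048576 };

	printf("keyed on root -> %llu\n",
	       (unsigned long long)sketch_backref_offset(&keyed_on_root));
	printf("shared backref -> %llu\n",
	       (unsigned long long)sketch_backref_offset(&shared));
	return 0;
}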
@@ -1577,7 +1577,7 @@ static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
     trace_run_delayed_data_ref(trans->fs_info, node);

     if (node->type == BTRFS_SHARED_DATA_REF_KEY)
-        parent = ref->parent;
+        parent = node->parent;

     if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
         struct btrfs_key key;
@@ -1596,7 +1596,7 @@ static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
         key.type = BTRFS_EXTENT_ITEM_KEY;
         key.offset = node->num_bytes;

-        ret = alloc_reserved_file_extent(trans, parent, ref->root,
+        ret = alloc_reserved_file_extent(trans, parent, node->ref_root,
                                          flags, ref->objectid,
                                          ref->offset, &key,
                                          node->ref_mod, href->owning_root);
@@ -1604,12 +1604,12 @@ static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
         if (!ret)
             ret = btrfs_record_squota_delta(trans->fs_info, &delta);
     } else if (node->action == BTRFS_ADD_DELAYED_REF) {
-        ret = __btrfs_inc_extent_ref(trans, node, parent, ref->root,
+        ret = __btrfs_inc_extent_ref(trans, node, parent, node->ref_root,
                                      ref->objectid, ref->offset,
                                      extent_op);
     } else if (node->action == BTRFS_DROP_DELAYED_REF) {
         ret = __btrfs_free_extent(trans, href, node, parent,
-                                  ref->root, ref->objectid,
+                                  node->ref_root, ref->objectid,
                                   ref->offset, extent_op);
     } else {
         BUG();
@@ -1740,8 +1740,8 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
     trace_run_delayed_tree_ref(trans->fs_info, node);

     if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
-        parent = ref->parent;
-    ref_root = ref->root;
+        parent = node->parent;
+    ref_root = node->ref_root;

     if (unlikely(node->ref_mod != 1)) {
         btrfs_err(trans->fs_info,
@@ -2359,7 +2359,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
          * If our ref doesn't match the one we're currently looking at
          * then we have a cross reference.
          */
-        if (data_ref->root != root->root_key.objectid ||
+        if (ref->ref_root != root->root_key.objectid ||
             data_ref->objectid != objectid ||
             data_ref->offset != offset) {
             ret = 1;
@@ -4946,11 +4946,11 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
     if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) {
         btrfs_set_extent_inline_ref_type(leaf, iref,
                                          BTRFS_SHARED_BLOCK_REF_KEY);
-        btrfs_set_extent_inline_ref_offset(leaf, iref, ref->parent);
+        btrfs_set_extent_inline_ref_offset(leaf, iref, node->parent);
     } else {
         btrfs_set_extent_inline_ref_type(leaf, iref,
                                          BTRFS_TREE_BLOCK_REF_KEY);
-        btrfs_set_extent_inline_ref_offset(leaf, iref, ref->root);
+        btrfs_set_extent_inline_ref_offset(leaf, iref, node->ref_root);
     }

     btrfs_mark_buffer_dirty(trans, leaf);
@@ -887,8 +887,8 @@ DECLARE_EVENT_CLASS(btrfs_delayed_tree_ref,
         __entry->bytenr = ref->bytenr;
         __entry->num_bytes = ref->num_bytes;
         __entry->action = ref->action;
-        __entry->parent = ref->tree_ref.parent;
-        __entry->ref_root = ref->tree_ref.root;
+        __entry->parent = ref->parent;
+        __entry->ref_root = ref->ref_root;
         __entry->level = ref->tree_ref.level;
         __entry->type = ref->type;
         __entry->seq = ref->seq;
@@ -945,8 +945,8 @@ DECLARE_EVENT_CLASS(btrfs_delayed_data_ref,
         __entry->bytenr = ref->bytenr;
         __entry->num_bytes = ref->num_bytes;
         __entry->action = ref->action;
-        __entry->parent = ref->data_ref.parent;
-        __entry->ref_root = ref->data_ref.root;
+        __entry->parent = ref->parent;
+        __entry->ref_root = ref->ref_root;
         __entry->owner = ref->data_ref.objectid;
         __entry->offset = ref->data_ref.offset;
         __entry->type = ref->type;