Commit 4eafdb15 authored by Joe Thornber, committed by Mike Snitzer

dm btree: improve btree residency

This commit improves the residency of btrees built in the metadata for
dm-thin and dm-cache.

When inserting a new entry into a full btree node, the current code
splits the node into two.  This can result in a large number of half-full
nodes, particularly if the insertions occur in ascending order (as
happens in dm-thin with large writes).

With this commit, when we insert into a full node we first try to move
some entries to a neighbouring node that has space; failing that, we
split two neighbouring nodes into three.
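
Roughly, the decision order looks like the following standalone sketch
(illustrative only: the enum and pick_strategy() are made up here, but
SPACE_THRESHOLD and the conditions mirror the rebalance_or_split()
helper added in this patch):

#include <stdbool.h>
#include <stdio.h>

#define SPACE_THRESHOLD 8	/* same threshold as rebalance_or_split() */

enum strategy {
	REBALANCE_LEFT,		/* push entries into the left sibling */
	REBALANCE_RIGHT,	/* push entries into the right sibling */
	SPLIT_ONE_INTO_TWO,	/* old-style split of a single node */
	SPLIT_TWO_INTO_THREE	/* split two neighbouring nodes into three */
};

/*
 * Toy model of the decision: a sibling is only used if it is unshared
 * and has at least SPACE_THRESHOLD free entries.
 */
static enum strategy pick_strategy(unsigned parent_index, unsigned nr_parent,
				   bool left_shared, unsigned left_free,
				   bool right_shared, unsigned right_free)
{
	if (parent_index > 0 && !left_shared && left_free >= SPACE_THRESHOLD)
		return REBALANCE_LEFT;

	if (parent_index + 1 < nr_parent && !right_shared &&
	    right_free >= SPACE_THRESHOLD)
		return REBALANCE_RIGHT;

	/*
	 * Nodes at the edge of the parent (or with shared siblings) split
	 * one-into-two, which suits monotonic insertion; interior nodes
	 * split two-into-three to keep residency high.
	 */
	if (left_shared || right_shared || nr_parent <= 2 ||
	    parent_index == 0 || parent_index + 1 == nr_parent)
		return SPLIT_ONE_INTO_TWO;

	return SPLIT_TWO_INTO_THREE;
}

int main(void)
{
	/* a full node in the middle of the tree with a roomy left sibling */
	printf("%d\n", pick_strategy(3, 10, false, 20, false, 0));
	/* prints 0 (REBALANCE_LEFT) */
	return 0;
}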

Results are given below.  'Residency' is the average fullness of the
nodes, expressed as a percentage.  Average instruction counts for the
operations are given to show that the extra processing adds little
overhead.

                         +--------------------------+--------------------------+
                         |         Before           |         After            |
+------------+-----------+-----------+--------------+-----------+--------------+
|    Test    |   Phase   | Residency | Instructions | Residency | Instructions |
+------------+-----------+-----------+--------------+-----------+--------------+
| Ascending  | insert    |        50 |         1876 |        96 |         1930 |
|            | overwrite |        50 |         1789 |        96 |         1746 |
|            | lookup    |        50 |          778 |        96 |          778 |
| Descending | insert    |        50 |         3024 |        96 |         3181 |
|            | overwrite |        50 |         1789 |        96 |         1746 |
|            | lookup    |        50 |          778 |        96 |          778 |
| Random     | insert    |        68 |         3800 |        84 |         3736 |
|            | overwrite |        68 |         4254 |        84 |         3911 |
|            | lookup    |        68 |          779 |        84 |          779 |
| Runs       | insert    |        63 |         2546 |        82 |         2815 |
|            | overwrite |        63 |         2013 |        82 |         1986 |
|            | lookup    |        63 |          778 |        82 |          779 |
+------------+-----------+-----------+--------------+-----------+--------------+

   Ascending - keys are inserted in ascending order.
   Descending - keys are inserted in descending order.
   Random - keys are inserted in random order.
   Runs - keys are split into ascending runs of ~20 length.  Then
          the runs are shuffled (a sketch of this pattern is given below).
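
For illustration, a key sequence matching the 'Runs' pattern could be
generated along these lines (a userspace sketch only; the real test
harness is not part of this patch, and the exact key count, run length
and shuffle are assumptions):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_KEYS 10000
#define RUN_LEN 20			/* "~20", per the description above */
#define NR_RUNS (NR_KEYS / RUN_LEN)

int main(void)
{
	unsigned order[NR_RUNS];
	unsigned i, j, tmp;

	/* the run order starts out ascending ... */
	for (i = 0; i < NR_RUNS; i++)
		order[i] = i;

	/* ... and is then shuffled (Fisher-Yates) */
	for (i = NR_RUNS - 1; i > 0; i--) {
		j = rand() % (i + 1);
		tmp = order[i];
		order[i] = order[j];
		order[j] = tmp;
	}

	/*
	 * Emit the keys: each run is ascending internally, but the runs
	 * themselves arrive in shuffled order.
	 */
	for (i = 0; i < NR_RUNS; i++)
		for (j = 0; j < RUN_LEN; j++)
			printf("%llu\n",
			       (unsigned long long)(order[i] * (uint64_t)RUN_LEN + j));

	return 0;
}
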
Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Colin Ian King <colin.king@canonical.com> # contains_key() fix
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 7e768532
......@@ -500,6 +500,122 @@ int dm_btree_lookup_next(struct dm_btree_info *info, dm_block_t root,
EXPORT_SYMBOL_GPL(dm_btree_lookup_next);
/*----------------------------------------------------------------*/
/*
* Copies entries from one region of a btree node to another. The regions
* must not overlap.
*/
static void copy_entries(struct btree_node *dest, unsigned dest_offset,
struct btree_node *src, unsigned src_offset,
unsigned count)
{
size_t value_size = le32_to_cpu(dest->header.value_size);
memcpy(dest->keys + dest_offset, src->keys + src_offset, count * sizeof(uint64_t));
memcpy(value_ptr(dest, dest_offset), value_ptr(src, src_offset), count * value_size);
}
/*
* Moves entries from one region of a btree node to another. The regions
* may overlap.
*/
static void move_entries(struct btree_node *dest, unsigned dest_offset,
struct btree_node *src, unsigned src_offset,
unsigned count)
{
size_t value_size = le32_to_cpu(dest->header.value_size);
memmove(dest->keys + dest_offset, src->keys + src_offset, count * sizeof(uint64_t));
memmove(value_ptr(dest, dest_offset), value_ptr(src, src_offset), count * value_size);
}
/*
* Erases the first 'count' entries of a btree node, shifting following
* entries down into their place.
*/
static void shift_down(struct btree_node *n, unsigned count)
{
move_entries(n, 0, n, count, le32_to_cpu(n->header.nr_entries) - count);
}
/*
* Moves entries in a btree node up 'count' places, making space for
* new entries at the start of the node.
*/
static void shift_up(struct btree_node *n, unsigned count)
{
move_entries(n, count, n, 0, le32_to_cpu(n->header.nr_entries));
}
/*
* Redistributes entries between two btree nodes to make them
* have similar numbers of entries.
*/
static void redistribute2(struct btree_node *left, struct btree_node *right)
{
unsigned nr_left = le32_to_cpu(left->header.nr_entries);
unsigned nr_right = le32_to_cpu(right->header.nr_entries);
unsigned total = nr_left + nr_right;
unsigned target_left = total / 2;
unsigned target_right = total - target_left;
if (nr_left < target_left) {
unsigned delta = target_left - nr_left;
copy_entries(left, nr_left, right, 0, delta);
shift_down(right, delta);
} else if (nr_left > target_left) {
unsigned delta = nr_left - target_left;
if (nr_right)
shift_up(right, delta);
copy_entries(right, 0, left, target_left, delta);
}
left->header.nr_entries = cpu_to_le32(target_left);
right->header.nr_entries = cpu_to_le32(target_right);
}
/*
* Redistribute entries between three nodes. Assumes the central
* node is empty.
*/
static void redistribute3(struct btree_node *left, struct btree_node *center,
struct btree_node *right)
{
unsigned nr_left = le32_to_cpu(left->header.nr_entries);
unsigned nr_center = le32_to_cpu(center->header.nr_entries);
unsigned nr_right = le32_to_cpu(right->header.nr_entries);
unsigned total, target_left, target_center, target_right;
BUG_ON(nr_center);
total = nr_left + nr_right;
target_left = total / 3;
target_center = (total - target_left) / 2;
target_right = (total - target_left - target_center);
if (nr_left < target_left) {
unsigned left_short = target_left - nr_left;
copy_entries(left, nr_left, right, 0, left_short);
copy_entries(center, 0, right, left_short, target_center);
shift_down(right, nr_right - target_right);
} else if (nr_left < (target_left + target_center)) {
unsigned left_to_center = nr_left - target_left;
copy_entries(center, 0, left, target_left, left_to_center);
copy_entries(center, left_to_center, right, 0, target_center - left_to_center);
shift_down(right, nr_right - target_right);
} else {
unsigned right_short = target_right - nr_right;
shift_up(right, right_short);
copy_entries(right, 0, left, nr_left - right_short, right_short);
copy_entries(center, 0, left, target_left, nr_left - target_left);
}
left->header.nr_entries = cpu_to_le32(target_left);
center->header.nr_entries = cpu_to_le32(target_center);
right->header.nr_entries = cpu_to_le32(target_right);
}
/*
* Splits a node by creating a sibling node and shifting half the node's
* contents across. Assumes there is a parent node, and it has room for
......@@ -530,12 +646,10 @@ EXPORT_SYMBOL_GPL(dm_btree_lookup_next);
*
* Where A* is a shadow of A.
*/
static int btree_split_sibling(struct shadow_spine *s, unsigned parent_index,
uint64_t key)
static int split_one_into_two(struct shadow_spine *s, unsigned parent_index,
struct dm_btree_value_type *vt, uint64_t key)
{
int r;
size_t size;
unsigned nr_left, nr_right;
struct dm_block *left, *right, *parent;
struct btree_node *ln, *rn, *pn;
__le64 location;
......@@ -549,36 +663,18 @@ static int btree_split_sibling(struct shadow_spine *s, unsigned parent_index,
ln = dm_block_data(left);
rn = dm_block_data(right);
nr_left = le32_to_cpu(ln->header.nr_entries) / 2;
nr_right = le32_to_cpu(ln->header.nr_entries) - nr_left;
ln->header.nr_entries = cpu_to_le32(nr_left);
rn->header.flags = ln->header.flags;
rn->header.nr_entries = cpu_to_le32(nr_right);
rn->header.nr_entries = cpu_to_le32(0);
rn->header.max_entries = ln->header.max_entries;
rn->header.value_size = ln->header.value_size;
memcpy(rn->keys, ln->keys + nr_left, nr_right * sizeof(rn->keys[0]));
redistribute2(ln, rn);
size = le32_to_cpu(ln->header.flags) & INTERNAL_NODE ?
sizeof(uint64_t) : s->info->value_type.size;
memcpy(value_ptr(rn, 0), value_ptr(ln, nr_left),
size * nr_right);
/*
* Patch up the parent
*/
/* patch up the parent */
parent = shadow_parent(s);
pn = dm_block_data(parent);
location = cpu_to_le64(dm_block_location(left));
__dm_bless_for_disk(&location);
memcpy_disk(value_ptr(pn, parent_index),
&location, sizeof(__le64));
location = cpu_to_le64(dm_block_location(right));
__dm_bless_for_disk(&location);
r = insert_at(sizeof(__le64), pn, parent_index + 1,
le64_to_cpu(rn->keys[0]), &location);
if (r) {
......@@ -586,6 +682,7 @@ static int btree_split_sibling(struct shadow_spine *s, unsigned parent_index,
return r;
}
/* patch up the spine */
if (key < le64_to_cpu(rn->keys[0])) {
unlock_block(s->info, right);
s->nodes[1] = left;
......@@ -597,6 +694,121 @@ static int btree_split_sibling(struct shadow_spine *s, unsigned parent_index,
return 0;
}
/*
* We often need to modify a sibling node. This function shadows a particular
* child of the given parent node, making sure to update the parent to point
* to the new shadow.
*/
static int shadow_child(struct dm_btree_info *info, struct dm_btree_value_type *vt,
struct btree_node *parent, unsigned index,
struct dm_block **result)
{
int r, inc;
dm_block_t root;
struct btree_node *node;
root = value64(parent, index);
r = dm_tm_shadow_block(info->tm, root, &btree_node_validator,
result, &inc);
if (r)
return r;
node = dm_block_data(*result);
if (inc)
inc_children(info->tm, node, vt);
*((__le64 *) value_ptr(parent, index)) =
cpu_to_le64(dm_block_location(*result));
return 0;
}
/*
* Splits two nodes into three. This is more work, but results in fuller
* nodes, so saves metadata space.
*/
static int split_two_into_three(struct shadow_spine *s, unsigned parent_index,
struct dm_btree_value_type *vt, uint64_t key)
{
int r;
unsigned middle_index;
struct dm_block *left, *middle, *right, *parent;
struct btree_node *ln, *rn, *mn, *pn;
__le64 location;
parent = shadow_parent(s);
pn = dm_block_data(parent);
if (parent_index == 0) {
middle_index = 1;
left = shadow_current(s);
r = shadow_child(s->info, vt, pn, parent_index + 1, &right);
if (r)
return r;
} else {
middle_index = parent_index;
right = shadow_current(s);
r = shadow_child(s->info, vt, pn, parent_index - 1, &left);
if (r)
return r;
}
r = new_block(s->info, &middle);
if (r < 0)
return r;
ln = dm_block_data(left);
mn = dm_block_data(middle);
rn = dm_block_data(right);
mn->header.nr_entries = cpu_to_le32(0);
mn->header.flags = ln->header.flags;
mn->header.max_entries = ln->header.max_entries;
mn->header.value_size = ln->header.value_size;
redistribute3(ln, mn, rn);
/* patch up the parent */
pn->keys[middle_index] = rn->keys[0];
location = cpu_to_le64(dm_block_location(middle));
__dm_bless_for_disk(&location);
r = insert_at(sizeof(__le64), pn, middle_index,
le64_to_cpu(mn->keys[0]), &location);
if (r) {
if (shadow_current(s) != left)
unlock_block(s->info, left);
unlock_block(s->info, middle);
if (shadow_current(s) != right)
unlock_block(s->info, right);
return r;
}
/* patch up the spine */
if (key < le64_to_cpu(mn->keys[0])) {
unlock_block(s->info, middle);
unlock_block(s->info, right);
s->nodes[1] = left;
} else if (key < le64_to_cpu(rn->keys[0])) {
unlock_block(s->info, left);
unlock_block(s->info, right);
s->nodes[1] = middle;
} else {
unlock_block(s->info, left);
unlock_block(s->info, middle);
s->nodes[1] = right;
}
return 0;
}
/*----------------------------------------------------------------*/
/*
* Splits a node by creating two new children beneath the given node.
*
......@@ -690,6 +902,186 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
return 0;
}
/*----------------------------------------------------------------*/
/*
* Redistributes a node's entries with its left sibling.
*/
static int rebalance_left(struct shadow_spine *s, struct dm_btree_value_type *vt,
unsigned parent_index, uint64_t key)
{
int r;
struct dm_block *sib;
struct btree_node *left, *right, *parent = dm_block_data(shadow_parent(s));
r = shadow_child(s->info, vt, parent, parent_index - 1, &sib);
if (r)
return r;
left = dm_block_data(sib);
right = dm_block_data(shadow_current(s));
redistribute2(left, right);
*key_ptr(parent, parent_index) = right->keys[0];
if (key < le64_to_cpu(right->keys[0])) {
unlock_block(s->info, s->nodes[1]);
s->nodes[1] = sib;
} else {
unlock_block(s->info, sib);
}
return 0;
}
/*
* Redistributes a node's entries with its right sibling.
*/
static int rebalance_right(struct shadow_spine *s, struct dm_btree_value_type *vt,
unsigned parent_index, uint64_t key)
{
int r;
struct dm_block *sib;
struct btree_node *left, *right, *parent = dm_block_data(shadow_parent(s));
r = shadow_child(s->info, vt, parent, parent_index + 1, &sib);
if (r)
return r;
left = dm_block_data(shadow_current(s));
right = dm_block_data(sib);
redistribute2(left, right);
*key_ptr(parent, parent_index + 1) = right->keys[0];
if (key < le64_to_cpu(right->keys[0])) {
unlock_block(s->info, sib);
} else {
unlock_block(s->info, s->nodes[1]);
s->nodes[1] = sib;
}
return 0;
}
/*
* Returns the number of spare entries in a node.
*/
static int get_node_free_space(struct dm_btree_info *info, dm_block_t b, unsigned *space)
{
int r;
unsigned nr_entries;
struct dm_block *block;
struct btree_node *node;
r = bn_read_lock(info, b, &block);
if (r)
return r;
node = dm_block_data(block);
nr_entries = le32_to_cpu(node->header.nr_entries);
*space = le32_to_cpu(node->header.max_entries) - nr_entries;
unlock_block(info, block);
return 0;
}
/*
* Make space in a node, either by moving some entries to a sibling,
* or creating a new sibling node. SPACE_THRESHOLD defines the minimum
* number of free entries that must be in the sibling to make the move
* worthwhile. If the siblings are shared (e.g., part of a snapshot),
* then they are not touched, since this would break sharing and so consume
* more space than we save.
*/
#define SPACE_THRESHOLD 8
static int rebalance_or_split(struct shadow_spine *s, struct dm_btree_value_type *vt,
unsigned parent_index, uint64_t key)
{
int r;
struct btree_node *parent = dm_block_data(shadow_parent(s));
unsigned nr_parent = le32_to_cpu(parent->header.nr_entries);
unsigned free_space;
int left_shared = 0, right_shared = 0;
/* Should we move entries to the left sibling? */
if (parent_index > 0) {
dm_block_t left_b = value64(parent, parent_index - 1);
r = dm_tm_block_is_shared(s->info->tm, left_b, &left_shared);
if (r)
return r;
if (!left_shared) {
r = get_node_free_space(s->info, left_b, &free_space);
if (r)
return r;
if (free_space >= SPACE_THRESHOLD)
return rebalance_left(s, vt, parent_index, key);
}
}
/* Should we move entries to the right sibling? */
if (parent_index < (nr_parent - 1)) {
dm_block_t right_b = value64(parent, parent_index + 1);
r = dm_tm_block_is_shared(s->info->tm, right_b, &right_shared);
if (r)
return r;
if (!right_shared) {
r = get_node_free_space(s->info, right_b, &free_space);
if (r)
return r;
if (free_space >= SPACE_THRESHOLD)
return rebalance_right(s, vt, parent_index, key);
}
}
/*
* We need to split the node; normally we split two nodes
* into three. But when inserting a sequence that is either
* monotonically increasing or decreasing, it's better to split
* a single node into two.
*/
if (left_shared || right_shared || (nr_parent <= 2) ||
(parent_index == 0) || (parent_index + 1 == nr_parent)) {
return split_one_into_two(s, parent_index, vt, key);
} else {
return split_two_into_three(s, parent_index, vt, key);
}
}
/*
* Does the node contain a particular key?
*/
static bool contains_key(struct btree_node *node, uint64_t key)
{
int i = lower_bound(node, key);
if (i >= 0 && le64_to_cpu(node->keys[i]) == key)
return true;
return false;
}
/*
* In general we preemptively make sure there's a free entry in every
* node on the spine when doing an insert. But we can avoid that with
* leaf nodes if we know it's an overwrite.
*/
static bool has_space_for_insert(struct btree_node *node, uint64_t key)
{
if (node->header.nr_entries == node->header.max_entries) {
if (le32_to_cpu(node->header.flags) & LEAF_NODE) {
/* we don't need space if it's an overwrite */
return contains_key(node, key);
}
return false;
}
return true;
}
static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
struct dm_btree_value_type *vt,
uint64_t key, unsigned *index)
......@@ -719,17 +1111,18 @@ static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
node = dm_block_data(shadow_current(s));
if (node->header.nr_entries == node->header.max_entries) {
if (!has_space_for_insert(node, key)) {
if (top)
r = btree_split_beneath(s, key);
else
r = btree_split_sibling(s, i, key);
r = rebalance_or_split(s, vt, i, key);
if (r < 0)
return r;
}
/* making space can cause the current node to change */
node = dm_block_data(shadow_current(s));
}
i = lower_bound(node, key);
......
......@@ -379,6 +379,15 @@ int dm_tm_ref(struct dm_transaction_manager *tm, dm_block_t b,
return dm_sm_get_count(tm->sm, b, result);
}
int dm_tm_block_is_shared(struct dm_transaction_manager *tm, dm_block_t b,
int *result)
{
if (tm->is_clone)
return -EWOULDBLOCK;
return dm_sm_count_is_more_than_one(tm->sm, b, result);
}
struct dm_block_manager *dm_tm_get_bm(struct dm_transaction_manager *tm)
{
return tm->bm;
......
......@@ -103,8 +103,14 @@ void dm_tm_inc(struct dm_transaction_manager *tm, dm_block_t b);
void dm_tm_dec(struct dm_transaction_manager *tm, dm_block_t b);
int dm_tm_ref(struct dm_transaction_manager *tm, dm_block_t b,
uint32_t *result);
int dm_tm_ref(struct dm_transaction_manager *tm, dm_block_t b, uint32_t *result);
/*
* Finds out if a given block is shared (i.e. has a reference count higher
* than one).
*/
int dm_tm_block_is_shared(struct dm_transaction_manager *tm, dm_block_t b,
int *result);
struct dm_block_manager *dm_tm_get_bm(struct dm_transaction_manager *tm);
......