Commit be500ed7 authored by Joe Thornber, committed by Mike Snitzer

dm space maps: improve performance with inc/dec on ranges of blocks

When we break sharing on btree nodes we typically need to increment
the reference count of every value held in the node.  This can cause
a lot of repeated calls into the space maps.  Fix this by changing the
interface of the space map inc/dec methods to take ranges of adjacent
blocks to be operated on.

For installations that use a lot of snapshots this reduces the CPU
overhead of fundamental operations, such as provisioning a new block
or deleting a snapshot, by as much as 10 times.
Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 5faafc77
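Before the diff: the patch applies the same run-coalescing pattern in several places (see with_runs() and dm_tm_with_runs() below), so here is a minimal, self-contained sketch of the idea. The names for_each_run() and emit() are illustrative stand-ins, not names from the patch: walk an array of block numbers and invoke the range operation once per run of adjacent blocks, instead of once per block.

/*
 * Sketch of the run-coalescing pattern used throughout this commit.
 * emit() stands in for dm_sm_inc_blocks()/dm_sm_dec_blocks(); ranges
 * are half-open, [begin, end).
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef void (*range_fn)(uint64_t begin, uint64_t end);

static void for_each_run(const uint64_t *blocks, size_t count, range_fn emit)
{
        uint64_t begin = 0, end = 0;
        bool in_run = false;
        size_t i;

        for (i = 0; i < count; i++) {
                uint64_t b = blocks[i];

                if (in_run && b == end) {
                        end++;                  /* extend the current run */
                } else {
                        if (in_run)
                                emit(begin, end); /* flush the previous run */
                        in_run = true;
                        begin = b;
                        end = b + 1;
                }
        }

        if (in_run)
                emit(begin, end);               /* flush the final run */
}

For example, the input {7, 8, 9, 30, 31, 5} produces emit(7, 10), emit(30, 32), emit(5, 6): three calls instead of six, and far fewer in the common case where a btree leaf references long contiguous extents.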
@@ -363,28 +363,32 @@ static void ws_unpack(const struct writeset_disk *disk, struct writeset_metadata
         core->root = le64_to_cpu(disk->root);
 }
 
-static void ws_inc(void *context, const void *value)
+static void ws_inc(void *context, const void *value, unsigned count)
 {
         struct era_metadata *md = context;
         struct writeset_disk ws_d;
         dm_block_t b;
+        unsigned i;
 
-        memcpy(&ws_d, value, sizeof(ws_d));
-        b = le64_to_cpu(ws_d.root);
-
-        dm_tm_inc(md->tm, b);
+        for (i = 0; i < count; i++) {
+                memcpy(&ws_d, value + (i * sizeof(ws_d)), sizeof(ws_d));
+                b = le64_to_cpu(ws_d.root);
+                dm_tm_inc(md->tm, b);
+        }
 }
 
-static void ws_dec(void *context, const void *value)
+static void ws_dec(void *context, const void *value, unsigned count)
 {
         struct era_metadata *md = context;
         struct writeset_disk ws_d;
         dm_block_t b;
+        unsigned i;
 
-        memcpy(&ws_d, value, sizeof(ws_d));
-        b = le64_to_cpu(ws_d.root);
-
-        dm_bitset_del(&md->bitset_info, b);
+        for (i = 0; i < count; i++) {
+                memcpy(&ws_d, value + (i * sizeof(ws_d)), sizeof(ws_d));
+                b = le64_to_cpu(ws_d.root);
+                dm_bitset_del(&md->bitset_info, b);
+        }
 }
 
 static int ws_eq(void *context, const void *value1, const void *value2)
...
@@ -311,28 +311,53 @@ static void unpack_block_time(uint64_t v, dm_block_t *b, uint32_t *t)
         *t = v & ((1 << 24) - 1);
 }
 
-static void data_block_inc(void *context, const void *value_le)
+/*
+ * It's more efficient to call dm_sm_{inc,dec}_blocks as few times as
+ * possible.  'with_runs' reads contiguous runs of blocks, and calls the
+ * given sm function.
+ */
+typedef int (*run_fn)(struct dm_space_map *, dm_block_t, dm_block_t);
+
+static void with_runs(struct dm_space_map *sm, const __le64 *value_le, unsigned count, run_fn fn)
 {
-        struct dm_space_map *sm = context;
-        __le64 v_le;
-        uint64_t b;
+        uint64_t b, begin, end;
         uint32_t t;
+        bool in_run = false;
+        unsigned i;
 
-        memcpy(&v_le, value_le, sizeof(v_le));
-        unpack_block_time(le64_to_cpu(v_le), &b, &t);
-        dm_sm_inc_block(sm, b);
+        for (i = 0; i < count; i++, value_le++) {
+                /* We know value_le is 8 byte aligned */
+                unpack_block_time(le64_to_cpu(*value_le), &b, &t);
+
+                if (in_run) {
+                        if (b == end) {
+                                end++;
+                        } else {
+                                fn(sm, begin, end);
+                                begin = b;
+                                end = b + 1;
+                        }
+                } else {
+                        in_run = true;
+                        begin = b;
+                        end = b + 1;
+                }
+        }
+
+        if (in_run)
+                fn(sm, begin, end);
 }
 
-static void data_block_dec(void *context, const void *value_le)
+static void data_block_inc(void *context, const void *value_le, unsigned count)
 {
-        struct dm_space_map *sm = context;
-        __le64 v_le;
-        uint64_t b;
-        uint32_t t;
+        with_runs((struct dm_space_map *) context,
+                  (const __le64 *) value_le, count, dm_sm_inc_blocks);
+}
 
-        memcpy(&v_le, value_le, sizeof(v_le));
-        unpack_block_time(le64_to_cpu(v_le), &b, &t);
-        dm_sm_dec_block(sm, b);
+static void data_block_dec(void *context, const void *value_le, unsigned count)
+{
+        with_runs((struct dm_space_map *) context,
+                  (const __le64 *) value_le, count, dm_sm_dec_blocks);
 }
 
 static int data_block_equal(void *context, const void *value1_le, const void *value2_le)
 
@@ -349,27 +374,25 @@ static int data_block_equal(void *context, const void *value1_le, const void *va
         return b1 == b2;
 }
 
-static void subtree_inc(void *context, const void *value)
+static void subtree_inc(void *context, const void *value, unsigned count)
 {
         struct dm_btree_info *info = context;
-        __le64 root_le;
-        uint64_t root;
+        const __le64 *root_le = value;
+        unsigned i;
 
-        memcpy(&root_le, value, sizeof(root_le));
-        root = le64_to_cpu(root_le);
-        dm_tm_inc(info->tm, root);
+        for (i = 0; i < count; i++, root_le++)
+                dm_tm_inc(info->tm, le64_to_cpu(*root_le));
 }
 
-static void subtree_dec(void *context, const void *value)
+static void subtree_dec(void *context, const void *value, unsigned count)
 {
         struct dm_btree_info *info = context;
-        __le64 root_le;
-        uint64_t root;
+        const __le64 *root_le = value;
+        unsigned i;
 
-        memcpy(&root_le, value, sizeof(root_le));
-        root = le64_to_cpu(root_le);
-        if (dm_btree_del(info, root))
-                DMERR("btree delete failed");
+        for (i = 0; i < count; i++, root_le++)
+                if (dm_btree_del(info, le64_to_cpu(*root_le)))
+                        DMERR("btree delete failed");
 }
 
 static int subtree_equal(void *context, const void *value1_le, const void *value2_le)
 
@@ -1761,11 +1784,7 @@ int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_
         int r = 0;
 
         pmd_write_lock(pmd);
-        for (; b != e; b++) {
-                r = dm_sm_inc_block(pmd->data_sm, b);
-                if (r)
-                        break;
-        }
+        r = dm_sm_inc_blocks(pmd->data_sm, b, e);
         pmd_write_unlock(pmd);
 
         return r;
 
@@ -1776,11 +1795,7 @@ int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_
         int r = 0;
 
         pmd_write_lock(pmd);
-        for (; b != e; b++) {
-                r = dm_sm_dec_block(pmd->data_sm, b);
-                if (r)
-                        break;
-        }
+        r = dm_sm_dec_blocks(pmd->data_sm, b, e);
         pmd_write_unlock(pmd);
 
         return r;
...
@@ -108,12 +108,10 @@ static void *element_at(struct dm_array_info *info, struct array_block *ab,
  * in an array block.
  */
 static void on_entries(struct dm_array_info *info, struct array_block *ab,
-                       void (*fn)(void *, const void *))
+                       void (*fn)(void *, const void *, unsigned))
 {
-        unsigned i, nr_entries = le32_to_cpu(ab->nr_entries);
-
-        for (i = 0; i < nr_entries; i++)
-                fn(info->value_type.context, element_at(info, ab, i));
+        unsigned nr_entries = le32_to_cpu(ab->nr_entries);
+        fn(info->value_type.context, element_at(info, ab, 0), nr_entries);
 }
 
 /*
 
@@ -175,19 +173,18 @@ static int alloc_ablock(struct dm_array_info *info, size_t size_of_block,
 static void fill_ablock(struct dm_array_info *info, struct array_block *ab,
                         const void *value, unsigned new_nr)
 {
-        unsigned i;
-        uint32_t nr_entries;
+        uint32_t nr_entries, delta, i;
         struct dm_btree_value_type *vt = &info->value_type;
 
         BUG_ON(new_nr > le32_to_cpu(ab->max_entries));
         BUG_ON(new_nr < le32_to_cpu(ab->nr_entries));
 
         nr_entries = le32_to_cpu(ab->nr_entries);
-        for (i = nr_entries; i < new_nr; i++) {
-                if (vt->inc)
-                        vt->inc(vt->context, value);
+        delta = new_nr - nr_entries;
+        if (vt->inc)
+                vt->inc(vt->context, value, delta);
+
+        for (i = nr_entries; i < new_nr; i++)
                 memcpy(element_at(info, ab, i), value, vt->size);
-        }
-
         ab->nr_entries = cpu_to_le32(new_nr);
 }
 
@@ -199,17 +196,16 @@ static void fill_ablock(struct dm_array_info *info, struct array_block *ab,
 static void trim_ablock(struct dm_array_info *info, struct array_block *ab,
                         unsigned new_nr)
 {
-        unsigned i;
-        uint32_t nr_entries;
+        uint32_t nr_entries, delta;
         struct dm_btree_value_type *vt = &info->value_type;
 
         BUG_ON(new_nr > le32_to_cpu(ab->max_entries));
         BUG_ON(new_nr > le32_to_cpu(ab->nr_entries));
 
         nr_entries = le32_to_cpu(ab->nr_entries);
-        for (i = nr_entries; i > new_nr; i--)
-                if (vt->dec)
-                        vt->dec(vt->context, element_at(info, ab, i - 1));
+        delta = nr_entries - new_nr;
+        if (vt->dec)
+                vt->dec(vt->context, element_at(info, ab, new_nr - 1), delta);
 
         ab->nr_entries = cpu_to_le32(new_nr);
 }
 
@@ -573,16 +569,17 @@ static int grow(struct resize *resize)
  * These are the value_type functions for the btree elements, which point
  * to array blocks.
  */
-static void block_inc(void *context, const void *value)
+static void block_inc(void *context, const void *value, unsigned count)
 {
-        __le64 block_le;
+        const __le64 *block_le = value;
         struct dm_array_info *info = context;
+        unsigned i;
 
-        memcpy(&block_le, value, sizeof(block_le));
-        dm_tm_inc(info->btree_info.tm, le64_to_cpu(block_le));
+        for (i = 0; i < count; i++, block_le++)
+                dm_tm_inc(info->btree_info.tm, le64_to_cpu(*block_le));
 }
 
-static void block_dec(void *context, const void *value)
+static void __block_dec(void *context, const void *value)
 {
         int r;
         uint64_t b;
 
@@ -621,6 +618,13 @@ static void block_dec(void *context, const void *value)
         dm_tm_dec(info->btree_info.tm, b);
 }
 
+static void block_dec(void *context, const void *value, unsigned count)
+{
+        unsigned i;
+        for (i = 0; i < count; i++, value += sizeof(__le64))
+                __block_dec(context, value);
+}
+
 static int block_equal(void *context, const void *value1, const void *value2)
 {
         return !memcmp(value1, value2, sizeof(__le64));
 
@@ -711,7 +715,7 @@ static int populate_ablock_with_values(struct dm_array_info *info, struct array_
                         return r;
 
                 if (vt->inc)
-                        vt->inc(vt->context, element_at(info, ab, i));
+                        vt->inc(vt->context, element_at(info, ab, i), 1);
         }
 
         ab->nr_entries = cpu_to_le32(new_nr);
 
@@ -822,9 +826,9 @@ static int array_set_value(struct dm_array_info *info, dm_block_t root,
         old_value = element_at(info, ab, entry);
         if (vt->dec &&
             (!vt->equal || !vt->equal(vt->context, old_value, value))) {
-                vt->dec(vt->context, old_value);
+                vt->dec(vt->context, old_value, 1);
                 if (vt->inc)
-                        vt->inc(vt->context, value);
+                        vt->inc(vt->context, value, 1);
         }
 
         memcpy(old_value, value, info->value_type.size);
...
@@ -144,4 +144,17 @@ extern struct dm_block_validator btree_node_validator;
 extern void init_le64_type(struct dm_transaction_manager *tm,
                            struct dm_btree_value_type *vt);
 
+/*
+ * This returns a shadowed btree leaf that you may modify.  In practice
+ * this means overwrites only, since an insert could cause a node to
+ * be split.  Useful if you need access to the old value to calculate the
+ * new one.
+ *
+ * This only works with single level btrees.  The given key must be present in
+ * the tree, otherwise -EINVAL will be returned.
+ */
+int btree_get_overwrite_leaf(struct dm_btree_info *info, dm_block_t root,
+                             uint64_t key, int *index,
+                             dm_block_t *new_root, struct dm_block **leaf);
+
 #endif  /* DM_BTREE_INTERNAL_H */
...
@@ -544,7 +544,7 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
 
                 if (info->value_type.dec)
                         info->value_type.dec(info->value_type.context,
-                                             value_ptr(n, index));
+                                             value_ptr(n, index), 1);
 
                 delete_at(n, index);
         }
 
@@ -653,7 +653,7 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root,
         if (k >= keys[last_level] && k < end_key) {
                 if (info->value_type.dec)
                         info->value_type.dec(info->value_type.context,
-                                             value_ptr(n, index));
+                                             value_ptr(n, index), 1);
 
                 delete_at(n, index);
                 keys[last_level] = k + 1ull;
...
@@ -236,22 +236,14 @@ dm_block_t shadow_root(struct shadow_spine *s)
         return s->root;
 }
 
-static void le64_inc(void *context, const void *value_le)
+static void le64_inc(void *context, const void *value_le, unsigned count)
 {
-        struct dm_transaction_manager *tm = context;
-        __le64 v_le;
-
-        memcpy(&v_le, value_le, sizeof(v_le));
-        dm_tm_inc(tm, le64_to_cpu(v_le));
+        dm_tm_with_runs(context, value_le, count, dm_tm_inc_range);
 }
 
-static void le64_dec(void *context, const void *value_le)
+static void le64_dec(void *context, const void *value_le, unsigned count)
 {
-        struct dm_transaction_manager *tm = context;
-        __le64 v_le;
-
-        memcpy(&v_le, value_le, sizeof(v_le));
-        dm_tm_dec(tm, le64_to_cpu(v_le));
+        dm_tm_with_runs(context, value_le, count, dm_tm_dec_range);
 }
 
 static int le64_equal(void *context, const void *value1_le, const void *value2_le)
...
@@ -71,15 +71,13 @@ static int upper_bound(struct btree_node *n, uint64_t key)
 void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
                   struct dm_btree_value_type *vt)
 {
-        unsigned i;
         uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);
 
         if (le32_to_cpu(n->header.flags) & INTERNAL_NODE)
-                for (i = 0; i < nr_entries; i++)
-                        dm_tm_inc(tm, value64(n, i));
+                dm_tm_with_runs(tm, value_ptr(n, 0), nr_entries, dm_tm_inc_range);
         else if (vt->inc)
-                for (i = 0; i < nr_entries; i++)
-                        vt->inc(vt->context, value_ptr(n, i));
+                vt->inc(vt->context, value_ptr(n, 0), nr_entries);
 }
 
 static int insert_at(size_t value_size, struct btree_node *node, unsigned index,
 
@@ -318,13 +316,9 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
                                 goto out;
 
                 } else {
-                        if (info->value_type.dec) {
-                                unsigned i;
-
-                                for (i = 0; i < f->nr_children; i++)
-                                        info->value_type.dec(info->value_type.context,
-                                                             value_ptr(f->n, i));
-                        }
+                        if (info->value_type.dec)
+                                info->value_type.dec(info->value_type.context,
+                                                     value_ptr(f->n, 0), f->nr_children);
                         pop_frame(s);
                 }
         }
 
@@ -1146,6 +1140,77 @@ static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
         return 0;
 }
 
+static int __btree_get_overwrite_leaf(struct shadow_spine *s, dm_block_t root,
+                                      uint64_t key, int *index)
+{
+        int r, i = -1;
+        struct btree_node *node;
+
+        *index = 0;
+        for (;;) {
+                r = shadow_step(s, root, &s->info->value_type);
+                if (r < 0)
+                        return r;
+
+                node = dm_block_data(shadow_current(s));
+
+                /*
+                 * We have to patch up the parent node, ugly, but I don't
+                 * see a way to do this automatically as part of the spine
+                 * op.
+                 */
+                if (shadow_has_parent(s) && i >= 0) {
+                        __le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
+
+                        __dm_bless_for_disk(&location);
+                        memcpy_disk(value_ptr(dm_block_data(shadow_parent(s)), i),
+                                    &location, sizeof(__le64));
+                }
+
+                node = dm_block_data(shadow_current(s));
+                i = lower_bound(node, key);
+
+                BUG_ON(i < 0);
+                BUG_ON(i >= le32_to_cpu(node->header.nr_entries));
+
+                if (le32_to_cpu(node->header.flags) & LEAF_NODE) {
+                        if (key != le64_to_cpu(node->keys[i]))
+                                return -EINVAL;
+                        break;
+                }
+
+                root = value64(node, i);
+        }
+
+        *index = i;
+        return 0;
+}
+
+int btree_get_overwrite_leaf(struct dm_btree_info *info, dm_block_t root,
+                             uint64_t key, int *index,
+                             dm_block_t *new_root, struct dm_block **leaf)
+{
+        int r;
+        struct shadow_spine spine;
+
+        BUG_ON(info->levels > 1);
+        init_shadow_spine(&spine, info);
+        r = __btree_get_overwrite_leaf(&spine, root, key, index);
+        if (!r) {
+                *new_root = shadow_root(&spine);
+                *leaf = shadow_current(&spine);
+
+                /*
+                 * Decrement the count so exit_shadow_spine() doesn't
+                 * unlock the leaf.
+                 */
+                spine.count--;
+        }
+        exit_shadow_spine(&spine);
+
+        return r;
+}
+
 static bool need_insert(struct btree_node *node, uint64_t *keys,
                         unsigned level, unsigned index)
 {
 
@@ -1222,7 +1287,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
                                            value_ptr(n, index),
                                            value))) {
                         info->value_type.dec(info->value_type.context,
-                                             value_ptr(n, index));
+                                             value_ptr(n, index), 1);
                 }
 
                 memcpy_disk(value_ptr(n, index),
                             value, info->value_type.size);
...
@@ -51,21 +51,21 @@ struct dm_btree_value_type {
          */
 
         /*
-         * The btree is making a duplicate of the value, for instance
+         * The btree is making a duplicate of a run of values, for instance
          * because previously-shared btree nodes have now diverged.
          * @value argument is the new copy that the copy function may modify.
          * (Probably it just wants to increment a reference count
          * somewhere.)  This method is _not_ called for insertion of a new
          * value: It is assumed the ref count is already 1.
          */
-        void (*inc)(void *context, const void *value);
+        void (*inc)(void *context, const void *value, unsigned count);
 
         /*
-         * This value is being deleted.  The btree takes care of freeing
+         * These values are being deleted.  The btree takes care of freeing
          * the memory pointed to by @value.  Often the del function just
-         * needs to decrement a reference count somewhere.
+         * needs to decrement reference counts somewhere.
          */
-        void (*dec)(void *context, const void *value);
+        void (*dec)(void *context, const void *value, unsigned count);
 
         /*
          * A test for equality between two values.  When a value is
...
@@ -96,12 +96,6 @@ struct disk_bitmap_header {
         __le64 blocknr;
 } __attribute__ ((packed, aligned(8)));
 
-enum allocation_event {
-        SM_NONE,
-        SM_ALLOC,
-        SM_FREE,
-};
-
 /*----------------------------------------------------------------*/
 
 int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks);
 
@@ -111,9 +105,15 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
                           dm_block_t end, dm_block_t *result);
 int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
                                  dm_block_t begin, dm_block_t end, dm_block_t *result);
-int sm_ll_insert(struct ll_disk *ll, dm_block_t b, uint32_t ref_count, enum allocation_event *ev);
-int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev);
-int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev);
+
+/*
+ * The next three functions return (via nr_allocations) the net number of
+ * allocations that were made.  This number may be negative if there were
+ * more frees than allocs.
+ */
+int sm_ll_insert(struct ll_disk *ll, dm_block_t b, uint32_t ref_count, int32_t *nr_allocations);
+int sm_ll_inc(struct ll_disk *ll, dm_block_t b, dm_block_t e, int32_t *nr_allocations);
+int sm_ll_dec(struct ll_disk *ll, dm_block_t b, dm_block_t e, int32_t *nr_allocations);
 int sm_ll_commit(struct ll_disk *ll);
 
 int sm_ll_new_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm);
...
@@ -87,76 +87,39 @@ static int sm_disk_set_count(struct dm_space_map *sm, dm_block_t b,
                              uint32_t count)
 {
         int r;
-        uint32_t old_count;
-        enum allocation_event ev;
+        int32_t nr_allocations;
         struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
 
-        r = sm_ll_insert(&smd->ll, b, count, &ev);
+        r = sm_ll_insert(&smd->ll, b, count, &nr_allocations);
         if (!r) {
-                switch (ev) {
-                case SM_NONE:
-                        break;
-
-                case SM_ALLOC:
-                        /*
-                         * This _must_ be free in the prior transaction
-                         * otherwise we've lost atomicity.
-                         */
-                        smd->nr_allocated_this_transaction++;
-                        break;
-
-                case SM_FREE:
-                        /*
-                         * It's only free if it's also free in the last
-                         * transaction.
-                         */
-                        r = sm_ll_lookup(&smd->old_ll, b, &old_count);
-                        if (r)
-                                return r;
-
-                        if (!old_count)
-                                smd->nr_allocated_this_transaction--;
-                        break;
-                }
+                smd->nr_allocated_this_transaction += nr_allocations;
         }
 
         return r;
 }
 
-static int sm_disk_inc_block(struct dm_space_map *sm, dm_block_t b)
+static int sm_disk_inc_blocks(struct dm_space_map *sm, dm_block_t b, dm_block_t e)
 {
         int r;
-        enum allocation_event ev;
+        int32_t nr_allocations;
         struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
 
-        r = sm_ll_inc(&smd->ll, b, &ev);
-        if (!r && (ev == SM_ALLOC))
-                /*
-                 * This _must_ be free in the prior transaction
-                 * otherwise we've lost atomicity.
-                 */
-                smd->nr_allocated_this_transaction++;
+        r = sm_ll_inc(&smd->ll, b, e, &nr_allocations);
+        if (!r)
+                smd->nr_allocated_this_transaction += nr_allocations;
 
         return r;
 }
 
-static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b)
+static int sm_disk_dec_blocks(struct dm_space_map *sm, dm_block_t b, dm_block_t e)
 {
         int r;
-        uint32_t old_count;
-        enum allocation_event ev;
+        int32_t nr_allocations;
         struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
 
-        r = sm_ll_dec(&smd->ll, b, &ev);
-        if (!r && (ev == SM_FREE)) {
-                /*
-                 * It's only free if it's also free in the last
-                 * transaction.
-                 */
-                r = sm_ll_lookup(&smd->old_ll, b, &old_count);
-                if (!r && !old_count)
-                        smd->nr_allocated_this_transaction--;
-        }
+        r = sm_ll_dec(&smd->ll, b, e, &nr_allocations);
+        if (!r)
+                smd->nr_allocated_this_transaction += nr_allocations;
 
         return r;
 }
 
@@ -164,7 +127,7 @@ static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b)
 static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
 {
         int r;
-        enum allocation_event ev;
+        int32_t nr_allocations;
         struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
 
         /*
 
@@ -183,10 +146,9 @@ static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
                 return r;
 
         smd->begin = *b + 1;
-        r = sm_ll_inc(&smd->ll, *b, &ev);
+        r = sm_ll_inc(&smd->ll, *b, *b + 1, &nr_allocations);
         if (!r) {
-                BUG_ON(ev != SM_ALLOC);
-                smd->nr_allocated_this_transaction++;
+                smd->nr_allocated_this_transaction += nr_allocations;
         }
 
         return r;
 
@@ -242,8 +204,8 @@ static struct dm_space_map ops = {
         .get_count = sm_disk_get_count,
         .count_is_more_than_one = sm_disk_count_is_more_than_one,
         .set_count = sm_disk_set_count,
-        .inc_block = sm_disk_inc_block,
-        .dec_block = sm_disk_dec_block,
+        .inc_blocks = sm_disk_inc_blocks,
+        .dec_blocks = sm_disk_dec_blocks,
         .new_block = sm_disk_new_block,
         .commit = sm_disk_commit,
         .root_size = sm_disk_root_size,
...
@@ -89,7 +89,8 @@ enum block_op_type {
 
 struct block_op {
         enum block_op_type type;
-        dm_block_t block;
+        dm_block_t b;
+        dm_block_t e;
 };
 
 struct bop_ring_buffer {
 
@@ -116,7 +117,7 @@ static unsigned brb_next(struct bop_ring_buffer *brb, unsigned old)
 }
 
 static int brb_push(struct bop_ring_buffer *brb,
-                    enum block_op_type type, dm_block_t b)
+                    enum block_op_type type, dm_block_t b, dm_block_t e)
 {
         struct block_op *bop;
         unsigned next = brb_next(brb, brb->end);
 
@@ -130,7 +131,8 @@ static int brb_push(struct bop_ring_buffer *brb,
 
         bop = brb->bops + brb->end;
         bop->type = type;
-        bop->block = b;
+        bop->b = b;
+        bop->e = e;
 
         brb->end = next;
 
@@ -145,9 +147,7 @@ static int brb_peek(struct bop_ring_buffer *brb, struct block_op *result)
                 return -ENODATA;
 
         bop = brb->bops + brb->begin;
-        result->type = bop->type;
-        result->block = bop->block;
-
+        memcpy(result, bop, sizeof(*result));
         return 0;
 }
 
@@ -178,10 +178,9 @@ struct sm_metadata {
         struct threshold threshold;
 };
 
-static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b)
+static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b, dm_block_t e)
 {
-        int r = brb_push(&smm->uncommitted, type, b);
-
+        int r = brb_push(&smm->uncommitted, type, b, e);
         if (r) {
                 DMERR("too many recursive allocations");
                 return -ENOMEM;
 
@@ -193,15 +192,15 @@ static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t
 static int commit_bop(struct sm_metadata *smm, struct block_op *op)
 {
         int r = 0;
-        enum allocation_event ev;
+        int32_t nr_allocations;
 
         switch (op->type) {
         case BOP_INC:
-                r = sm_ll_inc(&smm->ll, op->block, &ev);
+                r = sm_ll_inc(&smm->ll, op->b, op->e, &nr_allocations);
                 break;
 
         case BOP_DEC:
-                r = sm_ll_dec(&smm->ll, op->block, &ev);
+                r = sm_ll_dec(&smm->ll, op->b, op->e, &nr_allocations);
                 break;
         }
 
@@ -314,7 +313,7 @@ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
              i = brb_next(&smm->uncommitted, i)) {
                 struct block_op *op = smm->uncommitted.bops + i;
 
-                if (op->block != b)
+                if (b < op->b || b >= op->e)
                         continue;
 
                 switch (op->type) {
 
@@ -355,7 +354,7 @@ static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
 
                 struct block_op *op = smm->uncommitted.bops + i;
 
-                if (op->block != b)
+                if (b < op->b || b >= op->e)
                         continue;
 
                 switch (op->type) {
 
@@ -393,7 +392,7 @@ static int sm_metadata_set_count(struct dm_space_map *sm, dm_block_t b,
                            uint32_t count)
 {
         int r, r2;
-        enum allocation_event ev;
+        int32_t nr_allocations;
         struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
 
         if (smm->recursion_count) {
 
@@ -402,40 +401,42 @@ static int sm_metadata_set_count(struct dm_space_map *sm, dm_block_t b,
         }
 
         in(smm);
-        r = sm_ll_insert(&smm->ll, b, count, &ev);
+        r = sm_ll_insert(&smm->ll, b, count, &nr_allocations);
         r2 = out(smm);
 
         return combine_errors(r, r2);
 }
 
-static int sm_metadata_inc_block(struct dm_space_map *sm, dm_block_t b)
+static int sm_metadata_inc_blocks(struct dm_space_map *sm, dm_block_t b, dm_block_t e)
 {
         int r, r2 = 0;
-        enum allocation_event ev;
+        int32_t nr_allocations;
         struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
 
-        if (recursing(smm))
-                r = add_bop(smm, BOP_INC, b);
-        else {
+        if (recursing(smm)) {
+                r = add_bop(smm, BOP_INC, b, e);
+                if (r)
+                        return r;
+        } else {
                 in(smm);
-                r = sm_ll_inc(&smm->ll, b, &ev);
+                r = sm_ll_inc(&smm->ll, b, e, &nr_allocations);
                 r2 = out(smm);
         }
 
         return combine_errors(r, r2);
 }
 
-static int sm_metadata_dec_block(struct dm_space_map *sm, dm_block_t b)
+static int sm_metadata_dec_blocks(struct dm_space_map *sm, dm_block_t b, dm_block_t e)
 {
         int r, r2 = 0;
-        enum allocation_event ev;
+        int32_t nr_allocations;
         struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
 
         if (recursing(smm))
-                r = add_bop(smm, BOP_DEC, b);
+                r = add_bop(smm, BOP_DEC, b, e);
         else {
                 in(smm);
-                r = sm_ll_dec(&smm->ll, b, &ev);
+                r = sm_ll_dec(&smm->ll, b, e, &nr_allocations);
                 r2 = out(smm);
         }
 
@@ -445,7 +446,7 @@ static int sm_metadata_dec_block(struct dm_space_map *sm, dm_block_t b)
 static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b)
 {
         int r, r2 = 0;
-        enum allocation_event ev;
+        int32_t nr_allocations;
         struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
 
         /*
 
@@ -466,10 +467,10 @@ static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b)
         smm->begin = *b + 1;
 
         if (recursing(smm))
-                r = add_bop(smm, BOP_INC, *b);
+                r = add_bop(smm, BOP_INC, *b, *b + 1);
         else {
                 in(smm);
-                r = sm_ll_inc(&smm->ll, *b, &ev);
+                r = sm_ll_inc(&smm->ll, *b, *b + 1, &nr_allocations);
                 r2 = out(smm);
         }
 
@@ -563,8 +564,8 @@ static const struct dm_space_map ops = {
         .get_count = sm_metadata_get_count,
         .count_is_more_than_one = sm_metadata_count_is_more_than_one,
         .set_count = sm_metadata_set_count,
-        .inc_block = sm_metadata_inc_block,
-        .dec_block = sm_metadata_dec_block,
+        .inc_blocks = sm_metadata_inc_blocks,
+        .dec_blocks = sm_metadata_dec_blocks,
         .new_block = sm_metadata_new_block,
         .commit = sm_metadata_commit,
         .root_size = sm_metadata_root_size,
 
@@ -648,18 +649,28 @@ static int sm_bootstrap_new_block(struct dm_space_map *sm, dm_block_t *b)
         return 0;
 }
 
-static int sm_bootstrap_inc_block(struct dm_space_map *sm, dm_block_t b)
+static int sm_bootstrap_inc_blocks(struct dm_space_map *sm, dm_block_t b, dm_block_t e)
 {
+        int r;
         struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
 
-        return add_bop(smm, BOP_INC, b);
+        r = add_bop(smm, BOP_INC, b, e);
+        if (r)
+                return r;
+
+        return 0;
 }
 
-static int sm_bootstrap_dec_block(struct dm_space_map *sm, dm_block_t b)
+static int sm_bootstrap_dec_blocks(struct dm_space_map *sm, dm_block_t b, dm_block_t e)
 {
+        int r;
         struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
 
-        return add_bop(smm, BOP_DEC, b);
+        r = add_bop(smm, BOP_DEC, b, e);
+        if (r)
+                return r;
+
+        return 0;
 }
 
 static int sm_bootstrap_commit(struct dm_space_map *sm)
 
@@ -690,8 +701,8 @@ static const struct dm_space_map bootstrap_ops = {
         .get_count = sm_bootstrap_get_count,
         .count_is_more_than_one = sm_bootstrap_count_is_more_than_one,
         .set_count = sm_bootstrap_set_count,
-        .inc_block = sm_bootstrap_inc_block,
-        .dec_block = sm_bootstrap_dec_block,
+        .inc_blocks = sm_bootstrap_inc_blocks,
+        .dec_blocks = sm_bootstrap_dec_blocks,
         .new_block = sm_bootstrap_new_block,
         .commit = sm_bootstrap_commit,
         .root_size = sm_bootstrap_root_size,
 
@@ -703,7 +714,7 @@ static const struct dm_space_map bootstrap_ops = {
 
 static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
 {
-        int r, i;
+        int r;
         struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
         dm_block_t old_len = smm->ll.nr_blocks;
 
@@ -725,9 +736,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
          * allocate any new blocks.
          */
         do {
-                for (i = old_len; !r && i < smm->begin; i++)
-                        r = add_bop(smm, BOP_INC, i);
-
+                r = add_bop(smm, BOP_INC, old_len, smm->begin);
                 if (r)
                         goto out;
 
@@ -774,7 +783,6 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
                           dm_block_t superblock)
 {
         int r;
-        dm_block_t i;
         struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
 
         smm->begin = superblock + 1;
 
@@ -799,9 +807,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
          * Now we need to update the newly created data structures with the
          * allocated blocks that they were built from.
          */
-        for (i = superblock; !r && i < smm->begin; i++)
-                r = add_bop(smm, BOP_INC, i);
-
+        r = add_bop(smm, BOP_INC, superblock, smm->begin);
         if (r)
                 return r;
...
@@ -46,8 +46,8 @@ struct dm_space_map {
 
         int (*commit)(struct dm_space_map *sm);
 
-        int (*inc_block)(struct dm_space_map *sm, dm_block_t b);
-        int (*dec_block)(struct dm_space_map *sm, dm_block_t b);
+        int (*inc_blocks)(struct dm_space_map *sm, dm_block_t b, dm_block_t e);
+        int (*dec_blocks)(struct dm_space_map *sm, dm_block_t b, dm_block_t e);
 
         /*
          * new_block will increment the returned block.
 
@@ -117,14 +117,24 @@ static inline int dm_sm_commit(struct dm_space_map *sm)
         return sm->commit(sm);
 }
 
+static inline int dm_sm_inc_blocks(struct dm_space_map *sm, dm_block_t b, dm_block_t e)
+{
+        return sm->inc_blocks(sm, b, e);
+}
+
 static inline int dm_sm_inc_block(struct dm_space_map *sm, dm_block_t b)
 {
-        return sm->inc_block(sm, b);
+        return dm_sm_inc_blocks(sm, b, b + 1);
+}
+
+static inline int dm_sm_dec_blocks(struct dm_space_map *sm, dm_block_t b, dm_block_t e)
+{
+        return sm->dec_blocks(sm, b, e);
 }
 
 static inline int dm_sm_dec_block(struct dm_space_map *sm, dm_block_t b)
 {
-        return sm->dec_block(sm, b);
+        return dm_sm_dec_blocks(sm, b, b + 1);
 }
 
 static inline int dm_sm_new_block(struct dm_space_map *sm, dm_block_t *b)
...
@@ -359,6 +359,17 @@ void dm_tm_inc(struct dm_transaction_manager *tm, dm_block_t b)
 }
 EXPORT_SYMBOL_GPL(dm_tm_inc);
 
+void dm_tm_inc_range(struct dm_transaction_manager *tm, dm_block_t b, dm_block_t e)
+{
+        /*
+         * The non-blocking clone doesn't support this.
+         */
+        BUG_ON(tm->is_clone);
+
+        dm_sm_inc_blocks(tm->sm, b, e);
+}
+EXPORT_SYMBOL_GPL(dm_tm_inc_range);
+
 void dm_tm_dec(struct dm_transaction_manager *tm, dm_block_t b)
 {
         /*
 
@@ -370,6 +381,47 @@ void dm_tm_dec(struct dm_transaction_manager *tm, dm_block_t b)
 }
 EXPORT_SYMBOL_GPL(dm_tm_dec);
 
+void dm_tm_dec_range(struct dm_transaction_manager *tm, dm_block_t b, dm_block_t e)
+{
+        /*
+         * The non-blocking clone doesn't support this.
+         */
+        BUG_ON(tm->is_clone);
+
+        dm_sm_dec_blocks(tm->sm, b, e);
+}
+EXPORT_SYMBOL_GPL(dm_tm_dec_range);
+
+void dm_tm_with_runs(struct dm_transaction_manager *tm,
+                     const __le64 *value_le, unsigned count, dm_tm_run_fn fn)
+{
+        uint64_t b, begin, end;
+        bool in_run = false;
+        unsigned i;
+
+        for (i = 0; i < count; i++, value_le++) {
+                b = le64_to_cpu(*value_le);
+
+                if (in_run) {
+                        if (b == end)
+                                end++;
+                        else {
+                                fn(tm, begin, end);
+                                begin = b;
+                                end = b + 1;
+                        }
+                } else {
+                        in_run = true;
+                        begin = b;
+                        end = b + 1;
+                }
+        }
+
+        if (in_run)
+                fn(tm, begin, end);
+}
+EXPORT_SYMBOL_GPL(dm_tm_with_runs);
+
 int dm_tm_ref(struct dm_transaction_manager *tm, dm_block_t b,
               uint32_t *result)
 {
...
@@ -100,8 +100,18 @@ void dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b);
  * Functions for altering the reference count of a block directly.
  */
 void dm_tm_inc(struct dm_transaction_manager *tm, dm_block_t b);
+void dm_tm_inc_range(struct dm_transaction_manager *tm, dm_block_t b, dm_block_t e);
 void dm_tm_dec(struct dm_transaction_manager *tm, dm_block_t b);
+void dm_tm_dec_range(struct dm_transaction_manager *tm, dm_block_t b, dm_block_t e);
+
+/*
+ * Builds up runs of adjacent blocks, and then calls the given fn
+ * (typically dm_tm_inc/dec).  Very useful when you have to perform
+ * the same tm operation on all values in a btree leaf.
+ */
+typedef void (*dm_tm_run_fn)(struct dm_transaction_manager *, dm_block_t, dm_block_t);
+void dm_tm_with_runs(struct dm_transaction_manager *tm,
+                     const __le64 *value_le, unsigned count, dm_tm_run_fn fn);
 
 int dm_tm_ref(struct dm_transaction_manager *tm, dm_block_t b, uint32_t *result);
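As a closing usage note (a sketch, not part of the patch): a btree value type whose 8-byte little-endian values are block addresses can now batch its reference-count updates by handing the whole run of values to dm_tm_with_runs(), exactly as le64_inc()/le64_dec() do in the btree spine hunk above. The example_* names below are hypothetical.

/* Hypothetical value_type callbacks; context is the dm_transaction_manager. */
static void example_inc(void *context, const void *value_le, unsigned count)
{
        /* Coalesce adjacent block numbers into [begin, end) range ops. */
        dm_tm_with_runs(context, value_le, count, dm_tm_inc_range);
}

static void example_dec(void *context, const void *value_le, unsigned count)
{
        dm_tm_with_runs(context, value_le, count, dm_tm_dec_range);
}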