Commit 49008f0c authored by Linus Torvalds

Merge tag 'for-5.17/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper updates from Mike Snitzer:

 - Fixes and improvements to dm btree and dm space map code in
   persistent-data library used by thinp and cache.

 - Update DM integrity to use struct_group() to zero struct
   journal_sector.

 - Update DM sysfs to use default_groups in kobj_type.

* tag 'for-5.17/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm sysfs: use default_groups in kobj_type
  dm integrity: Use struct_group() to zero struct journal_sector
  dm space map common: add bounds check to sm_ll_lookup_bitmap()
  dm btree: add a defensive bounds check to insert_at()
  dm btree remove: change a bunch of BUG_ON() calls to proper errors
  dm btree spine: eliminate duplicate le32_to_cpu() in node_check()
  dm btree spine: remove extra node_check function declaration
parents c9193f48 eaac0b59
drivers/md/dm-integrity.c
@@ -121,8 +121,10 @@ struct journal_entry {
 #define JOURNAL_MAC_SIZE (JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)
 
 struct journal_sector {
-        __u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
-        __u8 mac[JOURNAL_MAC_PER_SECTOR];
+        struct_group(sectors,
+                __u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
+                __u8 mac[JOURNAL_MAC_PER_SECTOR];
+        );
         commit_id_t commit_id;
 };
@@ -2870,7 +2872,8 @@ static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
                 wraparound_section(ic, &i);
                 for (j = 0; j < ic->journal_section_sectors; j++) {
                         struct journal_sector *js = access_journal(ic, i, j);
-                        memset(&js->entries, 0, JOURNAL_SECTOR_DATA);
+                        BUILD_BUG_ON(sizeof(js->sectors) != JOURNAL_SECTOR_DATA);
+                        memset(&js->sectors, 0, sizeof(js->sectors));
                         js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
                 }
                 for (j = 0; j < ic->journal_section_entries; j++) {
...
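For context: struct_group(), used in the hunk above, is the helper from <linux/stddef.h> (added during the v5.16 cycle) that wraps a run of adjacent struct members in a named group, so the whole run can be cleared or copied with one correctly bounded memset()/memcpy() under FORTIFY_SOURCE instead of writing past the first member. A minimal sketch of the pattern, with hypothetical names (struct demo, wire, payload, mac and demo_clear_wire are illustrative, not taken from dm-integrity):

    #include <linux/build_bug.h>    /* BUILD_BUG_ON() */
    #include <linux/stddef.h>       /* struct_group() */
    #include <linux/string.h>       /* memset() */
    #include <linux/types.h>

    struct demo {
            u32 hdr;
            struct_group(wire,              /* members that are zeroed together */
                    u8 payload[60];
                    u8 mac[4];
            );
            u64 seq;                        /* deliberately outside the group */
    };

    static void demo_clear_wire(struct demo *d)
    {
            /* compile-time check that the group has the expected size */
            BUILD_BUG_ON(sizeof(d->wire) != 64);
            /* one bounded call zeroes payload and mac, leaving hdr and seq alone */
            memset(&d->wire, 0, sizeof(d->wire));
    }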
drivers/md/dm-sysfs.c
@@ -112,6 +112,7 @@ static struct attribute *dm_attrs[] = {
         &dm_attr_rq_based_seq_io_merge_deadline.attr,
         NULL,
 };
+ATTRIBUTE_GROUPS(dm);
 
 static const struct sysfs_ops dm_sysfs_ops = {
         .show = dm_attr_show,
@@ -120,7 +121,7 @@ static const struct sysfs_ops dm_sysfs_ops = {
 static struct kobj_type dm_ktype = {
         .sysfs_ops = &dm_sysfs_ops,
-        .default_attrs = dm_attrs,
+        .default_groups = dm_groups,
         .release = dm_kobject_release,
 };
...
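For context: ATTRIBUTE_GROUPS(dm) is the stock macro from <linux/sysfs.h> that builds a dm_group and a NULL-terminated dm_groups[] array out of the existing dm_attrs[] table; that generated array is what .default_groups points at, replacing the deprecated .default_attrs pointer in struct kobj_type. A minimal sketch of the same pattern with hypothetical names (foo_show, foo_attr, foo_attrs, foo_ktype are illustrative only):

    #include <linux/kobject.h>
    #include <linux/sysfs.h>

    static ssize_t foo_show(struct kobject *kobj, struct kobj_attribute *attr,
                            char *buf)
    {
            return sysfs_emit(buf, "hello\n");
    }
    static struct kobj_attribute foo_attr = __ATTR_RO(foo);

    static struct attribute *foo_attrs[] = {
            &foo_attr.attr,
            NULL,
    };
    ATTRIBUTE_GROUPS(foo);                  /* generates foo_group and foo_groups[] */

    static struct kobj_type foo_ktype = {
            .sysfs_ops      = &kobj_sysfs_ops,
            .default_groups = foo_groups,   /* was: .default_attrs = foo_attrs */
            /* a real ktype would normally also set .release */
    };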
drivers/md/persistent-data/dm-btree-remove.c
@@ -9,6 +9,9 @@
 #include "dm-transaction-manager.h"
 
 #include <linux/export.h>
+#include <linux/device-mapper.h>
+
+#define DM_MSG_PREFIX "btree"
 
 /*
  * Removing an entry from a btree
@@ -79,15 +82,23 @@ static void node_shift(struct btree_node *n, int shift)
         }
 }
 
-static void node_copy(struct btree_node *left, struct btree_node *right, int shift)
+static int node_copy(struct btree_node *left, struct btree_node *right, int shift)
 {
         uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
         uint32_t value_size = le32_to_cpu(left->header.value_size);
-        BUG_ON(value_size != le32_to_cpu(right->header.value_size));
+
+        if (value_size != le32_to_cpu(right->header.value_size)) {
+                DMERR("mismatched value size");
+                return -EILSEQ;
+        }
 
         if (shift < 0) {
                 shift = -shift;
-                BUG_ON(nr_left + shift > le32_to_cpu(left->header.max_entries));
+
+                if (nr_left + shift > le32_to_cpu(left->header.max_entries)) {
+                        DMERR("bad shift");
+                        return -EINVAL;
+                }
+
                 memcpy(key_ptr(left, nr_left),
                        key_ptr(right, 0),
                        shift * sizeof(__le64));
@@ -95,7 +106,11 @@ static void node_copy(struct btree_node *left, struct btree_node *right, int shift)
                        value_ptr(right, 0),
                        shift * value_size);
         } else {
-                BUG_ON(shift > le32_to_cpu(right->header.max_entries));
+                if (shift > le32_to_cpu(right->header.max_entries)) {
+                        DMERR("bad shift");
+                        return -EINVAL;
+                }
+
                 memcpy(key_ptr(right, 0),
                        key_ptr(left, nr_left - shift),
                        shift * sizeof(__le64));
@@ -103,6 +118,7 @@ static void node_copy(struct btree_node *left, struct btree_node *right, int shift)
                        value_ptr(left, nr_left - shift),
                        shift * value_size);
         }
+        return 0;
 }
 
 /*
@@ -170,35 +186,54 @@ static void exit_child(struct dm_btree_info *info, struct child *c)
         dm_tm_unlock(info->tm, c->block);
 }
 
-static void shift(struct btree_node *left, struct btree_node *right, int count)
+static int shift(struct btree_node *left, struct btree_node *right, int count)
 {
+        int r;
         uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
         uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
         uint32_t max_entries = le32_to_cpu(left->header.max_entries);
         uint32_t r_max_entries = le32_to_cpu(right->header.max_entries);
 
-        BUG_ON(max_entries != r_max_entries);
-        BUG_ON(nr_left - count > max_entries);
-        BUG_ON(nr_right + count > max_entries);
+        if (max_entries != r_max_entries) {
+                DMERR("node max_entries mismatch");
+                return -EILSEQ;
+        }
+
+        if (nr_left - count > max_entries) {
+                DMERR("node shift out of bounds");
+                return -EINVAL;
+        }
+
+        if (nr_right + count > max_entries) {
+                DMERR("node shift out of bounds");
+                return -EINVAL;
+        }
 
         if (!count)
-                return;
+                return 0;
 
         if (count > 0) {
                 node_shift(right, count);
-                node_copy(left, right, count);
+                r = node_copy(left, right, count);
+                if (r)
+                        return r;
         } else {
-                node_copy(left, right, count);
+                r = node_copy(left, right, count);
+                if (r)
+                        return r;
                 node_shift(right, count);
         }
 
         left->header.nr_entries = cpu_to_le32(nr_left - count);
         right->header.nr_entries = cpu_to_le32(nr_right + count);
+
+        return 0;
 }
 
-static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
+static int __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
                          struct child *l, struct child *r)
 {
+        int ret;
         struct btree_node *left = l->n;
         struct btree_node *right = r->n;
         uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
@@ -229,9 +264,12 @@ static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
                  * Rebalance.
                  */
                 unsigned target_left = (nr_left + nr_right) / 2;
-                shift(left, right, nr_left - target_left);
+                ret = shift(left, right, nr_left - target_left);
+                if (ret)
+                        return ret;
                 *key_ptr(parent, r->index) = right->keys[0];
         }
+        return 0;
 }
 
 static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
@@ -253,12 +291,12 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
                 return r;
         }
 
-        __rebalance2(info, parent, &left, &right);
+        r = __rebalance2(info, parent, &left, &right);
 
         exit_child(info, &left);
         exit_child(info, &right);
 
-        return 0;
+        return r;
 }
 
 /*
@@ -266,7 +304,7 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
  * in right, then rebalance2.  This wastes some cpu, but I want something
  * simple atm.
  */
-static void delete_center_node(struct dm_btree_info *info, struct btree_node *parent,
+static int delete_center_node(struct dm_btree_info *info, struct btree_node *parent,
                                struct child *l, struct child *c, struct child *r,
                                struct btree_node *left, struct btree_node *center, struct btree_node *right,
                                uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
@@ -274,13 +312,22 @@ static void delete_center_node(struct dm_btree_info *info, struct btree_node *parent,
         uint32_t max_entries = le32_to_cpu(left->header.max_entries);
         unsigned shift = min(max_entries - nr_left, nr_center);
 
-        BUG_ON(nr_left + shift > max_entries);
+        if (nr_left + shift > max_entries) {
+                DMERR("node shift out of bounds");
+                return -EINVAL;
+        }
+
         node_copy(left, center, -shift);
         left->header.nr_entries = cpu_to_le32(nr_left + shift);
 
         if (shift != nr_center) {
                 shift = nr_center - shift;
-                BUG_ON((nr_right + shift) > max_entries);
+
+                if ((nr_right + shift) > max_entries) {
+                        DMERR("node shift out of bounds");
+                        return -EINVAL;
+                }
+
                 node_shift(right, shift);
                 node_copy(center, right, shift);
                 right->header.nr_entries = cpu_to_le32(nr_right + shift);
@@ -291,18 +338,18 @@ static void delete_center_node(struct dm_btree_info *info, struct btree_node *parent,
         r->index--;
 
         dm_tm_dec(info->tm, dm_block_location(c->block));
-        __rebalance2(info, parent, l, r);
+        return __rebalance2(info, parent, l, r);
 }
 
 /*
  * Redistributes entries among 3 sibling nodes.
  */
-static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
+static int redistribute3(struct dm_btree_info *info, struct btree_node *parent,
                           struct child *l, struct child *c, struct child *r,
                           struct btree_node *left, struct btree_node *center, struct btree_node *right,
                           uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
 {
-        int s;
+        int s, ret;
         uint32_t max_entries = le32_to_cpu(left->header.max_entries);
         unsigned total = nr_left + nr_center + nr_right;
         unsigned target_right = total / 3;
@@ -317,34 +364,54 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
                 if (s < 0 && nr_center < -s) {
                         /* not enough in central node */
-                        shift(left, center, -nr_center);
+                        ret = shift(left, center, -nr_center);
+                        if (ret)
+                                return ret;
                         s += nr_center;
-                        shift(left, right, s);
+                        ret = shift(left, right, s);
+                        if (ret)
+                                return ret;
                         nr_right += s;
-                } else
-                        shift(left, center, s);
-
-                shift(center, right, target_right - nr_right);
+                } else {
+                        ret = shift(left, center, s);
+                        if (ret)
+                                return ret;
+                }
+
+                ret = shift(center, right, target_right - nr_right);
+                if (ret)
+                        return ret;
         } else {
                 s = target_right - nr_right;
                 if (s > 0 && nr_center < s) {
                         /* not enough in central node */
-                        shift(center, right, nr_center);
+                        ret = shift(center, right, nr_center);
+                        if (ret)
+                                return ret;
                         s -= nr_center;
-                        shift(left, right, s);
+                        ret = shift(left, right, s);
+                        if (ret)
+                                return ret;
                         nr_left -= s;
-                } else
-                        shift(center, right, s);
-
-                shift(left, center, nr_left - target_left);
+                } else {
+                        ret = shift(center, right, s);
+                        if (ret)
+                                return ret;
+                }
+
+                ret = shift(left, center, nr_left - target_left);
+                if (ret)
+                        return ret;
         }
 
         *key_ptr(parent, c->index) = center->keys[0];
         *key_ptr(parent, r->index) = right->keys[0];
+
+        return 0;
 }
 
-static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
+static int __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
                         struct child *l, struct child *c, struct child *r)
 {
         struct btree_node *left = l->n;
@@ -357,14 +424,18 @@ static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
         unsigned threshold = merge_threshold(left) * 4 + 1;
 
-        BUG_ON(left->header.max_entries != center->header.max_entries);
-        BUG_ON(center->header.max_entries != right->header.max_entries);
+        if ((left->header.max_entries != center->header.max_entries) ||
+            (center->header.max_entries != right->header.max_entries)) {
+                DMERR("bad btree metadata, max_entries differ");
+                return -EILSEQ;
+        }
 
-        if ((nr_left + nr_center + nr_right) < threshold)
-                delete_center_node(info, parent, l, c, r, left, center, right,
-                                   nr_left, nr_center, nr_right);
-        else
-                redistribute3(info, parent, l, c, r, left, center, right,
-                              nr_left, nr_center, nr_right);
+        if ((nr_left + nr_center + nr_right) < threshold) {
+                return delete_center_node(info, parent, l, c, r, left, center, right,
+                                          nr_left, nr_center, nr_right);
+        }
+
+        return redistribute3(info, parent, l, c, r, left, center, right,
+                             nr_left, nr_center, nr_right);
 }
@@ -395,13 +466,13 @@ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
                 return r;
         }
 
-        __rebalance3(info, parent, &left, &center, &right);
+        r = __rebalance3(info, parent, &left, &center, &right);
 
         exit_child(info, &left);
         exit_child(info, &center);
         exit_child(info, &right);
 
-        return 0;
+        return r;
 }
 
 static int rebalance_children(struct shadow_spine *s,
...
drivers/md/persistent-data/dm-btree-spine.c
@@ -15,10 +15,6 @@
 #define BTREE_CSUM_XOR 121107
 
-static int node_check(struct dm_block_validator *v,
-                      struct dm_block *b,
-                      size_t block_size);
-
 static void node_prepare_for_write(struct dm_block_validator *v,
                                    struct dm_block *b,
                                    size_t block_size)
@@ -40,7 +36,7 @@ static int node_check(struct dm_block_validator *v,
         struct node_header *h = &n->header;
         size_t value_size;
         __le32 csum_disk;
-        uint32_t flags;
+        uint32_t flags, nr_entries, max_entries;
 
         if (dm_block_location(b) != le64_to_cpu(h->blocknr)) {
                 DMERR_LIMIT("node_check failed: blocknr %llu != wanted %llu",
@@ -57,15 +53,17 @@ static int node_check(struct dm_block_validator *v,
                 return -EILSEQ;
         }
 
+        nr_entries = le32_to_cpu(h->nr_entries);
+        max_entries = le32_to_cpu(h->max_entries);
         value_size = le32_to_cpu(h->value_size);
 
         if (sizeof(struct node_header) +
-            (sizeof(__le64) + value_size) * le32_to_cpu(h->max_entries) > block_size) {
+            (sizeof(__le64) + value_size) * max_entries > block_size) {
                 DMERR_LIMIT("node_check failed: max_entries too large");
                 return -EILSEQ;
         }
 
-        if (le32_to_cpu(h->nr_entries) > le32_to_cpu(h->max_entries)) {
+        if (nr_entries > max_entries) {
                 DMERR_LIMIT("node_check failed: too many entries");
                 return -EILSEQ;
         }
...
drivers/md/persistent-data/dm-btree.c
@@ -85,10 +85,12 @@ static int insert_at(size_t value_size, struct btree_node *node, unsigned index,
                      __dm_written_to_disk(value)
 {
         uint32_t nr_entries = le32_to_cpu(node->header.nr_entries);
+        uint32_t max_entries = le32_to_cpu(node->header.max_entries);
         __le64 key_le = cpu_to_le64(key);
 
         if (index > nr_entries ||
-            index >= le32_to_cpu(node->header.max_entries)) {
+            index >= max_entries ||
+            nr_entries >= max_entries) {
                 DMERR("too many entries in btree node for insert");
                 __dm_unbless_for_disk(value);
                 return -ENOMEM;
...
drivers/md/persistent-data/dm-space-map-common.c
@@ -283,6 +283,11 @@ int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result)
         struct disk_index_entry ie_disk;
         struct dm_block *blk;
 
+        if (b >= ll->nr_blocks) {
+                DMERR_LIMIT("metadata block out of bounds");
+                return -EINVAL;
+        }
+
         b = do_div(index, ll->entries_per_block);
         r = ll->load_ie(ll, index, &ie_disk);
         if (r < 0)
...
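For context: the line just below the new check uses do_div() from <asm/div64.h>, which divides a 64-bit variable in place and returns the 32-bit remainder; afterwards 'index' holds the index-entry number to load and 'b' the position within that bitmap block. A minimal sketch of the idiom with hypothetical names (split_block and its parameters are illustrative, not dm code):

    #include <asm/div64.h>          /* do_div() */
    #include <linux/types.h>

    static void split_block(u64 block, u32 entries_per_block,
                            u64 *index_entry, u32 *offset)
    {
            u64 index = block;

            /* do_div() divides 'index' in place and hands back the remainder */
            *offset = do_div(index, entries_per_block);
            *index_entry = index;   /* quotient: which index entry to load */
    }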