Commit 7af81cd0 authored by Linus Torvalds

Merge tag 'for-5.13/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper updates from Mike Snitzer:

 - Improve scalability of DM's device hash by switching to rbtree

 - Extend DM ioctl's DM_LIST_DEVICES_CMD handling to include UUID and
   allow filtering based on name or UUID prefix.

 - Various small fixes for typos, warnings, an unused function, and
   needlessly exported interfaces.

 - Remove needless request_queue NULL pointer checks in DM thin and
   cache targets.

 - Remove unnecessary loop in DM core's __split_and_process_bio().

 - Remove DM core's dm_vcalloc() and just use kvcalloc or kvmalloc_array
   instead (depending on whether zeroing is useful); see the allocation
   sketch after this list.

 - Fix request-based DM's double free of blk_mq_tag_set in device remove
   after table load fails.

 - Improve DM persistent data performance on non-x86 by giving packed
   on-disk structs a stated alignment (see the struct sketch after this
   list). Also remove needless extra work from redundant calls to
   sm_disk_get_nr_free() and a paranoid BUG_ON() that caused duplicate
   checksum calculation.

 - Fix missing goto in DM integrity's bitmap_flush_interval error
   handling.

 - Add "reset_recalculate" feature flag to DM integrity.

 - Improve DM integrity by leveraging discard support to avoid needless
   re-writing of metadata and to speed up hash recalculation.

 - Fix race between DM raid target's reshape and MD raid4/5/6 resync that
   resulted in inconsistent reshape state during table reloads.

 - Update DM raid target to remove unnecessary discard limits for raid0
   and raid10 now that MD has optimized discard handling for both raid
   levels.

* tag 'for-5.13/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm: (26 commits)
  dm raid: remove unnecessary discard limits for raid0 and raid10
  dm rq: fix double free of blk_mq_tag_set in dev remove after table load fails
  dm integrity: use discard support when recalculating
  dm integrity: increase RECALC_SECTORS to improve recalculate speed
  dm integrity: don't re-write metadata if discarding same blocks
  dm raid: fix inconclusive reshape layout on fast raid4/5/6 table reload sequences
  dm raid: fix fall-through warning in rs_check_takeover() for Clang
  dm clone metadata: remove unused function
  dm integrity: fix missing goto in bitmap_flush_interval error handling
  dm: replace dm_vcalloc()
  dm space map common: fix division bug in sm_ll_find_free_block()
  dm persistent data: packed struct should have an aligned() attribute too
  dm btree spine: remove paranoid node_check call in node_prep_for_write()
  dm space map disk: remove redundant calls to sm_disk_get_nr_free()
  dm integrity: add the "reset_recalculate" feature flag
  dm persistent data: remove unused return from exit_shadow_spine()
  dm cache: remove needless request_queue NULL pointer checks
  dm thin: remove needless request_queue NULL pointer check
  dm: unexport dm_{get,put}_table_device
  dm ebs: fix a few typos
  ...
parents 152d32aa ca4a4e9a
...@@ -3387,7 +3387,7 @@ static bool origin_dev_supports_discard(struct block_device *origin_bdev) ...@@ -3387,7 +3387,7 @@ static bool origin_dev_supports_discard(struct block_device *origin_bdev)
{ {
struct request_queue *q = bdev_get_queue(origin_bdev); struct request_queue *q = bdev_get_queue(origin_bdev);
return q && blk_queue_discard(q); return blk_queue_discard(q);
} }
/* /*
......
...@@ -276,12 +276,6 @@ static inline int superblock_read_lock(struct dm_clone_metadata *cmd, ...@@ -276,12 +276,6 @@ static inline int superblock_read_lock(struct dm_clone_metadata *cmd,
return dm_bm_read_lock(cmd->bm, SUPERBLOCK_LOCATION, &sb_validator, sblock); return dm_bm_read_lock(cmd->bm, SUPERBLOCK_LOCATION, &sb_validator, sblock);
} }
static inline int superblock_write_lock(struct dm_clone_metadata *cmd,
struct dm_block **sblock)
{
return dm_bm_write_lock(cmd->bm, SUPERBLOCK_LOCATION, &sb_validator, sblock);
}
static inline int superblock_write_lock_zero(struct dm_clone_metadata *cmd, static inline int superblock_write_lock_zero(struct dm_clone_metadata *cmd,
struct dm_block **sblock) struct dm_block **sblock)
{ {
......
...@@ -28,7 +28,7 @@ struct ebs_c { ...@@ -28,7 +28,7 @@ struct ebs_c {
spinlock_t lock; /* Guard bios input list above. */ spinlock_t lock; /* Guard bios input list above. */
sector_t start; /* <start> table line argument, see ebs_ctr below. */ sector_t start; /* <start> table line argument, see ebs_ctr below. */
unsigned int e_bs; /* Emulated block size in sectors exposed to upper layer. */ unsigned int e_bs; /* Emulated block size in sectors exposed to upper layer. */
unsigned int u_bs; /* Underlying block size in sectors retrievd from/set on lower layer device. */ unsigned int u_bs; /* Underlying block size in sectors retrieved from/set on lower layer device. */
unsigned char block_shift; /* bitshift sectors -> blocks used in dm-bufio API. */ unsigned char block_shift; /* bitshift sectors -> blocks used in dm-bufio API. */
bool u_bs_set:1; /* Flag to indicate underlying block size is set on table line. */ bool u_bs_set:1; /* Flag to indicate underlying block size is set on table line. */
}; };
...@@ -43,7 +43,7 @@ static inline sector_t __block_mod(sector_t sector, unsigned int bs) ...@@ -43,7 +43,7 @@ static inline sector_t __block_mod(sector_t sector, unsigned int bs)
return sector & (bs - 1); return sector & (bs - 1);
} }
/* Return number of blocks for a bio, accounting for misalignement of start and end sectors. */ /* Return number of blocks for a bio, accounting for misalignment of start and end sectors. */
static inline unsigned int __nr_blocks(struct ebs_c *ec, struct bio *bio) static inline unsigned int __nr_blocks(struct ebs_c *ec, struct bio *bio)
{ {
sector_t end_sector = __block_mod(bio->bi_iter.bi_sector, ec->u_bs) + bio_sectors(bio); sector_t end_sector = __block_mod(bio->bi_iter.bi_sector, ec->u_bs) + bio_sectors(bio);
...@@ -171,7 +171,7 @@ static void __ebs_forget_bio(struct ebs_c *ec, struct bio *bio) ...@@ -171,7 +171,7 @@ static void __ebs_forget_bio(struct ebs_c *ec, struct bio *bio)
dm_bufio_forget_buffers(ec->bufio, __sector_to_block(ec, sector), blocks); dm_bufio_forget_buffers(ec->bufio, __sector_to_block(ec, sector), blocks);
} }
/* Worker funtion to process incoming bios. */ /* Worker function to process incoming bios. */
static void __ebs_process_bios(struct work_struct *ws) static void __ebs_process_bios(struct work_struct *ws)
{ {
int r; int r;
......
...@@ -35,7 +35,7 @@ ...@@ -35,7 +35,7 @@
#define MIN_LOG2_INTERLEAVE_SECTORS 3 #define MIN_LOG2_INTERLEAVE_SECTORS 3
#define MAX_LOG2_INTERLEAVE_SECTORS 31 #define MAX_LOG2_INTERLEAVE_SECTORS 31
#define METADATA_WORKQUEUE_MAX_ACTIVE 16 #define METADATA_WORKQUEUE_MAX_ACTIVE 16
#define RECALC_SECTORS 8192 #define RECALC_SECTORS 32768
#define RECALC_WRITE_SUPER 16 #define RECALC_WRITE_SUPER 16
#define BITMAP_BLOCK_SIZE 4096 /* don't change it */ #define BITMAP_BLOCK_SIZE 4096 /* don't change it */
#define BITMAP_FLUSH_INTERVAL (10 * HZ) #define BITMAP_FLUSH_INTERVAL (10 * HZ)
...@@ -262,6 +262,7 @@ struct dm_integrity_c { ...@@ -262,6 +262,7 @@ struct dm_integrity_c {
bool journal_uptodate; bool journal_uptodate;
bool just_formatted; bool just_formatted;
bool recalculate_flag; bool recalculate_flag;
bool reset_recalculate_flag;
bool discard; bool discard;
bool fix_padding; bool fix_padding;
bool fix_hmac; bool fix_hmac;
...@@ -1428,8 +1429,10 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se ...@@ -1428,8 +1429,10 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se
if (op == TAG_READ) { if (op == TAG_READ) {
memcpy(tag, dp, to_copy); memcpy(tag, dp, to_copy);
} else if (op == TAG_WRITE) { } else if (op == TAG_WRITE) {
memcpy(dp, tag, to_copy); if (memcmp(dp, tag, to_copy)) {
dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy); memcpy(dp, tag, to_copy);
dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
}
} else { } else {
/* e.g.: op == TAG_CMP */ /* e.g.: op == TAG_CMP */
...@@ -2686,26 +2689,30 @@ static void integrity_recalc(struct work_struct *w) ...@@ -2686,26 +2689,30 @@ static void integrity_recalc(struct work_struct *w)
if (unlikely(dm_integrity_failed(ic))) if (unlikely(dm_integrity_failed(ic)))
goto err; goto err;
io_req.bi_op = REQ_OP_READ; if (!ic->discard) {
io_req.bi_op_flags = 0; io_req.bi_op = REQ_OP_READ;
io_req.mem.type = DM_IO_VMA; io_req.bi_op_flags = 0;
io_req.mem.ptr.addr = ic->recalc_buffer; io_req.mem.type = DM_IO_VMA;
io_req.notify.fn = NULL; io_req.mem.ptr.addr = ic->recalc_buffer;
io_req.client = ic->io; io_req.notify.fn = NULL;
io_loc.bdev = ic->dev->bdev; io_req.client = ic->io;
io_loc.sector = get_data_sector(ic, area, offset); io_loc.bdev = ic->dev->bdev;
io_loc.count = n_sectors; io_loc.sector = get_data_sector(ic, area, offset);
io_loc.count = n_sectors;
r = dm_io(&io_req, 1, &io_loc, NULL); r = dm_io(&io_req, 1, &io_loc, NULL);
if (unlikely(r)) { if (unlikely(r)) {
dm_integrity_io_error(ic, "reading data", r); dm_integrity_io_error(ic, "reading data", r);
goto err; goto err;
} }
t = ic->recalc_tags; t = ic->recalc_tags;
for (i = 0; i < n_sectors; i += ic->sectors_per_block) { for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t); integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
t += ic->tag_size; t += ic->tag_size;
}
} else {
t = ic->recalc_tags + (n_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
} }
metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset); metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
...@@ -3134,7 +3141,8 @@ static void dm_integrity_resume(struct dm_target *ti) ...@@ -3134,7 +3141,8 @@ static void dm_integrity_resume(struct dm_target *ti)
rw_journal_sectors(ic, REQ_OP_READ, 0, 0, rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
if (ic->mode == 'B') { if (ic->mode == 'B') {
if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) { if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
!ic->reset_recalculate_flag) {
block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal); block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal); block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
...@@ -3156,7 +3164,8 @@ static void dm_integrity_resume(struct dm_target *ti) ...@@ -3156,7 +3164,8 @@ static void dm_integrity_resume(struct dm_target *ti)
} }
} else { } else {
if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit && if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR))) { block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR)) ||
ic->reset_recalculate_flag) {
ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
ic->sb->recalc_sector = cpu_to_le64(0); ic->sb->recalc_sector = cpu_to_le64(0);
} }
...@@ -3169,6 +3178,10 @@ static void dm_integrity_resume(struct dm_target *ti) ...@@ -3169,6 +3178,10 @@ static void dm_integrity_resume(struct dm_target *ti)
dm_integrity_io_error(ic, "writing superblock", r); dm_integrity_io_error(ic, "writing superblock", r);
} else { } else {
replay_journal(ic); replay_journal(ic);
if (ic->reset_recalculate_flag) {
ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
ic->sb->recalc_sector = cpu_to_le64(0);
}
if (ic->mode == 'B') { if (ic->mode == 'B') {
ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP); ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit; ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
...@@ -3242,6 +3255,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type, ...@@ -3242,6 +3255,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
arg_count += !!ic->meta_dev; arg_count += !!ic->meta_dev;
arg_count += ic->sectors_per_block != 1; arg_count += ic->sectors_per_block != 1;
arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)); arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
arg_count += ic->reset_recalculate_flag;
arg_count += ic->discard; arg_count += ic->discard;
arg_count += ic->mode == 'J'; arg_count += ic->mode == 'J';
arg_count += ic->mode == 'J'; arg_count += ic->mode == 'J';
...@@ -3261,6 +3275,8 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type, ...@@ -3261,6 +3275,8 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT); DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
DMEMIT(" recalculate"); DMEMIT(" recalculate");
if (ic->reset_recalculate_flag)
DMEMIT(" reset_recalculate");
if (ic->discard) if (ic->discard)
DMEMIT(" allow_discards"); DMEMIT(" allow_discards");
DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS); DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
...@@ -3914,7 +3930,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) ...@@ -3914,7 +3930,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
unsigned extra_args; unsigned extra_args;
struct dm_arg_set as; struct dm_arg_set as;
static const struct dm_arg _args[] = { static const struct dm_arg _args[] = {
{0, 17, "Invalid number of feature args"}, {0, 18, "Invalid number of feature args"},
}; };
unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec; unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
bool should_write_sb; bool should_write_sb;
...@@ -4039,6 +4055,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) ...@@ -4039,6 +4055,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (val >= (uint64_t)UINT_MAX * 1000 / HZ) { if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
r = -EINVAL; r = -EINVAL;
ti->error = "Invalid bitmap_flush_interval argument"; ti->error = "Invalid bitmap_flush_interval argument";
goto bad;
} }
ic->bitmap_flush_interval = msecs_to_jiffies(val); ic->bitmap_flush_interval = msecs_to_jiffies(val);
} else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) { } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
...@@ -4058,6 +4075,9 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) ...@@ -4058,6 +4075,9 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad; goto bad;
} else if (!strcmp(opt_string, "recalculate")) { } else if (!strcmp(opt_string, "recalculate")) {
ic->recalculate_flag = true; ic->recalculate_flag = true;
} else if (!strcmp(opt_string, "reset_recalculate")) {
ic->recalculate_flag = true;
ic->reset_recalculate_flag = true;
} else if (!strcmp(opt_string, "allow_discards")) { } else if (!strcmp(opt_string, "allow_discards")) {
ic->discard = true; ic->discard = true;
} else if (!strcmp(opt_string, "fix_padding")) { } else if (!strcmp(opt_string, "fix_padding")) {
...@@ -4348,11 +4368,13 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) ...@@ -4348,11 +4368,13 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad; goto bad;
} }
INIT_WORK(&ic->recalc_work, integrity_recalc); INIT_WORK(&ic->recalc_work, integrity_recalc);
ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT); if (!ic->discard) {
if (!ic->recalc_buffer) { ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
ti->error = "Cannot allocate buffer for recalculating"; if (!ic->recalc_buffer) {
r = -ENOMEM; ti->error = "Cannot allocate buffer for recalculating";
goto bad; r = -ENOMEM;
goto bad;
}
} }
ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block, ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
ic->tag_size, GFP_KERNEL); ic->tag_size, GFP_KERNEL);
...@@ -4361,6 +4383,9 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) ...@@ -4361,6 +4383,9 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
r = -ENOMEM; r = -ENOMEM;
goto bad; goto bad;
} }
if (ic->discard)
memset(ic->recalc_tags, DISCARD_FILLER,
(RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size);
} else { } else {
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) { if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
ti->error = "Recalculate can only be specified with internal_hash"; ti->error = "Recalculate can only be specified with internal_hash";
...@@ -4554,7 +4579,7 @@ static void dm_integrity_dtr(struct dm_target *ti) ...@@ -4554,7 +4579,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
static struct target_type integrity_target = { static struct target_type integrity_target = {
.name = "integrity", .name = "integrity",
.version = {1, 7, 0}, .version = {1, 9, 0},
.module = THIS_MODULE, .module = THIS_MODULE,
.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY, .features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
.ctr = dm_integrity_ctr, .ctr = dm_integrity_ctr,
......
...@@ -1853,6 +1853,7 @@ static int rs_check_takeover(struct raid_set *rs) ...@@ -1853,6 +1853,7 @@ static int rs_check_takeover(struct raid_set *rs)
((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) || ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
__within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC, ALGORITHM_RIGHT_SYMMETRIC))) __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC, ALGORITHM_RIGHT_SYMMETRIC)))
return 0; return 0;
break;
default: default:
break; break;
...@@ -1868,6 +1869,14 @@ static bool rs_takeover_requested(struct raid_set *rs) ...@@ -1868,6 +1869,14 @@ static bool rs_takeover_requested(struct raid_set *rs)
return rs->md.new_level != rs->md.level; return rs->md.new_level != rs->md.level;
} }
/* True if layout is set to reshape. */
static bool rs_is_layout_change(struct raid_set *rs, bool use_mddev)
{
return (use_mddev ? rs->md.delta_disks : rs->delta_disks) ||
rs->md.new_layout != rs->md.layout ||
rs->md.new_chunk_sectors != rs->md.chunk_sectors;
}
/* True if @rs is requested to reshape by ctr */ /* True if @rs is requested to reshape by ctr */
static bool rs_reshape_requested(struct raid_set *rs) static bool rs_reshape_requested(struct raid_set *rs)
{ {
...@@ -1880,9 +1889,7 @@ static bool rs_reshape_requested(struct raid_set *rs) ...@@ -1880,9 +1889,7 @@ static bool rs_reshape_requested(struct raid_set *rs)
if (rs_is_raid0(rs)) if (rs_is_raid0(rs))
return false; return false;
change = mddev->new_layout != mddev->layout || change = rs_is_layout_change(rs, false);
mddev->new_chunk_sectors != mddev->chunk_sectors ||
rs->delta_disks;
/* Historical case to support raid1 reshape without delta disks */ /* Historical case to support raid1 reshape without delta disks */
if (rs_is_raid1(rs)) { if (rs_is_raid1(rs)) {
...@@ -2817,7 +2824,7 @@ static sector_t _get_reshape_sectors(struct raid_set *rs) ...@@ -2817,7 +2824,7 @@ static sector_t _get_reshape_sectors(struct raid_set *rs)
} }
/* /*
* * Reshape:
* - change raid layout * - change raid layout
* - change chunk size * - change chunk size
* - add disks * - add disks
...@@ -2926,6 +2933,20 @@ static int rs_setup_reshape(struct raid_set *rs) ...@@ -2926,6 +2933,20 @@ static int rs_setup_reshape(struct raid_set *rs)
return r; return r;
} }
/*
* If the md resync thread has updated superblock with max reshape position
* at the end of a reshape but not (yet) reset the layout configuration
* changes -> reset the latter.
*/
static void rs_reset_inconclusive_reshape(struct raid_set *rs)
{
if (!rs_is_reshaping(rs) && rs_is_layout_change(rs, true)) {
rs_set_cur(rs);
rs->md.delta_disks = 0;
rs->md.reshape_backwards = 0;
}
}
/* /*
* Enable/disable discard support on RAID set depending on * Enable/disable discard support on RAID set depending on
* RAID level and discard properties of underlying RAID members. * RAID level and discard properties of underlying RAID members.
...@@ -3212,11 +3233,14 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -3212,11 +3233,14 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (r) if (r)
goto bad; goto bad;
/* Catch any inconclusive reshape superblock content. */
rs_reset_inconclusive_reshape(rs);
/* Start raid set read-only and assumed clean to change in raid_resume() */ /* Start raid set read-only and assumed clean to change in raid_resume() */
rs->md.ro = 1; rs->md.ro = 1;
rs->md.in_sync = 1; rs->md.in_sync = 1;
/* Keep array frozen */ /* Keep array frozen until resume. */
set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery); set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
/* Has to be held on running the array */ /* Has to be held on running the array */
...@@ -3230,7 +3254,6 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -3230,7 +3254,6 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
} }
r = md_start(&rs->md); r = md_start(&rs->md);
if (r) { if (r) {
ti->error = "Failed to start raid array"; ti->error = "Failed to start raid array";
mddev_unlock(&rs->md); mddev_unlock(&rs->md);
...@@ -3727,15 +3750,6 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits) ...@@ -3727,15 +3750,6 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
blk_limits_io_min(limits, chunk_size_bytes); blk_limits_io_min(limits, chunk_size_bytes);
blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs)); blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs));
/*
* RAID0 and RAID10 personalities require bio splitting,
* RAID1/4/5/6 don't and process large discard bios properly.
*/
if (rs_is_raid0(rs) || rs_is_raid10(rs)) {
limits->discard_granularity = chunk_size_bytes;
limits->max_discard_sectors = rs->md.chunk_sectors;
}
} }
static void raid_postsuspend(struct dm_target *ti) static void raid_postsuspend(struct dm_target *ti)
......
...@@ -569,6 +569,7 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t) ...@@ -569,6 +569,7 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
blk_mq_free_tag_set(md->tag_set); blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set: out_kfree_tag_set:
kfree(md->tag_set); kfree(md->tag_set);
md->tag_set = NULL;
return err; return err;
} }
...@@ -578,6 +579,7 @@ void dm_mq_cleanup_mapped_device(struct mapped_device *md) ...@@ -578,6 +579,7 @@ void dm_mq_cleanup_mapped_device(struct mapped_device *md)
if (md->tag_set) { if (md->tag_set) {
blk_mq_free_tag_set(md->tag_set); blk_mq_free_tag_set(md->tag_set);
kfree(md->tag_set); kfree(md->tag_set);
md->tag_set = NULL;
} }
} }
......
...@@ -596,7 +596,7 @@ static void persistent_dtr(struct dm_exception_store *store) ...@@ -596,7 +596,7 @@ static void persistent_dtr(struct dm_exception_store *store)
free_area(ps); free_area(ps);
/* Allocated in persistent_read_metadata */ /* Allocated in persistent_read_metadata */
vfree(ps->callbacks); kvfree(ps->callbacks);
kfree(ps); kfree(ps);
} }
...@@ -621,8 +621,8 @@ static int persistent_read_metadata(struct dm_exception_store *store, ...@@ -621,8 +621,8 @@ static int persistent_read_metadata(struct dm_exception_store *store,
*/ */
ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) / ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
sizeof(struct disk_exception); sizeof(struct disk_exception);
ps->callbacks = dm_vcalloc(ps->exceptions_per_area, ps->callbacks = kvcalloc(ps->exceptions_per_area,
sizeof(*ps->callbacks)); sizeof(*ps->callbacks), GFP_KERNEL);
if (!ps->callbacks) if (!ps->callbacks)
return -ENOMEM; return -ENOMEM;
......
...@@ -663,7 +663,8 @@ static int dm_exception_table_init(struct dm_exception_table *et, ...@@ -663,7 +663,8 @@ static int dm_exception_table_init(struct dm_exception_table *et,
et->hash_shift = hash_shift; et->hash_shift = hash_shift;
et->hash_mask = size - 1; et->hash_mask = size - 1;
et->table = dm_vcalloc(size, sizeof(struct hlist_bl_head)); et->table = kvmalloc_array(size, sizeof(struct hlist_bl_head),
GFP_KERNEL);
if (!et->table) if (!et->table)
return -ENOMEM; return -ENOMEM;
...@@ -689,7 +690,7 @@ static void dm_exception_table_exit(struct dm_exception_table *et, ...@@ -689,7 +690,7 @@ static void dm_exception_table_exit(struct dm_exception_table *et,
kmem_cache_free(mem, ex); kmem_cache_free(mem, ex);
} }
vfree(et->table); kvfree(et->table);
} }
static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk) static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
......
...@@ -94,24 +94,6 @@ static int setup_btree_index(unsigned int l, struct dm_table *t) ...@@ -94,24 +94,6 @@ static int setup_btree_index(unsigned int l, struct dm_table *t)
return 0; return 0;
} }
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
unsigned long size;
void *addr;
/*
* Check that we're not going to overflow.
*/
if (nmemb > (ULONG_MAX / elem_size))
return NULL;
size = nmemb * elem_size;
addr = vzalloc(size);
return addr;
}
EXPORT_SYMBOL(dm_vcalloc);
/* /*
* highs, and targets are managed as dynamic arrays during a * highs, and targets are managed as dynamic arrays during a
* table load. * table load.
...@@ -124,15 +106,15 @@ static int alloc_targets(struct dm_table *t, unsigned int num) ...@@ -124,15 +106,15 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
/* /*
* Allocate both the target array and offset array at once. * Allocate both the target array and offset array at once.
*/ */
n_highs = (sector_t *) dm_vcalloc(num, sizeof(struct dm_target) + n_highs = kvcalloc(num, sizeof(struct dm_target) + sizeof(sector_t),
sizeof(sector_t)); GFP_KERNEL);
if (!n_highs) if (!n_highs)
return -ENOMEM; return -ENOMEM;
n_targets = (struct dm_target *) (n_highs + num); n_targets = (struct dm_target *) (n_highs + num);
memset(n_highs, -1, sizeof(*n_highs) * num); memset(n_highs, -1, sizeof(*n_highs) * num);
vfree(t->highs); kvfree(t->highs);
t->num_allocated = num; t->num_allocated = num;
t->highs = n_highs; t->highs = n_highs;
...@@ -198,7 +180,7 @@ void dm_table_destroy(struct dm_table *t) ...@@ -198,7 +180,7 @@ void dm_table_destroy(struct dm_table *t)
/* free the indexes */ /* free the indexes */
if (t->depth >= 2) if (t->depth >= 2)
vfree(t->index[t->depth - 2]); kvfree(t->index[t->depth - 2]);
/* free the targets */ /* free the targets */
for (i = 0; i < t->num_targets; i++) { for (i = 0; i < t->num_targets; i++) {
...@@ -210,7 +192,7 @@ void dm_table_destroy(struct dm_table *t) ...@@ -210,7 +192,7 @@ void dm_table_destroy(struct dm_table *t)
dm_put_target_type(tgt->type); dm_put_target_type(tgt->type);
} }
vfree(t->highs); kvfree(t->highs);
/* free the device list */ /* free the device list */
free_devices(&t->devices, t->md); free_devices(&t->devices, t->md);
...@@ -1077,7 +1059,7 @@ static int setup_indexes(struct dm_table *t) ...@@ -1077,7 +1059,7 @@ static int setup_indexes(struct dm_table *t)
total += t->counts[i]; total += t->counts[i];
} }
indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE); indexes = kvcalloc(total, NODE_SIZE, GFP_KERNEL);
if (!indexes) if (!indexes)
return -ENOMEM; return -ENOMEM;
......
...@@ -2816,7 +2816,7 @@ static bool data_dev_supports_discard(struct pool_c *pt) ...@@ -2816,7 +2816,7 @@ static bool data_dev_supports_discard(struct pool_c *pt)
{ {
struct request_queue *q = bdev_get_queue(pt->data_dev->bdev); struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
return q && blk_queue_discard(q); return blk_queue_discard(q);
} }
static bool is_factor(sector_t block_size, uint32_t n) static bool is_factor(sector_t block_size, uint32_t n)
......
...@@ -893,6 +893,28 @@ static int verity_alloc_zero_digest(struct dm_verity *v) ...@@ -893,6 +893,28 @@ static int verity_alloc_zero_digest(struct dm_verity *v)
return r; return r;
} }
static inline bool verity_is_verity_mode(const char *arg_name)
{
return (!strcasecmp(arg_name, DM_VERITY_OPT_LOGGING) ||
!strcasecmp(arg_name, DM_VERITY_OPT_RESTART) ||
!strcasecmp(arg_name, DM_VERITY_OPT_PANIC));
}
static int verity_parse_verity_mode(struct dm_verity *v, const char *arg_name)
{
if (v->mode)
return -EINVAL;
if (!strcasecmp(arg_name, DM_VERITY_OPT_LOGGING))
v->mode = DM_VERITY_MODE_LOGGING;
else if (!strcasecmp(arg_name, DM_VERITY_OPT_RESTART))
v->mode = DM_VERITY_MODE_RESTART;
else if (!strcasecmp(arg_name, DM_VERITY_OPT_PANIC))
v->mode = DM_VERITY_MODE_PANIC;
return 0;
}
static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v, static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
struct dm_verity_sig_opts *verify_args) struct dm_verity_sig_opts *verify_args)
{ {
...@@ -916,16 +938,12 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v, ...@@ -916,16 +938,12 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
arg_name = dm_shift_arg(as); arg_name = dm_shift_arg(as);
argc--; argc--;
if (!strcasecmp(arg_name, DM_VERITY_OPT_LOGGING)) { if (verity_is_verity_mode(arg_name)) {
v->mode = DM_VERITY_MODE_LOGGING; r = verity_parse_verity_mode(v, arg_name);
continue; if (r) {
ti->error = "Conflicting error handling parameters";
} else if (!strcasecmp(arg_name, DM_VERITY_OPT_RESTART)) { return r;
v->mode = DM_VERITY_MODE_RESTART; }
continue;
} else if (!strcasecmp(arg_name, DM_VERITY_OPT_PANIC)) {
v->mode = DM_VERITY_MODE_PANIC;
continue; continue;
} else if (!strcasecmp(arg_name, DM_VERITY_OPT_IGN_ZEROES)) { } else if (!strcasecmp(arg_name, DM_VERITY_OPT_IGN_ZEROES)) {
...@@ -1242,7 +1260,7 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) ...@@ -1242,7 +1260,7 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
static struct target_type verity_target = { static struct target_type verity_target = {
.name = "verity", .name = "verity",
.version = {1, 7, 0}, .version = {1, 8, 0},
.module = THIS_MODULE, .module = THIS_MODULE,
.ctr = verity_ctr, .ctr = verity_ctr,
.dtr = verity_dtr, .dtr = verity_dtr,
......
...@@ -73,7 +73,7 @@ struct wc_memory_superblock { ...@@ -73,7 +73,7 @@ struct wc_memory_superblock {
}; };
__le64 padding[8]; __le64 padding[8];
}; };
struct wc_memory_entry entries[0]; struct wc_memory_entry entries[];
}; };
struct wc_entry { struct wc_entry {
......
...@@ -840,7 +840,6 @@ int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, ...@@ -840,7 +840,6 @@ int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
*result = &td->dm_dev; *result = &td->dm_dev;
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(dm_get_table_device);
void dm_put_table_device(struct mapped_device *md, struct dm_dev *d) void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{ {
...@@ -854,7 +853,6 @@ void dm_put_table_device(struct mapped_device *md, struct dm_dev *d) ...@@ -854,7 +853,6 @@ void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
} }
mutex_unlock(&md->table_devices_lock); mutex_unlock(&md->table_devices_lock);
} }
EXPORT_SYMBOL(dm_put_table_device);
static void free_table_devices(struct list_head *devices) static void free_table_devices(struct list_head *devices)
{ {
...@@ -1641,38 +1639,35 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md, ...@@ -1641,38 +1639,35 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
} else { } else {
ci.bio = bio; ci.bio = bio;
ci.sector_count = bio_sectors(bio); ci.sector_count = bio_sectors(bio);
while (ci.sector_count && !error) { error = __split_and_process_non_flush(&ci);
error = __split_and_process_non_flush(&ci); if (ci.sector_count && !error) {
if (ci.sector_count && !error) { /*
/* * Remainder must be passed to submit_bio_noacct()
* Remainder must be passed to submit_bio_noacct() * so that it gets handled *after* bios already submitted
* so that it gets handled *after* bios already submitted * have been completely processed.
* have been completely processed. * We take a clone of the original to store in
* We take a clone of the original to store in * ci.io->orig_bio to be used by end_io_acct() and
* ci.io->orig_bio to be used by end_io_acct() and * for dec_pending to use for completion handling.
* for dec_pending to use for completion handling. */
*/ struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count, GFP_NOIO, &md->queue->bio_split);
GFP_NOIO, &md->queue->bio_split); ci.io->orig_bio = b;
ci.io->orig_bio = b;
/*
/* * Adjust IO stats for each split, otherwise upon queue
* Adjust IO stats for each split, otherwise upon queue * reentry there will be redundant IO accounting.
* reentry there will be redundant IO accounting. * NOTE: this is a stop-gap fix, a proper fix involves
* NOTE: this is a stop-gap fix, a proper fix involves * significant refactoring of DM core's bio splitting
* significant refactoring of DM core's bio splitting * (by eliminating DM's splitting and just using bio_split)
* (by eliminating DM's splitting and just using bio_split) */
*/ part_stat_lock();
part_stat_lock(); __dm_part_stat_sub(dm_disk(md)->part0,
__dm_part_stat_sub(dm_disk(md)->part0, sectors[op_stat_group(bio_op(bio))], ci.sector_count);
sectors[op_stat_group(bio_op(bio))], ci.sector_count); part_stat_unlock();
part_stat_unlock();
bio_chain(b, bio);
bio_chain(b, bio); trace_block_split(b, bio->bi_iter.bi_sector);
trace_block_split(b, bio->bi_iter.bi_sector); ret = submit_bio_noacct(bio);
ret = submit_bio_noacct(bio);
break;
}
} }
} }
......
...@@ -34,12 +34,12 @@ struct node_header { ...@@ -34,12 +34,12 @@ struct node_header {
__le32 max_entries; __le32 max_entries;
__le32 value_size; __le32 value_size;
__le32 padding; __le32 padding;
} __packed; } __attribute__((packed, aligned(8)));
struct btree_node { struct btree_node {
struct node_header header; struct node_header header;
__le64 keys[]; __le64 keys[];
} __packed; } __attribute__((packed, aligned(8)));
/* /*
...@@ -83,7 +83,7 @@ struct shadow_spine { ...@@ -83,7 +83,7 @@ struct shadow_spine {
}; };
void init_shadow_spine(struct shadow_spine *s, struct dm_btree_info *info); void init_shadow_spine(struct shadow_spine *s, struct dm_btree_info *info);
int exit_shadow_spine(struct shadow_spine *s); void exit_shadow_spine(struct shadow_spine *s);
int shadow_step(struct shadow_spine *s, dm_block_t b, int shadow_step(struct shadow_spine *s, dm_block_t b,
struct dm_btree_value_type *vt); struct dm_btree_value_type *vt);
......
...@@ -30,8 +30,6 @@ static void node_prepare_for_write(struct dm_block_validator *v, ...@@ -30,8 +30,6 @@ static void node_prepare_for_write(struct dm_block_validator *v,
h->csum = cpu_to_le32(dm_bm_checksum(&h->flags, h->csum = cpu_to_le32(dm_bm_checksum(&h->flags,
block_size - sizeof(__le32), block_size - sizeof(__le32),
BTREE_CSUM_XOR)); BTREE_CSUM_XOR));
BUG_ON(node_check(v, b, 4096));
} }
static int node_check(struct dm_block_validator *v, static int node_check(struct dm_block_validator *v,
...@@ -183,15 +181,13 @@ void init_shadow_spine(struct shadow_spine *s, struct dm_btree_info *info) ...@@ -183,15 +181,13 @@ void init_shadow_spine(struct shadow_spine *s, struct dm_btree_info *info)
s->count = 0; s->count = 0;
} }
int exit_shadow_spine(struct shadow_spine *s) void exit_shadow_spine(struct shadow_spine *s)
{ {
int r = 0, i; int i;
for (i = 0; i < s->count; i++) { for (i = 0; i < s->count; i++) {
unlock_block(s->info, s->nodes[i]); unlock_block(s->info, s->nodes[i]);
} }
return r;
} }
int shadow_step(struct shadow_spine *s, dm_block_t b, int shadow_step(struct shadow_spine *s, dm_block_t b,
......
...@@ -339,6 +339,8 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin, ...@@ -339,6 +339,8 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
*/ */
begin = do_div(index_begin, ll->entries_per_block); begin = do_div(index_begin, ll->entries_per_block);
end = do_div(end, ll->entries_per_block); end = do_div(end, ll->entries_per_block);
if (end == 0)
end = ll->entries_per_block;
for (i = index_begin; i < index_end; i++, begin = 0) { for (i = index_begin; i < index_end; i++, begin = 0) {
struct dm_block *blk; struct dm_block *blk;
......
...@@ -33,7 +33,7 @@ struct disk_index_entry { ...@@ -33,7 +33,7 @@ struct disk_index_entry {
__le64 blocknr; __le64 blocknr;
__le32 nr_free; __le32 nr_free;
__le32 none_free_before; __le32 none_free_before;
} __packed; } __attribute__ ((packed, aligned(8)));
#define MAX_METADATA_BITMAPS 255 #define MAX_METADATA_BITMAPS 255
...@@ -43,7 +43,7 @@ struct disk_metadata_index { ...@@ -43,7 +43,7 @@ struct disk_metadata_index {
__le64 blocknr; __le64 blocknr;
struct disk_index_entry index[MAX_METADATA_BITMAPS]; struct disk_index_entry index[MAX_METADATA_BITMAPS];
} __packed; } __attribute__ ((packed, aligned(8)));
struct ll_disk; struct ll_disk;
...@@ -86,7 +86,7 @@ struct disk_sm_root { ...@@ -86,7 +86,7 @@ struct disk_sm_root {
__le64 nr_allocated; __le64 nr_allocated;
__le64 bitmap_root; __le64 bitmap_root;
__le64 ref_count_root; __le64 ref_count_root;
} __packed; } __attribute__ ((packed, aligned(8)));
#define ENTRIES_PER_BYTE 4 #define ENTRIES_PER_BYTE 4
...@@ -94,7 +94,7 @@ struct disk_bitmap_header { ...@@ -94,7 +94,7 @@ struct disk_bitmap_header {
__le32 csum; __le32 csum;
__le32 not_used; __le32 not_used;
__le64 blocknr; __le64 blocknr;
} __packed; } __attribute__ ((packed, aligned(8)));
enum allocation_event { enum allocation_event {
SM_NONE, SM_NONE,
......
...@@ -187,13 +187,8 @@ static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b) ...@@ -187,13 +187,8 @@ static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
static int sm_disk_commit(struct dm_space_map *sm) static int sm_disk_commit(struct dm_space_map *sm)
{ {
int r; int r;
dm_block_t nr_free;
struct sm_disk *smd = container_of(sm, struct sm_disk, sm); struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
r = sm_disk_get_nr_free(sm, &nr_free);
if (r)
return r;
r = sm_ll_commit(&smd->ll); r = sm_ll_commit(&smd->ll);
if (r) if (r)
return r; return r;
...@@ -202,10 +197,6 @@ static int sm_disk_commit(struct dm_space_map *sm) ...@@ -202,10 +197,6 @@ static int sm_disk_commit(struct dm_space_map *sm)
smd->begin = 0; smd->begin = 0;
smd->nr_allocated_this_transaction = 0; smd->nr_allocated_this_transaction = 0;
r = sm_disk_get_nr_free(sm, &nr_free);
if (r)
return r;
return 0; return 0;
} }
......
...@@ -574,11 +574,6 @@ struct dm_table *dm_swap_table(struct mapped_device *md, ...@@ -574,11 +574,6 @@ struct dm_table *dm_swap_table(struct mapped_device *md,
*/ */
void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm); void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm);
/*
* A wrapper around vmalloc.
*/
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
/*----------------------------------------------------------------- /*-----------------------------------------------------------------
* Macros. * Macros.
*---------------------------------------------------------------*/ *---------------------------------------------------------------*/
......
...@@ -193,8 +193,22 @@ struct dm_name_list { ...@@ -193,8 +193,22 @@ struct dm_name_list {
__u32 next; /* offset to the next record from __u32 next; /* offset to the next record from
the _start_ of this */ the _start_ of this */
char name[0]; char name[0];
/*
* The following members can be accessed by taking a pointer that
* points immediately after the terminating zero character in "name"
* and aligning this pointer to next 8-byte boundary.
* Uuid is present if the flag DM_NAME_LIST_FLAG_HAS_UUID is set.
*
* __u32 event_nr;
* __u32 flags;
* char uuid[0];
*/
}; };
#define DM_NAME_LIST_FLAG_HAS_UUID 1
#define DM_NAME_LIST_FLAG_DOESNT_HAVE_UUID 2
/* /*
* Used to retrieve the target versions * Used to retrieve the target versions
*/ */
...@@ -272,9 +286,9 @@ enum { ...@@ -272,9 +286,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4 #define DM_VERSION_MAJOR 4
#define DM_VERSION_MINOR 44 #define DM_VERSION_MINOR 45
#define DM_VERSION_PATCHLEVEL 0 #define DM_VERSION_PATCHLEVEL 0
#define DM_VERSION_EXTRA "-ioctl (2021-02-01)" #define DM_VERSION_EXTRA "-ioctl (2021-03-22)"
/* Status bits */ /* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */ #define DM_READONLY_FLAG (1 << 0) /* In/Out */
......