Commit 8374cfe6 authored by Linus Torvalds

Merge tag 'for-6.0/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper updates from Mike Snitzer:

 - Refactor DM core's mempool allocation so that it is clearer by not
   being split across files.

 - Improve DM core's BLK_STS_DM_REQUEUE and BLK_STS_AGAIN handling.

 - Optimize DM core's more common bio splitting by eliminating the use
   of bio cloning with bio_split+bio_chain. Shift that cloning cost to
   the relatively unlikely dm_io requeue case that only occurs during
   error handling. Introduces dm_io_rewind() that will clone a bio that
   reflects the subset of the original bio that must be requeued.

 - Remove DM core's dm_table_get_num_targets() wrapper and audit all
   dm_table_get_target() callers.

 - Fix potential for OOM with DM writecache target by setting a default
   MAX_WRITEBACK_JOBS (set to 256MiB or 1/16 of total system memory,
   whichever is smaller); a worked example follows this list.

 - Fix DM writecache target's stats that are reported through
   DM-specific table info.

 - Fix use-after-free crash in dm_sm_register_threshold_callback().

 - Refine DM core's Persistent Reservation handling in preparation for
   broader work Mike Christie is doing to add compatibility with
   Microsoft Windows Failover Cluster.

 - Fix various KASAN reported bugs in the DM raid target.

 - Fix DM raid target crash due to md_handle_request() bio splitting
   that recurses to block core without properly initializing the bio's
   bi_bdev.

 - Fix some code comment typos and fix some Documentation formatting.
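
As a worked illustration of the new writecache writeback default mentioned
above (its exact definition appears in the dm-writecache hunk further below),
the limit is computed in pages:

/*
 * New default:
 *   #define MAX_WRITEBACK_JOBS min(0x10000000 / PAGE_SIZE, totalram_pages() / 16)
 *
 * Worked example, assuming 4 KiB pages and 16 GiB of RAM:
 *   0x10000000 / PAGE_SIZE = 268435456 / 4096 = 65536 pages  (256 MiB)
 *   totalram_pages() / 16  = 4194304 / 16     = 262144 pages (1 GiB)
 *   min(...)               = 65536 pages, so the 256 MiB cap applies.
 *
 * On systems with less than 4 GiB of RAM the 1/16-of-memory term is the
 * smaller one and becomes the limit instead.
 */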

* tag 'for-6.0/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm: (29 commits)
  dm: fix dm-raid crash if md_handle_request() splits bio
  dm raid: fix address sanitizer warning in raid_resume
  dm raid: fix address sanitizer warning in raid_status
  dm: Start pr_preempt from the same starting path
  dm: Fix PR release handling for non All Registrants
  dm: Start pr_reserve from the same starting path
  dm: Allow dm_call_pr to be used for path searches
  dm: return early from dm_pr_call() if DM device is suspended
  dm thin: fix use-after-free crash in dm_sm_register_threshold_callback
  dm writecache: count number of blocks discarded, not number of discard bios
  dm writecache: count number of blocks written, not number of write bios
  dm writecache: count number of blocks read, not number of read bios
  dm writecache: return void from functions
  dm kcopyd: use __GFP_HIGHMEM when allocating pages
  dm writecache: set a default MAX_WRITEBACK_JOBS
  Documentation: dm writecache: Render status list as list
  Documentation: dm writecache: add blank line before optional parameters
  dm snapshot: fix typo in snapshot_map() comment
  dm raid: remove redundant "the" in parse_raid_params() comment
  dm cache: fix typo in 2 comment blocks
  ...
parents c013d0af 9dd1cd32
......@@ -20,6 +20,7 @@ Constructor parameters:
size)
5. the number of optional parameters (the parameters with an argument
count as two)
start_sector n (default: 0)
offset from the start of cache device in 512-byte sectors
high_watermark n (default: 50)
......@@ -74,20 +75,21 @@ Constructor parameters:
the origin volume in the last n milliseconds
Status:
1. error indicator - 0 if there was no error, otherwise error number
2. the number of blocks
3. the number of free blocks
4. the number of blocks under writeback
5. the number of read requests
6. the number of read requests that hit the cache
7. the number of write requests
8. the number of write requests that hit uncommitted block
9. the number of write requests that hit committed block
10. the number of write requests that bypass the cache
11. the number of write requests that are allocated in the cache
5. the number of read blocks
6. the number of read blocks that hit the cache
7. the number of write blocks
8. the number of write blocks that hit uncommitted block
9. the number of write blocks that hit committed block
10. the number of write blocks that bypass the cache
11. the number of write blocks that are allocated in the cache
12. the number of write requests that are blocked on the freelist
13. the number of flush requests
14. the number of discard requests
14. the number of discarded blocks
Messages:
flush
......
......@@ -5,7 +5,7 @@
dm-mod-y += dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \
dm-ioctl.o dm-io.o dm-kcopyd.o dm-sysfs.o dm-stats.o \
dm-rq.o
dm-rq.o dm-io-rewind.o
dm-multipath-y += dm-path-selector.o dm-mpath.o
dm-historical-service-time-y += dm-ps-historical-service-time.o
dm-io-affinity-y += dm-ps-io-affinity.o
......
......@@ -131,7 +131,7 @@ void dm_cache_dump(struct dm_cache_metadata *cmd);
* hints will be lost.
*
* The hints are indexed by the cblock, but many policies will not
* neccessarily have a fast way of accessing efficiently via cblock. So
* necessarily have a fast way of accessing efficiently via cblock. So
* rather than querying the policy for each cblock, we let it walk its data
* structures and fill in the hints in whatever order it wishes.
*/
......
......@@ -2775,7 +2775,7 @@ static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
/*
* The discard block size in the on disk metadata is not
* neccessarily the same as we're currently using. So we have to
* necessarily the same as we're currently using. So we have to
* be careful to only set the discarded attribute if we know it
* covers a complete block of the new size.
*/
......
......@@ -22,6 +22,8 @@
#define DM_RESERVED_MAX_IOS 1024
struct dm_io;
struct dm_kobject_holder {
struct kobject kobj;
struct completion completion;
......@@ -91,6 +93,14 @@ struct mapped_device {
spinlock_t deferred_lock;
struct bio_list deferred;
/*
* requeue work context is needed for cloning one new bio
* to represent the dm_io to be requeued, since each
* dm_io may point to the original bio from FS.
*/
struct work_struct requeue_work;
struct dm_io *requeue_list;
void *interface_ptr;
/*
......@@ -216,6 +226,13 @@ struct dm_table {
#endif
};
static inline struct dm_target *dm_table_get_target(struct dm_table *t,
unsigned int index)
{
BUG_ON(index >= t->num_targets);
return t->targets + index;
}
/*
* One of these is allocated per clone bio.
*/
......@@ -230,6 +247,9 @@ struct dm_target_io {
sector_t old_sector;
struct bio clone;
};
#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
#define DM_IO_BIO_OFFSET \
(offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio))
/*
* dm_target_io flags
......@@ -272,7 +292,6 @@ struct dm_io {
atomic_t io_count;
struct mapped_device *md;
struct bio *split_bio;
/* The three fields represent mapped part of original bio */
struct bio *orig_bio;
unsigned int sector_offset; /* offset to end of orig_bio */
......@@ -300,6 +319,8 @@ static inline void dm_io_set_flag(struct dm_io *io, unsigned int bit)
io->flags |= (1U << bit);
}
void dm_io_rewind(struct dm_io *io, struct bio_set *bs);
static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
......
......@@ -208,7 +208,7 @@ void dm_ima_measure_on_table_load(struct dm_table *table, unsigned int status_fl
if (!target_data_buf)
goto error;
num_targets = dm_table_get_num_targets(table);
num_targets = table->num_targets;
if (dm_ima_alloc_and_copy_device_data(table->md, &device_data_buf, num_targets, noio))
goto error;
......@@ -237,9 +237,6 @@ void dm_ima_measure_on_table_load(struct dm_table *table, unsigned int status_fl
for (i = 0; i < num_targets; i++) {
struct dm_target *ti = dm_table_get_target(table, i);
if (!ti)
goto error;
last_target_measured = 0;
/*
......
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2022 Red Hat, Inc.
*/
#include <linux/bio.h>
#include <linux/blk-crypto.h>
#include <linux/blk-integrity.h>
#include "dm-core.h"
static inline bool dm_bvec_iter_rewind(const struct bio_vec *bv,
struct bvec_iter *iter,
unsigned int bytes)
{
int idx;
iter->bi_size += bytes;
if (bytes <= iter->bi_bvec_done) {
iter->bi_bvec_done -= bytes;
return true;
}
bytes -= iter->bi_bvec_done;
idx = iter->bi_idx - 1;
while (idx >= 0 && bytes && bytes > bv[idx].bv_len) {
bytes -= bv[idx].bv_len;
idx--;
}
if (WARN_ONCE(idx < 0 && bytes,
"Attempted to rewind iter beyond bvec's boundaries\n")) {
iter->bi_size -= bytes;
iter->bi_bvec_done = 0;
iter->bi_idx = 0;
return false;
}
iter->bi_idx = idx;
iter->bi_bvec_done = bv[idx].bv_len - bytes;
return true;
}
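/*
 * Worked example for the rewind above (illustrative values): with
 * bv[0].bv_len = bv[1].bv_len = 4096 and an iterator at bi_idx = 1,
 * bi_bvec_done = 1024 (5120 bytes already advanced), rewinding
 * bytes = 3072 first undoes the 1024 bytes done in bvec 1, steps back
 * to idx 0, and sets bi_bvec_done = 4096 - 2048 = 2048, leaving the
 * iterator 2048 bytes into bvec 0 -- exactly 3072 bytes earlier than
 * where it started.
 */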
#if defined(CONFIG_BLK_DEV_INTEGRITY)
/**
* dm_bio_integrity_rewind - Rewind integrity vector
* @bio: bio whose integrity vector to update
* @bytes_done: number of data bytes to rewind
*
* Description: This function calculates how many integrity bytes the
* number of completed data bytes corresponds to and rewinds the
* integrity vector accordingly.
*/
static void dm_bio_integrity_rewind(struct bio *bio, unsigned int bytes_done)
{
struct bio_integrity_payload *bip = bio_integrity(bio);
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
bip->bip_iter.bi_sector -= bio_integrity_intervals(bi, bytes_done >> 9);
dm_bvec_iter_rewind(bip->bip_vec, &bip->bip_iter, bytes);
}
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline void dm_bio_integrity_rewind(struct bio *bio,
unsigned int bytes_done)
{
return;
}
#endif
#if defined(CONFIG_BLK_INLINE_ENCRYPTION)
/* Decrements @dun by @dec, treating @dun as a multi-limb integer. */
static void dm_bio_crypt_dun_decrement(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
unsigned int dec)
{
int i;
for (i = 0; dec && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
u64 prev = dun[i];
dun[i] -= dec;
if (dun[i] > prev)
dec = 1;
else
dec = 0;
}
}
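/*
 * Worked example (illustrative values): with dun = { 0x2, 0x1, 0, ... }
 * (limb 0 is least significant, i.e. the value 2^64 + 2) and dec = 5,
 * limb 0 wraps to 0xFFFFFFFFFFFFFFFD and leaves a borrow of 1, which is
 * then subtracted from limb 1, giving { 0xFFFFFFFFFFFFFFFD, 0x0, 0, ... },
 * i.e. (2^64 + 2) - 5 as expected.
 */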
static void dm_bio_crypt_rewind(struct bio *bio, unsigned int bytes)
{
struct bio_crypt_ctx *bc = bio->bi_crypt_context;
dm_bio_crypt_dun_decrement(bc->bc_dun,
bytes >> bc->bc_key->data_unit_size_bits);
}
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
static inline void dm_bio_crypt_rewind(struct bio *bio, unsigned int bytes)
{
return;
}
#endif
static inline void dm_bio_rewind_iter(const struct bio *bio,
struct bvec_iter *iter, unsigned int bytes)
{
iter->bi_sector -= bytes >> 9;
/* No advance means no rewind */
if (bio_no_advance_iter(bio))
iter->bi_size += bytes;
else
dm_bvec_iter_rewind(bio->bi_io_vec, iter, bytes);
}
/**
* dm_bio_rewind - update ->bi_iter of @bio by rewinding @bytes.
* @bio: bio to rewind
* @bytes: how many bytes to rewind
*
* WARNING:
* Caller must ensure that @bio has a fixed end sector, to allow
* rewinding from end of bio and restoring its original position.
* Caller is also responsible for restoring bio's size.
*/
static void dm_bio_rewind(struct bio *bio, unsigned bytes)
{
if (bio_integrity(bio))
dm_bio_integrity_rewind(bio, bytes);
if (bio_has_crypt_ctx(bio))
dm_bio_crypt_rewind(bio, bytes);
dm_bio_rewind_iter(bio, &bio->bi_iter, bytes);
}
void dm_io_rewind(struct dm_io *io, struct bio_set *bs)
{
struct bio *orig = io->orig_bio;
struct bio *new_orig = bio_alloc_clone(orig->bi_bdev, orig,
GFP_NOIO, bs);
/*
* dm_bio_rewind can restore to previous position since the
* end sector is fixed for original bio, but we still need
* to restore bio's size manually (using io->sectors).
*/
dm_bio_rewind(new_orig, ((io->sector_offset << 9) -
orig->bi_iter.bi_size));
bio_trim(new_orig, 0, io->sectors);
bio_chain(new_orig, orig);
/*
* __bi_remaining was increased (by dm_split_and_process_bio),
* so must drop the one added in bio_chain.
*/
atomic_dec(&orig->__bi_remaining);
io->orig_bio = new_orig;
}
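As a worked illustration of the rewind arithmetic in dm_io_rewind() above
(numbers are illustrative, and io->sector_offset is taken, per the dm-core.h
comment, as the sector distance from the start of the mapped part to the end
of orig_bio): for a 1 MiB original bio whose first 256 KiB (io->sectors = 512)
was mapped, orig->bi_iter.bi_size is the remaining 786432 bytes and
io->sector_offset is 2048, so dm_bio_rewind() rewinds the clone by
2048 * 512 - 786432 = 262144 bytes, growing it back to the full 1 MiB starting
at the mapped part, and bio_trim(new_orig, 0, 512) then cuts it down to just
the 256 KiB that must be requeued.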
......@@ -832,7 +832,7 @@ static void __dev_status(struct mapped_device *md, struct dm_ioctl *param)
if (!(param->flags & DM_QUERY_INACTIVE_TABLE_FLAG)) {
if (get_disk_ro(disk))
param->flags |= DM_READONLY_FLAG;
param->target_count = dm_table_get_num_targets(table);
param->target_count = table->num_targets;
}
param->flags |= DM_ACTIVE_PRESENT_FLAG;
......@@ -845,7 +845,7 @@ static void __dev_status(struct mapped_device *md, struct dm_ioctl *param)
if (table) {
if (!(dm_table_get_mode(table) & FMODE_WRITE))
param->flags |= DM_READONLY_FLAG;
param->target_count = dm_table_get_num_targets(table);
param->target_count = table->num_targets;
}
dm_put_live_table(md, srcu_idx);
}
......@@ -1248,7 +1248,7 @@ static void retrieve_status(struct dm_table *table,
type = STATUSTYPE_INFO;
/* Get all the target info */
num_targets = dm_table_get_num_targets(table);
num_targets = table->num_targets;
for (i = 0; i < num_targets; i++) {
struct dm_target *ti = dm_table_get_target(table, i);
size_t l;
......
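For reference, a minimal sketch of the iteration idiom such callers share now
that dm_table_get_num_targets() is gone: the count is read directly from
table->num_targets and each target is fetched through the inline
dm_table_get_target() added in the dm-core.h hunk above. The helper below is
hypothetical, not part of this series:

/* Hypothetical helper illustrating the new target-iteration pattern. */
static bool example_all_targets_flush_capable(struct dm_table *t)
{
        for (unsigned int i = 0; i < t->num_targets; i++) {
                struct dm_target *ti = dm_table_get_target(t, i);

                /* num_flush_bios is an existing per-target setting */
                if (!ti->num_flush_bios)
                        return false;
        }

        return true;
}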
......@@ -219,7 +219,7 @@ static struct page_list *alloc_pl(gfp_t gfp)
if (!pl)
return NULL;
pl->page = alloc_page(gfp);
pl->page = alloc_page(gfp | __GFP_HIGHMEM);
if (!pl->page) {
kfree(pl);
return NULL;
......
......@@ -1369,7 +1369,7 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
}
rs->md.bitmap_info.daemon_sleep = value;
} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET))) {
/* Userspace passes new data_offset after having extended the the data image LV */
/* Userspace passes new data_offset after having extended the data image LV */
if (test_and_set_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) {
rs->ti->error = "Only one data_offset argument pair allowed";
return -EINVAL;
......@@ -3097,6 +3097,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
INIT_WORK(&rs->md.event_work, do_table_event);
ti->private = rs;
ti->num_flush_bios = 1;
ti->needs_bio_set_dev = true;
/* Restore any requested new layout for conversion decision */
rs_config_restore(rs, &rs_layout);
......@@ -3509,7 +3510,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
{
struct raid_set *rs = ti->private;
struct mddev *mddev = &rs->md;
struct r5conf *conf = mddev->private;
struct r5conf *conf = rs_is_raid456(rs) ? mddev->private : NULL;
int i, max_nr_stripes = conf ? conf->max_nr_stripes : 0;
unsigned long recovery;
unsigned int raid_param_cnt = 1; /* at least 1 for chunksize */
......@@ -3819,7 +3820,7 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs)
memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices));
for (i = 0; i < mddev->raid_disks; i++) {
for (i = 0; i < rs->raid_disks; i++) {
r = &rs->dev[i].rdev;
/* HM FIXME: enhance journal device recovery processing */
if (test_bit(Journal, &r->flags))
......
......@@ -43,7 +43,6 @@ unsigned dm_get_reserved_rq_based_ios(void)
return __dm_get_module_param(&reserved_rq_based_ios,
RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
......
......@@ -2026,7 +2026,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
/*
* Write to snapshot - higher level takes care of RW/RO
* flags so we should only get this if we are
* writeable.
* writable.
*/
if (bio_data_dir(bio) == WRITE) {
pe = __lookup_pending_exception(s, chunk);
......
......@@ -2045,10 +2045,13 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
dm_sm_threshold_fn fn,
void *context)
{
int r;
int r = -EINVAL;
pmd_write_lock_in_core(pmd);
r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context);
if (!pmd->fail_io) {
r = dm_sm_register_threshold_callback(pmd->metadata_sm,
threshold, fn, context);
}
pmd_write_unlock(pmd);
return r;
......
......@@ -3375,8 +3375,10 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
calc_metadata_threshold(pt),
metadata_low_callback,
pool);
if (r)
if (r) {
ti->error = "Error registering metadata threshold";
goto out_flags_changed;
}
dm_pool_register_pre_commit_callback(pool->pmd,
metadata_pre_commit_callback, pool);
......
......@@ -527,11 +527,10 @@ static int verity_verify_io(struct dm_verity_io *io)
if (v->validated_blocks)
set_bit(cur_block, v->validated_blocks);
continue;
}
else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
cur_block, NULL, &start) == 0)
} else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
cur_block, NULL, &start) == 0) {
continue;
else {
} else {
if (bio->bi_status) {
/*
* Error correction failed; Just return error
......
......@@ -22,7 +22,7 @@
#define HIGH_WATERMARK 50
#define LOW_WATERMARK 45
#define MAX_WRITEBACK_JOBS 0
#define MAX_WRITEBACK_JOBS min(0x10000000 / PAGE_SIZE, totalram_pages() / 16)
#define ENDIO_LATENCY 16
#define WRITEBACK_LATENCY 64
#define AUTOCOMMIT_BLOCKS_SSD 65536
......@@ -1325,8 +1325,8 @@ enum wc_map_op {
WC_MAP_ERROR,
};
static enum wc_map_op writecache_map_remap_origin(struct dm_writecache *wc, struct bio *bio,
struct wc_entry *e)
static void writecache_map_remap_origin(struct dm_writecache *wc, struct bio *bio,
struct wc_entry *e)
{
if (e) {
sector_t next_boundary =
......@@ -1334,8 +1334,6 @@ static enum wc_map_op writecache_map_remap_origin(struct dm_writecache *wc, stru
if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT)
dm_accept_partial_bio(bio, next_boundary);
}
return WC_MAP_REMAP_ORIGIN;
}
static enum wc_map_op writecache_map_read(struct dm_writecache *wc, struct bio *bio)
......@@ -1362,14 +1360,16 @@ static enum wc_map_op writecache_map_read(struct dm_writecache *wc, struct bio *
map_op = WC_MAP_REMAP;
}
} else {
map_op = writecache_map_remap_origin(wc, bio, e);
writecache_map_remap_origin(wc, bio, e);
wc->stats.reads += (bio->bi_iter.bi_size - wc->block_size) >> wc->block_size_bits;
map_op = WC_MAP_REMAP_ORIGIN;
}
return map_op;
}
static enum wc_map_op writecache_bio_copy_ssd(struct dm_writecache *wc, struct bio *bio,
struct wc_entry *e, bool search_used)
static void writecache_bio_copy_ssd(struct dm_writecache *wc, struct bio *bio,
struct wc_entry *e, bool search_used)
{
unsigned bio_size = wc->block_size;
sector_t start_cache_sec = cache_sector(wc, e);
......@@ -1409,14 +1409,15 @@ static enum wc_map_op writecache_bio_copy_ssd(struct dm_writecache *wc, struct b
bio->bi_iter.bi_sector = start_cache_sec;
dm_accept_partial_bio(bio, bio_size >> SECTOR_SHIFT);
wc->stats.writes += bio->bi_iter.bi_size >> wc->block_size_bits;
wc->stats.writes_allocate += (bio->bi_iter.bi_size - wc->block_size) >> wc->block_size_bits;
if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) {
wc->uncommitted_blocks = 0;
queue_work(wc->writeback_wq, &wc->flush_work);
} else {
writecache_schedule_autocommit(wc);
}
return WC_MAP_REMAP;
}
static enum wc_map_op writecache_map_write(struct dm_writecache *wc, struct bio *bio)
......@@ -1426,9 +1427,10 @@ static enum wc_map_op writecache_map_write(struct dm_writecache *wc, struct bio
do {
bool found_entry = false;
bool search_used = false;
wc->stats.writes++;
if (writecache_has_error(wc))
if (writecache_has_error(wc)) {
wc->stats.writes += bio->bi_iter.bi_size >> wc->block_size_bits;
return WC_MAP_ERROR;
}
e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
if (e) {
if (!writecache_entry_is_committed(wc, e)) {
......@@ -1452,9 +1454,11 @@ static enum wc_map_op writecache_map_write(struct dm_writecache *wc, struct bio
if (unlikely(!e)) {
if (!WC_MODE_PMEM(wc) && !found_entry) {
direct_write:
wc->stats.writes_around++;
e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
return writecache_map_remap_origin(wc, bio, e);
writecache_map_remap_origin(wc, bio, e);
wc->stats.writes_around += bio->bi_iter.bi_size >> wc->block_size_bits;
wc->stats.writes += bio->bi_iter.bi_size >> wc->block_size_bits;
return WC_MAP_REMAP_ORIGIN;
}
wc->stats.writes_blocked_on_freelist++;
writecache_wait_on_freelist(wc);
......@@ -1465,10 +1469,13 @@ static enum wc_map_op writecache_map_write(struct dm_writecache *wc, struct bio
wc->uncommitted_blocks++;
wc->stats.writes_allocate++;
bio_copy:
if (WC_MODE_PMEM(wc))
if (WC_MODE_PMEM(wc)) {
bio_copy_block(wc, bio, memory_data(wc, e));
else
return writecache_bio_copy_ssd(wc, bio, e, search_used);
wc->stats.writes++;
} else {
writecache_bio_copy_ssd(wc, bio, e, search_used);
return WC_MAP_REMAP;
}
} while (bio->bi_iter.bi_size);
if (unlikely(bio->bi_opf & REQ_FUA || wc->uncommitted_blocks >= wc->autocommit_blocks))
......@@ -1503,7 +1510,7 @@ static enum wc_map_op writecache_map_flush(struct dm_writecache *wc, struct bio
static enum wc_map_op writecache_map_discard(struct dm_writecache *wc, struct bio *bio)
{
wc->stats.discards++;
wc->stats.discards += bio->bi_iter.bi_size >> wc->block_size_bits;
if (writecache_has_error(wc))
return WC_MAP_ERROR;
......
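To put numbers on the stats change in the writecache hunks above (values are
illustrative): with a 4096-byte cache block size (wc->block_size_bits = 12),
a single 64 KiB write bio now adds 65536 >> 12 = 16 to wc->stats.writes and a
1 MiB discard bio adds 1048576 >> 12 = 256 to wc->stats.discards, whereas
previously each bio counted as 1 regardless of size. This matches the
Documentation update earlier in this series that renames the status fields
from "requests" to "blocks".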
......@@ -273,11 +273,8 @@ static int device_not_zone_append_capable(struct dm_target *ti,
static bool dm_table_supports_zone_append(struct dm_table *t)
{
struct dm_target *ti;
unsigned int i;
for (i = 0; i < dm_table_get_num_targets(t); i++) {
ti = dm_table_get_target(t, i);
for (unsigned int i = 0; i < t->num_targets; i++) {
struct dm_target *ti = dm_table_get_target(t, i);
if (ti->emulate_zone_append)
return false;
......
......@@ -53,7 +53,6 @@ struct dm_io;
*---------------------------------------------------------------*/
void dm_table_event_callback(struct dm_table *t,
void (*fn)(void *), void *context);
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
bool dm_table_has_no_data_devices(struct dm_table *table);
int dm_calculate_queue_limits(struct dm_table *table,
......@@ -218,9 +217,6 @@ void dm_kcopyd_exit(void);
/*
* Mempool operations
*/
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
unsigned per_io_data_size, unsigned min_pool_size,
bool integrity, bool poll);
void dm_free_md_mempools(struct dm_md_mempools *pools);
/*
......
......@@ -373,6 +373,12 @@ struct dm_target {
* after returning DM_MAPIO_SUBMITTED from its map function.
*/
bool accounts_remapped_io:1;
/*
* Set if the target will submit the DM bio without first calling
* bio_set_dev(). NOTE: ideally a target should _not_ need this.
*/
bool needs_bio_set_dev:1;
};
void *dm_per_bio_data(struct bio *bio, size_t data_size);
......@@ -561,7 +567,6 @@ void dm_sync_table(struct mapped_device *md);
* Queries
*/
sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
const char *dm_table_device_name(struct dm_table *t);
......
......@@ -286,9 +286,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
#define DM_VERSION_MINOR 46
#define DM_VERSION_MINOR 47
#define DM_VERSION_PATCHLEVEL 0
#define DM_VERSION_EXTRA "-ioctl (2022-02-22)"
#define DM_VERSION_EXTRA "-ioctl (2022-07-28)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
......