Commit a3ccea6e authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'for-6.1/dm-changes-v2' of...

Merge tag 'for-6.1/dm-changes-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper updates from Mike Snitzer:

 - Fix dm-bufio to use test_bit_acquire to properly test_bit on arches
   with weaker memory ordering.

 - DM core replace DMWARN with DMERR or DMCRIT for fatal errors.

 - Enable WQ_HIGHPRI on DM verity target's verify_wq.

 - Add documentation for DM verity's try_verify_in_tasklet option.

 - Various typo and redundant word fixes in code and/or comments.

* tag 'for-6.1/dm-changes-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm clone: Fix typo in block_device format specifier
  dm: remove unnecessary assignment statement in alloc_dev()
  dm verity: Add documentation for try_verify_in_tasklet option
  dm cache: delete the redundant word 'each' in comment
  dm raid: fix typo in analyse_superblocks code comment
  dm verity: enable WQ_HIGHPRI on verify_wq
  dm raid: delete the redundant word 'that' in comment
  dm: change from DMWARN to DMERR or DMCRIT for fatal errors
  dm bufio: use the acquire memory barrier when testing for B_READING
parents aae703b0 5434ee8d
...@@ -141,6 +141,10 @@ root_hash_sig_key_desc <key_description> ...@@ -141,6 +141,10 @@ root_hash_sig_key_desc <key_description>
also gain new certificates at run time if they are signed by a certificate also gain new certificates at run time if they are signed by a certificate
already in the secondary trusted keyring. already in the secondary trusted keyring.
try_verify_in_tasklet
If verity hashes are in cache, verify data blocks in kernel tasklet instead
of workqueue. This option can reduce IO latency.
Theory of operation Theory of operation
=================== ===================
......
...@@ -795,7 +795,8 @@ static void __make_buffer_clean(struct dm_buffer *b) ...@@ -795,7 +795,8 @@ static void __make_buffer_clean(struct dm_buffer *b)
{ {
BUG_ON(b->hold_count); BUG_ON(b->hold_count);
if (!b->state) /* fast case */ /* smp_load_acquire() pairs with read_endio()'s smp_mb__before_atomic() */
if (!smp_load_acquire(&b->state)) /* fast case */
return; return;
wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
...@@ -816,7 +817,7 @@ static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c) ...@@ -816,7 +817,7 @@ static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
BUG_ON(test_bit(B_DIRTY, &b->state)); BUG_ON(test_bit(B_DIRTY, &b->state));
if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep && if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
unlikely(test_bit(B_READING, &b->state))) unlikely(test_bit_acquire(B_READING, &b->state)))
continue; continue;
if (!b->hold_count) { if (!b->hold_count) {
...@@ -1058,7 +1059,7 @@ static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block, ...@@ -1058,7 +1059,7 @@ static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
* If the user called both dm_bufio_prefetch and dm_bufio_get on * If the user called both dm_bufio_prefetch and dm_bufio_get on
* the same buffer, it would deadlock if we waited. * the same buffer, it would deadlock if we waited.
*/ */
if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state))) if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state)))
return NULL; return NULL;
b->hold_count++; b->hold_count++;
...@@ -1218,7 +1219,7 @@ void dm_bufio_release(struct dm_buffer *b) ...@@ -1218,7 +1219,7 @@ void dm_bufio_release(struct dm_buffer *b)
* invalid buffer. * invalid buffer.
*/ */
if ((b->read_error || b->write_error) && if ((b->read_error || b->write_error) &&
!test_bit(B_READING, &b->state) && !test_bit_acquire(B_READING, &b->state) &&
!test_bit(B_WRITING, &b->state) && !test_bit(B_WRITING, &b->state) &&
!test_bit(B_DIRTY, &b->state)) { !test_bit(B_DIRTY, &b->state)) {
__unlink_buffer(b); __unlink_buffer(b);
...@@ -1479,7 +1480,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_release_move); ...@@ -1479,7 +1480,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_release_move);
static void forget_buffer_locked(struct dm_buffer *b) static void forget_buffer_locked(struct dm_buffer *b)
{ {
if (likely(!b->hold_count) && likely(!b->state)) { if (likely(!b->hold_count) && likely(!smp_load_acquire(&b->state))) {
__unlink_buffer(b); __unlink_buffer(b);
__free_buffer_wake(b); __free_buffer_wake(b);
} }
...@@ -1639,7 +1640,7 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp) ...@@ -1639,7 +1640,7 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
{ {
if (!(gfp & __GFP_FS) || if (!(gfp & __GFP_FS) ||
(static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) { (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
if (test_bit(B_READING, &b->state) || if (test_bit_acquire(B_READING, &b->state) ||
test_bit(B_WRITING, &b->state) || test_bit(B_WRITING, &b->state) ||
test_bit(B_DIRTY, &b->state)) test_bit(B_DIRTY, &b->state))
return false; return false;
......
...@@ -166,7 +166,7 @@ struct dm_cache_policy_type { ...@@ -166,7 +166,7 @@ struct dm_cache_policy_type {
struct dm_cache_policy_type *real; struct dm_cache_policy_type *real;
/* /*
* Policies may store a hint for each each cache block. * Policies may store a hint for each cache block.
* Currently the size of this hint must be 0 or 4 bytes but we * Currently the size of this hint must be 0 or 4 bytes but we
* expect to relax this in future. * expect to relax this in future.
*/ */
......
...@@ -2035,7 +2035,7 @@ static void disable_passdown_if_not_supported(struct clone *clone) ...@@ -2035,7 +2035,7 @@ static void disable_passdown_if_not_supported(struct clone *clone)
reason = "max discard sectors smaller than a region"; reason = "max discard sectors smaller than a region";
if (reason) { if (reason) {
DMWARN("Destination device (%pd) %s: Disabling discard passdown.", DMWARN("Destination device (%pg) %s: Disabling discard passdown.",
dest_dev, reason); dest_dev, reason);
clear_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags); clear_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);
} }
......
...@@ -434,10 +434,10 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param, ...@@ -434,10 +434,10 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
hc = __get_name_cell(new); hc = __get_name_cell(new);
if (hc) { if (hc) {
DMWARN("Unable to change %s on mapped device %s to one that " DMERR("Unable to change %s on mapped device %s to one that "
"already exists: %s", "already exists: %s",
change_uuid ? "uuid" : "name", change_uuid ? "uuid" : "name",
param->name, new); param->name, new);
dm_put(hc->md); dm_put(hc->md);
up_write(&_hash_lock); up_write(&_hash_lock);
kfree(new_data); kfree(new_data);
...@@ -449,8 +449,8 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param, ...@@ -449,8 +449,8 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
*/ */
hc = __get_name_cell(param->name); hc = __get_name_cell(param->name);
if (!hc) { if (!hc) {
DMWARN("Unable to rename non-existent device, %s to %s%s", DMERR("Unable to rename non-existent device, %s to %s%s",
param->name, change_uuid ? "uuid " : "", new); param->name, change_uuid ? "uuid " : "", new);
up_write(&_hash_lock); up_write(&_hash_lock);
kfree(new_data); kfree(new_data);
return ERR_PTR(-ENXIO); return ERR_PTR(-ENXIO);
...@@ -460,9 +460,9 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param, ...@@ -460,9 +460,9 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
* Does this device already have a uuid? * Does this device already have a uuid?
*/ */
if (change_uuid && hc->uuid) { if (change_uuid && hc->uuid) {
DMWARN("Unable to change uuid of mapped device %s to %s " DMERR("Unable to change uuid of mapped device %s to %s "
"because uuid is already set to %s", "because uuid is already set to %s",
param->name, new, hc->uuid); param->name, new, hc->uuid);
dm_put(hc->md); dm_put(hc->md);
up_write(&_hash_lock); up_write(&_hash_lock);
kfree(new_data); kfree(new_data);
...@@ -750,7 +750,7 @@ static int get_target_version(struct file *filp, struct dm_ioctl *param, size_t ...@@ -750,7 +750,7 @@ static int get_target_version(struct file *filp, struct dm_ioctl *param, size_t
static int check_name(const char *name) static int check_name(const char *name)
{ {
if (strchr(name, '/')) { if (strchr(name, '/')) {
DMWARN("invalid device name"); DMERR("invalid device name");
return -EINVAL; return -EINVAL;
} }
...@@ -773,7 +773,7 @@ static struct dm_table *dm_get_inactive_table(struct mapped_device *md, int *src ...@@ -773,7 +773,7 @@ static struct dm_table *dm_get_inactive_table(struct mapped_device *md, int *src
down_read(&_hash_lock); down_read(&_hash_lock);
hc = dm_get_mdptr(md); hc = dm_get_mdptr(md);
if (!hc || hc->md != md) { if (!hc || hc->md != md) {
DMWARN("device has been removed from the dev hash table."); DMERR("device has been removed from the dev hash table.");
goto out; goto out;
} }
...@@ -1026,7 +1026,7 @@ static int dev_rename(struct file *filp, struct dm_ioctl *param, size_t param_si ...@@ -1026,7 +1026,7 @@ static int dev_rename(struct file *filp, struct dm_ioctl *param, size_t param_si
if (new_data < param->data || if (new_data < param->data ||
invalid_str(new_data, (void *) param + param_size) || !*new_data || invalid_str(new_data, (void *) param + param_size) || !*new_data ||
strlen(new_data) > (change_uuid ? DM_UUID_LEN - 1 : DM_NAME_LEN - 1)) { strlen(new_data) > (change_uuid ? DM_UUID_LEN - 1 : DM_NAME_LEN - 1)) {
DMWARN("Invalid new mapped device name or uuid string supplied."); DMERR("Invalid new mapped device name or uuid string supplied.");
return -EINVAL; return -EINVAL;
} }
...@@ -1061,7 +1061,7 @@ static int dev_set_geometry(struct file *filp, struct dm_ioctl *param, size_t pa ...@@ -1061,7 +1061,7 @@ static int dev_set_geometry(struct file *filp, struct dm_ioctl *param, size_t pa
if (geostr < param->data || if (geostr < param->data ||
invalid_str(geostr, (void *) param + param_size)) { invalid_str(geostr, (void *) param + param_size)) {
DMWARN("Invalid geometry supplied."); DMERR("Invalid geometry supplied.");
goto out; goto out;
} }
...@@ -1069,13 +1069,13 @@ static int dev_set_geometry(struct file *filp, struct dm_ioctl *param, size_t pa ...@@ -1069,13 +1069,13 @@ static int dev_set_geometry(struct file *filp, struct dm_ioctl *param, size_t pa
indata + 1, indata + 2, indata + 3, &dummy); indata + 1, indata + 2, indata + 3, &dummy);
if (x != 4) { if (x != 4) {
DMWARN("Unable to interpret geometry settings."); DMERR("Unable to interpret geometry settings.");
goto out; goto out;
} }
if (indata[0] > 65535 || indata[1] > 255 || if (indata[0] > 65535 || indata[1] > 255 ||
indata[2] > 255 || indata[3] > ULONG_MAX) { indata[2] > 255 || indata[3] > ULONG_MAX) {
DMWARN("Geometry exceeds range limits."); DMERR("Geometry exceeds range limits.");
goto out; goto out;
} }
...@@ -1387,7 +1387,7 @@ static int populate_table(struct dm_table *table, ...@@ -1387,7 +1387,7 @@ static int populate_table(struct dm_table *table,
char *target_params; char *target_params;
if (!param->target_count) { if (!param->target_count) {
DMWARN("populate_table: no targets specified"); DMERR("populate_table: no targets specified");
return -EINVAL; return -EINVAL;
} }
...@@ -1395,7 +1395,7 @@ static int populate_table(struct dm_table *table, ...@@ -1395,7 +1395,7 @@ static int populate_table(struct dm_table *table,
r = next_target(spec, next, end, &spec, &target_params); r = next_target(spec, next, end, &spec, &target_params);
if (r) { if (r) {
DMWARN("unable to find target"); DMERR("unable to find target");
return r; return r;
} }
...@@ -1404,7 +1404,7 @@ static int populate_table(struct dm_table *table, ...@@ -1404,7 +1404,7 @@ static int populate_table(struct dm_table *table,
(sector_t) spec->length, (sector_t) spec->length,
target_params); target_params);
if (r) { if (r) {
DMWARN("error adding target to table"); DMERR("error adding target to table");
return r; return r;
} }
...@@ -1451,8 +1451,8 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si ...@@ -1451,8 +1451,8 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si
if (immutable_target_type && if (immutable_target_type &&
(immutable_target_type != dm_table_get_immutable_target_type(t)) && (immutable_target_type != dm_table_get_immutable_target_type(t)) &&
!dm_table_get_wildcard_target(t)) { !dm_table_get_wildcard_target(t)) {
DMWARN("can't replace immutable target type %s", DMERR("can't replace immutable target type %s",
immutable_target_type->name); immutable_target_type->name);
r = -EINVAL; r = -EINVAL;
goto err_unlock_md_type; goto err_unlock_md_type;
} }
...@@ -1461,12 +1461,12 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si ...@@ -1461,12 +1461,12 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si
/* setup md->queue to reflect md's type (may block) */ /* setup md->queue to reflect md's type (may block) */
r = dm_setup_md_queue(md, t); r = dm_setup_md_queue(md, t);
if (r) { if (r) {
DMWARN("unable to set up device queue for new table."); DMERR("unable to set up device queue for new table.");
goto err_unlock_md_type; goto err_unlock_md_type;
} }
} else if (!is_valid_type(dm_get_md_type(md), dm_table_get_type(t))) { } else if (!is_valid_type(dm_get_md_type(md), dm_table_get_type(t))) {
DMWARN("can't change device type (old=%u vs new=%u) after initial table load.", DMERR("can't change device type (old=%u vs new=%u) after initial table load.",
dm_get_md_type(md), dm_table_get_type(t)); dm_get_md_type(md), dm_table_get_type(t));
r = -EINVAL; r = -EINVAL;
goto err_unlock_md_type; goto err_unlock_md_type;
} }
...@@ -1477,7 +1477,7 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si ...@@ -1477,7 +1477,7 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si
down_write(&_hash_lock); down_write(&_hash_lock);
hc = dm_get_mdptr(md); hc = dm_get_mdptr(md);
if (!hc || hc->md != md) { if (!hc || hc->md != md) {
DMWARN("device has been removed from the dev hash table."); DMERR("device has been removed from the dev hash table.");
up_write(&_hash_lock); up_write(&_hash_lock);
r = -ENXIO; r = -ENXIO;
goto err_destroy_table; goto err_destroy_table;
...@@ -1686,19 +1686,19 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para ...@@ -1686,19 +1686,19 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para
if (tmsg < (struct dm_target_msg *) param->data || if (tmsg < (struct dm_target_msg *) param->data ||
invalid_str(tmsg->message, (void *) param + param_size)) { invalid_str(tmsg->message, (void *) param + param_size)) {
DMWARN("Invalid target message parameters."); DMERR("Invalid target message parameters.");
r = -EINVAL; r = -EINVAL;
goto out; goto out;
} }
r = dm_split_args(&argc, &argv, tmsg->message); r = dm_split_args(&argc, &argv, tmsg->message);
if (r) { if (r) {
DMWARN("Failed to split target message parameters"); DMERR("Failed to split target message parameters");
goto out; goto out;
} }
if (!argc) { if (!argc) {
DMWARN("Empty message received."); DMERR("Empty message received.");
r = -EINVAL; r = -EINVAL;
goto out_argv; goto out_argv;
} }
...@@ -1718,12 +1718,12 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para ...@@ -1718,12 +1718,12 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para
ti = dm_table_find_target(table, tmsg->sector); ti = dm_table_find_target(table, tmsg->sector);
if (!ti) { if (!ti) {
DMWARN("Target message sector outside device."); DMERR("Target message sector outside device.");
r = -EINVAL; r = -EINVAL;
} else if (ti->type->message) } else if (ti->type->message)
r = ti->type->message(ti, argc, argv, result, maxlen); r = ti->type->message(ti, argc, argv, result, maxlen);
else { else {
DMWARN("Target type does not support messages"); DMERR("Target type does not support messages");
r = -EINVAL; r = -EINVAL;
} }
...@@ -1814,11 +1814,11 @@ static int check_version(unsigned int cmd, struct dm_ioctl __user *user) ...@@ -1814,11 +1814,11 @@ static int check_version(unsigned int cmd, struct dm_ioctl __user *user)
if ((DM_VERSION_MAJOR != version[0]) || if ((DM_VERSION_MAJOR != version[0]) ||
(DM_VERSION_MINOR < version[1])) { (DM_VERSION_MINOR < version[1])) {
DMWARN("ioctl interface mismatch: " DMERR("ioctl interface mismatch: "
"kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)", "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)",
DM_VERSION_MAJOR, DM_VERSION_MINOR, DM_VERSION_MAJOR, DM_VERSION_MINOR,
DM_VERSION_PATCHLEVEL, DM_VERSION_PATCHLEVEL,
version[0], version[1], version[2], cmd); version[0], version[1], version[2], cmd);
r = -EINVAL; r = -EINVAL;
} }
...@@ -1927,11 +1927,11 @@ static int validate_params(uint cmd, struct dm_ioctl *param) ...@@ -1927,11 +1927,11 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
if (cmd == DM_DEV_CREATE_CMD) { if (cmd == DM_DEV_CREATE_CMD) {
if (!*param->name) { if (!*param->name) {
DMWARN("name not supplied when creating device"); DMERR("name not supplied when creating device");
return -EINVAL; return -EINVAL;
} }
} else if (*param->uuid && *param->name) { } else if (*param->uuid && *param->name) {
DMWARN("only supply one of name or uuid, cmd(%u)", cmd); DMERR("only supply one of name or uuid, cmd(%u)", cmd);
return -EINVAL; return -EINVAL;
} }
...@@ -1978,7 +1978,7 @@ static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *us ...@@ -1978,7 +1978,7 @@ static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *us
fn = lookup_ioctl(cmd, &ioctl_flags); fn = lookup_ioctl(cmd, &ioctl_flags);
if (!fn) { if (!fn) {
DMWARN("dm_ctl_ioctl: unknown command 0x%x", command); DMERR("dm_ctl_ioctl: unknown command 0x%x", command);
return -ENOTTY; return -ENOTTY;
} }
...@@ -2203,7 +2203,7 @@ int __init dm_early_create(struct dm_ioctl *dmi, ...@@ -2203,7 +2203,7 @@ int __init dm_early_create(struct dm_ioctl *dmi,
(sector_t) spec_array[i]->length, (sector_t) spec_array[i]->length,
target_params_array[i]); target_params_array[i]);
if (r) { if (r) {
DMWARN("error adding target to table"); DMERR("error adding target to table");
goto err_destroy_table; goto err_destroy_table;
} }
} }
...@@ -2216,7 +2216,7 @@ int __init dm_early_create(struct dm_ioctl *dmi, ...@@ -2216,7 +2216,7 @@ int __init dm_early_create(struct dm_ioctl *dmi,
/* setup md->queue to reflect md's type (may block) */ /* setup md->queue to reflect md's type (may block) */
r = dm_setup_md_queue(md, t); r = dm_setup_md_queue(md, t);
if (r) { if (r) {
DMWARN("unable to set up device queue for new table."); DMERR("unable to set up device queue for new table.");
goto err_destroy_table; goto err_destroy_table;
} }
......
...@@ -2529,7 +2529,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) ...@@ -2529,7 +2529,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
* of the "sync" directive. * of the "sync" directive.
* *
* With reshaping capability added, we must ensure that * With reshaping capability added, we must ensure that
* that the "sync" directive is disallowed during the reshape. * the "sync" directive is disallowed during the reshape.
*/ */
if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
continue; continue;
...@@ -2590,7 +2590,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) ...@@ -2590,7 +2590,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
/* /*
* Adjust data_offset and new_data_offset on all disk members of @rs * Adjust data_offset and new_data_offset on all disk members of @rs
* for out of place reshaping if requested by contructor * for out of place reshaping if requested by constructor
* *
* We need free space at the beginning of each raid disk for forward * We need free space at the beginning of each raid disk for forward
* and at the end for backward reshapes which userspace has to provide * and at the end for backward reshapes which userspace has to provide
......
...@@ -238,7 +238,7 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped) ...@@ -238,7 +238,7 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
dm_requeue_original_request(tio, true); dm_requeue_original_request(tio, true);
break; break;
default: default:
DMWARN("unimplemented target endio return value: %d", r); DMCRIT("unimplemented target endio return value: %d", r);
BUG(); BUG();
} }
} }
...@@ -409,7 +409,7 @@ static int map_request(struct dm_rq_target_io *tio) ...@@ -409,7 +409,7 @@ static int map_request(struct dm_rq_target_io *tio)
dm_kill_unmapped_request(rq, BLK_STS_IOERR); dm_kill_unmapped_request(rq, BLK_STS_IOERR);
break; break;
default: default:
DMWARN("unimplemented target map return value: %d", r); DMCRIT("unimplemented target map return value: %d", r);
BUG(); BUG();
} }
......
...@@ -1220,7 +1220,7 @@ int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv, ...@@ -1220,7 +1220,7 @@ int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
return 2; /* this wasn't a stats message */ return 2; /* this wasn't a stats message */
if (r == -EINVAL) if (r == -EINVAL)
DMWARN("Invalid parameters for message %s", argv[0]); DMCRIT("Invalid parameters for message %s", argv[0]);
return r; return r;
} }
......
...@@ -234,12 +234,12 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, ...@@ -234,12 +234,12 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
return 0; return 0;
if ((start >= dev_size) || (start + len > dev_size)) { if ((start >= dev_size) || (start + len > dev_size)) {
DMWARN("%s: %pg too small for target: " DMERR("%s: %pg too small for target: "
"start=%llu, len=%llu, dev_size=%llu", "start=%llu, len=%llu, dev_size=%llu",
dm_device_name(ti->table->md), bdev, dm_device_name(ti->table->md), bdev,
(unsigned long long)start, (unsigned long long)start,
(unsigned long long)len, (unsigned long long)len,
(unsigned long long)dev_size); (unsigned long long)dev_size);
return 1; return 1;
} }
...@@ -251,10 +251,10 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, ...@@ -251,10 +251,10 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
unsigned int zone_sectors = bdev_zone_sectors(bdev); unsigned int zone_sectors = bdev_zone_sectors(bdev);
if (start & (zone_sectors - 1)) { if (start & (zone_sectors - 1)) {
DMWARN("%s: start=%llu not aligned to h/w zone size %u of %pg", DMERR("%s: start=%llu not aligned to h/w zone size %u of %pg",
dm_device_name(ti->table->md), dm_device_name(ti->table->md),
(unsigned long long)start, (unsigned long long)start,
zone_sectors, bdev); zone_sectors, bdev);
return 1; return 1;
} }
...@@ -268,10 +268,10 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, ...@@ -268,10 +268,10 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
* the sector range. * the sector range.
*/ */
if (len & (zone_sectors - 1)) { if (len & (zone_sectors - 1)) {
DMWARN("%s: len=%llu not aligned to h/w zone size %u of %pg", DMERR("%s: len=%llu not aligned to h/w zone size %u of %pg",
dm_device_name(ti->table->md), dm_device_name(ti->table->md),
(unsigned long long)len, (unsigned long long)len,
zone_sectors, bdev); zone_sectors, bdev);
return 1; return 1;
} }
} }
...@@ -280,20 +280,20 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, ...@@ -280,20 +280,20 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
return 0; return 0;
if (start & (logical_block_size_sectors - 1)) { if (start & (logical_block_size_sectors - 1)) {
DMWARN("%s: start=%llu not aligned to h/w " DMERR("%s: start=%llu not aligned to h/w "
"logical block size %u of %pg", "logical block size %u of %pg",
dm_device_name(ti->table->md), dm_device_name(ti->table->md),
(unsigned long long)start, (unsigned long long)start,
limits->logical_block_size, bdev); limits->logical_block_size, bdev);
return 1; return 1;
} }
if (len & (logical_block_size_sectors - 1)) { if (len & (logical_block_size_sectors - 1)) {
DMWARN("%s: len=%llu not aligned to h/w " DMERR("%s: len=%llu not aligned to h/w "
"logical block size %u of %pg", "logical block size %u of %pg",
dm_device_name(ti->table->md), dm_device_name(ti->table->md),
(unsigned long long)len, (unsigned long long)len,
limits->logical_block_size, bdev); limits->logical_block_size, bdev);
return 1; return 1;
} }
...@@ -434,8 +434,8 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d) ...@@ -434,8 +434,8 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d)
} }
} }
if (!found) { if (!found) {
DMWARN("%s: device %s not in table devices list", DMERR("%s: device %s not in table devices list",
dm_device_name(ti->table->md), d->name); dm_device_name(ti->table->md), d->name);
return; return;
} }
if (refcount_dec_and_test(&dd->count)) { if (refcount_dec_and_test(&dd->count)) {
...@@ -618,12 +618,12 @@ static int validate_hardware_logical_block_alignment(struct dm_table *t, ...@@ -618,12 +618,12 @@ static int validate_hardware_logical_block_alignment(struct dm_table *t,
} }
if (remaining) { if (remaining) {
DMWARN("%s: table line %u (start sect %llu len %llu) " DMERR("%s: table line %u (start sect %llu len %llu) "
"not aligned to h/w logical block size %u", "not aligned to h/w logical block size %u",
dm_device_name(t->md), i, dm_device_name(t->md), i,
(unsigned long long) ti->begin, (unsigned long long) ti->begin,
(unsigned long long) ti->len, (unsigned long long) ti->len,
limits->logical_block_size); limits->logical_block_size);
return -EINVAL; return -EINVAL;
} }
...@@ -1008,7 +1008,7 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device * ...@@ -1008,7 +1008,7 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
struct dm_md_mempools *pools; struct dm_md_mempools *pools;
if (unlikely(type == DM_TYPE_NONE)) { if (unlikely(type == DM_TYPE_NONE)) {
DMWARN("no table type is set, can't allocate mempools"); DMERR("no table type is set, can't allocate mempools");
return -EINVAL; return -EINVAL;
} }
...@@ -1112,7 +1112,7 @@ static bool integrity_profile_exists(struct gendisk *disk) ...@@ -1112,7 +1112,7 @@ static bool integrity_profile_exists(struct gendisk *disk)
* Get a disk whose integrity profile reflects the table's profile. * Get a disk whose integrity profile reflects the table's profile.
* Returns NULL if integrity support was inconsistent or unavailable. * Returns NULL if integrity support was inconsistent or unavailable.
*/ */
static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t) static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t)
{ {
struct list_head *devices = dm_table_get_devices(t); struct list_head *devices = dm_table_get_devices(t);
struct dm_dev_internal *dd = NULL; struct dm_dev_internal *dd = NULL;
...@@ -1185,10 +1185,10 @@ static int dm_table_register_integrity(struct dm_table *t) ...@@ -1185,10 +1185,10 @@ static int dm_table_register_integrity(struct dm_table *t)
* profile the new profile should not conflict. * profile the new profile should not conflict.
*/ */
if (blk_integrity_compare(dm_disk(md), template_disk) < 0) { if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
DMWARN("%s: conflict with existing integrity profile: " DMERR("%s: conflict with existing integrity profile: "
"%s profile mismatch", "%s profile mismatch",
dm_device_name(t->md), dm_device_name(t->md),
template_disk->disk_name); template_disk->disk_name);
return 1; return 1;
} }
...@@ -1327,7 +1327,7 @@ static int dm_table_construct_crypto_profile(struct dm_table *t) ...@@ -1327,7 +1327,7 @@ static int dm_table_construct_crypto_profile(struct dm_table *t)
if (t->md->queue && if (t->md->queue &&
!blk_crypto_has_capabilities(profile, !blk_crypto_has_capabilities(profile,
t->md->queue->crypto_profile)) { t->md->queue->crypto_profile)) {
DMWARN("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!"); DMERR("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!");
dm_destroy_crypto_profile(profile); dm_destroy_crypto_profile(profile);
return -EINVAL; return -EINVAL;
} }
......
...@@ -1401,14 +1401,16 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) ...@@ -1401,14 +1401,16 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
/* WQ_UNBOUND greatly improves performance when running on ramdisk */ /* WQ_UNBOUND greatly improves performance when running on ramdisk */
wq_flags = WQ_MEM_RECLAIM | WQ_UNBOUND; wq_flags = WQ_MEM_RECLAIM | WQ_UNBOUND;
if (v->use_tasklet) { /*
/* * Using WQ_HIGHPRI improves throughput and completion latency by
* Allow verify_wq to preempt softirq since verification in * reducing wait times when reading from a dm-verity device.
* tasklet will fall-back to using it for error handling *
* (or if the bufio cache doesn't have required hashes). * Also as required for the "try_verify_in_tasklet" feature: WQ_HIGHPRI
*/ * allows verify_wq to preempt softirq since verification in tasklet
wq_flags |= WQ_HIGHPRI; * will fall-back to using it for error handling (or if the bufio cache
} * doesn't have required hashes).
*/
wq_flags |= WQ_HIGHPRI;
v->verify_wq = alloc_workqueue("kverityd", wq_flags, num_online_cpus()); v->verify_wq = alloc_workqueue("kverityd", wq_flags, num_online_cpus());
if (!v->verify_wq) { if (!v->verify_wq) {
ti->error = "Cannot allocate workqueue"; ti->error = "Cannot allocate workqueue";
......
...@@ -864,7 +864,7 @@ int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) ...@@ -864,7 +864,7 @@ int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors; sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
if (geo->start > sz) { if (geo->start > sz) {
DMWARN("Start sector is beyond the geometry limits."); DMERR("Start sector is beyond the geometry limits.");
return -EINVAL; return -EINVAL;
} }
...@@ -1149,7 +1149,7 @@ static void clone_endio(struct bio *bio) ...@@ -1149,7 +1149,7 @@ static void clone_endio(struct bio *bio)
/* The target will handle the io */ /* The target will handle the io */
return; return;
default: default:
DMWARN("unimplemented target endio return value: %d", r); DMCRIT("unimplemented target endio return value: %d", r);
BUG(); BUG();
} }
} }
...@@ -1455,7 +1455,7 @@ static void __map_bio(struct bio *clone) ...@@ -1455,7 +1455,7 @@ static void __map_bio(struct bio *clone)
dm_io_dec_pending(io, BLK_STS_DM_REQUEUE); dm_io_dec_pending(io, BLK_STS_DM_REQUEUE);
break; break;
default: default:
DMWARN("unimplemented target map return value: %d", r); DMCRIT("unimplemented target map return value: %d", r);
BUG(); BUG();
} }
} }
...@@ -2005,7 +2005,7 @@ static struct mapped_device *alloc_dev(int minor) ...@@ -2005,7 +2005,7 @@ static struct mapped_device *alloc_dev(int minor)
md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
if (!md) { if (!md) {
DMWARN("unable to allocate device, out of memory."); DMERR("unable to allocate device, out of memory.");
return NULL; return NULL;
} }
...@@ -2065,7 +2065,6 @@ static struct mapped_device *alloc_dev(int minor) ...@@ -2065,7 +2065,6 @@ static struct mapped_device *alloc_dev(int minor)
md->disk->minors = 1; md->disk->minors = 1;
md->disk->flags |= GENHD_FL_NO_PART; md->disk->flags |= GENHD_FL_NO_PART;
md->disk->fops = &dm_blk_dops; md->disk->fops = &dm_blk_dops;
md->disk->queue = md->queue;
md->disk->private_data = md; md->disk->private_data = md;
sprintf(md->disk->disk_name, "dm-%d", minor); sprintf(md->disk->disk_name, "dm-%d", minor);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment