Commit 6a79248b authored by Mike Snitzer

dm vdo permassert: audit all of ASSERT to test for VDO_SUCCESS

Also rename ASSERT to VDO_ASSERT and ASSERT_LOG_ONLY to
VDO_ASSERT_LOG_ONLY.

But re-introduce ASSERT and ASSERT_LOG_ONLY as a placeholder
for the benefit of dm-vdo/indexer.
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
Signed-off-by: Matthew Sakai <msakai@redhat.com>
parent a958c53a
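The audit matters because VDO_ASSERT() (like the old ASSERT()) evaluates to a status code that call sites compare against a success constant, so renaming the macro goes hand in hand with switching those comparisons from UDS_SUCCESS to VDO_SUCCESS. A minimal sketch of the before/after call-site pattern follows; check_count() and its argument are hypothetical, while the macros and status codes are the ones this commit touches:

```c
/* Hypothetical call site illustrating the audited pattern. */
static int check_count(int count)
{
	int result;

	/* Before: result = ASSERT(...); if (result != UDS_SUCCESS) ... */
	result = VDO_ASSERT(count > 0, "count %d must be positive", count);
	if (result != VDO_SUCCESS)
		return result;

	return VDO_SUCCESS;
}
```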
@@ -177,8 +177,8 @@ static void apply_to_zone(struct vdo_completion *completion)
 	zone_count_t zone;
 	struct action_manager *manager = as_action_manager(completion);
 
-	ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == get_acting_zone_thread_id(manager)),
-			"%s() called on acting zones's thread", __func__);
+	VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == get_acting_zone_thread_id(manager)),
+			    "%s() called on acting zones's thread", __func__);
 
 	zone = manager->acting_zone++;
 	if (manager->acting_zone == manager->zones) {
@@ -357,8 +357,8 @@ bool vdo_schedule_operation_with_context(struct action_manager *manager,
 {
 	struct action *current_action;
 
-	ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == manager->initiator_thread_id),
-			"action initiated from correct thread");
+	VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == manager->initiator_thread_id),
+			    "action initiated from correct thread");
 	if (!manager->current_action->in_use) {
 		current_action = manager->current_action;
 	} else if (!manager->current_action->next->in_use) {
...
This diff is collapsed.
@@ -60,7 +60,7 @@ void vdo_initialize_completion(struct vdo_completion *completion,
 
 static inline void assert_incomplete(struct vdo_completion *completion)
 {
-	ASSERT_LOG_ONLY(!completion->complete, "completion is not complete");
+	VDO_ASSERT_LOG_ONLY(!completion->complete, "completion is not complete");
 }
 
 /**
@@ -111,10 +111,10 @@ void vdo_enqueue_completion(struct vdo_completion *completion,
 	struct vdo *vdo = completion->vdo;
 	thread_id_t thread_id = completion->callback_thread_id;
 
-	if (ASSERT(thread_id < vdo->thread_config.thread_count,
-		   "thread_id %u (completion type %d) is less than thread count %u",
-		   thread_id, completion->type,
-		   vdo->thread_config.thread_count) != UDS_SUCCESS)
+	if (VDO_ASSERT(thread_id < vdo->thread_config.thread_count,
+		       "thread_id %u (completion type %d) is less than thread count %u",
+		       thread_id, completion->type,
+		       vdo->thread_config.thread_count) != VDO_SUCCESS)
 		BUG();
 
 	completion->requeue = false;
...
@@ -85,9 +85,9 @@ static inline void vdo_fail_completion(struct vdo_completion *completion, int re
 static inline int vdo_assert_completion_type(struct vdo_completion *completion,
 					     enum vdo_completion_type expected)
 {
-	return ASSERT(expected == completion->type,
-		      "completion type should be %u, not %u", expected,
-		      completion->type);
+	return VDO_ASSERT(expected == completion->type,
+			  "completion type should be %u, not %u", expected,
+			  completion->type);
 }
 
 static inline void vdo_set_completion_callback(struct vdo_completion *completion,
...
This diff is collapsed.
@@ -280,7 +280,7 @@ struct data_vio {
 
 static inline struct data_vio *vio_as_data_vio(struct vio *vio)
 {
-	ASSERT_LOG_ONLY((vio->type == VIO_TYPE_DATA), "vio is a data_vio");
+	VDO_ASSERT_LOG_ONLY((vio->type == VIO_TYPE_DATA), "vio is a data_vio");
 	return container_of(vio, struct data_vio, vio);
 }
@@ -374,9 +374,9 @@ static inline void assert_data_vio_in_hash_zone(struct data_vio *data_vio)
 	 * It's odd to use the LBN, but converting the record name to hex is a bit clunky for an
 	 * inline, and the LBN better than nothing as an identifier.
 	 */
-	ASSERT_LOG_ONLY((expected == thread_id),
-			"data_vio for logical block %llu on thread %u, should be on hash zone thread %u",
-			(unsigned long long) data_vio->logical.lbn, thread_id, expected);
+	VDO_ASSERT_LOG_ONLY((expected == thread_id),
+			    "data_vio for logical block %llu on thread %u, should be on hash zone thread %u",
+			    (unsigned long long) data_vio->logical.lbn, thread_id, expected);
 }
 
 static inline void set_data_vio_hash_zone_callback(struct data_vio *data_vio,
@@ -402,9 +402,9 @@ static inline void assert_data_vio_in_logical_zone(struct data_vio *data_vio)
 	thread_id_t expected = data_vio->logical.zone->thread_id;
 	thread_id_t thread_id = vdo_get_callback_thread_id();
 
-	ASSERT_LOG_ONLY((expected == thread_id),
-			"data_vio for logical block %llu on thread %u, should be on thread %u",
-			(unsigned long long) data_vio->logical.lbn, thread_id, expected);
+	VDO_ASSERT_LOG_ONLY((expected == thread_id),
+			    "data_vio for logical block %llu on thread %u, should be on thread %u",
+			    (unsigned long long) data_vio->logical.lbn, thread_id, expected);
 }
 
 static inline void set_data_vio_logical_callback(struct data_vio *data_vio,
@@ -430,10 +430,10 @@ static inline void assert_data_vio_in_allocated_zone(struct data_vio *data_vio)
 	thread_id_t expected = data_vio->allocation.zone->thread_id;
 	thread_id_t thread_id = vdo_get_callback_thread_id();
 
-	ASSERT_LOG_ONLY((expected == thread_id),
-			"struct data_vio for allocated physical block %llu on thread %u, should be on thread %u",
-			(unsigned long long) data_vio->allocation.pbn, thread_id,
-			expected);
+	VDO_ASSERT_LOG_ONLY((expected == thread_id),
+			    "struct data_vio for allocated physical block %llu on thread %u, should be on thread %u",
+			    (unsigned long long) data_vio->allocation.pbn, thread_id,
+			    expected);
 }
 
 static inline void set_data_vio_allocated_zone_callback(struct data_vio *data_vio,
@@ -460,10 +460,10 @@ static inline void assert_data_vio_in_duplicate_zone(struct data_vio *data_vio)
 	thread_id_t expected = data_vio->duplicate.zone->thread_id;
 	thread_id_t thread_id = vdo_get_callback_thread_id();
 
-	ASSERT_LOG_ONLY((expected == thread_id),
-			"data_vio for duplicate physical block %llu on thread %u, should be on thread %u",
-			(unsigned long long) data_vio->duplicate.pbn, thread_id,
-			expected);
+	VDO_ASSERT_LOG_ONLY((expected == thread_id),
+			    "data_vio for duplicate physical block %llu on thread %u, should be on thread %u",
+			    (unsigned long long) data_vio->duplicate.pbn, thread_id,
+			    expected);
 }
 
 static inline void set_data_vio_duplicate_zone_callback(struct data_vio *data_vio,
@@ -490,9 +490,9 @@ static inline void assert_data_vio_in_mapped_zone(struct data_vio *data_vio)
 	thread_id_t expected = data_vio->mapped.zone->thread_id;
 	thread_id_t thread_id = vdo_get_callback_thread_id();
 
-	ASSERT_LOG_ONLY((expected == thread_id),
-			"data_vio for mapped physical block %llu on thread %u, should be on thread %u",
-			(unsigned long long) data_vio->mapped.pbn, thread_id, expected);
+	VDO_ASSERT_LOG_ONLY((expected == thread_id),
+			    "data_vio for mapped physical block %llu on thread %u, should be on thread %u",
+			    (unsigned long long) data_vio->mapped.pbn, thread_id, expected);
 }
 
 static inline void set_data_vio_mapped_zone_callback(struct data_vio *data_vio,
@@ -507,10 +507,10 @@ static inline void assert_data_vio_in_new_mapped_zone(struct data_vio *data_vio)
 	thread_id_t expected = data_vio->new_mapped.zone->thread_id;
 	thread_id_t thread_id = vdo_get_callback_thread_id();
 
-	ASSERT_LOG_ONLY((expected == thread_id),
-			"data_vio for new_mapped physical block %llu on thread %u, should be on thread %u",
-			(unsigned long long) data_vio->new_mapped.pbn, thread_id,
-			expected);
+	VDO_ASSERT_LOG_ONLY((expected == thread_id),
+			    "data_vio for new_mapped physical block %llu on thread %u, should be on thread %u",
+			    (unsigned long long) data_vio->new_mapped.pbn, thread_id,
+			    expected);
 }
 
 static inline void set_data_vio_new_mapped_zone_callback(struct data_vio *data_vio,
@@ -525,10 +525,10 @@ static inline void assert_data_vio_in_journal_zone(struct data_vio *data_vio)
 	thread_id_t journal_thread = vdo_from_data_vio(data_vio)->thread_config.journal_thread;
 	thread_id_t thread_id = vdo_get_callback_thread_id();
 
-	ASSERT_LOG_ONLY((journal_thread == thread_id),
-			"data_vio for logical block %llu on thread %u, should be on journal thread %u",
-			(unsigned long long) data_vio->logical.lbn, thread_id,
-			journal_thread);
+	VDO_ASSERT_LOG_ONLY((journal_thread == thread_id),
+			    "data_vio for logical block %llu on thread %u, should be on journal thread %u",
+			    (unsigned long long) data_vio->logical.lbn, thread_id,
+			    journal_thread);
 }
 
 static inline void set_data_vio_journal_callback(struct data_vio *data_vio,
@@ -555,10 +555,10 @@ static inline void assert_data_vio_in_packer_zone(struct data_vio *data_vio)
 	thread_id_t packer_thread = vdo_from_data_vio(data_vio)->thread_config.packer_thread;
 	thread_id_t thread_id = vdo_get_callback_thread_id();
 
-	ASSERT_LOG_ONLY((packer_thread == thread_id),
-			"data_vio for logical block %llu on thread %u, should be on packer thread %u",
-			(unsigned long long) data_vio->logical.lbn, thread_id,
-			packer_thread);
+	VDO_ASSERT_LOG_ONLY((packer_thread == thread_id),
+			    "data_vio for logical block %llu on thread %u, should be on packer thread %u",
+			    (unsigned long long) data_vio->logical.lbn, thread_id,
+			    packer_thread);
 }
 
 static inline void set_data_vio_packer_callback(struct data_vio *data_vio,
@@ -585,10 +585,10 @@ static inline void assert_data_vio_on_cpu_thread(struct data_vio *data_vio)
 	thread_id_t cpu_thread = vdo_from_data_vio(data_vio)->thread_config.cpu_thread;
 	thread_id_t thread_id = vdo_get_callback_thread_id();
 
-	ASSERT_LOG_ONLY((cpu_thread == thread_id),
-			"data_vio for logical block %llu on thread %u, should be on cpu thread %u",
-			(unsigned long long) data_vio->logical.lbn, thread_id,
-			cpu_thread);
+	VDO_ASSERT_LOG_ONLY((cpu_thread == thread_id),
+			    "data_vio for logical block %llu on thread %u, should be on cpu thread %u",
+			    (unsigned long long) data_vio->logical.lbn, thread_id,
+			    cpu_thread);
 }
 
 static inline void set_data_vio_cpu_callback(struct data_vio *data_vio,
...
This diff is collapsed.
@@ -904,8 +904,8 @@ static int vdo_map_bio(struct dm_target *ti, struct bio *bio)
 	struct vdo_work_queue *current_work_queue;
 	const struct admin_state_code *code = vdo_get_admin_state_code(&vdo->admin.state);
 
-	ASSERT_LOG_ONLY(code->normal, "vdo should not receive bios while in state %s",
-			code->name);
+	VDO_ASSERT_LOG_ONLY(code->normal, "vdo should not receive bios while in state %s",
+			    code->name);
 
 	/* Count all incoming bios. */
 	vdo_count_bios(&vdo->stats.bios_in, bio);
@@ -1244,9 +1244,9 @@ static int perform_admin_operation(struct vdo *vdo, u32 starting_phase,
 
 /* Assert that we are operating on the correct thread for the current phase. */
 static void assert_admin_phase_thread(struct vdo *vdo, const char *what)
 {
-	ASSERT_LOG_ONLY(vdo_get_callback_thread_id() == get_thread_id_for_phase(vdo),
-			"%s on correct thread for %s", what,
-			ADMIN_PHASE_NAMES[vdo->admin.phase]);
+	VDO_ASSERT_LOG_ONLY(vdo_get_callback_thread_id() == get_thread_id_for_phase(vdo),
+			    "%s on correct thread for %s", what,
+			    ADMIN_PHASE_NAMES[vdo->admin.phase]);
 }
 
 /**
@@ -1424,11 +1424,11 @@ static void release_instance(unsigned int instance)
 {
 	mutex_lock(&instances_lock);
 	if (instance >= instances.bit_count) {
-		ASSERT_LOG_ONLY(false,
-				"instance number %u must be less than bit count %u",
-				instance, instances.bit_count);
+		VDO_ASSERT_LOG_ONLY(false,
+				    "instance number %u must be less than bit count %u",
+				    instance, instances.bit_count);
 	} else if (test_bit(instance, instances.words) == 0) {
-		ASSERT_LOG_ONLY(false, "instance number %u must be allocated", instance);
+		VDO_ASSERT_LOG_ONLY(false, "instance number %u must be allocated", instance);
 	} else {
 		__clear_bit(instance, instances.words);
 		instances.count -= 1;
@@ -1577,9 +1577,9 @@ static int allocate_instance(unsigned int *instance_ptr)
 	if (instance >= instances.bit_count) {
 		/* Nothing free after next, so wrap around to instance zero. */
 		instance = find_first_zero_bit(instances.words, instances.bit_count);
-		result = ASSERT(instance < instances.bit_count,
-				"impossibly, no zero bit found");
-		if (result != UDS_SUCCESS)
+		result = VDO_ASSERT(instance < instances.bit_count,
+				    "impossibly, no zero bit found");
+		if (result != VDO_SUCCESS)
 			return result;
 	}
 
@@ -1729,8 +1729,8 @@ static int prepare_to_grow_physical(struct vdo *vdo, block_count_t new_physical_
 	uds_log_info("Preparing to resize physical to %llu",
 		     (unsigned long long) new_physical_blocks);
-	ASSERT_LOG_ONLY((new_physical_blocks > current_physical_blocks),
-			"New physical size is larger than current physical size");
+	VDO_ASSERT_LOG_ONLY((new_physical_blocks > current_physical_blocks),
+			    "New physical size is larger than current physical size");
 
 	result = perform_admin_operation(vdo, PREPARE_GROW_PHYSICAL_PHASE_START,
 					 check_may_grow_physical,
 					 finish_operation_callback,
@@ -1829,8 +1829,8 @@ static int prepare_to_modify(struct dm_target *ti, struct device_config *config,
 	uds_log_info("Preparing to resize logical to %llu",
 		     (unsigned long long) config->logical_blocks);
-	ASSERT_LOG_ONLY((config->logical_blocks > logical_blocks),
-			"New logical size is larger than current size");
+	VDO_ASSERT_LOG_ONLY((config->logical_blocks > logical_blocks),
+			    "New logical size is larger than current size");
 
 	result = vdo_prepare_to_grow_block_map(vdo->block_map,
 					       config->logical_blocks);
 
@@ -2890,9 +2890,9 @@ static void vdo_module_destroy(void)
 	if (dm_registered)
 		dm_unregister_target(&vdo_target_bio);
 
-	ASSERT_LOG_ONLY(instances.count == 0,
-			"should have no instance numbers still in use, but have %u",
-			instances.count);
+	VDO_ASSERT_LOG_ONLY(instances.count == 0,
+			    "should have no instance numbers still in use, but have %u",
+			    instances.count);
 
 	vdo_free(instances.words);
 	memset(&instances, 0, sizeof(struct instance_tracker));
...
This diff is collapsed.
@@ -281,8 +281,9 @@ int uds_register_error_block(const char *block_name, int first_error,
 		.infos = infos,
 	};
 
-	result = ASSERT(first_error < next_free_error, "well-defined error block range");
-	if (result != UDS_SUCCESS)
+	result = VDO_ASSERT(first_error < next_free_error,
+			    "well-defined error block range");
+	if (result != VDO_SUCCESS)
 		return result;
 
 	if (registered_errors.count == registered_errors.allocated) {
...
@@ -59,8 +59,8 @@ struct flusher {
  */
 static inline void assert_on_flusher_thread(struct flusher *flusher, const char *caller)
 {
-	ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == flusher->thread_id),
-			"%s() called from flusher thread", caller);
+	VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == flusher->thread_id),
+			    "%s() called from flusher thread", caller);
 }
 
 /**
@@ -272,8 +272,8 @@ static void flush_vdo(struct vdo_completion *completion)
 	int result;
 
 	assert_on_flusher_thread(flusher, __func__);
-	result = ASSERT(vdo_is_state_normal(&flusher->state),
-			"flusher is in normal operation");
+	result = VDO_ASSERT(vdo_is_state_normal(&flusher->state),
+			    "flusher is in normal operation");
 	if (result != VDO_SUCCESS) {
 		vdo_enter_read_only_mode(flusher->vdo, result);
 		vdo_complete_flush(flush);
@@ -330,11 +330,11 @@ void vdo_complete_flushes(struct flusher *flusher)
 		if (flush->flush_generation >= oldest_active_generation)
 			return;
 
-		ASSERT_LOG_ONLY((flush->flush_generation ==
-				 flusher->first_unacknowledged_generation),
-				"acknowledged next expected flush, %llu, was: %llu",
-				(unsigned long long) flusher->first_unacknowledged_generation,
-				(unsigned long long) flush->flush_generation);
+		VDO_ASSERT_LOG_ONLY((flush->flush_generation ==
+				     flusher->first_unacknowledged_generation),
+				    "acknowledged next expected flush, %llu, was: %llu",
+				    (unsigned long long) flusher->first_unacknowledged_generation,
+				    (unsigned long long) flush->flush_generation);
 		vdo_waitq_dequeue_waiter(&flusher->pending_flushes);
 		vdo_complete_flush(flush);
 		flusher->first_unacknowledged_generation++;
@@ -400,8 +400,8 @@ void vdo_launch_flush(struct vdo *vdo, struct bio *bio)
 	struct flusher *flusher = vdo->flusher;
 	const struct admin_state_code *code = vdo_get_admin_state_code(&flusher->state);
 
-	ASSERT_LOG_ONLY(!code->quiescent, "Flushing not allowed in state %s",
-			code->name);
+	VDO_ASSERT_LOG_ONLY(!code->quiescent, "Flushing not allowed in state %s",
+			    code->name);
 
 	spin_lock(&flusher->lock);
...
@@ -110,14 +110,14 @@ static struct vdo_completion *poll_for_completion(struct simple_work_queue *queu
 static void enqueue_work_queue_completion(struct simple_work_queue *queue,
 					  struct vdo_completion *completion)
 {
-	ASSERT_LOG_ONLY(completion->my_queue == NULL,
-			"completion %px (fn %px) to enqueue (%px) is not already queued (%px)",
-			completion, completion->callback, queue, completion->my_queue);
+	VDO_ASSERT_LOG_ONLY(completion->my_queue == NULL,
+			    "completion %px (fn %px) to enqueue (%px) is not already queued (%px)",
+			    completion, completion->callback, queue, completion->my_queue);
 
 	if (completion->priority == VDO_WORK_Q_DEFAULT_PRIORITY)
 		completion->priority = queue->common.type->default_priority;
-	if (ASSERT(completion->priority <= queue->common.type->max_priority,
-		   "priority is in range for queue") != VDO_SUCCESS)
+	if (VDO_ASSERT(completion->priority <= queue->common.type->max_priority,
+		       "priority is in range for queue") != VDO_SUCCESS)
 		completion->priority = 0;
 
 	completion->my_queue = &queue->common;
@@ -222,9 +222,9 @@ static struct vdo_completion *wait_for_next_completion(struct simple_work_queue
 static void process_completion(struct simple_work_queue *queue,
 			       struct vdo_completion *completion)
 {
-	if (ASSERT(completion->my_queue == &queue->common,
-		   "completion %px from queue %px marked as being in this queue (%px)",
-		   completion, queue, completion->my_queue) == UDS_SUCCESS)
+	if (VDO_ASSERT(completion->my_queue == &queue->common,
+		       "completion %px from queue %px marked as being in this queue (%px)",
+		       completion, queue, completion->my_queue) == VDO_SUCCESS)
 		completion->my_queue = NULL;
 
 	vdo_run_completion(completion);
@@ -319,9 +319,9 @@ static int make_simple_work_queue(const char *thread_name_prefix, const char *na
 	struct task_struct *thread = NULL;
 	int result;
 
-	ASSERT_LOG_ONLY((type->max_priority <= VDO_WORK_Q_MAX_PRIORITY),
-			"queue priority count %u within limit %u", type->max_priority,
-			VDO_WORK_Q_MAX_PRIORITY);
+	VDO_ASSERT_LOG_ONLY((type->max_priority <= VDO_WORK_Q_MAX_PRIORITY),
+			    "queue priority count %u within limit %u", type->max_priority,
+			    VDO_WORK_Q_MAX_PRIORITY);
 
 	result = vdo_allocate(1, struct simple_work_queue, "simple work queue", &queue);
 	if (result != VDO_SUCCESS)
...
@@ -94,7 +94,7 @@ static void count_all_bios(struct vio *vio, struct bio *bio)
  */
 static void assert_in_bio_zone(struct vio *vio)
 {
-	ASSERT_LOG_ONLY(!in_interrupt(), "not in interrupt context");
+	VDO_ASSERT_LOG_ONLY(!in_interrupt(), "not in interrupt context");
 	assert_vio_in_bio_zone(vio);
 }
 
@@ -300,7 +300,7 @@ static bool try_bio_map_merge(struct vio *vio)
 	mutex_unlock(&bio_queue_data->lock);
 
 	/* We don't care about failure of int_map_put in this case. */
-	ASSERT_LOG_ONLY(result == VDO_SUCCESS, "bio map insertion succeeds");
+	VDO_ASSERT_LOG_ONLY(result == VDO_SUCCESS, "bio map insertion succeeds");
 
 	return merged;
 }
@@ -345,8 +345,8 @@ void __submit_metadata_vio(struct vio *vio, physical_block_number_t physical,
 	const struct admin_state_code *code = vdo_get_admin_state(completion->vdo);
 
-	ASSERT_LOG_ONLY(!code->quiescent, "I/O not allowed in state %s", code->name);
-	ASSERT_LOG_ONLY(vio->bio->bi_next == NULL, "metadata bio has no next bio");
+	VDO_ASSERT_LOG_ONLY(!code->quiescent, "I/O not allowed in state %s", code->name);
+	VDO_ASSERT_LOG_ONLY(vio->bio->bi_next == NULL, "metadata bio has no next bio");
 
 	vdo_reset_completion(completion);
 	completion->error_handler = error_handler;
...
@@ -142,8 +142,8 @@ void vdo_free_logical_zones(struct logical_zones *zones)
 
 static inline void assert_on_zone_thread(struct logical_zone *zone, const char *what)
 {
-	ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == zone->thread_id),
-			"%s() called on correct thread", what);
+	VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == zone->thread_id),
+			    "%s() called on correct thread", what);
 }
 
 /**
@@ -247,10 +247,10 @@ void vdo_increment_logical_zone_flush_generation(struct logical_zone *zone,
 						 sequence_number_t expected_generation)
 {
 	assert_on_zone_thread(zone, __func__);
-	ASSERT_LOG_ONLY((zone->flush_generation == expected_generation),
-			"logical zone %u flush generation %llu should be %llu before increment",
-			zone->zone_number, (unsigned long long) zone->flush_generation,
-			(unsigned long long) expected_generation);
+	VDO_ASSERT_LOG_ONLY((zone->flush_generation == expected_generation),
+			    "logical zone %u flush generation %llu should be %llu before increment",
+			    zone->zone_number, (unsigned long long) zone->flush_generation,
+			    (unsigned long long) expected_generation);
 
 	zone->flush_generation++;
 	zone->ios_in_flush_generation = 0;
@@ -267,7 +267,7 @@ void vdo_acquire_flush_generation_lock(struct data_vio *data_vio)
 	struct logical_zone *zone = data_vio->logical.zone;
 
 	assert_on_zone_thread(zone, __func__);
-	ASSERT_LOG_ONLY(vdo_is_state_normal(&zone->state), "vdo state is normal");
+	VDO_ASSERT_LOG_ONLY(vdo_is_state_normal(&zone->state), "vdo state is normal");
 
 	data_vio->flush_generation = zone->flush_generation;
 	list_add_tail(&data_vio->write_entry, &zone->write_vios);
@@ -332,10 +332,10 @@ void vdo_release_flush_generation_lock(struct data_vio *data_vio)
 		return;
 
 	list_del_init(&data_vio->write_entry);
-	ASSERT_LOG_ONLY((zone->oldest_active_generation <= data_vio->flush_generation),
-			"data_vio releasing lock on generation %llu is not older than oldest active generation %llu",
-			(unsigned long long) data_vio->flush_generation,
-			(unsigned long long) zone->oldest_active_generation);
+	VDO_ASSERT_LOG_ONLY((zone->oldest_active_generation <= data_vio->flush_generation),
+			    "data_vio releasing lock on generation %llu is not older than oldest active generation %llu",
+			    (unsigned long long) data_vio->flush_generation,
+			    (unsigned long long) zone->oldest_active_generation);
 
 	if (!update_oldest_active_generation(zone) || zone->notifying)
 		return;
...
@@ -385,12 +385,12 @@ void vdo_memory_init(void)
 
 void vdo_memory_exit(void)
 {
-	ASSERT_LOG_ONLY(memory_stats.kmalloc_bytes == 0,
-			"kmalloc memory used (%zd bytes in %zd blocks) is returned to the kernel",
-			memory_stats.kmalloc_bytes, memory_stats.kmalloc_blocks);
-	ASSERT_LOG_ONLY(memory_stats.vmalloc_bytes == 0,
-			"vmalloc memory used (%zd bytes in %zd blocks) is returned to the kernel",
-			memory_stats.vmalloc_bytes, memory_stats.vmalloc_blocks);
+	VDO_ASSERT_LOG_ONLY(memory_stats.kmalloc_bytes == 0,
+			    "kmalloc memory used (%zd bytes in %zd blocks) is returned to the kernel",
+			    memory_stats.kmalloc_bytes, memory_stats.kmalloc_blocks);
+	VDO_ASSERT_LOG_ONLY(memory_stats.vmalloc_bytes == 0,
+			    "vmalloc memory used (%zd bytes in %zd blocks) is returned to the kernel",
+			    memory_stats.vmalloc_bytes, memory_stats.vmalloc_blocks);
 
 	uds_log_debug("peak usage %zd bytes", memory_stats.peak_bytes);
 }
...
@@ -86,8 +86,8 @@ int vdo_get_compressed_block_fragment(enum block_mapping_state mapping_state,
  */
 static inline void assert_on_packer_thread(struct packer *packer, const char *caller)
 {
-	ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == packer->thread_id),
-			"%s() called from packer thread", caller);
+	VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == packer->thread_id),
+			    "%s() called from packer thread", caller);
 }
 
 /**
@@ -569,9 +569,9 @@ void vdo_attempt_packing(struct data_vio *data_vio)
 
 	assert_on_packer_thread(packer, __func__);
 
-	result = ASSERT((status.stage == DATA_VIO_COMPRESSING),
-			"attempt to pack data_vio not ready for packing, stage: %u",
-			status.stage);
+	result = VDO_ASSERT((status.stage == DATA_VIO_COMPRESSING),
+			    "attempt to pack data_vio not ready for packing, stage: %u",
+			    status.stage);
 	if (result != VDO_SUCCESS)
 		return;
 
@@ -671,7 +671,7 @@ void vdo_remove_lock_holder_from_packer(struct vdo_completion *completion)
 
 	lock_holder = vdo_forget(data_vio->compression.lock_holder);
 	bin = lock_holder->compression.bin;
-	ASSERT_LOG_ONLY((bin != NULL), "data_vio in packer has a bin");
+	VDO_ASSERT_LOG_ONLY((bin != NULL), "data_vio in packer has a bin");
 
 	slot = lock_holder->compression.slot;
 	bin->slots_used--;
...
@@ -13,7 +13,6 @@
 /* Utilities for asserting that certain conditions are met */
 
 #define STRINGIFY(X) #X
-#define STRINGIFY_VALUE(X) STRINGIFY(X)
 
 /*
  * A hack to apply the "warn if unused" attribute to an integral expression.
@@ -23,19 +22,23 @@
  * expression. With optimization enabled, this function contributes no additional instructions, but
  * the warn_unused_result attribute still applies to the code calling it.
  */
-static inline int __must_check uds_must_use(int value)
+static inline int __must_check vdo_must_use(int value)
 {
 	return value;
 }
 
 /* Assert that an expression is true and return an error if it is not. */
-#define ASSERT(expr, ...) uds_must_use(__UDS_ASSERT(expr, __VA_ARGS__))
+#define VDO_ASSERT(expr, ...) vdo_must_use(__VDO_ASSERT(expr, __VA_ARGS__))
 
 /* Log a message if the expression is not true. */
-#define ASSERT_LOG_ONLY(expr, ...) __UDS_ASSERT(expr, __VA_ARGS__)
+#define VDO_ASSERT_LOG_ONLY(expr, ...) __VDO_ASSERT(expr, __VA_ARGS__)
+
+/* For use by UDS */
+#define ASSERT(expr, ...) VDO_ASSERT(expr, __VA_ARGS__)
+#define ASSERT_LOG_ONLY(expr, ...) __VDO_ASSERT(expr, __VA_ARGS__)
 
-#define __UDS_ASSERT(expr, ...) \
-	(likely(expr) ? UDS_SUCCESS \
+#define __VDO_ASSERT(expr, ...) \
+	(likely(expr) ? VDO_SUCCESS \
 		      : uds_assertion_failed(STRINGIFY(expr), __FILE__, __LINE__, __VA_ARGS__))
 
 /* Log an assertion failure message. */
...
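The renamed macros keep the old contract: __VDO_ASSERT() evaluates to a status code, VDO_ASSERT() routes that status through the __must_check wrapper so callers cannot silently drop it, and VDO_ASSERT_LOG_ONLY() logs a failure without obliging the caller to consume a result. A minimal sketch of the two contracts follows; the function and its "ready" flag are hypothetical, not part of this commit:

```c
/* Hypothetical caller showing how the two macros are meant to be used. */
static int sketch_check_ready(bool ready)
{
	/* VDO_ASSERT() is __must_check: the returned status must be consumed. */
	int result = VDO_ASSERT(ready, "device must be ready");

	if (result != VDO_SUCCESS)
		return result;

	/* VDO_ASSERT_LOG_ONLY() merely logs on failure; no status to check. */
	VDO_ASSERT_LOG_ONLY(ready, "device is still ready");
	return VDO_SUCCESS;
}
```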
@@ -80,13 +80,13 @@ static inline void set_pbn_lock_type(struct pbn_lock *lock, enum pbn_lock_type t
  */
 void vdo_downgrade_pbn_write_lock(struct pbn_lock *lock, bool compressed_write)
 {
-	ASSERT_LOG_ONLY(!vdo_is_pbn_read_lock(lock),
-			"PBN lock must not already have been downgraded");
-	ASSERT_LOG_ONLY(!has_lock_type(lock, VIO_BLOCK_MAP_WRITE_LOCK),
-			"must not downgrade block map write locks");
-	ASSERT_LOG_ONLY(lock->holder_count == 1,
-			"PBN write lock should have one holder but has %u",
-			lock->holder_count);
+	VDO_ASSERT_LOG_ONLY(!vdo_is_pbn_read_lock(lock),
+			    "PBN lock must not already have been downgraded");
+	VDO_ASSERT_LOG_ONLY(!has_lock_type(lock, VIO_BLOCK_MAP_WRITE_LOCK),
+			    "must not downgrade block map write locks");
+	VDO_ASSERT_LOG_ONLY(lock->holder_count == 1,
+			    "PBN write lock should have one holder but has %u",
+			    lock->holder_count);
 	/*
 	 * data_vio write locks are downgraded in place--the writer retains the hold on the lock.
 	 * If this was a compressed write, the holder has not yet journaled its own inc ref,
@@ -128,8 +128,8 @@ bool vdo_claim_pbn_lock_increment(struct pbn_lock *lock)
  */
 void vdo_assign_pbn_lock_provisional_reference(struct pbn_lock *lock)
 {
-	ASSERT_LOG_ONLY(!lock->has_provisional_reference,
-			"lock does not have a provisional reference");
+	VDO_ASSERT_LOG_ONLY(!lock->has_provisional_reference,
+			    "lock does not have a provisional reference");
 
 	lock->has_provisional_reference = true;
 }
@@ -221,7 +221,7 @@ static void return_pbn_lock_to_pool(struct pbn_lock_pool *pool, struct pbn_lock
 
 	INIT_LIST_HEAD(&idle->entry);
 	list_add_tail(&idle->entry, &pool->idle_list);
 
-	ASSERT_LOG_ONLY(pool->borrowed > 0, "shouldn't return more than borrowed");
+	VDO_ASSERT_LOG_ONLY(pool->borrowed > 0, "shouldn't return more than borrowed");
 	pool->borrowed -= 1;
 }
@@ -267,9 +267,9 @@ static void free_pbn_lock_pool(struct pbn_lock_pool *pool)
 	if (pool == NULL)
 		return;
 
-	ASSERT_LOG_ONLY(pool->borrowed == 0,
-			"All PBN locks must be returned to the pool before it is freed, but %zu locks are still on loan",
-			pool->borrowed);
+	VDO_ASSERT_LOG_ONLY(pool->borrowed == 0,
+			    "All PBN locks must be returned to the pool before it is freed, but %zu locks are still on loan",
+			    pool->borrowed);
 
 	vdo_free(pool);
 }
@@ -298,8 +298,8 @@ static int __must_check borrow_pbn_lock_from_pool(struct pbn_lock_pool *pool,
 				  "no free PBN locks left to borrow");
 
 	pool->borrowed += 1;
-	result = ASSERT(!list_empty(&pool->idle_list),
-			"idle list should not be empty if pool not at capacity");
+	result = VDO_ASSERT(!list_empty(&pool->idle_list),
+			    "idle list should not be empty if pool not at capacity");
 	if (result != VDO_SUCCESS)
 		return result;
 
@@ -447,7 +447,7 @@ int vdo_attempt_physical_zone_pbn_lock(struct physical_zone *zone,
 
 	result = borrow_pbn_lock_from_pool(zone->lock_pool, type, &new_lock);
 	if (result != VDO_SUCCESS) {
-		ASSERT_LOG_ONLY(false, "must always be able to borrow a PBN lock");
+		VDO_ASSERT_LOG_ONLY(false, "must always be able to borrow a PBN lock");
 		return result;
 	}
 
@@ -461,8 +461,8 @@ int vdo_attempt_physical_zone_pbn_lock(struct physical_zone *zone,
 	if (lock != NULL) {
 		/* The lock is already held, so we don't need the borrowed one. */
 		return_pbn_lock_to_pool(zone->lock_pool, vdo_forget(new_lock));
-		result = ASSERT(lock->holder_count > 0, "physical block %llu lock held",
-				(unsigned long long) pbn);
+		result = VDO_ASSERT(lock->holder_count > 0, "physical block %llu lock held",
+				    (unsigned long long) pbn);
 		if (result != VDO_SUCCESS)
 			return result;
 		*lock_ptr = lock;
@@ -485,8 +485,8 @@ static int allocate_and_lock_block(struct allocation *allocation)
 	int result;
 	struct pbn_lock *lock;
 
-	ASSERT_LOG_ONLY(allocation->lock == NULL,
-			"must not allocate a block while already holding a lock on one");
+	VDO_ASSERT_LOG_ONLY(allocation->lock == NULL,
+			    "must not allocate a block while already holding a lock on one");
 
 	result = vdo_allocate_block(allocation->zone->allocator, &allocation->pbn);
 	if (result != VDO_SUCCESS)
@@ -617,8 +617,8 @@ void vdo_release_physical_zone_pbn_lock(struct physical_zone *zone,
 	if (lock == NULL)
 		return;
 
-	ASSERT_LOG_ONLY(lock->holder_count > 0,
-			"should not be releasing a lock that is not held");
+	VDO_ASSERT_LOG_ONLY(lock->holder_count > 0,
+			    "should not be releasing a lock that is not held");
 
 	lock->holder_count -= 1;
 	if (lock->holder_count > 0) {
@@ -627,8 +627,8 @@ void vdo_release_physical_zone_pbn_lock(struct physical_zone *zone,
 	}
 
 	holder = vdo_int_map_remove(zone->pbn_operations, locked_pbn);
-	ASSERT_LOG_ONLY((lock == holder), "physical block lock mismatch for block %llu",
-			(unsigned long long) locked_pbn);
+	VDO_ASSERT_LOG_ONLY((lock == holder), "physical block lock mismatch for block %llu",
+			    (unsigned long long) locked_pbn);
 
 	release_pbn_lock_provisional_reference(lock, locked_pbn, zone->allocator);
 	return_pbn_lock_to_pool(zone->lock_pool, lock);
...
@@ -127,8 +127,8 @@ void vdo_reset_priority_table(struct priority_table *table)
 void vdo_priority_table_enqueue(struct priority_table *table, unsigned int priority,
 				struct list_head *entry)
 {
-	ASSERT_LOG_ONLY((priority <= table->max_priority),
-			"entry priority must be valid for the table");
+	VDO_ASSERT_LOG_ONLY((priority <= table->max_priority),
+			    "entry priority must be valid for the table");
 
 	/* Append the entry to the queue in the specified bucket. */
 	list_move_tail(entry, &table->buckets[priority].queue);
...
@@ -119,8 +119,8 @@ static bool is_journal_zone_locked(struct recovery_journal *journal,
 	/* Pairs with barrier in vdo_release_journal_entry_lock() */
 	smp_rmb();
 
-	ASSERT_LOG_ONLY((decrements <= journal_value),
-			"journal zone lock counter must not underflow");
+	VDO_ASSERT_LOG_ONLY((decrements <= journal_value),
+			    "journal zone lock counter must not underflow");
 
 	return (journal_value != decrements);
 }
@@ -150,8 +150,8 @@ void vdo_release_recovery_journal_block_reference(struct recovery_journal *journ
 	lock_number = vdo_get_recovery_journal_block_number(journal, sequence_number);
 	current_value = get_counter(journal, lock_number, zone_type, zone_id);
 
-	ASSERT_LOG_ONLY((*current_value >= 1),
-			"decrement of lock counter must not underflow");
+	VDO_ASSERT_LOG_ONLY((*current_value >= 1),
+			    "decrement of lock counter must not underflow");
 	*current_value -= 1;
 
 	if (zone_type == VDO_ZONE_TYPE_JOURNAL) {
@@ -254,8 +254,8 @@ static inline bool __must_check is_block_full(const struct recovery_journal_bloc
 static void assert_on_journal_thread(struct recovery_journal *journal,
 				     const char *function_name)
 {
-	ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == journal->thread_id),
-			"%s() called on journal thread", function_name);
+	VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == journal->thread_id),
+			    "%s() called on journal thread", function_name);
 }
 
 /**
@@ -353,14 +353,14 @@ static void check_for_drain_complete(struct recovery_journal *journal)
 
 	if (vdo_is_state_saving(&journal->state)) {
 		if (journal->active_block != NULL) {
-			ASSERT_LOG_ONLY(((result == VDO_READ_ONLY) ||
-					 !is_block_dirty(journal->active_block)),
-					"journal being saved has clean active block");
+			VDO_ASSERT_LOG_ONLY(((result == VDO_READ_ONLY) ||
+					     !is_block_dirty(journal->active_block)),
+					    "journal being saved has clean active block");
 			recycle_journal_block(journal->active_block);
 		}
 
-		ASSERT_LOG_ONLY(list_empty(&journal->active_tail_blocks),
-				"all blocks in a journal being saved must be inactive");
+		VDO_ASSERT_LOG_ONLY(list_empty(&journal->active_tail_blocks),
+				    "all blocks in a journal being saved must be inactive");
 	}
 
 	vdo_finish_draining_with_result(&journal->state, result);
@@ -800,8 +800,8 @@ void vdo_free_recovery_journal(struct recovery_journal *journal)
 	 * requires opening before use.
 	 */
 	if (!vdo_is_state_quiescent(&journal->state)) {
-		ASSERT_LOG_ONLY(list_empty(&journal->active_tail_blocks),
-				"journal being freed has no active tail blocks");
+		VDO_ASSERT_LOG_ONLY(list_empty(&journal->active_tail_blocks),
+				    "journal being freed has no active tail blocks");
 	} else if (!vdo_is_state_saved(&journal->state) &&
 		   !list_empty(&journal->active_tail_blocks)) {
 		uds_log_warning("journal being freed has uncommitted entries");
@@ -989,8 +989,8 @@ static void initialize_lock_count(struct recovery_journal *journal)
 		atomic_t *decrement_counter = get_decrement_counter(journal, lock_number);
 
 		journal_value = get_counter(journal, lock_number, VDO_ZONE_TYPE_JOURNAL, 0);
-		ASSERT_LOG_ONLY((*journal_value == atomic_read(decrement_counter)),
-				"count to be initialized not in use");
+		VDO_ASSERT_LOG_ONLY((*journal_value == atomic_read(decrement_counter)),
+				    "count to be initialized not in use");
 		*journal_value = journal->entries_per_block + 1;
 		atomic_set(decrement_counter, 0);
 	}
@@ -1175,13 +1175,13 @@ static void continue_committed_waiter(struct vdo_waiter *waiter, void *context)
 	int result = (is_read_only(journal) ? VDO_READ_ONLY : VDO_SUCCESS);
 	bool has_decrement;
 
-	ASSERT_LOG_ONLY(vdo_before_journal_point(&journal->commit_point,
-						 &data_vio->recovery_journal_point),
-			"DataVIOs released from recovery journal in order. Recovery journal point is (%llu, %u), but commit waiter point is (%llu, %u)",
-			(unsigned long long) journal->commit_point.sequence_number,
-			journal->commit_point.entry_count,
-			(unsigned long long) data_vio->recovery_journal_point.sequence_number,
-			data_vio->recovery_journal_point.entry_count);
+	VDO_ASSERT_LOG_ONLY(vdo_before_journal_point(&journal->commit_point,
+						     &data_vio->recovery_journal_point),
+			    "DataVIOs released from recovery journal in order. Recovery journal point is (%llu, %u), but commit waiter point is (%llu, %u)",
+			    (unsigned long long) journal->commit_point.sequence_number,
+			    journal->commit_point.entry_count,
+			    (unsigned long long) data_vio->recovery_journal_point.sequence_number,
+			    data_vio->recovery_journal_point.entry_count);
 
 	journal->commit_point = data_vio->recovery_journal_point;
 	data_vio->last_async_operation = VIO_ASYNC_OP_UPDATE_REFERENCE_COUNTS;
@@ -1281,8 +1281,8 @@ static void complete_write(struct vdo_completion *completion)
 	journal->last_write_acknowledged = block->sequence_number;
 
 	last_active_block = get_journal_block(&journal->active_tail_blocks);
-	ASSERT_LOG_ONLY((block->sequence_number >= last_active_block->sequence_number),
-			"completed journal write is still active");
+	VDO_ASSERT_LOG_ONLY((block->sequence_number >= last_active_block->sequence_number),
+			    "completed journal write is still active");
 
 	notify_commit_waiters(journal);
@@ -1456,8 +1456,8 @@ void vdo_add_recovery_journal_entry(struct recovery_journal *journal,
 		return;
 	}
 
-	ASSERT_LOG_ONLY(data_vio->recovery_sequence_number == 0,
-			"journal lock not held for new entry");
+	VDO_ASSERT_LOG_ONLY(data_vio->recovery_sequence_number == 0,
+			    "journal lock not held for new entry");
 
 	vdo_advance_journal_point(&journal->append_point, journal->entries_per_block);
 	vdo_waitq_enqueue_waiter(&journal->entry_waiters, &data_vio->waiter);
@@ -1564,13 +1564,13 @@ void vdo_acquire_recovery_journal_block_reference(struct recovery_journal *journ
 	if (sequence_number == 0)
 		return;
 
-	ASSERT_LOG_ONLY((zone_type != VDO_ZONE_TYPE_JOURNAL),
-			"invalid lock count increment from journal zone");
+	VDO_ASSERT_LOG_ONLY((zone_type != VDO_ZONE_TYPE_JOURNAL),
+			    "invalid lock count increment from journal zone");
 
 	lock_number = vdo_get_recovery_journal_block_number(journal, sequence_number);
 	current_value = get_counter(journal, lock_number, zone_type, zone_id);
-	ASSERT_LOG_ONLY(*current_value < U16_MAX,
-			"increment of lock counter must not overflow");
+	VDO_ASSERT_LOG_ONLY(*current_value < U16_MAX,
+			    "increment of lock counter must not overflow");
 
 	if (*current_value == 0) {
 		/*
...
@@ -976,8 +976,8 @@ find_entry_starting_next_page(struct repair_completion *repair,
 	if (needs_sort) {
 		struct numbered_block_mapping *just_sorted_entry =
 			sort_next_heap_element(repair);
-		ASSERT_LOG_ONLY(just_sorted_entry < current_entry,
-				"heap is returning elements in an unexpected order");
+		VDO_ASSERT_LOG_ONLY(just_sorted_entry < current_entry,
+				    "heap is returning elements in an unexpected order");
 	}
 
 	current_entry--;
@@ -1129,8 +1129,8 @@ static void recover_block_map(struct vdo_completion *completion)
 
 	repair->current_entry = &repair->entries[repair->block_map_entry_count - 1];
 	first_sorted_entry = sort_next_heap_element(repair);
-	ASSERT_LOG_ONLY(first_sorted_entry == repair->current_entry,
-			"heap is returning elements in an unexpected order");
+	VDO_ASSERT_LOG_ONLY(first_sorted_entry == repair->current_entry,
+			    "heap is returning elements in an unexpected order");
 
 	/* Prevent any page from being processed until all pages have been launched. */
 	repair->launching = true;
@@ -1489,8 +1489,8 @@ static int extract_new_mappings(struct repair_completion *repair)
 		repair->block_map_entry_count++;
 	}
 
-	result = ASSERT((repair->block_map_entry_count <= repair->entry_count),
-			"approximate entry count is an upper bound");
+	result = VDO_ASSERT((repair->block_map_entry_count <= repair->entry_count),
+			    "approximate entry count is an upper bound");
 	if (result != VDO_SUCCESS)
 		vdo_enter_read_only_mode(vdo, result);
...
This diff is collapsed.
@@ -44,7 +44,7 @@ void vdo_register_thread(struct thread_registry *registry,
 	list_add_tail_rcu(&new_thread->links, &registry->links);
 	spin_unlock(&registry->lock);
 
-	ASSERT_LOG_ONLY(!found_it, "new thread not already in registry");
+	VDO_ASSERT_LOG_ONLY(!found_it, "new thread not already in registry");
 	if (found_it) {
 		/* Ensure no RCU iterators see it before re-initializing. */
 		synchronize_rcu();
@@ -67,7 +67,7 @@ void vdo_unregister_thread(struct thread_registry *registry)
 	}
 	spin_unlock(&registry->lock);
 
-	ASSERT_LOG_ONLY(found_it, "thread found in registry");
+	VDO_ASSERT_LOG_ONLY(found_it, "thread found in registry");
 	if (found_it) {
 		/* Ensure no RCU iterators see it before re-initializing. */
 		synchronize_rcu();
...
@@ -425,9 +425,9 @@ int vdo_make_thread(struct vdo *vdo, thread_id_t thread_id,
 		type = &default_queue_type;
 
 	if (thread->queue != NULL) {
-		return ASSERT(vdo_work_queue_type_is(thread->queue, type),
-			      "already constructed vdo thread %u is of the correct type",
-			      thread_id);
+		return VDO_ASSERT(vdo_work_queue_type_is(thread->queue, type),
+				  "already constructed vdo thread %u is of the correct type",
+				  thread_id);
 	}
 
 	thread->vdo = vdo;
@@ -448,8 +448,8 @@ static int register_vdo(struct vdo *vdo)
 	int result;
 
 	write_lock(&registry.lock);
-	result = ASSERT(filter_vdos_locked(vdo_is_equal, vdo) == NULL,
-			"VDO not already registered");
+	result = VDO_ASSERT(filter_vdos_locked(vdo_is_equal, vdo) == NULL,
+			    "VDO not already registered");
 	if (result == VDO_SUCCESS) {
 		INIT_LIST_HEAD(&vdo->registration);
 		list_add_tail(&vdo->registration, &registry.links);
...@@ -1050,8 +1050,8 @@ int vdo_register_read_only_listener(struct vdo *vdo, void *listener, ...@@ -1050,8 +1050,8 @@ int vdo_register_read_only_listener(struct vdo *vdo, void *listener,
struct read_only_listener *read_only_listener; struct read_only_listener *read_only_listener;
int result; int result;
result = ASSERT(thread_id != vdo->thread_config.dedupe_thread, result = VDO_ASSERT(thread_id != vdo->thread_config.dedupe_thread,
"read only listener not registered on dedupe thread"); "read only listener not registered on dedupe thread");
if (result != VDO_SUCCESS) if (result != VDO_SUCCESS)
return result; return result;
...@@ -1704,8 +1704,8 @@ void vdo_dump_status(const struct vdo *vdo) ...@@ -1704,8 +1704,8 @@ void vdo_dump_status(const struct vdo *vdo)
*/ */
void vdo_assert_on_admin_thread(const struct vdo *vdo, const char *name) void vdo_assert_on_admin_thread(const struct vdo *vdo, const char *name)
{ {
ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == vdo->thread_config.admin_thread), VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == vdo->thread_config.admin_thread),
"%s called on admin thread", name); "%s called on admin thread", name);
} }
/** /**
...@@ -1718,9 +1718,9 @@ void vdo_assert_on_admin_thread(const struct vdo *vdo, const char *name) ...@@ -1718,9 +1718,9 @@ void vdo_assert_on_admin_thread(const struct vdo *vdo, const char *name)
void vdo_assert_on_logical_zone_thread(const struct vdo *vdo, zone_count_t logical_zone, void vdo_assert_on_logical_zone_thread(const struct vdo *vdo, zone_count_t logical_zone,
const char *name) const char *name)
{ {
ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() ==
vdo->thread_config.logical_threads[logical_zone]), vdo->thread_config.logical_threads[logical_zone]),
"%s called on logical thread", name); "%s called on logical thread", name);
} }
/** /**
...@@ -1733,9 +1733,9 @@ void vdo_assert_on_logical_zone_thread(const struct vdo *vdo, zone_count_t logic ...@@ -1733,9 +1733,9 @@ void vdo_assert_on_logical_zone_thread(const struct vdo *vdo, zone_count_t logic
void vdo_assert_on_physical_zone_thread(const struct vdo *vdo, void vdo_assert_on_physical_zone_thread(const struct vdo *vdo,
zone_count_t physical_zone, const char *name) zone_count_t physical_zone, const char *name)
{ {
ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() ==
vdo->thread_config.physical_threads[physical_zone]), vdo->thread_config.physical_threads[physical_zone]),
"%s called on physical thread", name); "%s called on physical thread", name);
} }
/** /**
...@@ -1773,7 +1773,7 @@ int vdo_get_physical_zone(const struct vdo *vdo, physical_block_number_t pbn, ...@@ -1773,7 +1773,7 @@ int vdo_get_physical_zone(const struct vdo *vdo, physical_block_number_t pbn,
/* With the PBN already checked, we should always succeed in finding a slab. */ /* With the PBN already checked, we should always succeed in finding a slab. */
slab = vdo_get_slab(vdo->depot, pbn); slab = vdo_get_slab(vdo->depot, pbn);
result = ASSERT(slab != NULL, "vdo_get_slab must succeed on all valid PBNs"); result = VDO_ASSERT(slab != NULL, "vdo_get_slab must succeed on all valid PBNs");
if (result != VDO_SUCCESS) if (result != VDO_SUCCESS)
return result; return result;
......
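Because VDO_ASSERT() evaluates to VDO_SUCCESS or an error code, a precondition check can also serve directly as a function's return value, as vdo_make_thread() does above. A hypothetical minimal form of that idiom (check_zone() and MAX_ZONES are illustrative, not from this diff):

static int check_zone(zone_count_t zone)
{
	return VDO_ASSERT(zone < MAX_ZONES, "zone %u within bounds", zone);
}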
...@@ -82,14 +82,14 @@ int allocate_vio_components(struct vdo *vdo, enum vio_type vio_type, ...@@ -82,14 +82,14 @@ int allocate_vio_components(struct vdo *vdo, enum vio_type vio_type,
struct bio *bio; struct bio *bio;
int result; int result;
result = ASSERT(block_count <= MAX_BLOCKS_PER_VIO, result = VDO_ASSERT(block_count <= MAX_BLOCKS_PER_VIO,
"block count %u does not exceed maximum %u", block_count, "block count %u does not exceed maximum %u", block_count,
MAX_BLOCKS_PER_VIO); MAX_BLOCKS_PER_VIO);
if (result != VDO_SUCCESS) if (result != VDO_SUCCESS)
return result; return result;
result = ASSERT(((vio_type != VIO_TYPE_UNINITIALIZED) && (vio_type != VIO_TYPE_DATA)), result = VDO_ASSERT(((vio_type != VIO_TYPE_UNINITIALIZED) && (vio_type != VIO_TYPE_DATA)),
"%d is a metadata type", vio_type); "%d is a metadata type", vio_type);
if (result != VDO_SUCCESS) if (result != VDO_SUCCESS)
return result; return result;
...@@ -363,13 +363,13 @@ void free_vio_pool(struct vio_pool *pool) ...@@ -363,13 +363,13 @@ void free_vio_pool(struct vio_pool *pool)
return; return;
/* Remove all available vios from the object pool. */ /* Remove all available vios from the object pool. */
ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&pool->waiting), VDO_ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&pool->waiting),
"VIO pool must not have any waiters when being freed"); "VIO pool must not have any waiters when being freed");
ASSERT_LOG_ONLY((pool->busy_count == 0), VDO_ASSERT_LOG_ONLY((pool->busy_count == 0),
"VIO pool must not have %zu busy entries when being freed", "VIO pool must not have %zu busy entries when being freed",
pool->busy_count); pool->busy_count);
ASSERT_LOG_ONLY(list_empty(&pool->busy), VDO_ASSERT_LOG_ONLY(list_empty(&pool->busy),
"VIO pool must not have busy entries when being freed"); "VIO pool must not have busy entries when being freed");
list_for_each_entry_safe(pooled, tmp, &pool->available, pool_entry) { list_for_each_entry_safe(pooled, tmp, &pool->available, pool_entry) {
list_del(&pooled->pool_entry); list_del(&pooled->pool_entry);
...@@ -377,8 +377,8 @@ void free_vio_pool(struct vio_pool *pool) ...@@ -377,8 +377,8 @@ void free_vio_pool(struct vio_pool *pool)
pool->size--; pool->size--;
} }
ASSERT_LOG_ONLY(pool->size == 0, VDO_ASSERT_LOG_ONLY(pool->size == 0,
"VIO pool must not have missing entries when being freed"); "VIO pool must not have missing entries when being freed");
vdo_free(vdo_forget(pool->buffer)); vdo_free(vdo_forget(pool->buffer));
vdo_free(pool); vdo_free(pool);
...@@ -403,8 +403,8 @@ void acquire_vio_from_pool(struct vio_pool *pool, struct vdo_waiter *waiter) ...@@ -403,8 +403,8 @@ void acquire_vio_from_pool(struct vio_pool *pool, struct vdo_waiter *waiter)
{ {
struct pooled_vio *pooled; struct pooled_vio *pooled;
ASSERT_LOG_ONLY((pool->thread_id == vdo_get_callback_thread_id()), VDO_ASSERT_LOG_ONLY((pool->thread_id == vdo_get_callback_thread_id()),
"acquire from active vio_pool called from correct thread"); "acquire from active vio_pool called from correct thread");
if (list_empty(&pool->available)) { if (list_empty(&pool->available)) {
vdo_waitq_enqueue_waiter(&pool->waiting, waiter); vdo_waitq_enqueue_waiter(&pool->waiting, waiter);
...@@ -424,8 +424,8 @@ void acquire_vio_from_pool(struct vio_pool *pool, struct vdo_waiter *waiter) ...@@ -424,8 +424,8 @@ void acquire_vio_from_pool(struct vio_pool *pool, struct vdo_waiter *waiter)
*/ */
void return_vio_to_pool(struct vio_pool *pool, struct pooled_vio *vio) void return_vio_to_pool(struct vio_pool *pool, struct pooled_vio *vio)
{ {
ASSERT_LOG_ONLY((pool->thread_id == vdo_get_callback_thread_id()), VDO_ASSERT_LOG_ONLY((pool->thread_id == vdo_get_callback_thread_id()),
"vio pool entry returned on same thread as it was acquired"); "vio pool entry returned on same thread as it was acquired");
vio->vio.completion.error_handler = NULL; vio->vio.completion.error_handler = NULL;
vio->vio.completion.parent = NULL; vio->vio.completion.parent = NULL;
...@@ -465,8 +465,8 @@ void vdo_count_bios(struct atomic_bio_stats *bio_stats, struct bio *bio) ...@@ -465,8 +465,8 @@ void vdo_count_bios(struct atomic_bio_stats *bio_stats, struct bio *bio)
* shouldn't exist. * shouldn't exist.
*/ */
default: default:
ASSERT_LOG_ONLY(0, "Bio operation %d not a write, read, discard, or empty flush", VDO_ASSERT_LOG_ONLY(0, "Bio operation %d not a write, read, discard, or empty flush",
bio_op(bio)); bio_op(bio));
} }
if ((bio->bi_opf & REQ_PREFLUSH) != 0) if ((bio->bi_opf & REQ_PREFLUSH) != 0)
......
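The vdo_count_bios() hunk shows one more idiom: passing a constant false condition turns the log-only assertion into an unconditional "should not happen" report for an unexpected switch arm. A stripped-down illustration, with handle_op() and its operation values as hypothetical stand-ins:

static void handle_op(int op)
{
	switch (op) {
	case 0:		/* a known, handled operation */
		break;
	default:
		/* Always fires: 0 is never true, so every unexpected
		 * value is logged without crashing the kernel. */
		VDO_ASSERT_LOG_ONLY(0, "operation %d not recognized", op);
	}
}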
...@@ -67,10 +67,10 @@ static inline void assert_vio_in_bio_zone(struct vio *vio) ...@@ -67,10 +67,10 @@ static inline void assert_vio_in_bio_zone(struct vio *vio)
thread_id_t expected = get_vio_bio_zone_thread_id(vio); thread_id_t expected = get_vio_bio_zone_thread_id(vio);
thread_id_t thread_id = vdo_get_callback_thread_id(); thread_id_t thread_id = vdo_get_callback_thread_id();
ASSERT_LOG_ONLY((expected == thread_id), VDO_ASSERT_LOG_ONLY((expected == thread_id),
"vio I/O for physical block %llu on thread %u, should be on bio zone thread %u", "vio I/O for physical block %llu on thread %u, should be on bio zone thread %u",
(unsigned long long) pbn_from_vio_bio(vio->bio), thread_id, (unsigned long long) pbn_from_vio_bio(vio->bio), thread_id,
expected); expected);
} }
int vdo_create_bio(struct bio **bio_ptr); int vdo_create_bio(struct bio **bio_ptr);
......
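assert_vio_in_bio_zone() above is one instance of a thread-affinity check that recurs throughout this commit: compare the current callback thread against the thread that owns the object, and log any mismatch. The general shape, with assert_on_thread() as a hypothetical stand-in built from the functions visible in these hunks:

static inline void assert_on_thread(thread_id_t expected, const char *name)
{
	VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == expected),
			    "%s called on thread %u", name, expected);
}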