Commit 6a79248b authored by Mike Snitzer

dm vdo permassert: audit all of ASSERT to test for VDO_SUCCESS

Also rename ASSERT to VDO_ASSERT and ASSERT_LOG_ONLY to
VDO_ASSERT_LOG_ONLY.

But re-introduce ASSERT and ASSERT_LOG_ONLY as placeholders
for the benefit of dm-vdo/indexer.
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
Signed-off-by: Matthew Sakai <msakai@redhat.com>
parent a958c53a
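
For orientation, the conversions below all follow the same pattern: call sites that previously compared the result of ASSERT() against UDS_SUCCESS now call VDO_ASSERT() and compare against VDO_SUCCESS, and ASSERT_LOG_ONLY() becomes VDO_ASSERT_LOG_ONLY(). A minimal before/after sketch of that shape (the condition and message here are illustrative, not lines taken from this diff):

	/* Before: old macro name, old success constant. */
	result = ASSERT(count > 0, "count must be positive");
	if (result != UDS_SUCCESS)
		return result;

	/* After: renamed macro, result audited against VDO_SUCCESS. */
	result = VDO_ASSERT(count > 0, "count must be positive");
	if (result != VDO_SUCCESS)
		return result;
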
......@@ -177,7 +177,7 @@ static void apply_to_zone(struct vdo_completion *completion)
zone_count_t zone;
struct action_manager *manager = as_action_manager(completion);
ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == get_acting_zone_thread_id(manager)),
VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == get_acting_zone_thread_id(manager)),
"%s() called on acting zones's thread", __func__);
zone = manager->acting_zone++;
......@@ -357,7 +357,7 @@ bool vdo_schedule_operation_with_context(struct action_manager *manager,
{
struct action *current_action;
ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == manager->initiator_thread_id),
VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == manager->initiator_thread_id),
"action initiated from correct thread");
if (!manager->current_action->in_use) {
current_action = manager->current_action;
......
......@@ -246,7 +246,7 @@ static inline void assert_on_cache_thread(struct vdo_page_cache *cache,
{
thread_id_t thread_id = vdo_get_callback_thread_id();
ASSERT_LOG_ONLY((thread_id == cache->zone->thread_id),
VDO_ASSERT_LOG_ONLY((thread_id == cache->zone->thread_id),
"%s() must only be called on cache thread %d, not thread %d",
function_name, cache->zone->thread_id, thread_id);
}
......@@ -254,7 +254,7 @@ static inline void assert_on_cache_thread(struct vdo_page_cache *cache,
/** assert_io_allowed() - Assert that a page cache may issue I/O. */
static inline void assert_io_allowed(struct vdo_page_cache *cache)
{
ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&cache->zone->state),
VDO_ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&cache->zone->state),
"VDO page cache may issue I/O");
}
......@@ -287,9 +287,9 @@ static const char * __must_check get_page_state_name(enum vdo_page_buffer_state
BUILD_BUG_ON(ARRAY_SIZE(state_names) != PAGE_STATE_COUNT);
result = ASSERT(state < ARRAY_SIZE(state_names),
result = VDO_ASSERT(state < ARRAY_SIZE(state_names),
"Unknown page_state value %d", state);
if (result != UDS_SUCCESS)
if (result != VDO_SUCCESS)
return "[UNKNOWN PAGE STATE]";
return state_names[state];
......@@ -378,7 +378,7 @@ static int __must_check set_info_pbn(struct page_info *info, physical_block_numb
struct vdo_page_cache *cache = info->cache;
/* Either the new or the old page number must be NO_PAGE. */
int result = ASSERT((pbn == NO_PAGE) || (info->pbn == NO_PAGE),
int result = VDO_ASSERT((pbn == NO_PAGE) || (info->pbn == NO_PAGE),
"Must free a page before reusing it.");
if (result != VDO_SUCCESS)
return result;
......@@ -401,13 +401,13 @@ static int reset_page_info(struct page_info *info)
{
int result;
result = ASSERT(info->busy == 0, "VDO Page must not be busy");
if (result != UDS_SUCCESS)
result = VDO_ASSERT(info->busy == 0, "VDO Page must not be busy");
if (result != VDO_SUCCESS)
return result;
result = ASSERT(!vdo_waitq_has_waiters(&info->waiting),
result = VDO_ASSERT(!vdo_waitq_has_waiters(&info->waiting),
"VDO Page must not have waiters");
if (result != UDS_SUCCESS)
if (result != VDO_SUCCESS)
return result;
result = set_info_pbn(info, NO_PAGE);
......@@ -592,29 +592,29 @@ static int __must_check validate_completed_page(struct vdo_page_completion *comp
{
int result;
result = ASSERT(completion->ready, "VDO Page completion not ready");
if (result != UDS_SUCCESS)
result = VDO_ASSERT(completion->ready, "VDO Page completion not ready");
if (result != VDO_SUCCESS)
return result;
result = ASSERT(completion->info != NULL,
result = VDO_ASSERT(completion->info != NULL,
"VDO Page Completion must be complete");
if (result != UDS_SUCCESS)
if (result != VDO_SUCCESS)
return result;
result = ASSERT(completion->info->pbn == completion->pbn,
result = VDO_ASSERT(completion->info->pbn == completion->pbn,
"VDO Page Completion pbn must be consistent");
if (result != UDS_SUCCESS)
if (result != VDO_SUCCESS)
return result;
result = ASSERT(is_valid(completion->info),
result = VDO_ASSERT(is_valid(completion->info),
"VDO Page Completion page must be valid");
if (result != UDS_SUCCESS)
if (result != VDO_SUCCESS)
return result;
if (writable) {
result = ASSERT(completion->writable,
result = VDO_ASSERT(completion->writable,
"VDO Page Completion must be writable");
if (result != UDS_SUCCESS)
if (result != VDO_SUCCESS)
return result;
}
......@@ -776,7 +776,7 @@ static int __must_check launch_page_load(struct page_info *info,
if (result != VDO_SUCCESS)
return result;
result = ASSERT((info->busy == 0), "Page is not busy before loading.");
result = VDO_ASSERT((info->busy == 0), "Page is not busy before loading.");
if (result != VDO_SUCCESS)
return result;
......@@ -949,7 +949,7 @@ static void discard_a_page(struct vdo_page_cache *cache)
return;
}
ASSERT_LOG_ONLY(!is_in_flight(info),
VDO_ASSERT_LOG_ONLY(!is_in_flight(info),
"page selected for discard is not in flight");
cache->discard_count++;
......@@ -1153,7 +1153,7 @@ void vdo_release_page_completion(struct vdo_completion *completion)
discard_info = page_completion->info;
}
ASSERT_LOG_ONLY((page_completion->waiter.next_waiter == NULL),
VDO_ASSERT_LOG_ONLY((page_completion->waiter.next_waiter == NULL),
"Page being released after leaving all queues");
page_completion->info = NULL;
......@@ -1217,7 +1217,7 @@ void vdo_get_page(struct vdo_page_completion *page_completion,
struct page_info *info;
assert_on_cache_thread(cache, __func__);
ASSERT_LOG_ONLY((page_completion->waiter.next_waiter == NULL),
VDO_ASSERT_LOG_ONLY((page_completion->waiter.next_waiter == NULL),
"New page completion was not already on a wait queue");
*page_completion = (struct vdo_page_completion) {
......@@ -1265,7 +1265,7 @@ void vdo_get_page(struct vdo_page_completion *page_completion,
}
/* Something horrible has gone wrong. */
ASSERT_LOG_ONLY(false, "Info found in a usable state.");
VDO_ASSERT_LOG_ONLY(false, "Info found in a usable state.");
}
/* The page must be fetched. */
......@@ -1334,7 +1334,7 @@ int vdo_invalidate_page_cache(struct vdo_page_cache *cache)
/* Make sure we don't throw away any dirty pages. */
for (info = cache->infos; info < cache->infos + cache->page_count; info++) {
int result = ASSERT(!is_dirty(info), "cache must have no dirty pages");
int result = VDO_ASSERT(!is_dirty(info), "cache must have no dirty pages");
if (result != VDO_SUCCESS)
return result;
......@@ -1440,7 +1440,7 @@ static bool __must_check is_not_older(struct block_map_zone *zone, u8 a, u8 b)
{
int result;
result = ASSERT((in_cyclic_range(zone->oldest_generation, a, zone->generation, 1 << 8) &&
result = VDO_ASSERT((in_cyclic_range(zone->oldest_generation, a, zone->generation, 1 << 8) &&
in_cyclic_range(zone->oldest_generation, b, zone->generation, 1 << 8)),
"generation(s) %u, %u are out of range [%u, %u]",
a, b, zone->oldest_generation, zone->generation);
......@@ -1456,7 +1456,7 @@ static void release_generation(struct block_map_zone *zone, u8 generation)
{
int result;
result = ASSERT((zone->dirty_page_counts[generation] > 0),
result = VDO_ASSERT((zone->dirty_page_counts[generation] > 0),
"dirty page count underflow for generation %u", generation);
if (result != VDO_SUCCESS) {
enter_zone_read_only_mode(zone, result);
......@@ -1482,7 +1482,7 @@ static void set_generation(struct block_map_zone *zone, struct tree_page *page,
page->generation = new_generation;
new_count = ++zone->dirty_page_counts[new_generation];
result = ASSERT((new_count != 0), "dirty page count overflow for generation %u",
result = VDO_ASSERT((new_count != 0), "dirty page count overflow for generation %u",
new_generation);
if (result != VDO_SUCCESS) {
enter_zone_read_only_mode(zone, result);
......@@ -1698,13 +1698,13 @@ static void release_page_lock(struct data_vio *data_vio, char *what)
struct tree_lock *lock_holder;
struct tree_lock *lock = &data_vio->tree_lock;
ASSERT_LOG_ONLY(lock->locked,
VDO_ASSERT_LOG_ONLY(lock->locked,
"release of unlocked block map page %s for key %llu in tree %u",
what, (unsigned long long) lock->key, lock->root_index);
zone = data_vio->logical.zone->block_map_zone;
lock_holder = vdo_int_map_remove(zone->loading_pages, lock->key);
ASSERT_LOG_ONLY((lock_holder == lock),
VDO_ASSERT_LOG_ONLY((lock_holder == lock),
"block map page %s mismatch for key %llu in tree %u",
what, (unsigned long long) lock->key, lock->root_index);
lock->locked = false;
......@@ -2008,7 +2008,7 @@ static void write_expired_elements(struct block_map_zone *zone)
list_del_init(&page->entry);
result = ASSERT(!vdo_waiter_is_waiting(&page->waiter),
result = VDO_ASSERT(!vdo_waiter_is_waiting(&page->waiter),
"Newly expired page not already waiting to write");
if (result != VDO_SUCCESS) {
enter_zone_read_only_mode(zone, result);
......@@ -2867,8 +2867,8 @@ int vdo_decode_block_map(struct block_map_state_2_0 state, block_count_t logical
BUILD_BUG_ON(VDO_BLOCK_MAP_ENTRIES_PER_PAGE !=
((VDO_BLOCK_SIZE - sizeof(struct block_map_page)) /
sizeof(struct block_map_entry)));
result = ASSERT(cache_size > 0, "block map cache size is specified");
if (result != UDS_SUCCESS)
result = VDO_ASSERT(cache_size > 0, "block map cache size is specified");
if (result != VDO_SUCCESS)
return result;
result = vdo_allocate_extended(struct block_map,
......@@ -2937,7 +2937,7 @@ void vdo_initialize_block_map_from_journal(struct block_map *map,
for (z = 0; z < map->zone_count; z++) {
struct dirty_lists *dirty_lists = map->zones[z].dirty_lists;
ASSERT_LOG_ONLY(dirty_lists->next_period == 0, "current period not set");
VDO_ASSERT_LOG_ONLY(dirty_lists->next_period == 0, "current period not set");
dirty_lists->oldest_period = map->current_era_point;
dirty_lists->next_period = map->current_era_point + 1;
dirty_lists->offset = map->current_era_point % dirty_lists->maximum_age;
......@@ -2971,7 +2971,7 @@ static void initiate_drain(struct admin_state *state)
{
struct block_map_zone *zone = container_of(state, struct block_map_zone, state);
ASSERT_LOG_ONLY((zone->active_lookups == 0),
VDO_ASSERT_LOG_ONLY((zone->active_lookups == 0),
"%s() called with no active lookups", __func__);
if (!vdo_is_state_suspending(state)) {
......
......@@ -60,7 +60,7 @@ void vdo_initialize_completion(struct vdo_completion *completion,
static inline void assert_incomplete(struct vdo_completion *completion)
{
ASSERT_LOG_ONLY(!completion->complete, "completion is not complete");
VDO_ASSERT_LOG_ONLY(!completion->complete, "completion is not complete");
}
/**
......@@ -111,10 +111,10 @@ void vdo_enqueue_completion(struct vdo_completion *completion,
struct vdo *vdo = completion->vdo;
thread_id_t thread_id = completion->callback_thread_id;
if (ASSERT(thread_id < vdo->thread_config.thread_count,
if (VDO_ASSERT(thread_id < vdo->thread_config.thread_count,
"thread_id %u (completion type %d) is less than thread count %u",
thread_id, completion->type,
vdo->thread_config.thread_count) != UDS_SUCCESS)
vdo->thread_config.thread_count) != VDO_SUCCESS)
BUG();
completion->requeue = false;
......
......@@ -85,7 +85,7 @@ static inline void vdo_fail_completion(struct vdo_completion *completion, int re
static inline int vdo_assert_completion_type(struct vdo_completion *completion,
enum vdo_completion_type expected)
{
return ASSERT(expected == completion->type,
return VDO_ASSERT(expected == completion->type,
"completion type should be %u, not %u", expected,
completion->type);
}
......
......@@ -232,7 +232,7 @@ static bool check_for_drain_complete_locked(struct data_vio_pool *pool)
if (pool->limiter.busy > 0)
return false;
ASSERT_LOG_ONLY((pool->discard_limiter.busy == 0),
VDO_ASSERT_LOG_ONLY((pool->discard_limiter.busy == 0),
"no outstanding discard permits");
return (bio_list_empty(&pool->limiter.new_waiters) &&
......@@ -277,7 +277,7 @@ static void acknowledge_data_vio(struct data_vio *data_vio)
if (bio == NULL)
return;
ASSERT_LOG_ONLY((data_vio->remaining_discard <=
VDO_ASSERT_LOG_ONLY((data_vio->remaining_discard <=
(u32) (VDO_BLOCK_SIZE - data_vio->offset)),
"data_vio to acknowledge is not an incomplete discard");
......@@ -443,7 +443,7 @@ static void attempt_logical_block_lock(struct vdo_completion *completion)
return;
}
result = ASSERT(lock_holder->logical.locked, "logical block lock held");
result = VDO_ASSERT(lock_holder->logical.locked, "logical block lock held");
if (result != VDO_SUCCESS) {
continue_data_vio_with_error(data_vio, result);
return;
......@@ -627,7 +627,7 @@ static void update_limiter(struct limiter *limiter)
struct bio_list *waiters = &limiter->waiters;
data_vio_count_t available = limiter->limit - limiter->busy;
ASSERT_LOG_ONLY((limiter->release_count <= limiter->busy),
VDO_ASSERT_LOG_ONLY((limiter->release_count <= limiter->busy),
"Release count %u is not more than busy count %u",
limiter->release_count, limiter->busy);
......@@ -850,7 +850,7 @@ int make_data_vio_pool(struct vdo *vdo, data_vio_count_t pool_size,
if (result != VDO_SUCCESS)
return result;
ASSERT_LOG_ONLY((discard_limit <= pool_size),
VDO_ASSERT_LOG_ONLY((discard_limit <= pool_size),
"discard limit does not exceed pool size");
initialize_limiter(&pool->discard_limiter, pool, assign_discard_permit,
discard_limit);
......@@ -908,13 +908,13 @@ void free_data_vio_pool(struct data_vio_pool *pool)
BUG_ON(atomic_read(&pool->processing));
spin_lock(&pool->lock);
ASSERT_LOG_ONLY((pool->limiter.busy == 0),
VDO_ASSERT_LOG_ONLY((pool->limiter.busy == 0),
"data_vio pool must not have %u busy entries when being freed",
pool->limiter.busy);
ASSERT_LOG_ONLY((bio_list_empty(&pool->limiter.waiters) &&
VDO_ASSERT_LOG_ONLY((bio_list_empty(&pool->limiter.waiters) &&
bio_list_empty(&pool->limiter.new_waiters)),
"data_vio pool must not have threads waiting to read or write when being freed");
ASSERT_LOG_ONLY((bio_list_empty(&pool->discard_limiter.waiters) &&
VDO_ASSERT_LOG_ONLY((bio_list_empty(&pool->discard_limiter.waiters) &&
bio_list_empty(&pool->discard_limiter.new_waiters)),
"data_vio pool must not have threads waiting to discard when being freed");
spin_unlock(&pool->lock);
......@@ -961,7 +961,7 @@ void vdo_launch_bio(struct data_vio_pool *pool, struct bio *bio)
{
struct data_vio *data_vio;
ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&pool->state),
VDO_ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&pool->state),
"data_vio_pool not quiescent on acquire");
bio->bi_private = (void *) jiffies;
......@@ -998,7 +998,7 @@ static void initiate_drain(struct admin_state *state)
static void assert_on_vdo_cpu_thread(const struct vdo *vdo, const char *name)
{
ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == vdo->thread_config.cpu_thread),
VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == vdo->thread_config.cpu_thread),
"%s called on cpu thread", name);
}
......@@ -1173,7 +1173,7 @@ static void release_lock(struct data_vio *data_vio, struct lbn_lock *lock)
/* The lock is not locked, so it had better not be registered in the lock map. */
struct data_vio *lock_holder = vdo_int_map_get(lock_map, lock->lbn);
ASSERT_LOG_ONLY((data_vio != lock_holder),
VDO_ASSERT_LOG_ONLY((data_vio != lock_holder),
"no logical block lock held for block %llu",
(unsigned long long) lock->lbn);
return;
......@@ -1181,7 +1181,7 @@ static void release_lock(struct data_vio *data_vio, struct lbn_lock *lock)
/* Release the lock by removing the lock from the map. */
lock_holder = vdo_int_map_remove(lock_map, lock->lbn);
ASSERT_LOG_ONLY((data_vio == lock_holder),
VDO_ASSERT_LOG_ONLY((data_vio == lock_holder),
"logical block lock mismatch for block %llu",
(unsigned long long) lock->lbn);
lock->locked = false;
......@@ -1193,7 +1193,7 @@ static void transfer_lock(struct data_vio *data_vio, struct lbn_lock *lock)
struct data_vio *lock_holder, *next_lock_holder;
int result;
ASSERT_LOG_ONLY(lock->locked, "lbn_lock with waiters is not locked");
VDO_ASSERT_LOG_ONLY(lock->locked, "lbn_lock with waiters is not locked");
/* Another data_vio is waiting for the lock, transfer it in a single lock map operation. */
next_lock_holder =
......@@ -1210,7 +1210,7 @@ static void transfer_lock(struct data_vio *data_vio, struct lbn_lock *lock)
return;
}
ASSERT_LOG_ONLY((lock_holder == data_vio),
VDO_ASSERT_LOG_ONLY((lock_holder == data_vio),
"logical block lock mismatch for block %llu",
(unsigned long long) lock->lbn);
lock->locked = false;
......@@ -1275,9 +1275,9 @@ static void finish_cleanup(struct data_vio *data_vio)
{
struct vdo_completion *completion = &data_vio->vio.completion;
ASSERT_LOG_ONLY(data_vio->allocation.lock == NULL,
VDO_ASSERT_LOG_ONLY(data_vio->allocation.lock == NULL,
"complete data_vio has no allocation lock");
ASSERT_LOG_ONLY(data_vio->hash_lock == NULL,
VDO_ASSERT_LOG_ONLY(data_vio->hash_lock == NULL,
"complete data_vio has no hash lock");
if ((data_vio->remaining_discard <= VDO_BLOCK_SIZE) ||
(completion->result != VDO_SUCCESS)) {
......@@ -1404,7 +1404,7 @@ void data_vio_allocate_data_block(struct data_vio *data_vio,
{
struct allocation *allocation = &data_vio->allocation;
ASSERT_LOG_ONLY((allocation->pbn == VDO_ZERO_BLOCK),
VDO_ASSERT_LOG_ONLY((allocation->pbn == VDO_ZERO_BLOCK),
"data_vio does not have an allocation");
allocation->write_lock_type = write_lock_type;
allocation->zone = vdo_get_next_allocation_zone(data_vio->logical.zone);
......@@ -1796,10 +1796,10 @@ static void compress_data_vio(struct vdo_completion *completion)
*/
void launch_compress_data_vio(struct data_vio *data_vio)
{
ASSERT_LOG_ONLY(!data_vio->is_duplicate, "compressing a non-duplicate block");
ASSERT_LOG_ONLY(data_vio->hash_lock != NULL,
VDO_ASSERT_LOG_ONLY(!data_vio->is_duplicate, "compressing a non-duplicate block");
VDO_ASSERT_LOG_ONLY(data_vio->hash_lock != NULL,
"data_vio to compress has a hash_lock");
ASSERT_LOG_ONLY(data_vio_has_allocation(data_vio),
VDO_ASSERT_LOG_ONLY(data_vio_has_allocation(data_vio),
"data_vio to compress has an allocation");
/*
......@@ -1841,7 +1841,7 @@ static void hash_data_vio(struct vdo_completion *completion)
struct data_vio *data_vio = as_data_vio(completion);
assert_data_vio_on_cpu_thread(data_vio);
ASSERT_LOG_ONLY(!data_vio->is_zero, "zero blocks should not be hashed");
VDO_ASSERT_LOG_ONLY(!data_vio->is_zero, "zero blocks should not be hashed");
murmurhash3_128(data_vio->vio.data, VDO_BLOCK_SIZE, 0x62ea60be,
&data_vio->record_name);
......@@ -1856,7 +1856,7 @@ static void hash_data_vio(struct vdo_completion *completion)
static void prepare_for_dedupe(struct data_vio *data_vio)
{
/* We don't care what thread we are on. */
ASSERT_LOG_ONLY(!data_vio->is_zero, "must not prepare to dedupe zero blocks");
VDO_ASSERT_LOG_ONLY(!data_vio->is_zero, "must not prepare to dedupe zero blocks");
/*
* Before we can dedupe, we need to know the record name, so the first
......@@ -1929,10 +1929,10 @@ static void acknowledge_write_callback(struct vdo_completion *completion)
struct data_vio *data_vio = as_data_vio(completion);
struct vdo *vdo = completion->vdo;
ASSERT_LOG_ONLY((!vdo_uses_bio_ack_queue(vdo) ||
VDO_ASSERT_LOG_ONLY((!vdo_uses_bio_ack_queue(vdo) ||
(vdo_get_callback_thread_id() == vdo->thread_config.bio_ack_thread)),
"%s() called on bio ack queue", __func__);
ASSERT_LOG_ONLY(data_vio_has_flush_generation_lock(data_vio),
VDO_ASSERT_LOG_ONLY(data_vio_has_flush_generation_lock(data_vio),
"write VIO to be acknowledged has a flush generation lock");
acknowledge_data_vio(data_vio);
if (data_vio->new_mapped.pbn == VDO_ZERO_BLOCK) {
......@@ -1998,7 +1998,7 @@ static void handle_allocation_error(struct vdo_completion *completion)
static int assert_is_discard(struct data_vio *data_vio)
{
int result = ASSERT(data_vio->is_discard,
int result = VDO_ASSERT(data_vio->is_discard,
"data_vio with no block map page is a discard");
return ((result == VDO_SUCCESS) ? result : VDO_READ_ONLY);
......
......@@ -280,7 +280,7 @@ struct data_vio {
static inline struct data_vio *vio_as_data_vio(struct vio *vio)
{
ASSERT_LOG_ONLY((vio->type == VIO_TYPE_DATA), "vio is a data_vio");
VDO_ASSERT_LOG_ONLY((vio->type == VIO_TYPE_DATA), "vio is a data_vio");
return container_of(vio, struct data_vio, vio);
}
......@@ -374,7 +374,7 @@ static inline void assert_data_vio_in_hash_zone(struct data_vio *data_vio)
* It's odd to use the LBN, but converting the record name to hex is a bit clunky for an
* inline, and the LBN better than nothing as an identifier.
*/
ASSERT_LOG_ONLY((expected == thread_id),
VDO_ASSERT_LOG_ONLY((expected == thread_id),
"data_vio for logical block %llu on thread %u, should be on hash zone thread %u",
(unsigned long long) data_vio->logical.lbn, thread_id, expected);
}
......@@ -402,7 +402,7 @@ static inline void assert_data_vio_in_logical_zone(struct data_vio *data_vio)
thread_id_t expected = data_vio->logical.zone->thread_id;
thread_id_t thread_id = vdo_get_callback_thread_id();
ASSERT_LOG_ONLY((expected == thread_id),
VDO_ASSERT_LOG_ONLY((expected == thread_id),
"data_vio for logical block %llu on thread %u, should be on thread %u",
(unsigned long long) data_vio->logical.lbn, thread_id, expected);
}
......@@ -430,7 +430,7 @@ static inline void assert_data_vio_in_allocated_zone(struct data_vio *data_vio)
thread_id_t expected = data_vio->allocation.zone->thread_id;
thread_id_t thread_id = vdo_get_callback_thread_id();
ASSERT_LOG_ONLY((expected == thread_id),
VDO_ASSERT_LOG_ONLY((expected == thread_id),
"struct data_vio for allocated physical block %llu on thread %u, should be on thread %u",
(unsigned long long) data_vio->allocation.pbn, thread_id,
expected);
......@@ -460,7 +460,7 @@ static inline void assert_data_vio_in_duplicate_zone(struct data_vio *data_vio)
thread_id_t expected = data_vio->duplicate.zone->thread_id;
thread_id_t thread_id = vdo_get_callback_thread_id();
ASSERT_LOG_ONLY((expected == thread_id),
VDO_ASSERT_LOG_ONLY((expected == thread_id),
"data_vio for duplicate physical block %llu on thread %u, should be on thread %u",
(unsigned long long) data_vio->duplicate.pbn, thread_id,
expected);
......@@ -490,7 +490,7 @@ static inline void assert_data_vio_in_mapped_zone(struct data_vio *data_vio)
thread_id_t expected = data_vio->mapped.zone->thread_id;
thread_id_t thread_id = vdo_get_callback_thread_id();
ASSERT_LOG_ONLY((expected == thread_id),
VDO_ASSERT_LOG_ONLY((expected == thread_id),
"data_vio for mapped physical block %llu on thread %u, should be on thread %u",
(unsigned long long) data_vio->mapped.pbn, thread_id, expected);
}
......@@ -507,7 +507,7 @@ static inline void assert_data_vio_in_new_mapped_zone(struct data_vio *data_vio)
thread_id_t expected = data_vio->new_mapped.zone->thread_id;
thread_id_t thread_id = vdo_get_callback_thread_id();
ASSERT_LOG_ONLY((expected == thread_id),
VDO_ASSERT_LOG_ONLY((expected == thread_id),
"data_vio for new_mapped physical block %llu on thread %u, should be on thread %u",
(unsigned long long) data_vio->new_mapped.pbn, thread_id,
expected);
......@@ -525,7 +525,7 @@ static inline void assert_data_vio_in_journal_zone(struct data_vio *data_vio)
thread_id_t journal_thread = vdo_from_data_vio(data_vio)->thread_config.journal_thread;
thread_id_t thread_id = vdo_get_callback_thread_id();
ASSERT_LOG_ONLY((journal_thread == thread_id),
VDO_ASSERT_LOG_ONLY((journal_thread == thread_id),
"data_vio for logical block %llu on thread %u, should be on journal thread %u",
(unsigned long long) data_vio->logical.lbn, thread_id,
journal_thread);
......@@ -555,7 +555,7 @@ static inline void assert_data_vio_in_packer_zone(struct data_vio *data_vio)
thread_id_t packer_thread = vdo_from_data_vio(data_vio)->thread_config.packer_thread;
thread_id_t thread_id = vdo_get_callback_thread_id();
ASSERT_LOG_ONLY((packer_thread == thread_id),
VDO_ASSERT_LOG_ONLY((packer_thread == thread_id),
"data_vio for logical block %llu on thread %u, should be on packer thread %u",
(unsigned long long) data_vio->logical.lbn, thread_id,
packer_thread);
......@@ -585,7 +585,7 @@ static inline void assert_data_vio_on_cpu_thread(struct data_vio *data_vio)
thread_id_t cpu_thread = vdo_from_data_vio(data_vio)->thread_config.cpu_thread;
thread_id_t thread_id = vdo_get_callback_thread_id();
ASSERT_LOG_ONLY((cpu_thread == thread_id),
VDO_ASSERT_LOG_ONLY((cpu_thread == thread_id),
"data_vio for logical block %llu on thread %u, should be on cpu thread %u",
(unsigned long long) data_vio->logical.lbn, thread_id,
cpu_thread);
......
......@@ -904,7 +904,7 @@ static int vdo_map_bio(struct dm_target *ti, struct bio *bio)
struct vdo_work_queue *current_work_queue;
const struct admin_state_code *code = vdo_get_admin_state_code(&vdo->admin.state);
ASSERT_LOG_ONLY(code->normal, "vdo should not receive bios while in state %s",
VDO_ASSERT_LOG_ONLY(code->normal, "vdo should not receive bios while in state %s",
code->name);
/* Count all incoming bios. */
......@@ -1244,7 +1244,7 @@ static int perform_admin_operation(struct vdo *vdo, u32 starting_phase,
/* Assert that we are operating on the correct thread for the current phase. */
static void assert_admin_phase_thread(struct vdo *vdo, const char *what)
{
ASSERT_LOG_ONLY(vdo_get_callback_thread_id() == get_thread_id_for_phase(vdo),
VDO_ASSERT_LOG_ONLY(vdo_get_callback_thread_id() == get_thread_id_for_phase(vdo),
"%s on correct thread for %s", what,
ADMIN_PHASE_NAMES[vdo->admin.phase]);
}
......@@ -1424,11 +1424,11 @@ static void release_instance(unsigned int instance)
{
mutex_lock(&instances_lock);
if (instance >= instances.bit_count) {
ASSERT_LOG_ONLY(false,
VDO_ASSERT_LOG_ONLY(false,
"instance number %u must be less than bit count %u",
instance, instances.bit_count);
} else if (test_bit(instance, instances.words) == 0) {
ASSERT_LOG_ONLY(false, "instance number %u must be allocated", instance);
VDO_ASSERT_LOG_ONLY(false, "instance number %u must be allocated", instance);
} else {
__clear_bit(instance, instances.words);
instances.count -= 1;
......@@ -1577,9 +1577,9 @@ static int allocate_instance(unsigned int *instance_ptr)
if (instance >= instances.bit_count) {
/* Nothing free after next, so wrap around to instance zero. */
instance = find_first_zero_bit(instances.words, instances.bit_count);
result = ASSERT(instance < instances.bit_count,
result = VDO_ASSERT(instance < instances.bit_count,
"impossibly, no zero bit found");
if (result != UDS_SUCCESS)
if (result != VDO_SUCCESS)
return result;
}
......@@ -1729,7 +1729,7 @@ static int prepare_to_grow_physical(struct vdo *vdo, block_count_t new_physical_
uds_log_info("Preparing to resize physical to %llu",
(unsigned long long) new_physical_blocks);
ASSERT_LOG_ONLY((new_physical_blocks > current_physical_blocks),
VDO_ASSERT_LOG_ONLY((new_physical_blocks > current_physical_blocks),
"New physical size is larger than current physical size");
result = perform_admin_operation(vdo, PREPARE_GROW_PHYSICAL_PHASE_START,
check_may_grow_physical,
......@@ -1829,7 +1829,7 @@ static int prepare_to_modify(struct dm_target *ti, struct device_config *config,
uds_log_info("Preparing to resize logical to %llu",
(unsigned long long) config->logical_blocks);
ASSERT_LOG_ONLY((config->logical_blocks > logical_blocks),
VDO_ASSERT_LOG_ONLY((config->logical_blocks > logical_blocks),
"New logical size is larger than current size");
result = vdo_prepare_to_grow_block_map(vdo->block_map,
......@@ -2890,7 +2890,7 @@ static void vdo_module_destroy(void)
if (dm_registered)
dm_unregister_target(&vdo_target_bio);
ASSERT_LOG_ONLY(instances.count == 0,
VDO_ASSERT_LOG_ONLY(instances.count == 0,
"should have no instance numbers still in use, but have %u",
instances.count);
vdo_free(instances.words);
......
......@@ -281,8 +281,9 @@ int uds_register_error_block(const char *block_name, int first_error,
.infos = infos,
};
result = ASSERT(first_error < next_free_error, "well-defined error block range");
if (result != UDS_SUCCESS)
result = VDO_ASSERT(first_error < next_free_error,
"well-defined error block range");
if (result != VDO_SUCCESS)
return result;
if (registered_errors.count == registered_errors.allocated) {
......
......@@ -59,7 +59,7 @@ struct flusher {
*/
static inline void assert_on_flusher_thread(struct flusher *flusher, const char *caller)
{
ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == flusher->thread_id),
VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == flusher->thread_id),
"%s() called from flusher thread", caller);
}
......@@ -272,7 +272,7 @@ static void flush_vdo(struct vdo_completion *completion)
int result;
assert_on_flusher_thread(flusher, __func__);
result = ASSERT(vdo_is_state_normal(&flusher->state),
result = VDO_ASSERT(vdo_is_state_normal(&flusher->state),
"flusher is in normal operation");
if (result != VDO_SUCCESS) {
vdo_enter_read_only_mode(flusher->vdo, result);
......@@ -330,7 +330,7 @@ void vdo_complete_flushes(struct flusher *flusher)
if (flush->flush_generation >= oldest_active_generation)
return;
ASSERT_LOG_ONLY((flush->flush_generation ==
VDO_ASSERT_LOG_ONLY((flush->flush_generation ==
flusher->first_unacknowledged_generation),
"acknowledged next expected flush, %llu, was: %llu",
(unsigned long long) flusher->first_unacknowledged_generation,
......@@ -400,7 +400,7 @@ void vdo_launch_flush(struct vdo *vdo, struct bio *bio)
struct flusher *flusher = vdo->flusher;
const struct admin_state_code *code = vdo_get_admin_state_code(&flusher->state);
ASSERT_LOG_ONLY(!code->quiescent, "Flushing not allowed in state %s",
VDO_ASSERT_LOG_ONLY(!code->quiescent, "Flushing not allowed in state %s",
code->name);
spin_lock(&flusher->lock);
......
......@@ -110,13 +110,13 @@ static struct vdo_completion *poll_for_completion(struct simple_work_queue *queu
static void enqueue_work_queue_completion(struct simple_work_queue *queue,
struct vdo_completion *completion)
{
ASSERT_LOG_ONLY(completion->my_queue == NULL,
VDO_ASSERT_LOG_ONLY(completion->my_queue == NULL,
"completion %px (fn %px) to enqueue (%px) is not already queued (%px)",
completion, completion->callback, queue, completion->my_queue);
if (completion->priority == VDO_WORK_Q_DEFAULT_PRIORITY)
completion->priority = queue->common.type->default_priority;
if (ASSERT(completion->priority <= queue->common.type->max_priority,
if (VDO_ASSERT(completion->priority <= queue->common.type->max_priority,
"priority is in range for queue") != VDO_SUCCESS)
completion->priority = 0;
......@@ -222,9 +222,9 @@ static struct vdo_completion *wait_for_next_completion(struct simple_work_queue
static void process_completion(struct simple_work_queue *queue,
struct vdo_completion *completion)
{
if (ASSERT(completion->my_queue == &queue->common,
if (VDO_ASSERT(completion->my_queue == &queue->common,
"completion %px from queue %px marked as being in this queue (%px)",
completion, queue, completion->my_queue) == UDS_SUCCESS)
completion, queue, completion->my_queue) == VDO_SUCCESS)
completion->my_queue = NULL;
vdo_run_completion(completion);
......@@ -319,7 +319,7 @@ static int make_simple_work_queue(const char *thread_name_prefix, const char *na
struct task_struct *thread = NULL;
int result;
ASSERT_LOG_ONLY((type->max_priority <= VDO_WORK_Q_MAX_PRIORITY),
VDO_ASSERT_LOG_ONLY((type->max_priority <= VDO_WORK_Q_MAX_PRIORITY),
"queue priority count %u within limit %u", type->max_priority,
VDO_WORK_Q_MAX_PRIORITY);
......
......@@ -94,7 +94,7 @@ static void count_all_bios(struct vio *vio, struct bio *bio)
*/
static void assert_in_bio_zone(struct vio *vio)
{
ASSERT_LOG_ONLY(!in_interrupt(), "not in interrupt context");
VDO_ASSERT_LOG_ONLY(!in_interrupt(), "not in interrupt context");
assert_vio_in_bio_zone(vio);
}
......@@ -300,7 +300,7 @@ static bool try_bio_map_merge(struct vio *vio)
mutex_unlock(&bio_queue_data->lock);
/* We don't care about failure of int_map_put in this case. */
ASSERT_LOG_ONLY(result == VDO_SUCCESS, "bio map insertion succeeds");
VDO_ASSERT_LOG_ONLY(result == VDO_SUCCESS, "bio map insertion succeeds");
return merged;
}
......@@ -345,8 +345,8 @@ void __submit_metadata_vio(struct vio *vio, physical_block_number_t physical,
const struct admin_state_code *code = vdo_get_admin_state(completion->vdo);
ASSERT_LOG_ONLY(!code->quiescent, "I/O not allowed in state %s", code->name);
ASSERT_LOG_ONLY(vio->bio->bi_next == NULL, "metadata bio has no next bio");
VDO_ASSERT_LOG_ONLY(!code->quiescent, "I/O not allowed in state %s", code->name);
VDO_ASSERT_LOG_ONLY(vio->bio->bi_next == NULL, "metadata bio has no next bio");
vdo_reset_completion(completion);
completion->error_handler = error_handler;
......
......@@ -142,7 +142,7 @@ void vdo_free_logical_zones(struct logical_zones *zones)
static inline void assert_on_zone_thread(struct logical_zone *zone, const char *what)
{
ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == zone->thread_id),
VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == zone->thread_id),
"%s() called on correct thread", what);
}
......@@ -247,7 +247,7 @@ void vdo_increment_logical_zone_flush_generation(struct logical_zone *zone,
sequence_number_t expected_generation)
{
assert_on_zone_thread(zone, __func__);
ASSERT_LOG_ONLY((zone->flush_generation == expected_generation),
VDO_ASSERT_LOG_ONLY((zone->flush_generation == expected_generation),
"logical zone %u flush generation %llu should be %llu before increment",
zone->zone_number, (unsigned long long) zone->flush_generation,
(unsigned long long) expected_generation);
......@@ -267,7 +267,7 @@ void vdo_acquire_flush_generation_lock(struct data_vio *data_vio)
struct logical_zone *zone = data_vio->logical.zone;
assert_on_zone_thread(zone, __func__);
ASSERT_LOG_ONLY(vdo_is_state_normal(&zone->state), "vdo state is normal");
VDO_ASSERT_LOG_ONLY(vdo_is_state_normal(&zone->state), "vdo state is normal");
data_vio->flush_generation = zone->flush_generation;
list_add_tail(&data_vio->write_entry, &zone->write_vios);
......@@ -332,7 +332,7 @@ void vdo_release_flush_generation_lock(struct data_vio *data_vio)
return;
list_del_init(&data_vio->write_entry);
ASSERT_LOG_ONLY((zone->oldest_active_generation <= data_vio->flush_generation),
VDO_ASSERT_LOG_ONLY((zone->oldest_active_generation <= data_vio->flush_generation),
"data_vio releasing lock on generation %llu is not older than oldest active generation %llu",
(unsigned long long) data_vio->flush_generation,
(unsigned long long) zone->oldest_active_generation);
......
......@@ -385,10 +385,10 @@ void vdo_memory_init(void)
void vdo_memory_exit(void)
{
ASSERT_LOG_ONLY(memory_stats.kmalloc_bytes == 0,
VDO_ASSERT_LOG_ONLY(memory_stats.kmalloc_bytes == 0,
"kmalloc memory used (%zd bytes in %zd blocks) is returned to the kernel",
memory_stats.kmalloc_bytes, memory_stats.kmalloc_blocks);
ASSERT_LOG_ONLY(memory_stats.vmalloc_bytes == 0,
VDO_ASSERT_LOG_ONLY(memory_stats.vmalloc_bytes == 0,
"vmalloc memory used (%zd bytes in %zd blocks) is returned to the kernel",
memory_stats.vmalloc_bytes, memory_stats.vmalloc_blocks);
uds_log_debug("peak usage %zd bytes", memory_stats.peak_bytes);
......
......@@ -86,7 +86,7 @@ int vdo_get_compressed_block_fragment(enum block_mapping_state mapping_state,
*/
static inline void assert_on_packer_thread(struct packer *packer, const char *caller)
{
ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == packer->thread_id),
VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == packer->thread_id),
"%s() called from packer thread", caller);
}
......@@ -569,7 +569,7 @@ void vdo_attempt_packing(struct data_vio *data_vio)
assert_on_packer_thread(packer, __func__);
result = ASSERT((status.stage == DATA_VIO_COMPRESSING),
result = VDO_ASSERT((status.stage == DATA_VIO_COMPRESSING),
"attempt to pack data_vio not ready for packing, stage: %u",
status.stage);
if (result != VDO_SUCCESS)
......@@ -671,7 +671,7 @@ void vdo_remove_lock_holder_from_packer(struct vdo_completion *completion)
lock_holder = vdo_forget(data_vio->compression.lock_holder);
bin = lock_holder->compression.bin;
ASSERT_LOG_ONLY((bin != NULL), "data_vio in packer has a bin");
VDO_ASSERT_LOG_ONLY((bin != NULL), "data_vio in packer has a bin");
slot = lock_holder->compression.slot;
bin->slots_used--;
......
......@@ -13,7 +13,6 @@
/* Utilities for asserting that certain conditions are met */
#define STRINGIFY(X) #X
#define STRINGIFY_VALUE(X) STRINGIFY(X)
/*
* A hack to apply the "warn if unused" attribute to an integral expression.
......@@ -23,19 +22,23 @@
* expression. With optimization enabled, this function contributes no additional instructions, but
* the warn_unused_result attribute still applies to the code calling it.
*/
static inline int __must_check uds_must_use(int value)
static inline int __must_check vdo_must_use(int value)
{
return value;
}
/* Assert that an expression is true and return an error if it is not. */
#define ASSERT(expr, ...) uds_must_use(__UDS_ASSERT(expr, __VA_ARGS__))
#define VDO_ASSERT(expr, ...) vdo_must_use(__VDO_ASSERT(expr, __VA_ARGS__))
/* Log a message if the expression is not true. */
#define ASSERT_LOG_ONLY(expr, ...) __UDS_ASSERT(expr, __VA_ARGS__)
#define VDO_ASSERT_LOG_ONLY(expr, ...) __VDO_ASSERT(expr, __VA_ARGS__)
#define __UDS_ASSERT(expr, ...) \
(likely(expr) ? UDS_SUCCESS \
/* For use by UDS */
#define ASSERT(expr, ...) VDO_ASSERT(expr, __VA_ARGS__)
#define ASSERT_LOG_ONLY(expr, ...) __VDO_ASSERT(expr, __VA_ARGS__)
#define __VDO_ASSERT(expr, ...) \
(likely(expr) ? VDO_SUCCESS \
: uds_assertion_failed(STRINGIFY(expr), __FILE__, __LINE__, __VA_ARGS__))
/* Log an assertion failure message. */
......
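
The practical difference between the two renamed macros after this change: VDO_ASSERT() is wrapped in vdo_must_use(), so ignoring its return value draws a __must_check warning, while VDO_ASSERT_LOG_ONLY() expands directly to __VDO_ASSERT() and can be used as a plain statement. A hedged usage sketch (the conditions and messages are made up for illustration):

	/* Return-value form: the result must be checked against VDO_SUCCESS. */
	result = VDO_ASSERT(queue != NULL, "queue must be allocated");
	if (result != VDO_SUCCESS)
		return result;

	/* Log-only form: a failure is logged but execution continues. */
	VDO_ASSERT_LOG_ONLY(count <= limit, "count %u within limit %u", count, limit);
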
......@@ -80,11 +80,11 @@ static inline void set_pbn_lock_type(struct pbn_lock *lock, enum pbn_lock_type t
*/
void vdo_downgrade_pbn_write_lock(struct pbn_lock *lock, bool compressed_write)
{
ASSERT_LOG_ONLY(!vdo_is_pbn_read_lock(lock),
VDO_ASSERT_LOG_ONLY(!vdo_is_pbn_read_lock(lock),
"PBN lock must not already have been downgraded");
ASSERT_LOG_ONLY(!has_lock_type(lock, VIO_BLOCK_MAP_WRITE_LOCK),
VDO_ASSERT_LOG_ONLY(!has_lock_type(lock, VIO_BLOCK_MAP_WRITE_LOCK),
"must not downgrade block map write locks");
ASSERT_LOG_ONLY(lock->holder_count == 1,
VDO_ASSERT_LOG_ONLY(lock->holder_count == 1,
"PBN write lock should have one holder but has %u",
lock->holder_count);
/*
......@@ -128,7 +128,7 @@ bool vdo_claim_pbn_lock_increment(struct pbn_lock *lock)
*/
void vdo_assign_pbn_lock_provisional_reference(struct pbn_lock *lock)
{
ASSERT_LOG_ONLY(!lock->has_provisional_reference,
VDO_ASSERT_LOG_ONLY(!lock->has_provisional_reference,
"lock does not have a provisional reference");
lock->has_provisional_reference = true;
}
......@@ -221,7 +221,7 @@ static void return_pbn_lock_to_pool(struct pbn_lock_pool *pool, struct pbn_lock
INIT_LIST_HEAD(&idle->entry);
list_add_tail(&idle->entry, &pool->idle_list);
ASSERT_LOG_ONLY(pool->borrowed > 0, "shouldn't return more than borrowed");
VDO_ASSERT_LOG_ONLY(pool->borrowed > 0, "shouldn't return more than borrowed");
pool->borrowed -= 1;
}
......@@ -267,7 +267,7 @@ static void free_pbn_lock_pool(struct pbn_lock_pool *pool)
if (pool == NULL)
return;
ASSERT_LOG_ONLY(pool->borrowed == 0,
VDO_ASSERT_LOG_ONLY(pool->borrowed == 0,
"All PBN locks must be returned to the pool before it is freed, but %zu locks are still on loan",
pool->borrowed);
vdo_free(pool);
......@@ -298,7 +298,7 @@ static int __must_check borrow_pbn_lock_from_pool(struct pbn_lock_pool *pool,
"no free PBN locks left to borrow");
pool->borrowed += 1;
result = ASSERT(!list_empty(&pool->idle_list),
result = VDO_ASSERT(!list_empty(&pool->idle_list),
"idle list should not be empty if pool not at capacity");
if (result != VDO_SUCCESS)
return result;
......@@ -447,7 +447,7 @@ int vdo_attempt_physical_zone_pbn_lock(struct physical_zone *zone,
result = borrow_pbn_lock_from_pool(zone->lock_pool, type, &new_lock);
if (result != VDO_SUCCESS) {
ASSERT_LOG_ONLY(false, "must always be able to borrow a PBN lock");
VDO_ASSERT_LOG_ONLY(false, "must always be able to borrow a PBN lock");
return result;
}
......@@ -461,7 +461,7 @@ int vdo_attempt_physical_zone_pbn_lock(struct physical_zone *zone,
if (lock != NULL) {
/* The lock is already held, so we don't need the borrowed one. */
return_pbn_lock_to_pool(zone->lock_pool, vdo_forget(new_lock));
result = ASSERT(lock->holder_count > 0, "physical block %llu lock held",
result = VDO_ASSERT(lock->holder_count > 0, "physical block %llu lock held",
(unsigned long long) pbn);
if (result != VDO_SUCCESS)
return result;
......@@ -485,7 +485,7 @@ static int allocate_and_lock_block(struct allocation *allocation)
int result;
struct pbn_lock *lock;
ASSERT_LOG_ONLY(allocation->lock == NULL,
VDO_ASSERT_LOG_ONLY(allocation->lock == NULL,
"must not allocate a block while already holding a lock on one");
result = vdo_allocate_block(allocation->zone->allocator, &allocation->pbn);
......@@ -617,7 +617,7 @@ void vdo_release_physical_zone_pbn_lock(struct physical_zone *zone,
if (lock == NULL)
return;
ASSERT_LOG_ONLY(lock->holder_count > 0,
VDO_ASSERT_LOG_ONLY(lock->holder_count > 0,
"should not be releasing a lock that is not held");
lock->holder_count -= 1;
......@@ -627,7 +627,7 @@ void vdo_release_physical_zone_pbn_lock(struct physical_zone *zone,
}
holder = vdo_int_map_remove(zone->pbn_operations, locked_pbn);
ASSERT_LOG_ONLY((lock == holder), "physical block lock mismatch for block %llu",
VDO_ASSERT_LOG_ONLY((lock == holder), "physical block lock mismatch for block %llu",
(unsigned long long) locked_pbn);
release_pbn_lock_provisional_reference(lock, locked_pbn, zone->allocator);
......
......@@ -127,7 +127,7 @@ void vdo_reset_priority_table(struct priority_table *table)
void vdo_priority_table_enqueue(struct priority_table *table, unsigned int priority,
struct list_head *entry)
{
ASSERT_LOG_ONLY((priority <= table->max_priority),
VDO_ASSERT_LOG_ONLY((priority <= table->max_priority),
"entry priority must be valid for the table");
/* Append the entry to the queue in the specified bucket. */
......
......@@ -119,7 +119,7 @@ static bool is_journal_zone_locked(struct recovery_journal *journal,
/* Pairs with barrier in vdo_release_journal_entry_lock() */
smp_rmb();
ASSERT_LOG_ONLY((decrements <= journal_value),
VDO_ASSERT_LOG_ONLY((decrements <= journal_value),
"journal zone lock counter must not underflow");
return (journal_value != decrements);
}
......@@ -150,7 +150,7 @@ void vdo_release_recovery_journal_block_reference(struct recovery_journal *journ
lock_number = vdo_get_recovery_journal_block_number(journal, sequence_number);
current_value = get_counter(journal, lock_number, zone_type, zone_id);
ASSERT_LOG_ONLY((*current_value >= 1),
VDO_ASSERT_LOG_ONLY((*current_value >= 1),
"decrement of lock counter must not underflow");
*current_value -= 1;
......@@ -254,7 +254,7 @@ static inline bool __must_check is_block_full(const struct recovery_journal_bloc
static void assert_on_journal_thread(struct recovery_journal *journal,
const char *function_name)
{
ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == journal->thread_id),
VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == journal->thread_id),
"%s() called on journal thread", function_name);
}
......@@ -353,13 +353,13 @@ static void check_for_drain_complete(struct recovery_journal *journal)
if (vdo_is_state_saving(&journal->state)) {
if (journal->active_block != NULL) {
ASSERT_LOG_ONLY(((result == VDO_READ_ONLY) ||
VDO_ASSERT_LOG_ONLY(((result == VDO_READ_ONLY) ||
!is_block_dirty(journal->active_block)),
"journal being saved has clean active block");
recycle_journal_block(journal->active_block);
}
ASSERT_LOG_ONLY(list_empty(&journal->active_tail_blocks),
VDO_ASSERT_LOG_ONLY(list_empty(&journal->active_tail_blocks),
"all blocks in a journal being saved must be inactive");
}
......@@ -800,7 +800,7 @@ void vdo_free_recovery_journal(struct recovery_journal *journal)
* requires opening before use.
*/
if (!vdo_is_state_quiescent(&journal->state)) {
ASSERT_LOG_ONLY(list_empty(&journal->active_tail_blocks),
VDO_ASSERT_LOG_ONLY(list_empty(&journal->active_tail_blocks),
"journal being freed has no active tail blocks");
} else if (!vdo_is_state_saved(&journal->state) &&
!list_empty(&journal->active_tail_blocks)) {
......@@ -989,7 +989,7 @@ static void initialize_lock_count(struct recovery_journal *journal)
atomic_t *decrement_counter = get_decrement_counter(journal, lock_number);
journal_value = get_counter(journal, lock_number, VDO_ZONE_TYPE_JOURNAL, 0);
ASSERT_LOG_ONLY((*journal_value == atomic_read(decrement_counter)),
VDO_ASSERT_LOG_ONLY((*journal_value == atomic_read(decrement_counter)),
"count to be initialized not in use");
*journal_value = journal->entries_per_block + 1;
atomic_set(decrement_counter, 0);
......@@ -1175,7 +1175,7 @@ static void continue_committed_waiter(struct vdo_waiter *waiter, void *context)
int result = (is_read_only(journal) ? VDO_READ_ONLY : VDO_SUCCESS);
bool has_decrement;
ASSERT_LOG_ONLY(vdo_before_journal_point(&journal->commit_point,
VDO_ASSERT_LOG_ONLY(vdo_before_journal_point(&journal->commit_point,
&data_vio->recovery_journal_point),
"DataVIOs released from recovery journal in order. Recovery journal point is (%llu, %u), but commit waiter point is (%llu, %u)",
(unsigned long long) journal->commit_point.sequence_number,
......@@ -1281,7 +1281,7 @@ static void complete_write(struct vdo_completion *completion)
journal->last_write_acknowledged = block->sequence_number;
last_active_block = get_journal_block(&journal->active_tail_blocks);
ASSERT_LOG_ONLY((block->sequence_number >= last_active_block->sequence_number),
VDO_ASSERT_LOG_ONLY((block->sequence_number >= last_active_block->sequence_number),
"completed journal write is still active");
notify_commit_waiters(journal);
......@@ -1456,7 +1456,7 @@ void vdo_add_recovery_journal_entry(struct recovery_journal *journal,
return;
}
ASSERT_LOG_ONLY(data_vio->recovery_sequence_number == 0,
VDO_ASSERT_LOG_ONLY(data_vio->recovery_sequence_number == 0,
"journal lock not held for new entry");
vdo_advance_journal_point(&journal->append_point, journal->entries_per_block);
......@@ -1564,12 +1564,12 @@ void vdo_acquire_recovery_journal_block_reference(struct recovery_journal *journ
if (sequence_number == 0)
return;
ASSERT_LOG_ONLY((zone_type != VDO_ZONE_TYPE_JOURNAL),
VDO_ASSERT_LOG_ONLY((zone_type != VDO_ZONE_TYPE_JOURNAL),
"invalid lock count increment from journal zone");
lock_number = vdo_get_recovery_journal_block_number(journal, sequence_number);
current_value = get_counter(journal, lock_number, zone_type, zone_id);
ASSERT_LOG_ONLY(*current_value < U16_MAX,
VDO_ASSERT_LOG_ONLY(*current_value < U16_MAX,
"increment of lock counter must not overflow");
if (*current_value == 0) {
......
......@@ -976,7 +976,7 @@ find_entry_starting_next_page(struct repair_completion *repair,
if (needs_sort) {
struct numbered_block_mapping *just_sorted_entry =
sort_next_heap_element(repair);
ASSERT_LOG_ONLY(just_sorted_entry < current_entry,
VDO_ASSERT_LOG_ONLY(just_sorted_entry < current_entry,
"heap is returning elements in an unexpected order");
}
......@@ -1129,7 +1129,7 @@ static void recover_block_map(struct vdo_completion *completion)
repair->current_entry = &repair->entries[repair->block_map_entry_count - 1];
first_sorted_entry = sort_next_heap_element(repair);
ASSERT_LOG_ONLY(first_sorted_entry == repair->current_entry,
VDO_ASSERT_LOG_ONLY(first_sorted_entry == repair->current_entry,
"heap is returning elements in an unexpected order");
/* Prevent any page from being processed until all pages have been launched. */
......@@ -1489,7 +1489,7 @@ static int extract_new_mappings(struct repair_completion *repair)
repair->block_map_entry_count++;
}
result = ASSERT((repair->block_map_entry_count <= repair->entry_count),
result = VDO_ASSERT((repair->block_map_entry_count <= repair->entry_count),
"approximate entry count is an upper bound");
if (result != VDO_SUCCESS)
vdo_enter_read_only_mode(vdo, result);
......
......@@ -44,7 +44,7 @@ void vdo_register_thread(struct thread_registry *registry,
list_add_tail_rcu(&new_thread->links, &registry->links);
spin_unlock(&registry->lock);
ASSERT_LOG_ONLY(!found_it, "new thread not already in registry");
VDO_ASSERT_LOG_ONLY(!found_it, "new thread not already in registry");
if (found_it) {
/* Ensure no RCU iterators see it before re-initializing. */
synchronize_rcu();
......@@ -67,7 +67,7 @@ void vdo_unregister_thread(struct thread_registry *registry)
}
spin_unlock(&registry->lock);
ASSERT_LOG_ONLY(found_it, "thread found in registry");
VDO_ASSERT_LOG_ONLY(found_it, "thread found in registry");
if (found_it) {
/* Ensure no RCU iterators see it before re-initializing. */
synchronize_rcu();
......
......@@ -425,7 +425,7 @@ int vdo_make_thread(struct vdo *vdo, thread_id_t thread_id,
type = &default_queue_type;
if (thread->queue != NULL) {
return ASSERT(vdo_work_queue_type_is(thread->queue, type),
return VDO_ASSERT(vdo_work_queue_type_is(thread->queue, type),
"already constructed vdo thread %u is of the correct type",
thread_id);
}
......@@ -448,7 +448,7 @@ static int register_vdo(struct vdo *vdo)
int result;
write_lock(&registry.lock);
result = ASSERT(filter_vdos_locked(vdo_is_equal, vdo) == NULL,
result = VDO_ASSERT(filter_vdos_locked(vdo_is_equal, vdo) == NULL,
"VDO not already registered");
if (result == VDO_SUCCESS) {
INIT_LIST_HEAD(&vdo->registration);
......@@ -1050,7 +1050,7 @@ int vdo_register_read_only_listener(struct vdo *vdo, void *listener,
struct read_only_listener *read_only_listener;
int result;
result = ASSERT(thread_id != vdo->thread_config.dedupe_thread,
result = VDO_ASSERT(thread_id != vdo->thread_config.dedupe_thread,
"read only listener not registered on dedupe thread");
if (result != VDO_SUCCESS)
return result;
......@@ -1704,7 +1704,7 @@ void vdo_dump_status(const struct vdo *vdo)
*/
void vdo_assert_on_admin_thread(const struct vdo *vdo, const char *name)
{
ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == vdo->thread_config.admin_thread),
VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == vdo->thread_config.admin_thread),
"%s called on admin thread", name);
}
......@@ -1718,7 +1718,7 @@ void vdo_assert_on_admin_thread(const struct vdo *vdo, const char *name)
void vdo_assert_on_logical_zone_thread(const struct vdo *vdo, zone_count_t logical_zone,
const char *name)
{
ASSERT_LOG_ONLY((vdo_get_callback_thread_id() ==
VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() ==
vdo->thread_config.logical_threads[logical_zone]),
"%s called on logical thread", name);
}
......@@ -1733,7 +1733,7 @@ void vdo_assert_on_logical_zone_thread(const struct vdo *vdo, zone_count_t logic
void vdo_assert_on_physical_zone_thread(const struct vdo *vdo,
zone_count_t physical_zone, const char *name)
{
ASSERT_LOG_ONLY((vdo_get_callback_thread_id() ==
VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() ==
vdo->thread_config.physical_threads[physical_zone]),
"%s called on physical thread", name);
}
......@@ -1773,7 +1773,7 @@ int vdo_get_physical_zone(const struct vdo *vdo, physical_block_number_t pbn,
/* With the PBN already checked, we should always succeed in finding a slab. */
slab = vdo_get_slab(vdo->depot, pbn);
result = ASSERT(slab != NULL, "vdo_get_slab must succeed on all valid PBNs");
result = VDO_ASSERT(slab != NULL, "vdo_get_slab must succeed on all valid PBNs");
if (result != VDO_SUCCESS)
return result;
......
......@@ -82,13 +82,13 @@ int allocate_vio_components(struct vdo *vdo, enum vio_type vio_type,
struct bio *bio;
int result;
result = ASSERT(block_count <= MAX_BLOCKS_PER_VIO,
result = VDO_ASSERT(block_count <= MAX_BLOCKS_PER_VIO,
"block count %u does not exceed maximum %u", block_count,
MAX_BLOCKS_PER_VIO);
if (result != VDO_SUCCESS)
return result;
result = ASSERT(((vio_type != VIO_TYPE_UNINITIALIZED) && (vio_type != VIO_TYPE_DATA)),
result = VDO_ASSERT(((vio_type != VIO_TYPE_UNINITIALIZED) && (vio_type != VIO_TYPE_DATA)),
"%d is a metadata type", vio_type);
if (result != VDO_SUCCESS)
return result;
......@@ -363,12 +363,12 @@ void free_vio_pool(struct vio_pool *pool)
return;
/* Remove all available vios from the object pool. */
ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&pool->waiting),
VDO_ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&pool->waiting),
"VIO pool must not have any waiters when being freed");
ASSERT_LOG_ONLY((pool->busy_count == 0),
VDO_ASSERT_LOG_ONLY((pool->busy_count == 0),
"VIO pool must not have %zu busy entries when being freed",
pool->busy_count);
ASSERT_LOG_ONLY(list_empty(&pool->busy),
VDO_ASSERT_LOG_ONLY(list_empty(&pool->busy),
"VIO pool must not have busy entries when being freed");
list_for_each_entry_safe(pooled, tmp, &pool->available, pool_entry) {
......@@ -377,7 +377,7 @@ void free_vio_pool(struct vio_pool *pool)
pool->size--;
}
ASSERT_LOG_ONLY(pool->size == 0,
VDO_ASSERT_LOG_ONLY(pool->size == 0,
"VIO pool must not have missing entries when being freed");
vdo_free(vdo_forget(pool->buffer));
......@@ -403,7 +403,7 @@ void acquire_vio_from_pool(struct vio_pool *pool, struct vdo_waiter *waiter)
{
struct pooled_vio *pooled;
ASSERT_LOG_ONLY((pool->thread_id == vdo_get_callback_thread_id()),
VDO_ASSERT_LOG_ONLY((pool->thread_id == vdo_get_callback_thread_id()),
"acquire from active vio_pool called from correct thread");
if (list_empty(&pool->available)) {
......@@ -424,7 +424,7 @@ void acquire_vio_from_pool(struct vio_pool *pool, struct vdo_waiter *waiter)
*/
void return_vio_to_pool(struct vio_pool *pool, struct pooled_vio *vio)
{
ASSERT_LOG_ONLY((pool->thread_id == vdo_get_callback_thread_id()),
VDO_ASSERT_LOG_ONLY((pool->thread_id == vdo_get_callback_thread_id()),
"vio pool entry returned on same thread as it was acquired");
vio->vio.completion.error_handler = NULL;
......@@ -465,7 +465,7 @@ void vdo_count_bios(struct atomic_bio_stats *bio_stats, struct bio *bio)
* shouldn't exist.
*/
default:
ASSERT_LOG_ONLY(0, "Bio operation %d not a write, read, discard, or empty flush",
VDO_ASSERT_LOG_ONLY(0, "Bio operation %d not a write, read, discard, or empty flush",
bio_op(bio));
}
......
......@@ -67,7 +67,7 @@ static inline void assert_vio_in_bio_zone(struct vio *vio)
thread_id_t expected = get_vio_bio_zone_thread_id(vio);
thread_id_t thread_id = vdo_get_callback_thread_id();
ASSERT_LOG_ONLY((expected == thread_id),
VDO_ASSERT_LOG_ONLY((expected == thread_id),
"vio I/O for physical block %llu on thread %u, should be on bio zone thread %u",
(unsigned long long) pbn_from_vio_bio(vio->bio), thread_id,
expected);
......