Commit e6016736 authored by Matthew Sakai, committed by Mike Snitzer

dm vdo indexer: update ASSERT and ASSERT_LOG_ONLY usage

Update indexer uses of ASSERT and ASSERT_LOG_ONLY to
VDO_ASSERT and VDO_ASSERT_LOG_ONLY, respectively. Remove
ASSERT and ASSERT_LOG_ONLY. Also rename uds_assertion_failed
to vdo_assertion_failed.
Signed-off-by: Matthew Sakai <msakai@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
parent fc03f737
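
For orientation, here is a hedged, user-space sketch of the assertion pattern the indexer code converges on after this change: VDO_ASSERT evaluates to VDO_SUCCESS when the expression holds and otherwise logs through vdo_assertion_failed(), and callers translate a failure into one of the existing UDS_* error codes. Only __VDO_ASSERT, VDO_ASSERT_LOG_ONLY, and the shape of vdo_assertion_failed() mirror the header hunk at the end of this diff; the VDO_ASSERT definition, the stderr logging, and the numeric error value below are illustrative stand-ins, not the kernel's.

/*
 * Standalone user-space sketch (illustration only, not kernel code).
 */
#include <stdarg.h>
#include <stdio.h>

#define VDO_SUCCESS 0
#define UDS_CORRUPT_DATA 3	/* placeholder value, not the kernel's */

#define likely(x) __builtin_expect(!!(x), 1)
#define STRINGIFY(x) #x

/* Log the failed expression with its location and message; return an error. */
static int vdo_assertion_failed(const char *expression_string, const char *file_name,
				int line_number, const char *format, ...)
{
	va_list args;

	va_start(args, format);
	fprintf(stderr, "assertion \"%s\" failed at %s:%d: ",
		expression_string, file_name, line_number);
	vfprintf(stderr, format, args);
	fputc('\n', stderr);
	va_end(args);
	return -1;	/* stand-in for the assertion-failure status */
}

#define __VDO_ASSERT(expr, ...)						\
	(likely(expr) ? VDO_SUCCESS					\
		      : vdo_assertion_failed(STRINGIFY(expr), __FILE__, __LINE__, __VA_ARGS__))

/* Assumed wrapper: callers are expected to check the returned status. */
#define VDO_ASSERT(expr, ...) __VDO_ASSERT(expr, __VA_ARGS__)
/* Log-only flavor: the status is intentionally ignored by callers. */
#define VDO_ASSERT_LOG_ONLY(expr, ...) __VDO_ASSERT(expr, __VA_ARGS__)

int main(void)
{
	size_t offset = 12, expected = 16;
	int result;

	/* Caller-side pattern after this patch: compare against VDO_SUCCESS
	 * and map an assertion failure onto a UDS error code. */
	result = VDO_ASSERT(offset == expected,
			    "%zu bytes decoded of %zu expected", offset, expected);
	if (result != VDO_SUCCESS)
		return UDS_CORRUPT_DATA;

	/* Log-only usage, with the result deliberately not checked. */
	VDO_ASSERT_LOG_ONLY(offset == expected,
			    "%zu bytes encoded of %zu expected", offset, expected);

	return 0;
}

Note that the hunks below keep returning the pre-existing UDS_* codes (UDS_CORRUPT_DATA, UDS_BAD_STATE, UDS_INVALID_ARGUMENT) to their callers; only the success value compared against changes from UDS_SUCCESS to VDO_SUCCESS.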
@@ -83,10 +83,10 @@ int uds_put_open_chapter_index_record(struct open_chapter_index *chapter_index,
 	u64 chapter_number = chapter_index->virtual_chapter_number;
 	u32 record_pages = geometry->record_pages_per_chapter;
 
-	result = ASSERT(page_number < record_pages,
-			"Page number within chapter (%u) exceeds the maximum value %u",
-			page_number, record_pages);
-	if (result != UDS_SUCCESS)
+	result = VDO_ASSERT(page_number < record_pages,
+			    "Page number within chapter (%u) exceeds the maximum value %u",
+			    page_number, record_pages);
+	if (result != VDO_SUCCESS)
 		return UDS_INVALID_ARGUMENT;
 
 	address = uds_hash_to_chapter_delta_address(name, geometry);
@@ -97,10 +97,10 @@ int uds_put_open_chapter_index_record(struct open_chapter_index *chapter_index,
 		return result;
 
 	found = was_entry_found(&entry, address);
-	result = ASSERT(!(found && entry.is_collision),
-			"Chunk appears more than once in chapter %llu",
-			(unsigned long long) chapter_number);
-	if (result != UDS_SUCCESS)
+	result = VDO_ASSERT(!(found && entry.is_collision),
+			    "Chunk appears more than once in chapter %llu",
+			    (unsigned long long) chapter_number);
+	if (result != VDO_SUCCESS)
 		return UDS_BAD_STATE;
 
 	found_name = (found ? name->name : NULL);
@@ -134,10 +134,10 @@ int uds_validate_config_contents(struct buffered_reader *reader,
 	decode_u32_le(buffer, &offset, &config.sparse_sample_rate);
 	decode_u64_le(buffer, &offset, &config.nonce);
 
-	result = ASSERT(offset == sizeof(struct uds_configuration_6_02),
-			"%zu bytes read but not decoded",
-			sizeof(struct uds_configuration_6_02) - offset);
-	if (result != UDS_SUCCESS)
+	result = VDO_ASSERT(offset == sizeof(struct uds_configuration_6_02),
+			    "%zu bytes read but not decoded",
+			    sizeof(struct uds_configuration_6_02) - offset);
+	if (result != VDO_SUCCESS)
 		return UDS_CORRUPT_DATA;
 
 	if (is_version(INDEX_CONFIG_VERSION_6_02, version_buffer)) {
@@ -210,10 +210,10 @@ int uds_write_config_contents(struct buffered_writer *writer,
 	encode_u32_le(buffer, &offset, config->sparse_sample_rate);
 	encode_u64_le(buffer, &offset, config->nonce);
 
-	result = ASSERT(offset == sizeof(struct uds_configuration_6_02),
-			"%zu bytes encoded, of %zu expected", offset,
-			sizeof(struct uds_configuration_6_02));
-	if (result != UDS_SUCCESS)
+	result = VDO_ASSERT(offset == sizeof(struct uds_configuration_6_02),
+			    "%zu bytes encoded, of %zu expected", offset,
+			    sizeof(struct uds_configuration_6_02));
+	if (result != VDO_SUCCESS)
 		return result;
 
 	if (version >= 4) {
@@ -856,10 +856,10 @@ int uds_start_restoring_delta_index(struct delta_index *delta_index,
 	decode_u64_le(buffer, &offset, &header.record_count);
 	decode_u64_le(buffer, &offset, &header.collision_count);
 
-	result = ASSERT(offset == sizeof(struct delta_index_header),
-			"%zu bytes decoded of %zu expected", offset,
-			sizeof(struct delta_index_header));
-	if (result != UDS_SUCCESS) {
+	result = VDO_ASSERT(offset == sizeof(struct delta_index_header),
+			    "%zu bytes decoded of %zu expected", offset,
+			    sizeof(struct delta_index_header));
+	if (result != VDO_SUCCESS) {
 		return uds_log_warning_strerror(result,
 						"failed to read delta index header");
 	}
@@ -1136,10 +1136,10 @@ int uds_start_saving_delta_index(const struct delta_index *delta_index,
 	encode_u64_le(buffer, &offset, delta_zone->record_count);
 	encode_u64_le(buffer, &offset, delta_zone->collision_count);
 
-	result = ASSERT(offset == sizeof(struct delta_index_header),
-			"%zu bytes encoded of %zu expected", offset,
-			sizeof(struct delta_index_header));
-	if (result != UDS_SUCCESS)
+	result = VDO_ASSERT(offset == sizeof(struct delta_index_header),
+			    "%zu bytes encoded of %zu expected", offset,
+			    sizeof(struct delta_index_header));
+	if (result != VDO_SUCCESS)
 		return result;
 
 	result = uds_write_to_buffered_writer(buffered_writer, buffer, offset);
@@ -1212,9 +1212,9 @@ size_t uds_compute_delta_index_save_bytes(u32 list_count, size_t memory_size)
 static int assert_not_at_end(const struct delta_index_entry *delta_entry)
 {
-	int result = ASSERT(!delta_entry->at_end,
-			    "operation is invalid because the list entry is at the end of the delta list");
-	if (result != UDS_SUCCESS)
+	int result = VDO_ASSERT(!delta_entry->at_end,
+				"operation is invalid because the list entry is at the end of the delta list");
+	if (result != VDO_SUCCESS)
 		result = UDS_BAD_STATE;
 
 	return result;
@@ -1236,19 +1236,19 @@ int uds_start_delta_index_search(const struct delta_index *delta_index, u32 list
 	struct delta_zone *delta_zone;
 	struct delta_list *delta_list;
 
-	result = ASSERT((list_number < delta_index->list_count),
-			"Delta list number (%u) is out of range (%u)", list_number,
-			delta_index->list_count);
-	if (result != UDS_SUCCESS)
+	result = VDO_ASSERT((list_number < delta_index->list_count),
+			    "Delta list number (%u) is out of range (%u)", list_number,
+			    delta_index->list_count);
+	if (result != VDO_SUCCESS)
 		return UDS_CORRUPT_DATA;
 
 	zone_number = list_number / delta_index->lists_per_zone;
 	delta_zone = &delta_index->delta_zones[zone_number];
 	list_number -= delta_zone->first_list;
-	result = ASSERT((list_number < delta_zone->list_count),
-			"Delta list number (%u) is out of range (%u) for zone (%u)",
-			list_number, delta_zone->list_count, zone_number);
-	if (result != UDS_SUCCESS)
+	result = VDO_ASSERT((list_number < delta_zone->list_count),
+			    "Delta list number (%u) is out of range (%u) for zone (%u)",
+			    list_number, delta_zone->list_count, zone_number);
+	if (result != VDO_SUCCESS)
 		return UDS_CORRUPT_DATA;
 
 	if (delta_index->mutable) {
@@ -1362,9 +1362,9 @@ noinline int uds_next_delta_index_entry(struct delta_index_entry *delta_entry)
 		delta_entry->at_end = true;
 		delta_entry->delta = 0;
 		delta_entry->is_collision = false;
-		result = ASSERT((delta_entry->offset == size),
-				"next offset past end of delta list");
-		if (result != UDS_SUCCESS)
+		result = VDO_ASSERT((delta_entry->offset == size),
+				    "next offset past end of delta list");
+		if (result != VDO_SUCCESS)
 			result = UDS_CORRUPT_DATA;
 
 		return result;
@@ -1390,8 +1390,8 @@ int uds_remember_delta_index_offset(const struct delta_index_entry *delta_entry)
 	int result;
 	struct delta_list *delta_list = delta_entry->delta_list;
 
-	result = ASSERT(!delta_entry->is_collision, "entry is not a collision");
-	if (result != UDS_SUCCESS)
+	result = VDO_ASSERT(!delta_entry->is_collision, "entry is not a collision");
+	if (result != VDO_SUCCESS)
 		return result;
 
 	delta_list->save_key = delta_entry->key - delta_entry->delta;
@@ -1489,9 +1489,9 @@ int uds_get_delta_entry_collision(const struct delta_index_entry *delta_entry, u
 	if (result != UDS_SUCCESS)
 		return result;
 
-	result = ASSERT(delta_entry->is_collision,
-			"Cannot get full block name from a non-collision delta index entry");
-	if (result != UDS_SUCCESS)
+	result = VDO_ASSERT(delta_entry->is_collision,
+			    "Cannot get full block name from a non-collision delta index entry");
+	if (result != VDO_SUCCESS)
 		return UDS_BAD_STATE;
 
 	get_collision_name(delta_entry, name);
@@ -1506,9 +1506,9 @@ u32 uds_get_delta_entry_value(const struct delta_index_entry *delta_entry)
 static int assert_mutable_entry(const struct delta_index_entry *delta_entry)
 {
-	int result = ASSERT((delta_entry->delta_list != &delta_entry->temp_delta_list),
-			    "delta index is mutable");
-	if (result != UDS_SUCCESS)
+	int result = VDO_ASSERT((delta_entry->delta_list != &delta_entry->temp_delta_list),
+				"delta index is mutable");
+	if (result != VDO_SUCCESS)
 		result = UDS_BAD_STATE;
 
 	return result;
@@ -1527,10 +1527,10 @@ int uds_set_delta_entry_value(const struct delta_index_entry *delta_entry, u32 v
 	if (result != UDS_SUCCESS)
 		return result;
 
-	result = ASSERT((value & value_mask) == value,
-			"Value (%u) being set in a delta index is too large (must fit in %u bits)",
-			value, delta_entry->value_bits);
-	if (result != UDS_SUCCESS)
+	result = VDO_ASSERT((value & value_mask) == value,
+			    "Value (%u) being set in a delta index is too large (must fit in %u bits)",
+			    value, delta_entry->value_bits);
+	if (result != VDO_SUCCESS)
 		return UDS_INVALID_ARGUMENT;
 
 	set_field(value, delta_entry->delta_zone->memory,
@@ -1730,9 +1730,9 @@ int uds_put_delta_index_entry(struct delta_index_entry *delta_entry, u32 key, u3
 		if (result != UDS_SUCCESS)
 			return result;
 
-		result = ASSERT((key == delta_entry->key),
-				"incorrect key for collision entry");
-		if (result != UDS_SUCCESS)
+		result = VDO_ASSERT((key == delta_entry->key),
+				    "incorrect key for collision entry");
+		if (result != VDO_SUCCESS)
 			return result;
 
 		delta_entry->offset += delta_entry->entry_bits;
@@ -1742,8 +1742,8 @@ int uds_put_delta_index_entry(struct delta_index_entry *delta_entry, u32 key, u3
 		result = insert_bits(delta_entry, delta_entry->entry_bits);
 	} else if (delta_entry->at_end) {
 		/* Insert a new entry at the end of the delta list. */
-		result = ASSERT((key >= delta_entry->key), "key past end of list");
-		if (result != UDS_SUCCESS)
+		result = VDO_ASSERT((key >= delta_entry->key), "key past end of list");
+		if (result != VDO_SUCCESS)
 			return result;
 
 		set_delta(delta_entry, key - delta_entry->key);
@@ -1760,14 +1760,14 @@ int uds_put_delta_index_entry(struct delta_index_entry *delta_entry, u32 key, u3
 		 * Insert a new entry which requires the delta in the following entry to be
 		 * updated.
 		 */
-		result = ASSERT((key < delta_entry->key),
-				"key precedes following entry");
-		if (result != UDS_SUCCESS)
+		result = VDO_ASSERT((key < delta_entry->key),
+				    "key precedes following entry");
+		if (result != VDO_SUCCESS)
 			return result;
 
-		result = ASSERT((key >= delta_entry->key - delta_entry->delta),
-				"key effects following entry's delta");
-		if (result != UDS_SUCCESS)
+		result = VDO_ASSERT((key >= delta_entry->key - delta_entry->delta),
+				    "key effects following entry's delta");
+		if (result != VDO_SUCCESS)
 			return result;
 
 		old_entry_size = delta_entry->entry_bits;
@@ -837,8 +837,9 @@ static u64 generate_index_save_nonce(u64 volume_nonce, struct index_save_layout
 	encode_u32_le(buffer, &offset, isl->save_data.version);
 	encode_u32_le(buffer, &offset, 0U);
 	encode_u64_le(buffer, &offset, isl->index_save.start_block);
-	ASSERT_LOG_ONLY(offset == sizeof(nonce_data),
-			"%zu bytes encoded of %zu expected", offset, sizeof(nonce_data));
+	VDO_ASSERT_LOG_ONLY(offset == sizeof(nonce_data),
+			    "%zu bytes encoded of %zu expected",
+			    offset, sizeof(nonce_data));
 
 	return generate_secondary_nonce(volume_nonce, buffer, sizeof(buffer));
 }
@@ -199,8 +199,8 @@ static void update_session_stats(struct uds_request *request)
 		break;
 
 	default:
-		request->status = ASSERT(false, "unknown request type: %d",
-					 request->type);
+		request->status = VDO_ASSERT(false, "unknown request type: %d",
					     request->type);
 	}
 }
@@ -402,8 +402,8 @@ static void suspend_rebuild(struct uds_index_session *session)
 	case INDEX_FREEING:
 	default:
 		/* These cases should not happen. */
-		ASSERT_LOG_ONLY(false, "Bad load context state %u",
-				session->load_context.status);
+		VDO_ASSERT_LOG_ONLY(false, "Bad load context state %u",
+				    session->load_context.status);
 		break;
 	}
 	mutex_unlock(&session->load_context.mutex);
@@ -531,8 +531,8 @@ int uds_resume_index_session(struct uds_index_session *session,
 	case INDEX_FREEING:
 	default:
 		/* These cases should not happen; do nothing. */
-		ASSERT_LOG_ONLY(false, "Bad load context state %u",
-				session->load_context.status);
+		VDO_ASSERT_LOG_ONLY(false, "Bad load context state %u",
+				    session->load_context.status);
 		break;
 	}
 	mutex_unlock(&session->load_context.mutex);
@@ -112,7 +112,7 @@ static void enqueue_barrier_messages(struct uds_index *index, u64 virtual_chapte
 	for (zone = 0; zone < index->zone_count; zone++) {
 		int result = launch_zone_message(message, zone, index);
 
-		ASSERT_LOG_ONLY((result == UDS_SUCCESS), "barrier message allocation");
+		VDO_ASSERT_LOG_ONLY((result == UDS_SUCCESS), "barrier message allocation");
 	}
 }
@@ -1380,7 +1380,7 @@ void uds_enqueue_request(struct uds_request *request, enum request_stage stage)
 		break;
 
 	default:
-		ASSERT_LOG_ONLY(false, "invalid index stage: %d", stage);
+		VDO_ASSERT_LOG_ONLY(false, "invalid index stage: %d", stage);
 		return;
 	}
@@ -832,10 +832,10 @@ static int start_restoring_volume_sub_index(struct volume_sub_index *sub_index,
 	decode_u32_le(buffer, &offset, &header.first_list);
 	decode_u32_le(buffer, &offset, &header.list_count);
 
-	result = ASSERT(offset == sizeof(buffer),
-			"%zu bytes decoded of %zu expected", offset,
-			sizeof(buffer));
-	if (result != UDS_SUCCESS)
+	result = VDO_ASSERT(offset == sizeof(buffer),
+			    "%zu bytes decoded of %zu expected", offset,
+			    sizeof(buffer));
+	if (result != VDO_SUCCESS)
 		result = UDS_CORRUPT_DATA;
 
 	if (memcmp(header.magic, MAGIC_START_5, MAGIC_SIZE) != 0) {
@@ -924,10 +924,10 @@ static int start_restoring_volume_index(struct volume_index *volume_index,
 	offset += MAGIC_SIZE;
 	decode_u32_le(buffer, &offset, &header.sparse_sample_rate);
 
-	result = ASSERT(offset == sizeof(buffer),
-			"%zu bytes decoded of %zu expected", offset,
-			sizeof(buffer));
-	if (result != UDS_SUCCESS)
+	result = VDO_ASSERT(offset == sizeof(buffer),
+			    "%zu bytes decoded of %zu expected", offset,
+			    sizeof(buffer));
+	if (result != VDO_SUCCESS)
 		result = UDS_CORRUPT_DATA;
 
 	if (memcmp(header.magic, MAGIC_START_6, MAGIC_SIZE) != 0)
@@ -1023,10 +1023,10 @@ static int start_saving_volume_sub_index(const struct volume_sub_index *sub_inde
 	encode_u32_le(buffer, &offset, first_list);
 	encode_u32_le(buffer, &offset, list_count);
 
-	result = ASSERT(offset == sizeof(struct sub_index_data),
-			"%zu bytes of config written, of %zu expected", offset,
-			sizeof(struct sub_index_data));
-	if (result != UDS_SUCCESS)
+	result = VDO_ASSERT(offset == sizeof(struct sub_index_data),
+			    "%zu bytes of config written, of %zu expected", offset,
+			    sizeof(struct sub_index_data));
+	if (result != VDO_SUCCESS)
 		return result;
 
 	result = uds_write_to_buffered_writer(buffered_writer, buffer, offset);
@@ -1066,10 +1066,10 @@ static int start_saving_volume_index(const struct volume_index *volume_index,
 	memcpy(buffer, MAGIC_START_6, MAGIC_SIZE);
 	offset += MAGIC_SIZE;
 	encode_u32_le(buffer, &offset, volume_index->sparse_sample_rate);
-	result = ASSERT(offset == sizeof(struct volume_index_data),
-			"%zu bytes of header written, of %zu expected", offset,
-			sizeof(struct volume_index_data));
-	if (result != UDS_SUCCESS)
+	result = VDO_ASSERT(offset == sizeof(struct volume_index_data),
+			    "%zu bytes of header written, of %zu expected", offset,
+			    sizeof(struct volume_index_data));
+	if (result != VDO_SUCCESS)
 		return result;
 
 	result = uds_write_to_buffered_writer(writer, buffer, offset);
@@ -135,8 +135,8 @@ static void begin_pending_search(struct page_cache *cache, u32 physical_page,
 	invalidate_counter.page = physical_page;
 	invalidate_counter.counter++;
 	set_invalidate_counter(cache, zone_number, invalidate_counter);
-	ASSERT_LOG_ONLY(search_pending(invalidate_counter),
-			"Search is pending for zone %u", zone_number);
+	VDO_ASSERT_LOG_ONLY(search_pending(invalidate_counter),
+			    "Search is pending for zone %u", zone_number);
 	/*
 	 * This memory barrier ensures that the write to the invalidate counter is seen by other
 	 * threads before this thread accesses the cached page. The corresponding read memory
@@ -158,8 +158,8 @@ static void end_pending_search(struct page_cache *cache, unsigned int zone_numbe
 	smp_mb();
 
 	invalidate_counter = get_invalidate_counter(cache, zone_number);
-	ASSERT_LOG_ONLY(search_pending(invalidate_counter),
-			"Search is pending for zone %u", zone_number);
+	VDO_ASSERT_LOG_ONLY(search_pending(invalidate_counter),
+			    "Search is pending for zone %u", zone_number);
 	invalidate_counter.counter++;
 	set_invalidate_counter(cache, zone_number, invalidate_counter);
 }
@@ -259,8 +259,8 @@ static int put_page_in_cache(struct page_cache *cache, u32 physical_page,
 	int result;
 
 	/* We hold the read_threads_mutex. */
-	result = ASSERT((page->read_pending), "page to install has a pending read");
-	if (result != UDS_SUCCESS)
+	result = VDO_ASSERT((page->read_pending), "page to install has a pending read");
+	if (result != VDO_SUCCESS)
 		return result;
 
 	page->physical_page = physical_page;
@@ -285,8 +285,8 @@ static void cancel_page_in_cache(struct page_cache *cache, u32 physical_page,
 	int result;
 
 	/* We hold the read_threads_mutex. */
-	result = ASSERT((page->read_pending), "page to install has a pending read");
-	if (result != UDS_SUCCESS)
+	result = VDO_ASSERT((page->read_pending), "page to install has a pending read");
+	if (result != VDO_SUCCESS)
 		return;
 
 	clear_cache_page(cache, page);
@@ -889,10 +889,10 @@ int uds_search_cached_record_page(struct volume *volume, struct uds_request *req
 	if (record_page_number == NO_CHAPTER_INDEX_ENTRY)
 		return UDS_SUCCESS;
 
-	result = ASSERT(record_page_number < geometry->record_pages_per_chapter,
-			"0 <= %d < %u", record_page_number,
-			geometry->record_pages_per_chapter);
-	if (result != UDS_SUCCESS)
+	result = VDO_ASSERT(record_page_number < geometry->record_pages_per_chapter,
+			    "0 <= %d < %u", record_page_number,
+			    geometry->record_pages_per_chapter);
+	if (result != VDO_SUCCESS)
 		return result;
 
 	page_number = geometry->index_pages_per_chapter + record_page_number;
@@ -1501,10 +1501,10 @@ static int __must_check initialize_page_cache(struct page_cache *cache,
 	cache->zone_count = zone_count;
 	atomic64_set(&cache->clock, 1);
 
-	result = ASSERT((cache->cache_slots <= VOLUME_CACHE_MAX_ENTRIES),
-			"requested cache size, %u, within limit %u",
-			cache->cache_slots, VOLUME_CACHE_MAX_ENTRIES);
-	if (result != UDS_SUCCESS)
+	result = VDO_ASSERT((cache->cache_slots <= VOLUME_CACHE_MAX_ENTRIES),
+			    "requested cache size, %u, within limit %u",
+			    cache->cache_slots, VOLUME_CACHE_MAX_ENTRIES);
+	if (result != VDO_SUCCESS)
 		return result;
 
 	result = vdo_allocate(VOLUME_CACHE_MAX_QUEUED_READS, struct queued_read,
@@ -8,7 +8,7 @@
 #include "errors.h"
 #include "logger.h"
 
-int uds_assertion_failed(const char *expression_string, const char *file_name,
+int vdo_assertion_failed(const char *expression_string, const char *file_name,
 			 int line_number, const char *format, ...)
 {
 	va_list args;
@@ -33,16 +33,12 @@ static inline int __must_check vdo_must_use(int value)
 /* Log a message if the expression is not true. */
 #define VDO_ASSERT_LOG_ONLY(expr, ...) __VDO_ASSERT(expr, __VA_ARGS__)
 
-/* For use by UDS */
-#define ASSERT(expr, ...) VDO_ASSERT(expr, __VA_ARGS__)
-#define ASSERT_LOG_ONLY(expr, ...) __VDO_ASSERT(expr, __VA_ARGS__)
-
 #define __VDO_ASSERT(expr, ...)					\
 	(likely(expr) ? VDO_SUCCESS				\
-		      : uds_assertion_failed(STRINGIFY(expr), __FILE__, __LINE__, __VA_ARGS__))
+		      : vdo_assertion_failed(STRINGIFY(expr), __FILE__, __LINE__, __VA_ARGS__))
 
 /* Log an assertion failure message. */
-int uds_assertion_failed(const char *expression_string, const char *file_name,
+int vdo_assertion_failed(const char *expression_string, const char *file_name,
 			 int line_number, const char *format, ...)
 	__printf(4, 5);