Commit 3584240b authored by Mike Snitzer

dm vdo logger: change from uds_ to vdo_ namespace

Rename all uds_log_* to vdo_log_*.
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
Signed-off-by: Chung Chung <cchung@redhat.com>
Signed-off-by: Matthew Sakai <msakai@redhat.com>
parent 66214ed0
......@@ -228,12 +228,12 @@ static int __must_check begin_operation(struct admin_state *state,
const struct admin_state_code *next_state = get_next_state(state, operation);
if (next_state == NULL) {
result = uds_log_error_strerror(VDO_INVALID_ADMIN_STATE,
result = vdo_log_error_strerror(VDO_INVALID_ADMIN_STATE,
"Can't start %s from %s",
operation->name,
vdo_get_admin_state_code(state)->name);
} else if (state->waiter != NULL) {
result = uds_log_error_strerror(VDO_COMPONENT_BUSY,
result = vdo_log_error_strerror(VDO_COMPONENT_BUSY,
"Can't start %s with extant waiter",
operation->name);
} else {
......@@ -291,7 +291,7 @@ static bool check_code(bool valid, const struct admin_state_code *code, const ch
if (valid)
return true;
result = uds_log_error_strerror(VDO_INVALID_ADMIN_STATE,
result = vdo_log_error_strerror(VDO_INVALID_ADMIN_STATE,
"%s is not a %s", code->name, what);
if (waiter != NULL)
vdo_continue_completion(waiter, result);
......@@ -334,7 +334,7 @@ bool vdo_start_draining(struct admin_state *state,
}
if (!code->normal) {
uds_log_error_strerror(VDO_INVALID_ADMIN_STATE, "can't start %s from %s",
vdo_log_error_strerror(VDO_INVALID_ADMIN_STATE, "can't start %s from %s",
operation->name, code->name);
vdo_continue_completion(waiter, VDO_INVALID_ADMIN_STATE);
return false;
......
......@@ -264,7 +264,7 @@ static void report_cache_pressure(struct vdo_page_cache *cache)
ADD_ONCE(cache->stats.cache_pressure, 1);
if (cache->waiter_count > cache->page_count) {
if ((cache->pressure_report % LOG_INTERVAL) == 0)
uds_log_info("page cache pressure %u", cache->stats.cache_pressure);
vdo_log_info("page cache pressure %u", cache->stats.cache_pressure);
if (++cache->pressure_report >= DISPLAY_INTERVAL)
cache->pressure_report = 0;
......@@ -483,7 +483,7 @@ static void complete_with_page(struct page_info *info,
bool available = vdo_page_comp->writable ? is_present(info) : is_valid(info);
if (!available) {
uds_log_error_strerror(VDO_BAD_PAGE,
vdo_log_error_strerror(VDO_BAD_PAGE,
"Requested cache page %llu in state %s is not %s",
(unsigned long long) info->pbn,
get_page_state_name(info->state),
......@@ -563,7 +563,7 @@ static void set_persistent_error(struct vdo_page_cache *cache, const char *conte
struct vdo *vdo = cache->vdo;
if ((result != VDO_READ_ONLY) && !vdo_is_read_only(vdo)) {
uds_log_error_strerror(result, "VDO Page Cache persistent error: %s",
vdo_log_error_strerror(result, "VDO Page Cache persistent error: %s",
context);
vdo_enter_read_only_mode(vdo, result);
}
......@@ -704,7 +704,7 @@ static void page_is_loaded(struct vdo_completion *completion)
validity = vdo_validate_block_map_page(page, nonce, info->pbn);
if (validity == VDO_BLOCK_MAP_PAGE_BAD) {
physical_block_number_t pbn = vdo_get_block_map_page_pbn(page);
int result = uds_log_error_strerror(VDO_BAD_PAGE,
int result = vdo_log_error_strerror(VDO_BAD_PAGE,
"Expected page %llu but got page %llu instead",
(unsigned long long) info->pbn,
(unsigned long long) pbn);
......@@ -894,7 +894,7 @@ static void allocate_free_page(struct page_info *info)
if (!vdo_waitq_has_waiters(&cache->free_waiters)) {
if (cache->stats.cache_pressure > 0) {
uds_log_info("page cache pressure relieved");
vdo_log_info("page cache pressure relieved");
WRITE_ONCE(cache->stats.cache_pressure, 0);
}
......@@ -1012,7 +1012,7 @@ static void handle_page_write_error(struct vdo_completion *completion)
/* If we're already read-only, write failures are to be expected. */
if (result != VDO_READ_ONLY) {
uds_log_ratelimit(uds_log_error,
vdo_log_ratelimit(vdo_log_error,
"failed to write block map page %llu",
(unsigned long long) info->pbn);
}
......@@ -1397,7 +1397,7 @@ bool vdo_copy_valid_page(char *buffer, nonce_t nonce,
}
if (validity == VDO_BLOCK_MAP_PAGE_BAD) {
uds_log_error_strerror(VDO_BAD_PAGE,
vdo_log_error_strerror(VDO_BAD_PAGE,
"Expected page %llu but got page %llu instead",
(unsigned long long) pbn,
(unsigned long long) vdo_get_block_map_page_pbn(loaded));
......@@ -1785,7 +1785,7 @@ static void continue_with_loaded_page(struct data_vio *data_vio,
vdo_unpack_block_map_entry(&page->entries[slot.block_map_slot.slot]);
if (is_invalid_tree_entry(vdo_from_data_vio(data_vio), &mapping, lock->height)) {
uds_log_error_strerror(VDO_BAD_MAPPING,
vdo_log_error_strerror(VDO_BAD_MAPPING,
"Invalid block map tree PBN: %llu with state %u for page index %u at height %u",
(unsigned long long) mapping.pbn, mapping.state,
lock->tree_slots[lock->height - 1].page_index,
......@@ -2263,7 +2263,7 @@ void vdo_find_block_map_slot(struct data_vio *data_vio)
/* The page at this height has been allocated and loaded. */
mapping = vdo_unpack_block_map_entry(&page->entries[tree_slot.block_map_slot.slot]);
if (is_invalid_tree_entry(vdo_from_data_vio(data_vio), &mapping, lock->height)) {
uds_log_error_strerror(VDO_BAD_MAPPING,
vdo_log_error_strerror(VDO_BAD_MAPPING,
"Invalid block map tree PBN: %llu with state %u for page index %u at height %u",
(unsigned long long) mapping.pbn, mapping.state,
lock->tree_slots[lock->height - 1].page_index,
......@@ -3140,7 +3140,7 @@ static int __must_check set_mapped_location(struct data_vio *data_vio,
* Log the corruption even if we wind up ignoring it for write VIOs, converting all cases
* to VDO_BAD_MAPPING.
*/
uds_log_error_strerror(VDO_BAD_MAPPING,
vdo_log_error_strerror(VDO_BAD_MAPPING,
"PBN %llu with state %u read from the block map was invalid",
(unsigned long long) mapped.pbn, mapped.state);
......
......@@ -792,25 +792,25 @@ static int initialize_data_vio(struct data_vio *data_vio, struct vdo *vdo)
result = vdo_allocate_memory(VDO_BLOCK_SIZE, 0, "data_vio data",
&data_vio->vio.data);
if (result != VDO_SUCCESS)
return uds_log_error_strerror(result,
return vdo_log_error_strerror(result,
"data_vio data allocation failure");
result = vdo_allocate_memory(VDO_BLOCK_SIZE, 0, "compressed block",
&data_vio->compression.block);
if (result != VDO_SUCCESS) {
return uds_log_error_strerror(result,
return vdo_log_error_strerror(result,
"data_vio compressed block allocation failure");
}
result = vdo_allocate_memory(VDO_BLOCK_SIZE, 0, "vio scratch",
&data_vio->scratch_block);
if (result != VDO_SUCCESS)
return uds_log_error_strerror(result,
return vdo_log_error_strerror(result,
"data_vio scratch allocation failure");
result = vdo_create_bio(&bio);
if (result != VDO_SUCCESS)
return uds_log_error_strerror(result,
return vdo_log_error_strerror(result,
"data_vio data bio allocation failure");
vdo_initialize_completion(&data_vio->decrement_completion, vdo,
......@@ -1025,7 +1025,7 @@ void resume_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *com
static void dump_limiter(const char *name, struct limiter *limiter)
{
uds_log_info("%s: %u of %u busy (max %u), %s", name, limiter->busy,
vdo_log_info("%s: %u of %u busy (max %u), %s", name, limiter->busy,
limiter->limit, limiter->max_busy,
((bio_list_empty(&limiter->waiters) &&
bio_list_empty(&limiter->new_waiters)) ?
......@@ -1323,7 +1323,7 @@ static void perform_cleanup_stage(struct data_vio *data_vio,
if ((data_vio->recovery_sequence_number > 0) &&
(READ_ONCE(vdo->read_only_notifier.read_only_error) == VDO_SUCCESS) &&
(data_vio->vio.completion.result != VDO_READ_ONLY))
uds_log_warning("VDO not read-only when cleaning data_vio with RJ lock");
vdo_log_warning("VDO not read-only when cleaning data_vio with RJ lock");
fallthrough;
case VIO_RELEASE_LOGICAL:
......@@ -1353,7 +1353,7 @@ static void enter_read_only_mode(struct vdo_completion *completion)
if (completion->result != VDO_READ_ONLY) {
struct data_vio *data_vio = as_data_vio(completion);
uds_log_error_strerror(completion->result,
vdo_log_error_strerror(completion->result,
"Preparing to enter read-only mode: data_vio for LBN %llu (becoming mapped to %llu, previously mapped to %llu, allocated %llu) is completing with a fatal error after operation %s",
(unsigned long long) data_vio->logical.lbn,
(unsigned long long) data_vio->new_mapped.pbn,
......@@ -1449,14 +1449,14 @@ int uncompress_data_vio(struct data_vio *data_vio,
&fragment_offset, &fragment_size);
if (result != VDO_SUCCESS) {
uds_log_debug("%s: compressed fragment error %d", __func__, result);
vdo_log_debug("%s: compressed fragment error %d", __func__, result);
return result;
}
size = LZ4_decompress_safe((block->data + fragment_offset), buffer,
fragment_size, VDO_BLOCK_SIZE);
if (size != VDO_BLOCK_SIZE) {
uds_log_debug("%s: lz4 error", __func__);
vdo_log_debug("%s: lz4 error", __func__);
return VDO_INVALID_FRAGMENT;
}
......
......@@ -1287,7 +1287,7 @@ static bool acquire_provisional_reference(struct data_vio *agent, struct pbn_loc
if (result == VDO_SUCCESS)
return true;
uds_log_warning_strerror(result,
vdo_log_warning_strerror(result,
"Error acquiring provisional reference for dedupe candidate; aborting dedupe");
agent->is_duplicate = false;
vdo_release_physical_zone_pbn_lock(agent->duplicate.zone,
......@@ -1614,7 +1614,7 @@ static bool decode_uds_advice(struct dedupe_context *context)
version = encoding->data[offset++];
if (version != UDS_ADVICE_VERSION) {
uds_log_error("invalid UDS advice version code %u", version);
vdo_log_error("invalid UDS advice version code %u", version);
return false;
}
......@@ -1625,7 +1625,7 @@ static bool decode_uds_advice(struct dedupe_context *context)
/* Don't use advice that's clearly meaningless. */
if ((advice->state == VDO_MAPPING_STATE_UNMAPPED) || (advice->pbn == VDO_ZERO_BLOCK)) {
uds_log_debug("Invalid advice from deduplication server: pbn %llu, state %u. Giving up on deduplication of logical block %llu",
vdo_log_debug("Invalid advice from deduplication server: pbn %llu, state %u. Giving up on deduplication of logical block %llu",
(unsigned long long) advice->pbn, advice->state,
(unsigned long long) data_vio->logical.lbn);
atomic64_inc(&vdo->stats.invalid_advice_pbn_count);
......@@ -1634,7 +1634,7 @@ static bool decode_uds_advice(struct dedupe_context *context)
result = vdo_get_physical_zone(vdo, advice->pbn, &advice->zone);
if ((result != VDO_SUCCESS) || (advice->zone == NULL)) {
uds_log_debug("Invalid physical block number from deduplication server: %llu, giving up on deduplication of logical block %llu",
vdo_log_debug("Invalid physical block number from deduplication server: %llu, giving up on deduplication of logical block %llu",
(unsigned long long) advice->pbn,
(unsigned long long) data_vio->logical.lbn);
atomic64_inc(&vdo->stats.invalid_advice_pbn_count);
......@@ -2053,7 +2053,7 @@ static void close_index(struct hash_zones *zones)
result = uds_close_index(zones->index_session);
if (result != UDS_SUCCESS)
uds_log_error_strerror(result, "Error closing index");
vdo_log_error_strerror(result, "Error closing index");
spin_lock(&zones->lock);
zones->index_state = IS_CLOSED;
zones->error_flag |= result != UDS_SUCCESS;
......@@ -2080,7 +2080,7 @@ static void open_index(struct hash_zones *zones)
result = uds_open_index(create_flag ? UDS_CREATE : UDS_LOAD,
&zones->parameters, zones->index_session);
if (result != UDS_SUCCESS)
uds_log_error_strerror(result, "Error opening index");
vdo_log_error_strerror(result, "Error opening index");
spin_lock(&zones->lock);
if (!create_flag) {
......@@ -2104,7 +2104,7 @@ static void open_index(struct hash_zones *zones)
zones->index_target = IS_CLOSED;
zones->error_flag = true;
spin_unlock(&zones->lock);
uds_log_info("Setting UDS index target state to error");
vdo_log_info("Setting UDS index target state to error");
spin_lock(&zones->lock);
}
/*
......@@ -2160,7 +2160,7 @@ static void report_dedupe_timeouts(struct hash_zones *zones, unsigned int timeou
u64 unreported = atomic64_read(&zones->timeouts);
unreported -= zones->reported_timeouts;
uds_log_debug("UDS index timeout on %llu requests",
vdo_log_debug("UDS index timeout on %llu requests",
(unsigned long long) unreported);
zones->reported_timeouts += unreported;
}
......@@ -2207,7 +2207,7 @@ static int initialize_index(struct vdo *vdo, struct hash_zones *zones)
1, NULL);
if (result != VDO_SUCCESS) {
uds_destroy_index_session(vdo_forget(zones->index_session));
uds_log_error("UDS index queue initialization failed (%d)", result);
vdo_log_error("UDS index queue initialization failed (%d)", result);
return result;
}
......@@ -2502,7 +2502,7 @@ static void initiate_suspend_index(struct admin_state *state)
result = uds_suspend_index_session(zones->index_session, save);
if (result != UDS_SUCCESS)
uds_log_error_strerror(result, "Error suspending dedupe index");
vdo_log_error_strerror(result, "Error suspending dedupe index");
}
vdo_finish_draining(state);
......@@ -2585,7 +2585,7 @@ static void resume_index(void *context, struct vdo_completion *parent)
zones->parameters.bdev = config->owned_device->bdev;
result = uds_resume_index_session(zones->index_session, zones->parameters.bdev);
if (result != UDS_SUCCESS)
uds_log_error_strerror(result, "Error resuming dedupe index");
vdo_log_error_strerror(result, "Error resuming dedupe index");
spin_lock(&zones->lock);
vdo_resume_if_quiescent(&zones->state);
......@@ -2665,7 +2665,7 @@ static void get_index_statistics(struct hash_zones *zones,
result = uds_get_index_session_stats(zones->index_session, &index_stats);
if (result != UDS_SUCCESS) {
uds_log_error_strerror(result, "Error reading index stats");
vdo_log_error_strerror(result, "Error reading index stats");
return;
}
......@@ -2750,7 +2750,7 @@ static void dump_hash_lock(const struct hash_lock *lock)
* unambiguous. 'U' indicates a lock not registered in the map.
*/
state = get_hash_lock_state_name(lock->state);
uds_log_info(" hl %px: %3.3s %c%llu/%u rc=%u wc=%zu agt=%px",
vdo_log_info(" hl %px: %3.3s %c%llu/%u rc=%u wc=%zu agt=%px",
lock, state, (lock->registered ? 'D' : 'U'),
(unsigned long long) lock->duplicate.pbn,
lock->duplicate.state, lock->reference_count,
......@@ -2784,11 +2784,11 @@ static void dump_hash_zone(const struct hash_zone *zone)
data_vio_count_t i;
if (zone->hash_lock_map == NULL) {
uds_log_info("struct hash_zone %u: NULL map", zone->zone_number);
vdo_log_info("struct hash_zone %u: NULL map", zone->zone_number);
return;
}
uds_log_info("struct hash_zone %u: mapSize=%zu",
vdo_log_info("struct hash_zone %u: mapSize=%zu",
zone->zone_number, vdo_int_map_size(zone->hash_lock_map));
for (i = 0; i < LOCK_POOL_CAPACITY; i++)
dump_hash_lock(&zone->lock_array[i]);
......@@ -2808,9 +2808,9 @@ void vdo_dump_hash_zones(struct hash_zones *zones)
target = (zones->changing ? index_state_to_string(zones, zones->index_target) : NULL);
spin_unlock(&zones->lock);
uds_log_info("UDS index: state: %s", state);
vdo_log_info("UDS index: state: %s", state);
if (target != NULL)
uds_log_info("UDS index: changing to state: %s", target);
vdo_log_info("UDS index: changing to state: %s", target);
for (zone = 0; zone < zones->zone_count; zone++)
dump_hash_zone(&zones->zones[zone]);
......@@ -2957,7 +2957,7 @@ static void set_target_state(struct hash_zones *zones, enum index_state target,
spin_unlock(&zones->lock);
if (old_state != new_state)
uds_log_info("Setting UDS index target state to %s", new_state);
vdo_log_info("Setting UDS index target state to %s", new_state);
}
const char *vdo_get_dedupe_index_state_name(struct hash_zones *zones)
......
This diff is collapsed.
......@@ -58,12 +58,12 @@ static void do_dump(struct vdo *vdo, unsigned int dump_options_requested,
u32 active, maximum;
s64 outstanding;
uds_log_info("%s dump triggered via %s", UDS_LOGGING_MODULE_NAME, why);
vdo_log_info("%s dump triggered via %s", VDO_LOGGING_MODULE_NAME, why);
active = get_data_vio_pool_active_requests(vdo->data_vio_pool);
maximum = get_data_vio_pool_maximum_requests(vdo->data_vio_pool);
outstanding = (atomic64_read(&vdo->stats.bios_submitted) -
atomic64_read(&vdo->stats.bios_completed));
uds_log_info("%u device requests outstanding (max %u), %lld bio requests outstanding, device '%s'",
vdo_log_info("%u device requests outstanding (max %u), %lld bio requests outstanding, device '%s'",
active, maximum, outstanding,
vdo_get_device_name(vdo->device_config->owning_target));
if (((dump_options_requested & FLAG_SHOW_QUEUES) != 0) && (vdo->threads != NULL)) {
......@@ -80,7 +80,7 @@ static void do_dump(struct vdo *vdo, unsigned int dump_options_requested,
vdo_dump_status(vdo);
vdo_report_memory_usage();
uds_log_info("end of %s dump", UDS_LOGGING_MODULE_NAME);
vdo_log_info("end of %s dump", VDO_LOGGING_MODULE_NAME);
}
static int parse_dump_options(unsigned int argc, char *const *argv,
......@@ -114,7 +114,7 @@ static int parse_dump_options(unsigned int argc, char *const *argv,
}
}
if (j == ARRAY_SIZE(option_names)) {
uds_log_warning("dump option name '%s' unknown", argv[i]);
vdo_log_warning("dump option name '%s' unknown", argv[i]);
options_okay = false;
}
}
......@@ -159,13 +159,13 @@ static void dump_vio_waiters(struct vdo_wait_queue *waitq, char *wait_on)
data_vio = vdo_waiter_as_data_vio(first);
uds_log_info(" %s is locked. Waited on by: vio %px pbn %llu lbn %llu d-pbn %llu lastOp %s",
vdo_log_info(" %s is locked. Waited on by: vio %px pbn %llu lbn %llu d-pbn %llu lastOp %s",
wait_on, data_vio, data_vio->allocation.pbn, data_vio->logical.lbn,
data_vio->duplicate.pbn, get_data_vio_operation_name(data_vio));
for (waiter = first->next_waiter; waiter != first; waiter = waiter->next_waiter) {
data_vio = vdo_waiter_as_data_vio(waiter);
uds_log_info(" ... and : vio %px pbn %llu lbn %llu d-pbn %llu lastOp %s",
vdo_log_info(" ... and : vio %px pbn %llu lbn %llu d-pbn %llu lastOp %s",
data_vio, data_vio->allocation.pbn, data_vio->logical.lbn,
data_vio->duplicate.pbn,
get_data_vio_operation_name(data_vio));
......@@ -258,7 +258,7 @@ void dump_data_vio(void *data)
encode_vio_dump_flags(data_vio, flags_dump_buffer);
uds_log_info(" vio %px %s%s %s %s%s", data_vio,
vdo_log_info(" vio %px %s%s %s %s%s", data_vio,
vio_block_number_dump_buffer,
vio_flush_generation_buffer,
get_data_vio_operation_name(data_vio),
......
......@@ -146,7 +146,7 @@ static int __must_check validate_version(struct version_number expected_version,
const char *component_name)
{
if (!vdo_are_same_version(expected_version, actual_version)) {
return uds_log_error_strerror(VDO_UNSUPPORTED_VERSION,
return vdo_log_error_strerror(VDO_UNSUPPORTED_VERSION,
"%s version mismatch, expected %d.%d, got %d.%d",
component_name,
expected_version.major_version,
......@@ -179,7 +179,7 @@ int vdo_validate_header(const struct header *expected_header,
int result;
if (expected_header->id != actual_header->id) {
return uds_log_error_strerror(VDO_INCORRECT_COMPONENT,
return vdo_log_error_strerror(VDO_INCORRECT_COMPONENT,
"%s ID mismatch, expected %d, got %d",
name, expected_header->id,
actual_header->id);
......@@ -192,7 +192,7 @@ int vdo_validate_header(const struct header *expected_header,
if ((expected_header->size > actual_header->size) ||
(exact_size && (expected_header->size < actual_header->size))) {
return uds_log_error_strerror(VDO_UNSUPPORTED_VERSION,
return vdo_log_error_strerror(VDO_UNSUPPORTED_VERSION,
"%s size mismatch, expected %zu, got %zu",
name, expected_header->size,
actual_header->size);
......@@ -653,7 +653,7 @@ int vdo_configure_slab_depot(const struct partition *partition,
physical_block_number_t last_block;
block_count_t slab_size = slab_config.slab_blocks;
uds_log_debug("slabDepot %s(block_count=%llu, first_block=%llu, slab_size=%llu, zone_count=%u)",
vdo_log_debug("slabDepot %s(block_count=%llu, first_block=%llu, slab_size=%llu, zone_count=%u)",
__func__, (unsigned long long) partition->count,
(unsigned long long) partition->offset,
(unsigned long long) slab_size, zone_count);
......@@ -677,7 +677,7 @@ int vdo_configure_slab_depot(const struct partition *partition,
.zone_count = zone_count,
};
uds_log_debug("slab_depot last_block=%llu, total_data_blocks=%llu, slab_count=%zu, left_over=%llu",
vdo_log_debug("slab_depot last_block=%llu, total_data_blocks=%llu, slab_count=%zu, left_over=%llu",
(unsigned long long) last_block,
(unsigned long long) total_data_blocks, slab_count,
(unsigned long long) (partition->count - (last_block - partition->offset)));
......@@ -875,7 +875,7 @@ int vdo_initialize_layout(block_count_t size, physical_block_number_t offset,
(offset + block_map_blocks + journal_blocks + summary_blocks);
if (necessary_size > size)
return uds_log_error_strerror(VDO_NO_SPACE,
return vdo_log_error_strerror(VDO_NO_SPACE,
"Not enough space to make a VDO");
*layout = (struct layout) {
......@@ -1045,7 +1045,7 @@ static int decode_layout(u8 *buffer, size_t *offset, physical_block_number_t sta
layout->num_partitions = layout_header.partition_count;
if (layout->num_partitions > VDO_PARTITION_COUNT) {
return uds_log_error_strerror(VDO_UNKNOWN_PARTITION,
return vdo_log_error_strerror(VDO_UNKNOWN_PARTITION,
"layout has extra partitions");
}
......@@ -1070,7 +1070,7 @@ static int decode_layout(u8 *buffer, size_t *offset, physical_block_number_t sta
result = vdo_get_partition(layout, REQUIRED_PARTITIONS[i], &partition);
if (result != VDO_SUCCESS) {
vdo_uninitialize_layout(layout);
return uds_log_error_strerror(result,
return vdo_log_error_strerror(result,
"layout is missing required partition %u",
REQUIRED_PARTITIONS[i]);
}
......@@ -1080,7 +1080,7 @@ static int decode_layout(u8 *buffer, size_t *offset, physical_block_number_t sta
if (start != size) {
vdo_uninitialize_layout(layout);
return uds_log_error_strerror(UDS_BAD_STATE,
return vdo_log_error_strerror(UDS_BAD_STATE,
"partitions do not cover the layout");
}
......@@ -1253,7 +1253,7 @@ int vdo_validate_config(const struct vdo_config *config,
return VDO_OUT_OF_RANGE;
if (physical_block_count != config->physical_blocks) {
uds_log_error("A physical size of %llu blocks was specified, not the %llu blocks configured in the vdo super block",
vdo_log_error("A physical size of %llu blocks was specified, not the %llu blocks configured in the vdo super block",
(unsigned long long) physical_block_count,
(unsigned long long) config->physical_blocks);
return VDO_PARAMETER_MISMATCH;
......@@ -1266,7 +1266,7 @@ int vdo_validate_config(const struct vdo_config *config,
return result;
if (logical_block_count != config->logical_blocks) {
uds_log_error("A logical size of %llu blocks was specified, but that differs from the %llu blocks configured in the vdo super block",
vdo_log_error("A logical size of %llu blocks was specified, but that differs from the %llu blocks configured in the vdo super block",
(unsigned long long) logical_block_count,
(unsigned long long) config->logical_blocks);
return VDO_PARAMETER_MISMATCH;
......@@ -1390,7 +1390,7 @@ int vdo_validate_component_states(struct vdo_component_states *states,
block_count_t logical_size)
{
if (geometry_nonce != states->vdo.nonce) {
return uds_log_error_strerror(VDO_BAD_NONCE,
return vdo_log_error_strerror(VDO_BAD_NONCE,
"Geometry nonce %llu does not match superblock nonce %llu",
(unsigned long long) geometry_nonce,
(unsigned long long) states->vdo.nonce);
......@@ -1463,7 +1463,7 @@ int vdo_decode_super_block(u8 *buffer)
* We can't check release version or checksum until we know the content size, so we
* have to assume a version mismatch on unexpected values.
*/
return uds_log_error_strerror(VDO_UNSUPPORTED_VERSION,
return vdo_log_error_strerror(VDO_UNSUPPORTED_VERSION,
"super block contents too large: %zu",
header.size);
}
......
......@@ -215,8 +215,8 @@ const char *uds_string_error_name(int errnum, char *buf, size_t buflen)
*/
int uds_status_to_errno(int error)
{
char error_name[UDS_MAX_ERROR_NAME_SIZE];
char error_message[UDS_MAX_ERROR_MESSAGE_SIZE];
char error_name[VDO_MAX_ERROR_NAME_SIZE];
char error_message[VDO_MAX_ERROR_MESSAGE_SIZE];
/* 0 is success, and negative values are already system error codes. */
if (likely(error <= 0))
......@@ -248,7 +248,7 @@ int uds_status_to_errno(int error)
default:
/* Translate an unexpected error into something generic. */
uds_log_info("%s: mapping status code %d (%s: %s) to -EIO",
vdo_log_info("%s: mapping status code %d (%s: %s) to -EIO",
__func__, error,
uds_string_error_name(error, error_name,
sizeof(error_name)),
......
......@@ -51,8 +51,8 @@ enum uds_status_codes {
};
enum {
UDS_MAX_ERROR_NAME_SIZE = 80,
UDS_MAX_ERROR_MESSAGE_SIZE = 128,
VDO_MAX_ERROR_NAME_SIZE = 80,
VDO_MAX_ERROR_MESSAGE_SIZE = 128,
};
struct error_info {
......
......@@ -108,7 +108,7 @@ static void *allocate_flush(gfp_t gfp_mask, void *pool_data)
int result = vdo_allocate(1, struct vdo_flush, __func__, &flush);
if (result != VDO_SUCCESS)
uds_log_error_strerror(result, "failed to allocate spare flush");
vdo_log_error_strerror(result, "failed to allocate spare flush");
}
if (flush != NULL) {
......@@ -349,11 +349,11 @@ void vdo_complete_flushes(struct flusher *flusher)
*/
void vdo_dump_flusher(const struct flusher *flusher)
{
uds_log_info("struct flusher");
uds_log_info(" flush_generation=%llu first_unacknowledged_generation=%llu",
vdo_log_info("struct flusher");
vdo_log_info(" flush_generation=%llu first_unacknowledged_generation=%llu",
(unsigned long long) flusher->flush_generation,
(unsigned long long) flusher->first_unacknowledged_generation);
uds_log_info(" notifiers queue is %s; pending_flushes queue is %s",
vdo_log_info(" notifiers queue is %s; pending_flushes queue is %s",
(vdo_waitq_has_waiters(&flusher->notifiers) ? "not empty" : "empty"),
(vdo_waitq_has_waiters(&flusher->pending_flushes) ? "not empty" : "empty"));
}
......
......@@ -485,7 +485,7 @@ static void dump_simple_work_queue(struct simple_work_queue *queue)
thread_status = atomic_read(&queue->idle) ? "idle" : "running";
}
uds_log_info("workQ %px (%s) %s (%c)", &queue->common, queue->common.name,
vdo_log_info("workQ %px (%s) %s (%c)", &queue->common, queue->common.name,
thread_status, task_state_report);
/* ->waiting_worker_threads wait queue status? anyone waiting? */
......
......@@ -166,7 +166,7 @@ int uds_pack_open_chapter_index_page(struct open_chapter_index *chapter_index,
if (removals == 0) {
uds_get_delta_index_stats(delta_index, &stats);
uds_log_warning("The chapter index for chapter %llu contains %llu entries with %llu collisions",
vdo_log_warning("The chapter index for chapter %llu contains %llu entries with %llu collisions",
(unsigned long long) chapter_number,
(unsigned long long) stats.record_count,
(unsigned long long) stats.collision_count);
......@@ -198,7 +198,7 @@ int uds_pack_open_chapter_index_page(struct open_chapter_index *chapter_index,
}
if (removals > 0) {
uds_log_warning("To avoid chapter index page overflow in chapter %llu, %u entries were removed from the chapter index",
vdo_log_warning("To avoid chapter index page overflow in chapter %llu, %u entries were removed from the chapter index",
(unsigned long long) chapter_number, removals);
}
......
......@@ -33,54 +33,54 @@ static bool are_matching_configurations(struct uds_configuration *saved_config,
bool result = true;
if (saved_geometry->record_pages_per_chapter != geometry->record_pages_per_chapter) {
uds_log_error("Record pages per chapter (%u) does not match (%u)",
vdo_log_error("Record pages per chapter (%u) does not match (%u)",
saved_geometry->record_pages_per_chapter,
geometry->record_pages_per_chapter);
result = false;
}
if (saved_geometry->chapters_per_volume != geometry->chapters_per_volume) {
uds_log_error("Chapter count (%u) does not match (%u)",
vdo_log_error("Chapter count (%u) does not match (%u)",
saved_geometry->chapters_per_volume,
geometry->chapters_per_volume);
result = false;
}
if (saved_geometry->sparse_chapters_per_volume != geometry->sparse_chapters_per_volume) {
uds_log_error("Sparse chapter count (%u) does not match (%u)",
vdo_log_error("Sparse chapter count (%u) does not match (%u)",
saved_geometry->sparse_chapters_per_volume,
geometry->sparse_chapters_per_volume);
result = false;
}
if (saved_config->cache_chapters != user->cache_chapters) {
uds_log_error("Cache size (%u) does not match (%u)",
vdo_log_error("Cache size (%u) does not match (%u)",
saved_config->cache_chapters, user->cache_chapters);
result = false;
}
if (saved_config->volume_index_mean_delta != user->volume_index_mean_delta) {
uds_log_error("Volume index mean delta (%u) does not match (%u)",
vdo_log_error("Volume index mean delta (%u) does not match (%u)",
saved_config->volume_index_mean_delta,
user->volume_index_mean_delta);
result = false;
}
if (saved_geometry->bytes_per_page != geometry->bytes_per_page) {
uds_log_error("Bytes per page value (%zu) does not match (%zu)",
vdo_log_error("Bytes per page value (%zu) does not match (%zu)",
saved_geometry->bytes_per_page, geometry->bytes_per_page);
result = false;
}
if (saved_config->sparse_sample_rate != user->sparse_sample_rate) {
uds_log_error("Sparse sample rate (%u) does not match (%u)",
vdo_log_error("Sparse sample rate (%u) does not match (%u)",
saved_config->sparse_sample_rate,
user->sparse_sample_rate);
result = false;
}
if (saved_config->nonce != user->nonce) {
uds_log_error("Nonce (%llu) does not match (%llu)",
vdo_log_error("Nonce (%llu) does not match (%llu)",
(unsigned long long) saved_config->nonce,
(unsigned long long) user->nonce);
result = false;
......@@ -109,11 +109,11 @@ int uds_validate_config_contents(struct buffered_reader *reader,
result = uds_read_from_buffered_reader(reader, version_buffer,
INDEX_CONFIG_VERSION_LENGTH);
if (result != UDS_SUCCESS)
return uds_log_error_strerror(result, "cannot read index config version");
return vdo_log_error_strerror(result, "cannot read index config version");
if (!is_version(INDEX_CONFIG_VERSION_6_02, version_buffer) &&
!is_version(INDEX_CONFIG_VERSION_8_02, version_buffer)) {
return uds_log_error_strerror(UDS_CORRUPT_DATA,
return vdo_log_error_strerror(UDS_CORRUPT_DATA,
"unsupported configuration version: '%.*s'",
INDEX_CONFIG_VERSION_LENGTH,
version_buffer);
......@@ -121,7 +121,7 @@ int uds_validate_config_contents(struct buffered_reader *reader,
result = uds_read_from_buffered_reader(reader, buffer, sizeof(buffer));
if (result != UDS_SUCCESS)
return uds_log_error_strerror(result, "cannot read config data");
return vdo_log_error_strerror(result, "cannot read config data");
decode_u32_le(buffer, &offset, &geometry.record_pages_per_chapter);
decode_u32_le(buffer, &offset, &geometry.chapters_per_volume);
......@@ -149,7 +149,7 @@ int uds_validate_config_contents(struct buffered_reader *reader,
result = uds_read_from_buffered_reader(reader, remapping,
sizeof(remapping));
if (result != UDS_SUCCESS)
return uds_log_error_strerror(result, "cannot read converted config");
return vdo_log_error_strerror(result, "cannot read converted config");
offset = 0;
decode_u64_le(remapping, &offset,
......@@ -159,7 +159,7 @@ int uds_validate_config_contents(struct buffered_reader *reader,
}
if (!are_matching_configurations(&config, &geometry, user_config)) {
uds_log_warning("Supplied configuration does not match save");
vdo_log_warning("Supplied configuration does not match save");
return UDS_NO_INDEX;
}
......@@ -263,7 +263,7 @@ static int compute_memory_sizes(uds_memory_config_size_t mem_gb, bool sparse,
DEFAULT_CHAPTERS_PER_VOLUME);
*record_pages_per_chapter = DEFAULT_RECORD_PAGES_PER_CHAPTER;
} else {
uds_log_error("received invalid memory size");
vdo_log_error("received invalid memory size");
return -EINVAL;
}
......@@ -292,7 +292,7 @@ static unsigned int __must_check normalize_zone_count(unsigned int requested)
if (zone_count > MAX_ZONES)
zone_count = MAX_ZONES;
uds_log_info("Using %u indexing zone%s for concurrency.",
vdo_log_info("Using %u indexing zone%s for concurrency.",
zone_count, zone_count == 1 ? "" : "s");
return zone_count;
}
......@@ -364,13 +364,13 @@ void uds_log_configuration(struct uds_configuration *config)
{
struct index_geometry *geometry = config->geometry;
uds_log_debug("Configuration:");
uds_log_debug(" Record pages per chapter: %10u", geometry->record_pages_per_chapter);
uds_log_debug(" Chapters per volume: %10u", geometry->chapters_per_volume);
uds_log_debug(" Sparse chapters per volume: %10u", geometry->sparse_chapters_per_volume);
uds_log_debug(" Cache size (chapters): %10u", config->cache_chapters);
uds_log_debug(" Volume index mean delta: %10u", config->volume_index_mean_delta);
uds_log_debug(" Bytes per page: %10zu", geometry->bytes_per_page);
uds_log_debug(" Sparse sample rate: %10u", config->sparse_sample_rate);
uds_log_debug(" Nonce: %llu", (unsigned long long) config->nonce);
vdo_log_debug("Configuration:");
vdo_log_debug(" Record pages per chapter: %10u", geometry->record_pages_per_chapter);
vdo_log_debug(" Chapters per volume: %10u", geometry->chapters_per_volume);
vdo_log_debug(" Sparse chapters per volume: %10u", geometry->sparse_chapters_per_volume);
vdo_log_debug(" Cache size (chapters): %10u", config->cache_chapters);
vdo_log_debug(" Volume index mean delta: %10u", config->volume_index_mean_delta);
vdo_log_debug(" Bytes per page: %10zu", geometry->bytes_per_page);
vdo_log_debug(" Sparse sample rate: %10u", config->sparse_sample_rate);
vdo_log_debug(" Nonce: %llu", (unsigned long long) config->nonce);
}
......@@ -375,7 +375,7 @@ int uds_initialize_delta_index(struct delta_index *delta_index, unsigned int zon
*/
if (delta_index->list_count <= first_list_in_zone) {
uds_uninitialize_delta_index(delta_index);
return uds_log_error_strerror(UDS_INVALID_ARGUMENT,
return vdo_log_error_strerror(UDS_INVALID_ARGUMENT,
"%u delta lists not enough for %u zones",
list_count, zone_count);
}
......@@ -732,7 +732,7 @@ int uds_pack_delta_index_page(const struct delta_index *delta_index, u64 header_
free_bits -= GUARD_BITS;
if (free_bits < IMMUTABLE_HEADER_SIZE) {
/* This page is too small to store any delta lists. */
return uds_log_error_strerror(UDS_OVERFLOW,
return vdo_log_error_strerror(UDS_OVERFLOW,
"Chapter Index Page of %zu bytes is too small",
memory_size);
}
......@@ -843,7 +843,7 @@ int uds_start_restoring_delta_index(struct delta_index *delta_index,
result = uds_read_from_buffered_reader(buffered_readers[z], buffer,
sizeof(buffer));
if (result != UDS_SUCCESS) {
return uds_log_warning_strerror(result,
return vdo_log_warning_strerror(result,
"failed to read delta index header");
}
......@@ -860,23 +860,23 @@ int uds_start_restoring_delta_index(struct delta_index *delta_index,
"%zu bytes decoded of %zu expected", offset,
sizeof(struct delta_index_header));
if (result != VDO_SUCCESS) {
return uds_log_warning_strerror(result,
return vdo_log_warning_strerror(result,
"failed to read delta index header");
}
if (memcmp(header.magic, DELTA_INDEX_MAGIC, MAGIC_SIZE) != 0) {
return uds_log_warning_strerror(UDS_CORRUPT_DATA,
return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
"delta index file has bad magic number");
}
if (zone_count != header.zone_count) {
return uds_log_warning_strerror(UDS_CORRUPT_DATA,
return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
"delta index files contain mismatched zone counts (%u,%u)",
zone_count, header.zone_count);
}
if (header.zone_number != z) {
return uds_log_warning_strerror(UDS_CORRUPT_DATA,
return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
"delta index zone %u found in slot %u",
header.zone_number, z);
}
......@@ -887,7 +887,7 @@ int uds_start_restoring_delta_index(struct delta_index *delta_index,
collision_count += header.collision_count;
if (first_list[z] != list_next) {
return uds_log_warning_strerror(UDS_CORRUPT_DATA,
return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
"delta index file for zone %u starts with list %u instead of list %u",
z, first_list[z], list_next);
}
......@@ -896,13 +896,13 @@ int uds_start_restoring_delta_index(struct delta_index *delta_index,
}
if (list_next != delta_index->list_count) {
return uds_log_warning_strerror(UDS_CORRUPT_DATA,
return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
"delta index files contain %u delta lists instead of %u delta lists",
list_next, delta_index->list_count);
}
if (collision_count > record_count) {
return uds_log_warning_strerror(UDS_CORRUPT_DATA,
return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
"delta index files contain %llu collisions and %llu records",
(unsigned long long) collision_count,
(unsigned long long) record_count);
......@@ -927,7 +927,7 @@ int uds_start_restoring_delta_index(struct delta_index *delta_index,
size_data,
sizeof(size_data));
if (result != UDS_SUCCESS) {
return uds_log_warning_strerror(result,
return vdo_log_warning_strerror(result,
"failed to read delta index size");
}
......@@ -960,7 +960,7 @@ static int restore_delta_list_to_zone(struct delta_zone *delta_zone,
u32 list_number = save_info->index - delta_zone->first_list;
if (list_number >= delta_zone->list_count) {
return uds_log_warning_strerror(UDS_CORRUPT_DATA,
return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
"invalid delta list number %u not in range [%u,%u)",
save_info->index, delta_zone->first_list,
delta_zone->first_list + delta_zone->list_count);
......@@ -968,7 +968,7 @@ static int restore_delta_list_to_zone(struct delta_zone *delta_zone,
delta_list = &delta_zone->delta_lists[list_number + 1];
if (delta_list->size == 0) {
return uds_log_warning_strerror(UDS_CORRUPT_DATA,
return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
"unexpected delta list number %u",
save_info->index);
}
......@@ -976,7 +976,7 @@ static int restore_delta_list_to_zone(struct delta_zone *delta_zone,
bit_count = delta_list->size + save_info->bit_offset;
byte_count = BITS_TO_BYTES(bit_count);
if (save_info->byte_count != byte_count) {
return uds_log_warning_strerror(UDS_CORRUPT_DATA,
return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
"unexpected delta list size %u != %u",
save_info->byte_count, byte_count);
}
......@@ -996,7 +996,7 @@ static int restore_delta_list_data(struct delta_index *delta_index, unsigned int
result = uds_read_from_buffered_reader(buffered_reader, buffer, sizeof(buffer));
if (result != UDS_SUCCESS) {
return uds_log_warning_strerror(result,
return vdo_log_warning_strerror(result,
"failed to read delta list data");
}
......@@ -1009,7 +1009,7 @@ static int restore_delta_list_data(struct delta_index *delta_index, unsigned int
if ((save_info.bit_offset >= BITS_PER_BYTE) ||
(save_info.byte_count > DELTA_LIST_MAX_BYTE_COUNT)) {
return uds_log_warning_strerror(UDS_CORRUPT_DATA,
return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
"corrupt delta list data");
}
......@@ -1018,7 +1018,7 @@ static int restore_delta_list_data(struct delta_index *delta_index, unsigned int
return UDS_CORRUPT_DATA;
if (save_info.index >= delta_index->list_count) {
return uds_log_warning_strerror(UDS_CORRUPT_DATA,
return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
"invalid delta list number %u of %u",
save_info.index,
delta_index->list_count);
......@@ -1027,7 +1027,7 @@ static int restore_delta_list_data(struct delta_index *delta_index, unsigned int
result = uds_read_from_buffered_reader(buffered_reader, data,
save_info.byte_count);
if (result != UDS_SUCCESS) {
return uds_log_warning_strerror(result,
return vdo_log_warning_strerror(result,
"failed to read delta list data");
}
......@@ -1102,7 +1102,7 @@ static int flush_delta_list(struct delta_zone *zone, u32 flush_index)
result = uds_write_to_buffered_writer(zone->buffered_writer, buffer,
sizeof(buffer));
if (result != UDS_SUCCESS) {
uds_log_warning_strerror(result, "failed to write delta list memory");
vdo_log_warning_strerror(result, "failed to write delta list memory");
return result;
}
......@@ -1110,7 +1110,7 @@ static int flush_delta_list(struct delta_zone *zone, u32 flush_index)
zone->memory + get_delta_list_byte_start(delta_list),
get_delta_list_byte_size(delta_list));
if (result != UDS_SUCCESS)
uds_log_warning_strerror(result, "failed to write delta list memory");
vdo_log_warning_strerror(result, "failed to write delta list memory");
return result;
}
......@@ -1144,7 +1144,7 @@ int uds_start_saving_delta_index(const struct delta_index *delta_index,
result = uds_write_to_buffered_writer(buffered_writer, buffer, offset);
if (result != UDS_SUCCESS)
return uds_log_warning_strerror(result,
return vdo_log_warning_strerror(result,
"failed to write delta index header");
for (i = 0; i < delta_zone->list_count; i++) {
......@@ -1156,7 +1156,7 @@ int uds_start_saving_delta_index(const struct delta_index *delta_index,
result = uds_write_to_buffered_writer(buffered_writer, data,
sizeof(data));
if (result != UDS_SUCCESS)
return uds_log_warning_strerror(result,
return vdo_log_warning_strerror(result,
"failed to write delta list size");
}
......@@ -1197,7 +1197,7 @@ int uds_write_guard_delta_list(struct buffered_writer *buffered_writer)
result = uds_write_to_buffered_writer(buffered_writer, buffer, sizeof(buffer));
if (result != UDS_SUCCESS)
uds_log_warning_strerror(result, "failed to write guard delta list");
vdo_log_warning_strerror(result, "failed to write guard delta list");
return UDS_SUCCESS;
}
......@@ -1378,7 +1378,7 @@ noinline int uds_next_delta_index_entry(struct delta_index_entry *delta_entry)
* This is not an assertion because uds_validate_chapter_index_page() wants to
* handle this error.
*/
uds_log_warning("Decoded past the end of the delta list");
vdo_log_warning("Decoded past the end of the delta list");
return UDS_CORRUPT_DATA;
}
......@@ -1959,7 +1959,7 @@ u32 uds_get_delta_index_page_count(u32 entry_count, u32 list_count, u32 mean_del
void uds_log_delta_index_entry(struct delta_index_entry *delta_entry)
{
uds_log_ratelimit(uds_log_info,
vdo_log_ratelimit(vdo_log_info,
"List 0x%X Key 0x%X Offset 0x%X%s%s List_size 0x%X%s",
delta_entry->list_number, delta_entry->key,
delta_entry->offset, delta_entry->at_end ? " end" : "",
......
This diff is collapsed.
......@@ -167,7 +167,7 @@ int uds_read_index_page_map(struct index_page_map *map, struct buffered_reader *
decode_u16_le(buffer, &offset, &map->entries[i]);
vdo_free(buffer);
uds_log_debug("read index page map, last update %llu",
vdo_log_debug("read index page map, last update %llu",
(unsigned long long) map->last_update);
return UDS_SUCCESS;
}
......@@ -104,7 +104,7 @@ int uds_launch_request(struct uds_request *request)
int result;
if (request->callback == NULL) {
uds_log_error("missing required callback");
vdo_log_error("missing required callback");
return -EINVAL;
}
......@@ -116,7 +116,7 @@ int uds_launch_request(struct uds_request *request)
case UDS_UPDATE:
break;
default:
uds_log_error("received invalid callback type");
vdo_log_error("received invalid callback type");
return -EINVAL;
}
......@@ -244,7 +244,7 @@ static int __must_check make_empty_index_session(struct uds_index_session **inde
int uds_create_index_session(struct uds_index_session **session)
{
if (session == NULL) {
uds_log_error("missing session pointer");
vdo_log_error("missing session pointer");
return -EINVAL;
}
......@@ -257,10 +257,10 @@ static int __must_check start_loading_index_session(struct uds_index_session *in
mutex_lock(&index_session->request_mutex);
if (index_session->state & IS_FLAG_SUSPENDED) {
uds_log_info("Index session is suspended");
vdo_log_info("Index session is suspended");
result = -EBUSY;
} else if (index_session->state != 0) {
uds_log_info("Index is already loaded");
vdo_log_info("Index is already loaded");
result = -EBUSY;
} else {
index_session->state |= IS_FLAG_LOADING;
......@@ -290,7 +290,7 @@ static int initialize_index_session(struct uds_index_session *index_session,
result = uds_make_configuration(&index_session->parameters, &config);
if (result != UDS_SUCCESS) {
uds_log_error_strerror(result, "Failed to allocate config");
vdo_log_error_strerror(result, "Failed to allocate config");
return result;
}
......@@ -298,7 +298,7 @@ static int initialize_index_session(struct uds_index_session *index_session,
result = uds_make_index(config, open_type, &index_session->load_context,
enter_callback_stage, &index_session->index);
if (result != UDS_SUCCESS)
uds_log_error_strerror(result, "Failed to make index");
vdo_log_error_strerror(result, "Failed to make index");
else
uds_log_configuration(config);
......@@ -332,15 +332,15 @@ int uds_open_index(enum uds_open_index_type open_type,
char name[BDEVNAME_SIZE];
if (parameters == NULL) {
uds_log_error("missing required parameters");
vdo_log_error("missing required parameters");
return -EINVAL;
}
if (parameters->bdev == NULL) {
uds_log_error("missing required block device");
vdo_log_error("missing required block device");
return -EINVAL;
}
if (session == NULL) {
uds_log_error("missing required session pointer");
vdo_log_error("missing required session pointer");
return -EINVAL;
}
......@@ -350,11 +350,11 @@ int uds_open_index(enum uds_open_index_type open_type,
session->parameters = *parameters;
format_dev_t(name, parameters->bdev->bd_dev);
uds_log_info("%s: %s", get_open_type_string(open_type), name);
vdo_log_info("%s: %s", get_open_type_string(open_type), name);
result = initialize_index_session(session, open_type);
if (result != UDS_SUCCESS)
uds_log_error_strerror(result, "Failed %s",
vdo_log_error_strerror(result, "Failed %s",
get_open_type_string(open_type));
finish_loading_index_session(session, result);
......@@ -426,7 +426,7 @@ int uds_suspend_index_session(struct uds_index_session *session, bool save)
if ((session->state & IS_FLAG_WAITING) || (session->state & IS_FLAG_DESTROYING)) {
no_work = true;
uds_log_info("Index session is already changing state");
vdo_log_info("Index session is already changing state");
result = -EBUSY;
} else if (session->state & IS_FLAG_SUSPENDED) {
no_work = true;
......@@ -485,7 +485,7 @@ int uds_resume_index_session(struct uds_index_session *session,
mutex_lock(&session->request_mutex);
if (session->state & IS_FLAG_WAITING) {
uds_log_info("Index session is already changing state");
vdo_log_info("Index session is already changing state");
no_work = true;
result = -EBUSY;
} else if (!(session->state & IS_FLAG_SUSPENDED)) {
......@@ -562,7 +562,7 @@ static int save_and_free_index(struct uds_index_session *index_session)
if (!suspended) {
result = uds_save_index(index);
if (result != UDS_SUCCESS)
uds_log_warning_strerror(result,
vdo_log_warning_strerror(result,
"ignoring error from save_index");
}
uds_free_index(index);
......@@ -598,7 +598,7 @@ int uds_close_index(struct uds_index_session *index_session)
}
if (index_session->state & IS_FLAG_SUSPENDED) {
uds_log_info("Index session is suspended");
vdo_log_info("Index session is suspended");
result = -EBUSY;
} else if ((index_session->state & IS_FLAG_DESTROYING) ||
!(index_session->state & IS_FLAG_LOADED)) {
......@@ -611,10 +611,10 @@ int uds_close_index(struct uds_index_session *index_session)
if (result != UDS_SUCCESS)
return uds_status_to_errno(result);
uds_log_debug("Closing index");
vdo_log_debug("Closing index");
wait_for_no_requests_in_progress(index_session);
result = save_and_free_index(index_session);
uds_log_debug("Closed index");
vdo_log_debug("Closed index");
mutex_lock(&index_session->request_mutex);
index_session->state &= ~IS_FLAG_CLOSING;
......@@ -629,7 +629,7 @@ int uds_destroy_index_session(struct uds_index_session *index_session)
int result;
bool load_pending = false;
uds_log_debug("Destroying index session");
vdo_log_debug("Destroying index session");
/* Wait for any current index state change to complete. */
mutex_lock(&index_session->request_mutex);
......@@ -641,7 +641,7 @@ int uds_destroy_index_session(struct uds_index_session *index_session)
if (index_session->state & IS_FLAG_DESTROYING) {
mutex_unlock(&index_session->request_mutex);
uds_log_info("Index session is already closing");
vdo_log_info("Index session is already closing");
return -EBUSY;
}
......@@ -672,7 +672,7 @@ int uds_destroy_index_session(struct uds_index_session *index_session)
result = save_and_free_index(index_session);
uds_request_queue_finish(index_session->callback_queue);
index_session->callback_queue = NULL;
uds_log_debug("Destroyed index session");
vdo_log_debug("Destroyed index session");
vdo_free(index_session);
return uds_status_to_errno(result);
}
......@@ -710,7 +710,7 @@ int uds_get_index_session_stats(struct uds_index_session *index_session,
struct uds_index_stats *stats)
{
if (stats == NULL) {
uds_log_error("received a NULL index stats pointer");
vdo_log_error("received a NULL index stats pointer");
return -EINVAL;
}
......
......@@ -188,7 +188,7 @@ static int finish_previous_chapter(struct uds_index *index, u64 current_chapter_
mutex_unlock(&writer->mutex);
if (result != UDS_SUCCESS)
return uds_log_error_strerror(result,
return vdo_log_error_strerror(result,
"Writing of previous open chapter failed");
return UDS_SUCCESS;
......@@ -258,7 +258,7 @@ static int open_next_chapter(struct index_zone *zone)
unsigned int finished_zones;
u32 expire_chapters;
uds_log_debug("closing chapter %llu of zone %u after %u entries (%u short)",
vdo_log_debug("closing chapter %llu of zone %u after %u entries (%u short)",
(unsigned long long) zone->newest_virtual_chapter, zone->id,
zone->open_chapter->size,
zone->open_chapter->capacity - zone->open_chapter->size);
......@@ -315,7 +315,7 @@ static int dispatch_index_zone_control_request(struct uds_request *request)
return handle_chapter_closed(zone, message->virtual_chapter);
default:
uds_log_error("invalid message type: %d", message->type);
vdo_log_error("invalid message type: %d", message->type);
return UDS_INVALID_ARGUMENT;
}
}
......@@ -600,7 +600,7 @@ static int dispatch_index_request(struct uds_index *index, struct uds_request *r
break;
default:
result = uds_log_warning_strerror(UDS_INVALID_ARGUMENT,
result = vdo_log_warning_strerror(UDS_INVALID_ARGUMENT,
"invalid request type: %d",
request->type);
break;
......@@ -618,7 +618,7 @@ static void execute_zone_request(struct uds_request *request)
if (request->zone_message.type != UDS_MESSAGE_NONE) {
result = dispatch_index_zone_control_request(request);
if (result != UDS_SUCCESS) {
uds_log_error_strerror(result, "error executing message: %d",
vdo_log_error_strerror(result, "error executing message: %d",
request->zone_message.type);
}
......@@ -678,7 +678,7 @@ static void close_chapters(void *arg)
struct chapter_writer *writer = arg;
struct uds_index *index = writer->index;
uds_log_debug("chapter writer starting");
vdo_log_debug("chapter writer starting");
mutex_lock(&writer->mutex);
for (;;) {
while (writer->zones_to_write < index->zone_count) {
......@@ -688,7 +688,7 @@ static void close_chapters(void *arg)
* open chapter, so we can exit now.
*/
mutex_unlock(&writer->mutex);
uds_log_debug("chapter writer stopping");
vdo_log_debug("chapter writer stopping");
return;
}
uds_wait_cond(&writer->cond, &writer->mutex);
......@@ -711,7 +711,7 @@ static void close_chapters(void *arg)
index->has_saved_open_chapter = false;
result = uds_discard_open_chapter(index->layout);
if (result == UDS_SUCCESS)
uds_log_debug("Discarding saved open chapter");
vdo_log_debug("Discarding saved open chapter");
}
result = uds_close_open_chapter(writer->chapters, index->zone_count,
......@@ -818,7 +818,7 @@ static int load_index(struct uds_index *index)
last_save_chapter = ((index->last_save != NO_LAST_SAVE) ? index->last_save : 0);
uds_log_info("loaded index from chapter %llu through chapter %llu",
vdo_log_info("loaded index from chapter %llu through chapter %llu",
(unsigned long long) index->oldest_virtual_chapter,
(unsigned long long) last_save_chapter);
......@@ -843,7 +843,7 @@ static int rebuild_index_page_map(struct uds_index *index, u64 vcn)
index_page_number,
&chapter_index_page);
if (result != UDS_SUCCESS) {
return uds_log_error_strerror(result,
return vdo_log_error_strerror(result,
"failed to read index page %u in chapter %u",
index_page_number, chapter);
}
......@@ -851,7 +851,7 @@ static int rebuild_index_page_map(struct uds_index *index, u64 vcn)
lowest_delta_list = chapter_index_page->lowest_list_number;
highest_delta_list = chapter_index_page->highest_list_number;
if (lowest_delta_list != expected_list_number) {
return uds_log_error_strerror(UDS_CORRUPT_DATA,
return vdo_log_error_strerror(UDS_CORRUPT_DATA,
"chapter %u index page %u is corrupt",
chapter, index_page_number);
}
......@@ -980,7 +980,7 @@ static int replay_chapter(struct uds_index *index, u64 virtual, bool sparse)
u32 physical_chapter;
if (check_for_suspend(index)) {
uds_log_info("Replay interrupted by index shutdown at chapter %llu",
vdo_log_info("Replay interrupted by index shutdown at chapter %llu",
(unsigned long long) virtual);
return -EBUSY;
}
......@@ -992,7 +992,7 @@ static int replay_chapter(struct uds_index *index, u64 virtual, bool sparse)
result = rebuild_index_page_map(index, virtual);
if (result != UDS_SUCCESS) {
return uds_log_error_strerror(result,
return vdo_log_error_strerror(result,
"could not rebuild index page map for chapter %u",
physical_chapter);
}
......@@ -1005,7 +1005,7 @@ static int replay_chapter(struct uds_index *index, u64 virtual, bool sparse)
result = uds_get_volume_record_page(index->volume, physical_chapter,
record_page_number, &record_page);
if (result != UDS_SUCCESS) {
return uds_log_error_strerror(result, "could not get page %d",
return vdo_log_error_strerror(result, "could not get page %d",
record_page_number);
}
......@@ -1034,7 +1034,7 @@ static int replay_volume(struct uds_index *index)
u64 upto_virtual = index->newest_virtual_chapter;
bool will_be_sparse;
uds_log_info("Replaying volume from chapter %llu through chapter %llu",
vdo_log_info("Replaying volume from chapter %llu through chapter %llu",
(unsigned long long) from_virtual,
(unsigned long long) upto_virtual);
......@@ -1064,7 +1064,7 @@ static int replay_volume(struct uds_index *index)
new_map_update = index->volume->index_page_map->last_update;
if (new_map_update != old_map_update) {
uds_log_info("replay changed index page map update from %llu to %llu",
vdo_log_info("replay changed index page map update from %llu to %llu",
(unsigned long long) old_map_update,
(unsigned long long) new_map_update);
}
......@@ -1084,7 +1084,7 @@ static int rebuild_index(struct uds_index *index)
result = uds_find_volume_chapter_boundaries(index->volume, &lowest, &highest,
&is_empty);
if (result != UDS_SUCCESS) {
return uds_log_fatal_strerror(result,
return vdo_log_fatal_strerror(result,
"cannot rebuild index: unknown volume chapter boundaries");
}
......@@ -1194,7 +1194,7 @@ int uds_make_index(struct uds_configuration *config, enum uds_open_index_type op
result = make_index_zone(index, z);
if (result != UDS_SUCCESS) {
uds_free_index(index);
return uds_log_error_strerror(result,
return vdo_log_error_strerror(result,
"Could not create index zone");
}
}
......@@ -1203,7 +1203,7 @@ int uds_make_index(struct uds_configuration *config, enum uds_open_index_type op
result = uds_make_volume_index(config, nonce, &index->volume_index);
if (result != UDS_SUCCESS) {
uds_free_index(index);
return uds_log_error_strerror(result, "could not make volume index");
return vdo_log_error_strerror(result, "could not make volume index");
}
index->load_context = load_context;
......@@ -1229,14 +1229,14 @@ int uds_make_index(struct uds_configuration *config, enum uds_open_index_type op
break;
case -ENOMEM:
/* We should not try a rebuild for this error. */
uds_log_error_strerror(result, "index could not be loaded");
vdo_log_error_strerror(result, "index could not be loaded");
break;
default:
uds_log_error_strerror(result, "index could not be loaded");
vdo_log_error_strerror(result, "index could not be loaded");
if (open_type == UDS_LOAD) {
result = rebuild_index(index);
if (result != UDS_SUCCESS) {
uds_log_error_strerror(result,
vdo_log_error_strerror(result,
"index could not be rebuilt");
}
}
......@@ -1246,7 +1246,7 @@ int uds_make_index(struct uds_configuration *config, enum uds_open_index_type op
if (result != UDS_SUCCESS) {
uds_free_index(index);
return uds_log_error_strerror(result, "fatal error in %s()", __func__);
return vdo_log_error_strerror(result, "fatal error in %s()", __func__);
}
for (z = 0; z < index->zone_count; z++) {
......@@ -1320,16 +1320,16 @@ int uds_save_index(struct uds_index *index)
index->prev_save = index->last_save;
index->last_save = ((index->newest_virtual_chapter == 0) ?
NO_LAST_SAVE : index->newest_virtual_chapter - 1);
uds_log_info("beginning save (vcn %llu)", (unsigned long long) index->last_save);
vdo_log_info("beginning save (vcn %llu)", (unsigned long long) index->last_save);
result = uds_save_index_state(index->layout, index);
if (result != UDS_SUCCESS) {
uds_log_info("save index failed");
vdo_log_info("save index failed");
index->last_save = index->prev_save;
} else {
index->has_saved_open_chapter = true;
index->need_to_save = false;
uds_log_info("finished save (vcn %llu)",
vdo_log_info("finished save (vcn %llu)",
(unsigned long long) index->last_save);
}
......
......@@ -365,7 +365,7 @@ void uds_free_buffered_writer(struct buffered_writer *writer)
flush_previous_buffer(writer);
result = -dm_bufio_write_dirty_buffers(writer->client);
if (result != UDS_SUCCESS)
uds_log_warning_strerror(result, "%s: failed to sync storage", __func__);
vdo_log_warning_strerror(result, "%s: failed to sync storage", __func__);
dm_bufio_client_destroy(writer->client);
uds_put_io_factory(writer->factory);
......
......@@ -259,14 +259,14 @@ static int fill_delta_chapter_index(struct open_chapter_zone **chapter_zones,
overflow_count++;
break;
default:
uds_log_error_strerror(result,
vdo_log_error_strerror(result,
"failed to build open chapter index");
return result;
}
}
if (overflow_count > 0)
uds_log_warning("Failed to add %d entries to chapter index",
vdo_log_warning("Failed to add %d entries to chapter index",
overflow_count);
return UDS_SUCCESS;
......@@ -417,7 +417,7 @@ int uds_load_open_chapter(struct uds_index *index, struct buffered_reader *reade
return result;
if (memcmp(OPEN_CHAPTER_VERSION, version, sizeof(version)) != 0) {
return uds_log_error_strerror(UDS_CORRUPT_DATA,
return vdo_log_error_strerror(UDS_CORRUPT_DATA,
"Invalid open chapter version: %.*s",
(int) sizeof(version), version);
}
......
......@@ -225,13 +225,13 @@ static int compute_volume_sub_index_parameters(const struct uds_configuration *c
params->address_bits = bits_per(address_count - 1);
params->chapter_bits = bits_per(rounded_chapters - 1);
if ((u32) params->list_count != params->list_count) {
return uds_log_warning_strerror(UDS_INVALID_ARGUMENT,
return vdo_log_warning_strerror(UDS_INVALID_ARGUMENT,
"cannot initialize volume index with %llu delta lists",
(unsigned long long) params->list_count);
}
if (params->address_bits > 31) {
return uds_log_warning_strerror(UDS_INVALID_ARGUMENT,
return vdo_log_warning_strerror(UDS_INVALID_ARGUMENT,
"cannot initialize volume index with %u address bits",
params->address_bits);
}
......@@ -568,7 +568,7 @@ int uds_put_volume_index_record(struct volume_index_record *record, u64 virtual_
u64 low = get_zone_for_record(record)->virtual_chapter_low;
u64 high = get_zone_for_record(record)->virtual_chapter_high;
return uds_log_warning_strerror(UDS_INVALID_ARGUMENT,
return vdo_log_warning_strerror(UDS_INVALID_ARGUMENT,
"cannot put record into chapter number %llu that is out of the valid range %llu to %llu",
(unsigned long long) virtual_chapter,
(unsigned long long) low,
......@@ -590,7 +590,7 @@ int uds_put_volume_index_record(struct volume_index_record *record, u64 virtual_
record->is_found = true;
break;
case UDS_OVERFLOW:
uds_log_ratelimit(uds_log_warning_strerror, UDS_OVERFLOW,
vdo_log_ratelimit(vdo_log_warning_strerror, UDS_OVERFLOW,
"Volume index entry dropped due to overflow condition");
uds_log_delta_index_entry(&record->delta_entry);
break;
......@@ -606,7 +606,7 @@ int uds_remove_volume_index_record(struct volume_index_record *record)
int result;
if (!record->is_found)
return uds_log_warning_strerror(UDS_BAD_STATE,
return vdo_log_warning_strerror(UDS_BAD_STATE,
"illegal operation on new record");
/* Mark the record so that it cannot be used again */
......@@ -644,7 +644,7 @@ static void set_volume_sub_index_zone_open_chapter(struct volume_sub_index *sub_
1 + (used_bits - sub_index->max_zone_bits) / sub_index->chapter_zone_bits;
if (expire_count == 1) {
uds_log_ratelimit(uds_log_info,
vdo_log_ratelimit(vdo_log_info,
"zone %u: At chapter %llu, expiring chapter %llu early",
zone_number,
(unsigned long long) virtual_chapter,
......@@ -662,7 +662,7 @@ static void set_volume_sub_index_zone_open_chapter(struct volume_sub_index *sub_
zone->virtual_chapter_high - zone->virtual_chapter_low;
zone->virtual_chapter_low = zone->virtual_chapter_high;
}
uds_log_ratelimit(uds_log_info,
vdo_log_ratelimit(vdo_log_info,
"zone %u: At chapter %llu, expiring chapters %llu to %llu early",
zone_number,
(unsigned long long) virtual_chapter,
......@@ -713,14 +713,14 @@ int uds_set_volume_index_record_chapter(struct volume_index_record *record,
int result;
if (!record->is_found)
return uds_log_warning_strerror(UDS_BAD_STATE,
return vdo_log_warning_strerror(UDS_BAD_STATE,
"illegal operation on new record");
if (!is_virtual_chapter_indexed(record, virtual_chapter)) {
u64 low = get_zone_for_record(record)->virtual_chapter_low;
u64 high = get_zone_for_record(record)->virtual_chapter_high;
return uds_log_warning_strerror(UDS_INVALID_ARGUMENT,
return vdo_log_warning_strerror(UDS_INVALID_ARGUMENT,
"cannot set chapter number %llu that is out of the valid range %llu to %llu",
(unsigned long long) virtual_chapter,
(unsigned long long) low,
......@@ -820,7 +820,7 @@ static int start_restoring_volume_sub_index(struct volume_sub_index *sub_index,
result = uds_read_from_buffered_reader(readers[i], buffer,
sizeof(buffer));
if (result != UDS_SUCCESS) {
return uds_log_warning_strerror(result,
return vdo_log_warning_strerror(result,
"failed to read volume index header");
}
......@@ -839,14 +839,14 @@ static int start_restoring_volume_sub_index(struct volume_sub_index *sub_index,
result = UDS_CORRUPT_DATA;
if (memcmp(header.magic, MAGIC_START_5, MAGIC_SIZE) != 0) {
return uds_log_warning_strerror(UDS_CORRUPT_DATA,
return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
"volume index file had bad magic number");
}
if (sub_index->volume_nonce == 0) {
sub_index->volume_nonce = header.volume_nonce;
} else if (header.volume_nonce != sub_index->volume_nonce) {
return uds_log_warning_strerror(UDS_CORRUPT_DATA,
return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
"volume index volume nonce incorrect");
}
......@@ -857,7 +857,7 @@ static int start_restoring_volume_sub_index(struct volume_sub_index *sub_index,
u64 low = header.virtual_chapter_low;
u64 high = header.virtual_chapter_high;
return uds_log_warning_strerror(UDS_CORRUPT_DATA,
return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
"Inconsistent volume index zone files: Chapter range is [%llu,%llu], chapter range %d is [%llu,%llu]",
(unsigned long long) virtual_chapter_low,
(unsigned long long) virtual_chapter_high,
......@@ -873,7 +873,7 @@ static int start_restoring_volume_sub_index(struct volume_sub_index *sub_index,
result = uds_read_from_buffered_reader(readers[i], decoded,
sizeof(u64));
if (result != UDS_SUCCESS) {
return uds_log_warning_strerror(result,
return vdo_log_warning_strerror(result,
"failed to read volume index flush ranges");
}
......@@ -891,7 +891,7 @@ static int start_restoring_volume_sub_index(struct volume_sub_index *sub_index,
result = uds_start_restoring_delta_index(&sub_index->delta_index, readers,
reader_count);
if (result != UDS_SUCCESS)
return uds_log_warning_strerror(result, "restoring delta index failed");
return vdo_log_warning_strerror(result, "restoring delta index failed");
return UDS_SUCCESS;
}
......@@ -916,7 +916,7 @@ static int start_restoring_volume_index(struct volume_index *volume_index,
result = uds_read_from_buffered_reader(buffered_readers[i], buffer,
sizeof(buffer));
if (result != UDS_SUCCESS) {
return uds_log_warning_strerror(result,
return vdo_log_warning_strerror(result,
"failed to read volume index header");
}
......@@ -931,13 +931,13 @@ static int start_restoring_volume_index(struct volume_index *volume_index,
result = UDS_CORRUPT_DATA;
if (memcmp(header.magic, MAGIC_START_6, MAGIC_SIZE) != 0)
return uds_log_warning_strerror(UDS_CORRUPT_DATA,
return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
"volume index file had bad magic number");
if (i == 0) {
volume_index->sparse_sample_rate = header.sparse_sample_rate;
} else if (volume_index->sparse_sample_rate != header.sparse_sample_rate) {
uds_log_warning_strerror(UDS_CORRUPT_DATA,
vdo_log_warning_strerror(UDS_CORRUPT_DATA,
"Inconsistent sparse sample rate in delta index zone files: %u vs. %u",
volume_index->sparse_sample_rate,
header.sparse_sample_rate);
......@@ -1031,7 +1031,7 @@ static int start_saving_volume_sub_index(const struct volume_sub_index *sub_inde
result = uds_write_to_buffered_writer(buffered_writer, buffer, offset);
if (result != UDS_SUCCESS)
return uds_log_warning_strerror(result,
return vdo_log_warning_strerror(result,
"failed to write volume index header");
for (i = 0; i < list_count; i++) {
......@@ -1041,7 +1041,7 @@ static int start_saving_volume_sub_index(const struct volume_sub_index *sub_inde
result = uds_write_to_buffered_writer(buffered_writer, encoded,
sizeof(u64));
if (result != UDS_SUCCESS) {
return uds_log_warning_strerror(result,
return vdo_log_warning_strerror(result,
"failed to write volume index flush ranges");
}
}
......@@ -1074,7 +1074,7 @@ static int start_saving_volume_index(const struct volume_index *volume_index,
result = uds_write_to_buffered_writer(writer, buffer, offset);
if (result != UDS_SUCCESS) {
uds_log_warning_strerror(result, "failed to write volume index header");
vdo_log_warning_strerror(result, "failed to write volume index header");
return result;
}
......@@ -1264,7 +1264,7 @@ int uds_make_volume_index(const struct uds_configuration *config, u64 volume_non
&volume_index->vi_non_hook);
if (result != UDS_SUCCESS) {
uds_free_volume_index(volume_index);
return uds_log_error_strerror(result,
return vdo_log_error_strerror(result,
"Error creating non hook volume index");
}
......@@ -1272,7 +1272,7 @@ int uds_make_volume_index(const struct uds_configuration *config, u64 volume_non
&volume_index->vi_hook);
if (result != UDS_SUCCESS) {
uds_free_volume_index(volume_index);
return uds_log_error_strerror(result,
return vdo_log_error_strerror(result,
"Error creating hook volume index");
}
......
This diff is collapsed.
......@@ -381,7 +381,7 @@ static int resize_buckets(struct int_map *map)
/* Re-initialize the map to be empty and 50% larger. */
size_t new_capacity = map->capacity / 2 * 3;
uds_log_info("%s: attempting resize from %zu to %zu, current size=%zu",
vdo_log_info("%s: attempting resize from %zu to %zu, current size=%zu",
__func__, map->capacity, new_capacity, map->size);
result = allocate_buckets(map, new_capacity);
if (result != VDO_SUCCESS) {
......
......@@ -408,7 +408,7 @@ int vdo_make_io_submitter(unsigned int thread_count, unsigned int rotation_inter
* Clean up the partially initialized bio-queue entirely and indicate that
* initialization failed.
*/
uds_log_error("bio map initialization failed %d", result);
vdo_log_error("bio map initialization failed %d", result);
vdo_cleanup_io_submitter(io_submitter);
vdo_free_io_submitter(io_submitter);
return result;
......@@ -423,7 +423,7 @@ int vdo_make_io_submitter(unsigned int thread_count, unsigned int rotation_inter
* initialization failed.
*/
vdo_int_map_free(vdo_forget(bio_queue_data->map));
uds_log_error("bio queue initialization failed %d", result);
vdo_log_error("bio queue initialization failed %d", result);
vdo_cleanup_io_submitter(io_submitter);
vdo_free_io_submitter(io_submitter);
return result;
......
......@@ -16,14 +16,14 @@
#include "thread-device.h"
#include "thread-utils.h"
int vdo_log_level = UDS_LOG_DEFAULT;
int vdo_log_level = VDO_LOG_DEFAULT;
int uds_get_log_level(void)
int vdo_get_log_level(void)
{
int log_level_latch = READ_ONCE(vdo_log_level);
if (unlikely(log_level_latch > UDS_LOG_MAX)) {
log_level_latch = UDS_LOG_DEFAULT;
if (unlikely(log_level_latch > VDO_LOG_MAX)) {
log_level_latch = VDO_LOG_DEFAULT;
WRITE_ONCE(vdo_log_level, log_level_latch);
}
return log_level_latch;
......@@ -54,7 +54,7 @@ static void emit_log_message_to_kernel(int priority, const char *fmt, ...)
va_list args;
struct va_format vaf;
if (priority > uds_get_log_level())
if (priority > vdo_get_log_level())
return;
va_start(args, fmt);
......@@ -62,22 +62,22 @@ static void emit_log_message_to_kernel(int priority, const char *fmt, ...)
vaf.va = &args;
switch (priority) {
case UDS_LOG_EMERG:
case UDS_LOG_ALERT:
case UDS_LOG_CRIT:
case VDO_LOG_EMERG:
case VDO_LOG_ALERT:
case VDO_LOG_CRIT:
pr_crit("%pV", &vaf);
break;
case UDS_LOG_ERR:
case VDO_LOG_ERR:
pr_err("%pV", &vaf);
break;
case UDS_LOG_WARNING:
case VDO_LOG_WARNING:
pr_warn("%pV", &vaf);
break;
case UDS_LOG_NOTICE:
case UDS_LOG_INFO:
case VDO_LOG_NOTICE:
case VDO_LOG_INFO:
pr_info("%pV", &vaf);
break;
case UDS_LOG_DEBUG:
case VDO_LOG_DEBUG:
pr_debug("%pV", &vaf);
break;
default:
......@@ -150,7 +150,7 @@ static void emit_log_message(int priority, const char *module, const char *prefi
}
/*
* uds_log_embedded_message() - Log a message embedded within another message.
* vdo_log_embedded_message() - Log a message embedded within another message.
* @priority: the priority at which to log the message
* @module: the name of the module doing the logging
* @prefix: optional string prefix to message, may be NULL
......@@ -158,7 +158,7 @@ static void emit_log_message(int priority, const char *module, const char *prefi
* @args1: arguments for message first part (required)
* @fmt2: format of message second part
*/
void uds_log_embedded_message(int priority, const char *module, const char *prefix,
void vdo_log_embedded_message(int priority, const char *module, const char *prefix,
const char *fmt1, va_list args1, const char *fmt2, ...)
{
va_list args1_copy;
......@@ -168,7 +168,7 @@ void uds_log_embedded_message(int priority, const char *module, const char *pref
va_start(args2, fmt2);
if (module == NULL)
module = UDS_LOGGING_MODULE_NAME;
module = VDO_LOGGING_MODULE_NAME;
if (prefix == NULL)
prefix = "";
......@@ -191,41 +191,41 @@ void uds_log_embedded_message(int priority, const char *module, const char *pref
va_end(args2);
}
int uds_vlog_strerror(int priority, int errnum, const char *module, const char *format,
int vdo_vlog_strerror(int priority, int errnum, const char *module, const char *format,
va_list args)
{
char errbuf[UDS_MAX_ERROR_MESSAGE_SIZE];
char errbuf[VDO_MAX_ERROR_MESSAGE_SIZE];
const char *message = uds_string_error(errnum, errbuf, sizeof(errbuf));
uds_log_embedded_message(priority, module, NULL, format, args, ": %s (%d)",
vdo_log_embedded_message(priority, module, NULL, format, args, ": %s (%d)",
message, errnum);
return errnum;
}
int __uds_log_strerror(int priority, int errnum, const char *module, const char *format, ...)
int __vdo_log_strerror(int priority, int errnum, const char *module, const char *format, ...)
{
va_list args;
va_start(args, format);
uds_vlog_strerror(priority, errnum, module, format, args);
vdo_vlog_strerror(priority, errnum, module, format, args);
va_end(args);
return errnum;
}
void uds_log_backtrace(int priority)
void vdo_log_backtrace(int priority)
{
if (priority > uds_get_log_level())
if (priority > vdo_get_log_level())
return;
dump_stack();
}
void __uds_log_message(int priority, const char *module, const char *format, ...)
void __vdo_log_message(int priority, const char *module, const char *format, ...)
{
va_list args;
va_start(args, format);
uds_log_embedded_message(priority, module, NULL, format, args, "%s", "");
vdo_log_embedded_message(priority, module, NULL, format, args, "%s", "");
va_end(args);
}
......@@ -233,7 +233,7 @@ void __uds_log_message(int priority, const char *module, const char *format, ...
* Sleep or delay a few milliseconds in an attempt to allow the log buffers to be flushed lest they
* be overrun.
*/
void uds_pause_for_logger(void)
void vdo_pause_for_logger(void)
{
fsleep(4000);
}
......@@ -3,8 +3,8 @@
* Copyright 2023 Red Hat
*/
#ifndef UDS_LOGGER_H
#define UDS_LOGGER_H
#ifndef VDO_LOGGER_H
#define VDO_LOGGER_H
#include <linux/kern_levels.h>
#include <linux/module.h>
......@@ -14,26 +14,26 @@
/* Custom logging utilities for UDS */
enum {
UDS_LOG_EMERG = LOGLEVEL_EMERG,
UDS_LOG_ALERT = LOGLEVEL_ALERT,
UDS_LOG_CRIT = LOGLEVEL_CRIT,
UDS_LOG_ERR = LOGLEVEL_ERR,
UDS_LOG_WARNING = LOGLEVEL_WARNING,
UDS_LOG_NOTICE = LOGLEVEL_NOTICE,
UDS_LOG_INFO = LOGLEVEL_INFO,
UDS_LOG_DEBUG = LOGLEVEL_DEBUG,
UDS_LOG_MAX = UDS_LOG_DEBUG,
UDS_LOG_DEFAULT = UDS_LOG_INFO,
VDO_LOG_EMERG = LOGLEVEL_EMERG,
VDO_LOG_ALERT = LOGLEVEL_ALERT,
VDO_LOG_CRIT = LOGLEVEL_CRIT,
VDO_LOG_ERR = LOGLEVEL_ERR,
VDO_LOG_WARNING = LOGLEVEL_WARNING,
VDO_LOG_NOTICE = LOGLEVEL_NOTICE,
VDO_LOG_INFO = LOGLEVEL_INFO,
VDO_LOG_DEBUG = LOGLEVEL_DEBUG,
VDO_LOG_MAX = VDO_LOG_DEBUG,
VDO_LOG_DEFAULT = VDO_LOG_INFO,
};
extern int vdo_log_level;
#define DM_MSG_PREFIX "vdo"
#define UDS_LOGGING_MODULE_NAME DM_NAME ": " DM_MSG_PREFIX
#define VDO_LOGGING_MODULE_NAME DM_NAME ": " DM_MSG_PREFIX
/* Apply a rate limiter to a log method call. */
#define uds_log_ratelimit(log_fn, ...) \
#define vdo_log_ratelimit(log_fn, ...) \
do { \
static DEFINE_RATELIMIT_STATE(_rs, \
DEFAULT_RATELIMIT_INTERVAL, \
......@@ -43,58 +43,58 @@ extern int vdo_log_level;
} \
} while (0)
int uds_get_log_level(void);
int vdo_get_log_level(void);
void uds_log_embedded_message(int priority, const char *module, const char *prefix,
void vdo_log_embedded_message(int priority, const char *module, const char *prefix,
const char *fmt1, va_list args1, const char *fmt2, ...)
__printf(4, 0) __printf(6, 7);
void uds_log_backtrace(int priority);
void vdo_log_backtrace(int priority);
/* All log functions will preserve the caller's value of errno. */
#define uds_log_strerror(priority, errnum, ...) \
__uds_log_strerror(priority, errnum, UDS_LOGGING_MODULE_NAME, __VA_ARGS__)
#define vdo_log_strerror(priority, errnum, ...) \
__vdo_log_strerror(priority, errnum, VDO_LOGGING_MODULE_NAME, __VA_ARGS__)
int __uds_log_strerror(int priority, int errnum, const char *module,
int __vdo_log_strerror(int priority, int errnum, const char *module,
const char *format, ...)
__printf(4, 5);
int uds_vlog_strerror(int priority, int errnum, const char *module, const char *format,
int vdo_vlog_strerror(int priority, int errnum, const char *module, const char *format,
va_list args)
__printf(4, 0);
/* Log an error prefixed with the string associated with the errnum. */
#define uds_log_error_strerror(errnum, ...) \
uds_log_strerror(UDS_LOG_ERR, errnum, __VA_ARGS__)
#define vdo_log_error_strerror(errnum, ...) \
vdo_log_strerror(VDO_LOG_ERR, errnum, __VA_ARGS__)
#define uds_log_debug_strerror(errnum, ...) \
uds_log_strerror(UDS_LOG_DEBUG, errnum, __VA_ARGS__)
#define vdo_log_debug_strerror(errnum, ...) \
vdo_log_strerror(VDO_LOG_DEBUG, errnum, __VA_ARGS__)
#define uds_log_info_strerror(errnum, ...) \
uds_log_strerror(UDS_LOG_INFO, errnum, __VA_ARGS__)
#define vdo_log_info_strerror(errnum, ...) \
vdo_log_strerror(VDO_LOG_INFO, errnum, __VA_ARGS__)
#define uds_log_warning_strerror(errnum, ...) \
uds_log_strerror(UDS_LOG_WARNING, errnum, __VA_ARGS__)
#define vdo_log_warning_strerror(errnum, ...) \
vdo_log_strerror(VDO_LOG_WARNING, errnum, __VA_ARGS__)
#define uds_log_fatal_strerror(errnum, ...) \
uds_log_strerror(UDS_LOG_CRIT, errnum, __VA_ARGS__)
#define vdo_log_fatal_strerror(errnum, ...) \
vdo_log_strerror(VDO_LOG_CRIT, errnum, __VA_ARGS__)
#define uds_log_message(priority, ...) \
__uds_log_message(priority, UDS_LOGGING_MODULE_NAME, __VA_ARGS__)
#define vdo_log_message(priority, ...) \
__vdo_log_message(priority, VDO_LOGGING_MODULE_NAME, __VA_ARGS__)
void __uds_log_message(int priority, const char *module, const char *format, ...)
void __vdo_log_message(int priority, const char *module, const char *format, ...)
__printf(3, 4);
#define uds_log_debug(...) uds_log_message(UDS_LOG_DEBUG, __VA_ARGS__)
#define vdo_log_debug(...) vdo_log_message(VDO_LOG_DEBUG, __VA_ARGS__)
#define uds_log_info(...) uds_log_message(UDS_LOG_INFO, __VA_ARGS__)
#define vdo_log_info(...) vdo_log_message(VDO_LOG_INFO, __VA_ARGS__)
#define uds_log_warning(...) uds_log_message(UDS_LOG_WARNING, __VA_ARGS__)
#define vdo_log_warning(...) vdo_log_message(VDO_LOG_WARNING, __VA_ARGS__)
#define uds_log_error(...) uds_log_message(UDS_LOG_ERR, __VA_ARGS__)
#define vdo_log_error(...) vdo_log_message(VDO_LOG_ERR, __VA_ARGS__)
#define uds_log_fatal(...) uds_log_message(UDS_LOG_CRIT, __VA_ARGS__)
#define vdo_log_fatal(...) vdo_log_message(VDO_LOG_CRIT, __VA_ARGS__)
void uds_pause_for_logger(void);
#endif /* UDS_LOGGER_H */
void vdo_pause_for_logger(void);
#endif /* VDO_LOGGER_H */
......@@ -363,8 +363,8 @@ struct physical_zone *vdo_get_next_allocation_zone(struct logical_zone *zone)
*/
void vdo_dump_logical_zone(const struct logical_zone *zone)
{
uds_log_info("logical_zone %u", zone->zone_number);
uds_log_info(" flush_generation=%llu oldest_active_generation=%llu notification_generation=%llu notifying=%s ios_in_flush_generation=%llu",
vdo_log_info("logical_zone %u", zone->zone_number);
vdo_log_info(" flush_generation=%llu oldest_active_generation=%llu notification_generation=%llu notifying=%s ios_in_flush_generation=%llu",
(unsigned long long) READ_ONCE(zone->flush_generation),
(unsigned long long) READ_ONCE(zone->oldest_active_generation),
(unsigned long long) READ_ONCE(zone->notification_generation),
......
......@@ -150,7 +150,7 @@ static void remove_vmalloc_block(void *ptr)
if (block != NULL)
vdo_free(block);
else
uds_log_info("attempting to remove ptr %px not found in vmalloc list", ptr);
vdo_log_info("attempting to remove ptr %px not found in vmalloc list", ptr);
}
/*
......@@ -284,7 +284,7 @@ int vdo_allocate_memory(size_t size, size_t align, const char *what, void *ptr)
memalloc_noio_restore(noio_flags);
if (unlikely(p == NULL)) {
uds_log_error("Could not allocate %zu bytes for %s in %u msecs",
vdo_log_error("Could not allocate %zu bytes for %s in %u msecs",
size, what, jiffies_to_msecs(jiffies - start_time));
return -ENOMEM;
}
......@@ -391,7 +391,7 @@ void vdo_memory_exit(void)
VDO_ASSERT_LOG_ONLY(memory_stats.vmalloc_bytes == 0,
"vmalloc memory used (%zd bytes in %zd blocks) is returned to the kernel",
memory_stats.vmalloc_bytes, memory_stats.vmalloc_blocks);
uds_log_debug("peak usage %zd bytes", memory_stats.peak_bytes);
vdo_log_debug("peak usage %zd bytes", memory_stats.peak_bytes);
}
void vdo_get_memory_stats(u64 *bytes_used, u64 *peak_bytes_used)
......@@ -426,13 +426,13 @@ void vdo_report_memory_usage(void)
peak_usage = memory_stats.peak_bytes;
spin_unlock_irqrestore(&memory_stats.lock, flags);
total_bytes = kmalloc_bytes + vmalloc_bytes;
uds_log_info("current module memory tracking (actual allocation sizes, not requested):");
uds_log_info(" %llu bytes in %llu kmalloc blocks",
vdo_log_info("current module memory tracking (actual allocation sizes, not requested):");
vdo_log_info(" %llu bytes in %llu kmalloc blocks",
(unsigned long long) kmalloc_bytes,
(unsigned long long) kmalloc_blocks);
uds_log_info(" %llu bytes in %llu vmalloc blocks",
vdo_log_info(" %llu bytes in %llu vmalloc blocks",
(unsigned long long) vmalloc_bytes,
(unsigned long long) vmalloc_blocks);
uds_log_info(" total %llu bytes, peak usage %llu bytes",
vdo_log_info(" total %llu bytes, peak usage %llu bytes",
(unsigned long long) total_bytes, (unsigned long long) peak_usage);
}
......@@ -421,7 +421,7 @@ int vdo_write_stats(struct vdo *vdo, char *buf, unsigned int maxlen)
result = vdo_allocate(1, struct vdo_statistics, __func__, &stats);
if (result != VDO_SUCCESS) {
uds_log_error("Cannot allocate memory to write VDO statistics");
vdo_log_error("Cannot allocate memory to write VDO statistics");
return result;
}
......
......@@ -748,7 +748,7 @@ static void dump_packer_bin(const struct packer_bin *bin, bool canceled)
/* Don't dump empty bins. */
return;
uds_log_info(" %sBin slots_used=%u free_space=%zu",
vdo_log_info(" %sBin slots_used=%u free_space=%zu",
(canceled ? "Canceled" : ""), bin->slots_used, bin->free_space);
/*
......@@ -767,8 +767,8 @@ void vdo_dump_packer(const struct packer *packer)
{
struct packer_bin *bin;
uds_log_info("packer");
uds_log_info(" flushGeneration=%llu state %s packer_bin_count=%llu",
vdo_log_info("packer");
vdo_log_info(" flushGeneration=%llu state %s packer_bin_count=%llu",
(unsigned long long) packer->flush_generation,
vdo_get_admin_state_code(&packer->state)->name,
(unsigned long long) packer->size);
......
......@@ -15,10 +15,10 @@ int vdo_assertion_failed(const char *expression_string, const char *file_name,
va_start(args, format);
uds_log_embedded_message(UDS_LOG_ERR, UDS_LOGGING_MODULE_NAME, "assertion \"",
vdo_log_embedded_message(VDO_LOG_ERR, VDO_LOGGING_MODULE_NAME, "assertion \"",
format, args, "\" (%s) failed at %s:%d",
expression_string, file_name, line_number);
uds_log_backtrace(UDS_LOG_ERR);
vdo_log_backtrace(VDO_LOG_ERR);
va_end(args);
......
......@@ -163,7 +163,7 @@ static void release_pbn_lock_provisional_reference(struct pbn_lock *lock,
result = vdo_release_block_reference(allocator, locked_pbn);
if (result != VDO_SUCCESS) {
uds_log_error_strerror(result,
vdo_log_error_strerror(result,
"Failed to release reference to %s physical block %llu",
lock->implementation->release_reason,
(unsigned long long) locked_pbn);
......@@ -294,7 +294,7 @@ static int __must_check borrow_pbn_lock_from_pool(struct pbn_lock_pool *pool,
idle_pbn_lock *idle;
if (pool->borrowed >= pool->capacity)
return uds_log_error_strerror(VDO_LOCK_ERROR,
return vdo_log_error_strerror(VDO_LOCK_ERROR,
"no free PBN locks left to borrow");
pool->borrowed += 1;
......@@ -499,7 +499,7 @@ static int allocate_and_lock_block(struct allocation *allocation)
if (lock->holder_count > 0) {
/* This block is already locked, which should be impossible. */
return uds_log_error_strerror(VDO_LOCK_ERROR,
return vdo_log_error_strerror(VDO_LOCK_ERROR,
"Newly allocated block %llu was spuriously locked (holder_count=%u)",
(unsigned long long) allocation->pbn,
lock->holder_count);
......
......@@ -804,7 +804,7 @@ void vdo_free_recovery_journal(struct recovery_journal *journal)
"journal being freed has no active tail blocks");
} else if (!vdo_is_state_saved(&journal->state) &&
!list_empty(&journal->active_tail_blocks)) {
uds_log_warning("journal being freed has uncommitted entries");
vdo_log_warning("journal being freed has uncommitted entries");
}
for (i = 0; i < RECOVERY_JOURNAL_RESERVED_BLOCKS; i++) {
......@@ -1305,7 +1305,7 @@ static void handle_write_error(struct vdo_completion *completion)
struct recovery_journal *journal = block->journal;
vio_record_metadata_io_error(as_vio(completion));
uds_log_error_strerror(completion->result,
vdo_log_error_strerror(completion->result,
"cannot write recovery journal block %llu",
(unsigned long long) block->sequence_number);
enter_journal_read_only_mode(journal, completion->result);
......@@ -1719,7 +1719,7 @@ vdo_get_recovery_journal_statistics(const struct recovery_journal *journal)
*/
static void dump_recovery_block(const struct recovery_journal_block *block)
{
uds_log_info(" sequence number %llu; entries %u; %s; %zu entry waiters; %zu commit waiters",
vdo_log_info(" sequence number %llu; entries %u; %s; %zu entry waiters; %zu commit waiters",
(unsigned long long) block->sequence_number, block->entry_count,
(block->committing ? "committing" : "waiting"),
vdo_waitq_num_waiters(&block->entry_waiters),
......@@ -1736,8 +1736,8 @@ void vdo_dump_recovery_journal_statistics(const struct recovery_journal *journal
const struct recovery_journal_block *block;
struct recovery_journal_statistics stats = vdo_get_recovery_journal_statistics(journal);
uds_log_info("Recovery Journal");
uds_log_info(" block_map_head=%llu slab_journal_head=%llu last_write_acknowledged=%llu tail=%llu block_map_reap_head=%llu slab_journal_reap_head=%llu disk_full=%llu slab_journal_commits_requested=%llu entry_waiters=%zu",
vdo_log_info("Recovery Journal");
vdo_log_info(" block_map_head=%llu slab_journal_head=%llu last_write_acknowledged=%llu tail=%llu block_map_reap_head=%llu slab_journal_reap_head=%llu disk_full=%llu slab_journal_commits_requested=%llu entry_waiters=%zu",
(unsigned long long) journal->block_map_head,
(unsigned long long) journal->slab_journal_head,
(unsigned long long) journal->last_write_acknowledged,
......@@ -1747,16 +1747,16 @@ void vdo_dump_recovery_journal_statistics(const struct recovery_journal *journal
(unsigned long long) stats.disk_full,
(unsigned long long) stats.slab_journal_commits_requested,
vdo_waitq_num_waiters(&journal->entry_waiters));
uds_log_info(" entries: started=%llu written=%llu committed=%llu",
vdo_log_info(" entries: started=%llu written=%llu committed=%llu",
(unsigned long long) stats.entries.started,
(unsigned long long) stats.entries.written,
(unsigned long long) stats.entries.committed);
uds_log_info(" blocks: started=%llu written=%llu committed=%llu",
vdo_log_info(" blocks: started=%llu written=%llu committed=%llu",
(unsigned long long) stats.blocks.started,
(unsigned long long) stats.blocks.written,
(unsigned long long) stats.blocks.committed);
uds_log_info(" active blocks:");
vdo_log_info(" active blocks:");
list_for_each_entry(block, &journal->active_tail_blocks, list_node)
dump_recovery_block(block);
}
......@@ -265,13 +265,13 @@ static void finish_repair(struct vdo_completion *completion)
free_repair_completion(vdo_forget(repair));
if (vdo_state_requires_read_only_rebuild(vdo->load_state)) {
uds_log_info("Read-only rebuild complete");
vdo_log_info("Read-only rebuild complete");
vdo_launch_completion(parent);
return;
}
/* FIXME: shouldn't this say either "recovery" or "repair"? */
uds_log_info("Rebuild complete");
vdo_log_info("Rebuild complete");
/*
* Now that we've freed the repair completion and its vast array of journal entries, we
......@@ -291,9 +291,9 @@ static void abort_repair(struct vdo_completion *completion)
struct repair_completion *repair = as_repair_completion(completion);
if (vdo_state_requires_read_only_rebuild(completion->vdo->load_state))
uds_log_info("Read-only rebuild aborted");
vdo_log_info("Read-only rebuild aborted");
else
uds_log_warning("Recovery aborted");
vdo_log_warning("Recovery aborted");
free_repair_completion(vdo_forget(repair));
vdo_continue_completion(parent, result);
......@@ -329,10 +329,10 @@ static void drain_slab_depot(struct vdo_completion *completion)
prepare_repair_completion(repair, finish_repair, VDO_ZONE_TYPE_ADMIN);
if (vdo_state_requires_read_only_rebuild(vdo->load_state)) {
uds_log_info("Saving rebuilt state");
vdo_log_info("Saving rebuilt state");
operation = VDO_ADMIN_STATE_REBUILDING;
} else {
uds_log_info("Replayed %zu journal entries into slab journals",
vdo_log_info("Replayed %zu journal entries into slab journals",
repair->entries_added_to_slab_journals);
operation = VDO_ADMIN_STATE_RECOVERING;
}
......@@ -350,7 +350,7 @@ static void flush_block_map_updates(struct vdo_completion *completion)
{
vdo_assert_on_admin_thread(completion->vdo, __func__);
uds_log_info("Flushing block map changes");
vdo_log_info("Flushing block map changes");
prepare_repair_completion(as_repair_completion(completion), drain_slab_depot,
VDO_ZONE_TYPE_ADMIN);
vdo_drain_block_map(completion->vdo->block_map, VDO_ADMIN_STATE_RECOVERING,
......@@ -449,7 +449,7 @@ static bool process_slot(struct block_map_page *page, struct vdo_completion *com
if (result == VDO_SUCCESS)
return true;
uds_log_error_strerror(result,
vdo_log_error_strerror(result,
"Could not adjust reference count for PBN %llu, slot %u mapped to PBN %llu",
(unsigned long long) vdo_get_block_map_page_pbn(page),
slot, (unsigned long long) mapping.pbn);
......@@ -615,7 +615,7 @@ static int process_entry(physical_block_number_t pbn, struct vdo_completion *com
int result;
if ((pbn == VDO_ZERO_BLOCK) || !vdo_is_physical_data_block(depot, pbn)) {
return uds_log_error_strerror(VDO_BAD_CONFIGURATION,
return vdo_log_error_strerror(VDO_BAD_CONFIGURATION,
"PBN %llu out of range",
(unsigned long long) pbn);
}
......@@ -623,7 +623,7 @@ static int process_entry(physical_block_number_t pbn, struct vdo_completion *com
result = vdo_adjust_reference_count_for_rebuild(depot, pbn,
VDO_JOURNAL_BLOCK_MAP_REMAPPING);
if (result != VDO_SUCCESS) {
return uds_log_error_strerror(result,
return vdo_log_error_strerror(result,
"Could not adjust reference count for block map tree PBN %llu",
(unsigned long long) pbn);
}
......@@ -758,7 +758,7 @@ static int validate_recovery_journal_entry(const struct vdo *vdo,
!vdo_is_valid_location(&entry->unmapping) ||
!vdo_is_physical_data_block(vdo->depot, entry->mapping.pbn) ||
!vdo_is_physical_data_block(vdo->depot, entry->unmapping.pbn)) {
return uds_log_error_strerror(VDO_CORRUPT_JOURNAL,
return vdo_log_error_strerror(VDO_CORRUPT_JOURNAL,
"Invalid entry: %s (%llu, %u) from %llu to %llu is not within bounds",
vdo_get_journal_operation_name(entry->operation),
(unsigned long long) entry->slot.pbn,
......@@ -772,7 +772,7 @@ static int validate_recovery_journal_entry(const struct vdo *vdo,
(entry->mapping.pbn == VDO_ZERO_BLOCK) ||
(entry->unmapping.state != VDO_MAPPING_STATE_UNMAPPED) ||
(entry->unmapping.pbn != VDO_ZERO_BLOCK))) {
return uds_log_error_strerror(VDO_CORRUPT_JOURNAL,
return vdo_log_error_strerror(VDO_CORRUPT_JOURNAL,
"Invalid entry: %s (%llu, %u) from %llu to %llu is not a valid tree mapping",
vdo_get_journal_operation_name(entry->operation),
(unsigned long long) entry->slot.pbn,
......@@ -875,7 +875,7 @@ void vdo_replay_into_slab_journals(struct block_allocator *allocator, void *cont
.entry_count = 0,
};
uds_log_info("Replaying entries into slab journals for zone %u",
vdo_log_info("Replaying entries into slab journals for zone %u",
allocator->zone_number);
completion->parent = repair;
add_slab_journal_entries(completion);
......@@ -907,7 +907,7 @@ static void flush_block_map(struct vdo_completion *completion)
vdo_assert_on_admin_thread(completion->vdo, __func__);
uds_log_info("Flushing block map changes");
vdo_log_info("Flushing block map changes");
prepare_repair_completion(repair, load_slab_depot, VDO_ZONE_TYPE_ADMIN);
operation = (vdo_state_requires_read_only_rebuild(completion->vdo->load_state) ?
VDO_ADMIN_STATE_REBUILDING :
......@@ -1107,7 +1107,7 @@ static void recover_block_map(struct vdo_completion *completion)
vdo_state_requires_read_only_rebuild(vdo->load_state);
if (repair->block_map_entry_count == 0) {
uds_log_info("Replaying 0 recovery entries into block map");
vdo_log_info("Replaying 0 recovery entries into block map");
vdo_free(vdo_forget(repair->journal_data));
launch_repair_completion(repair, load_slab_depot, VDO_ZONE_TYPE_ADMIN);
return;
......@@ -1124,7 +1124,7 @@ static void recover_block_map(struct vdo_completion *completion)
};
min_heapify_all(&repair->replay_heap, &repair_min_heap);
uds_log_info("Replaying %zu recovery entries into block map",
vdo_log_info("Replaying %zu recovery entries into block map",
repair->block_map_entry_count);
repair->current_entry = &repair->entries[repair->block_map_entry_count - 1];
......@@ -1437,7 +1437,7 @@ static int validate_heads(struct repair_completion *repair)
return VDO_SUCCESS;
return uds_log_error_strerror(VDO_CORRUPT_JOURNAL,
return vdo_log_error_strerror(VDO_CORRUPT_JOURNAL,
"Journal tail too early. block map head: %llu, slab journal head: %llu, tail: %llu",
(unsigned long long) repair->block_map_head,
(unsigned long long) repair->slab_journal_head,
......@@ -1571,7 +1571,7 @@ static int parse_journal_for_recovery(struct repair_completion *repair)
header = get_recovery_journal_block_header(journal, repair->journal_data, i);
if (header.metadata_type == VDO_METADATA_RECOVERY_JOURNAL) {
/* This is an old format block, so we need to upgrade */
uds_log_error_strerror(VDO_UNSUPPORTED_VERSION,
vdo_log_error_strerror(VDO_UNSUPPORTED_VERSION,
"Recovery journal is in the old format, a read-only rebuild is required.");
vdo_enter_read_only_mode(repair->completion.vdo,
VDO_UNSUPPORTED_VERSION);
......@@ -1628,7 +1628,7 @@ static int parse_journal_for_recovery(struct repair_completion *repair)
if (result != VDO_SUCCESS)
return result;
uds_log_info("Highest-numbered recovery journal block has sequence number %llu, and the highest-numbered usable block is %llu",
vdo_log_info("Highest-numbered recovery journal block has sequence number %llu, and the highest-numbered usable block is %llu",
(unsigned long long) repair->highest_tail,
(unsigned long long) repair->tail);
......@@ -1656,7 +1656,7 @@ static void finish_journal_load(struct vdo_completion *completion)
if (++repair->vios_complete != repair->vio_count)
return;
uds_log_info("Finished reading recovery journal");
vdo_log_info("Finished reading recovery journal");
uninitialize_vios(repair);
prepare_repair_completion(repair, recover_block_map, VDO_ZONE_TYPE_LOGICAL);
vdo_continue_completion(&repair->completion, parse_journal(repair));
......@@ -1701,12 +1701,12 @@ void vdo_repair(struct vdo_completion *parent)
vdo_assert_on_admin_thread(vdo, __func__);
if (vdo->load_state == VDO_FORCE_REBUILD) {
uds_log_warning("Rebuilding reference counts to clear read-only mode");
vdo_log_warning("Rebuilding reference counts to clear read-only mode");
vdo->states.vdo.read_only_recoveries++;
} else if (vdo->load_state == VDO_REBUILD_FOR_UPGRADE) {
uds_log_warning("Rebuilding reference counts for upgrade");
vdo_log_warning("Rebuilding reference counts for upgrade");
} else {
uds_log_warning("Device was dirty, rebuilding reference counts");
vdo_log_warning("Device was dirty, rebuilding reference counts");
}
result = vdo_allocate_extended(struct repair_completion, page_count,
......
This diff is collapsed.
......@@ -87,8 +87,8 @@ int vdo_register_status_codes(void)
*/
int vdo_status_to_errno(int error)
{
char error_name[UDS_MAX_ERROR_NAME_SIZE];
char error_message[UDS_MAX_ERROR_MESSAGE_SIZE];
char error_name[VDO_MAX_ERROR_NAME_SIZE];
char error_message[VDO_MAX_ERROR_MESSAGE_SIZE];
/* 0 is success, negative a system error code */
if (likely(error <= 0))
......@@ -103,7 +103,7 @@ int vdo_status_to_errno(int error)
case VDO_READ_ONLY:
return -EIO;
default:
uds_log_info("%s: mapping internal status code %d (%s: %s) to EIO",
vdo_log_info("%s: mapping internal status code %d (%s: %s) to EIO",
__func__, error,
uds_string_error_name(error, error_name, sizeof(error_name)),
uds_string_error(error, error_message, sizeof(error_message)));
......
......@@ -84,7 +84,7 @@ int vdo_create_thread(void (*thread_function)(void *), void *thread_data,
result = vdo_allocate(1, struct thread, __func__, &thread);
if (result != VDO_SUCCESS) {
uds_log_warning("Error allocating memory for %s", name);
vdo_log_warning("Error allocating memory for %s", name);
return result;
}
......
......@@ -304,7 +304,7 @@ static int __must_check read_geometry_block(struct vdo *vdo)
result = blk_status_to_errno(vio->bio->bi_status);
free_vio(vdo_forget(vio));
if (result != 0) {
uds_log_error_strerror(result, "synchronous read failed");
vdo_log_error_strerror(result, "synchronous read failed");
vdo_free(block);
return -EIO;
}
......@@ -493,7 +493,7 @@ static int initialize_vdo(struct vdo *vdo, struct device_config *config,
return result;
}
uds_log_info("zones: %d logical, %d physical, %d hash; total threads: %d",
vdo_log_info("zones: %d logical, %d physical, %d hash; total threads: %d",
config->thread_counts.logical_zones,
config->thread_counts.physical_zones,
config->thread_counts.hash_zones, vdo->thread_config.thread_count);
......@@ -841,7 +841,7 @@ int vdo_synchronous_flush(struct vdo *vdo)
atomic64_inc(&vdo->stats.flush_out);
if (result != 0) {
uds_log_error_strerror(result, "synchronous flush failed");
vdo_log_error_strerror(result, "synchronous flush failed");
result = -EIO;
}
......@@ -928,7 +928,7 @@ static void handle_save_error(struct vdo_completion *completion)
container_of(as_vio(completion), struct vdo_super_block, vio);
vio_record_metadata_io_error(&super_block->vio);
uds_log_error_strerror(completion->result, "super block save failed");
vdo_log_error_strerror(completion->result, "super block save failed");
/*
* Mark the super block as unwritable so that we won't attempt to write it again. This
* avoids the case where a growth attempt fails writing the super block with the new size,
......@@ -1154,7 +1154,7 @@ static void make_thread_read_only(struct vdo_completion *completion)
thread->is_read_only = true;
listener = thread->listeners;
if (thread_id == 0)
uds_log_error_strerror(READ_ONCE(notifier->read_only_error),
vdo_log_error_strerror(READ_ONCE(notifier->read_only_error),
"Unrecoverable error, entering read-only mode");
} else {
/* We've just finished notifying a listener */
......@@ -1329,7 +1329,7 @@ void vdo_enter_recovery_mode(struct vdo *vdo)
if (vdo_in_read_only_mode(vdo))
return;
uds_log_info("Entering recovery mode");
vdo_log_info("Entering recovery mode");
vdo_set_state(vdo, VDO_RECOVERING);
}
......@@ -1382,7 +1382,7 @@ static void set_compression_callback(struct vdo_completion *completion)
}
}
uds_log_info("compression is %s", (*enable ? "enabled" : "disabled"));
vdo_log_info("compression is %s", (*enable ? "enabled" : "disabled"));
*enable = was_enabled;
complete_synchronous_action(completion);
}
......
......@@ -131,7 +131,7 @@ int create_multi_block_metadata_vio(struct vdo *vdo, enum vio_type vio_type,
*/
result = vdo_allocate(1, struct vio, __func__, &vio);
if (result != VDO_SUCCESS) {
uds_log_error("metadata vio allocation failure %d", result);
vdo_log_error("metadata vio allocation failure %d", result);
return result;
}
......@@ -225,7 +225,7 @@ int vio_reset_bio(struct vio *vio, char *data, bio_end_io_t callback,
bytes_added = bio_add_page(bio, page, bytes, offset);
if (bytes_added != bytes) {
return uds_log_error_strerror(VDO_BIO_CREATION_FAILED,
return vdo_log_error_strerror(VDO_BIO_CREATION_FAILED,
"Could only add %i bytes to bio",
bytes_added);
}
......@@ -258,18 +258,18 @@ void update_vio_error_stats(struct vio *vio, const char *format, ...)
case VDO_NO_SPACE:
atomic64_inc(&vdo->stats.no_space_error_count);
priority = UDS_LOG_DEBUG;
priority = VDO_LOG_DEBUG;
break;
default:
priority = UDS_LOG_ERR;
priority = VDO_LOG_ERR;
}
if (!__ratelimit(&error_limiter))
return;
va_start(args, format);
uds_vlog_strerror(priority, vio->completion.result, UDS_LOGGING_MODULE_NAME,
vdo_vlog_strerror(priority, vio->completion.result, VDO_LOGGING_MODULE_NAME,
format, args);
va_end(args);
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment