Commit 985ede92 authored by Vlad Lesin

MDEV-20755 InnoDB: Database page corruption on disk or a failed file read of tablespace upon prepare of mariabackup incremental backup

The problem:

When an incremental backup is taken, delta files are created for InnoDB tables
that are marked as new tables during InnoDB DDL tracking. When mariabackup
tries to open such a tablespace during prepare in
xb_delta_open_matching_space(), the tablespace is "created", i.e.
xb_space_create_file() is invoked instead of the existing file being opened,
even if a tablespace with the same name exists in the base backup directory.

xb_space_create_file() writes the page 0 header of the tablespace.
This header does not contain crypt data, because mariabackup has no
information about crypt data in the delta file metadata for tablespaces.
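
Schematically, the prepare-time decision can be pictured like this (a
simplified sketch with assumed helper names, not the actual
xb_delta_open_matching_space() code):

#include <cstdio>

/* stand-in for xb_space_create_file(): a fresh page 0 is written and the
   crypt data the original .ibd carried is lost */
static bool create_tablespace_file(const char *path)
{
        std::printf("creating %s: page 0 written without crypt data\n", path);
        return true;
}

static bool open_existing_tablespace(const char *path)
{
        std::printf("opening %s: existing page 0 (and crypt data) preserved\n", path);
        return true;
}

/* For a tablespace that DDL tracking marked as new, the "create" branch is
   taken even though the file already exists in the base backup directory. */
static bool delta_open_matching_space_sketch(const char *path,
                                             bool marked_new_by_ddl_tracking)
{
        if (marked_new_by_ddl_tracking)
                return create_tablespace_file(path);    /* the problematic path */
        return open_existing_tablespace(path);
}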

After the delta file is applied, the recovery process is started. As the
order in which pages are recovered is not defined, the crypt data redo log
record may be executed only after some other page has already been read for
recovery. When a page is read for recovery, it is decrypted using the crypt
data stored in the tablespace header on page 0; if there is no crypt data,
the page is not decrypted and does not pass the corruption check.
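
In rough terms, the failing check during recovery amounts to the following
(a minimal sketch with assumed helper names, not the actual InnoDB recovery
code):

#include <cstdint>
#include <cstddef>

struct crypt_data_t { uint32_t key_version; };  /* simplified stand-in */

/* trivial placeholders, only here to keep the sketch self-contained */
static void decrypt_page(uint8_t *page, size_t size, const crypt_data_t &)
{
        for (size_t i = 0; i < size; i++) page[i] ^= 0xA5;
}
static bool page_checksum_ok(const uint8_t *, size_t) { return true; }

/* A page read for recovery can be decrypted only if page 0 of its tablespace
   carried crypt data; otherwise the still-encrypted page fails the corruption
   check, yielding "Database page corruption on disk or a failed file read". */
static bool read_page_for_recovery(uint8_t *page, size_t size,
                                   const crypt_data_t *crypt_from_page0)
{
        if (crypt_from_page0)
                decrypt_page(page, size, *crypt_from_page0);
        return page_checksum_ok(page, size);
}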

This causes --prepare of an incremental backup to fail for encrypted
tablespaces.

The failure is not deterministic, because the crypt data redo log record
updates the crypt data on page 0, and recovery of different pages can be
executed in an undefined order.

The fix:

When a delta file is created, the corresponding write filter copies only the
pages whose LSN is greater than the incremental LSN. When a new file is
created during incremental backup, the LSN of all its pages must be greater
than the incremental LSN, so there is no need to create a delta for such a
table; we can just copy it completely.
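
As an illustration, the page selection performed by the incremental write
filter (wf_incremental in the diff below) boils down to the following
(assumed names, simplified):

#include <cstdint>
#include <vector>

struct page_ref_t { uint64_t lsn; uint32_t page_no; };

/* Only pages whose LSN exceeds the incremental LSN go into the .delta file.
   For a tablespace created after the base backup was taken, every page
   satisfies this condition, so a plain full copy of the file carries the same
   data without the per-page delta metadata. */
static std::vector<page_ref_t> select_delta_pages(const std::vector<page_ref_t> &pages,
                                                  uint64_t incremental_lsn)
{
        std::vector<page_ref_t> out;
        for (const page_ref_t &p : pages)
                if (p.lsn > incremental_lsn)
                        out.push_back(p);
        return out;
}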

The fix is to copy the whole file that was tracked during incremental backup
by the InnoDB DDL tracker, and to copy it to the base directory during
--prepare instead of applying a delta.
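
On the prepare side this boils down to the following steps (a condensed sketch
of what the updated prepare_handle_new_files() in the diff below does; error
handling and the inc_dir_tables_hash bookkeeping are omitted):

#include <cstdio>
#include <string>

/* Move db/t1.new from the incremental directory into the base datadir as
   db/t1.ibd and drop any leftover .ibd.delta / .ibd.meta files so that no
   delta is applied to the freshly copied file. */
static void handle_new_file_sketch(const std::string &inc_dir,
                                   const std::string &base_dir,
                                   const std::string &db,
                                   const std::string &table)
{
        const std::string src  = inc_dir  + "/" + db + "/" + table + ".new";
        const std::string dest = base_dir + "/" + db + "/" + table + ".ibd";
        std::rename(src.c_str(), dest.c_str());
        std::remove((inc_dir + "/" + db + "/" + table + ".ibd.delta").c_str());
        std::remove((inc_dir + "/" + db + "/" + table + ".ibd.meta").c_str());
}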

A DBUG_EXECUTE_IF() hook is also added to the InnoDB code to avoid writing
the redo log record for the crypt data update on page 0, which makes the test
case stable.
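
The hook relies on the standard server debug facility: when the keyword is
enabled via SET GLOBAL debug_dbug="+d,ib_do_not_log_crypt_data", the function
fil_space_crypt_t::write_page0() returns before the redo record for the crypt
data update is written (see the last hunk of the diff). The pattern is roughly
(a sketch, only buildable inside the server tree):

#include "my_dbug.h"

static void write_page0_sketch(/* page, mtr, ... omitted */)
{
        /* ... crypt data fields are written to page 0 via mlog_write_ulint() ... */
        DBUG_EXECUTE_IF("ib_do_not_log_crypt_data", return;);
        /* ... redo log record describing the crypt data update is written here ... */
}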

Note:

The issue is not reproducible in 10.5, as optimized DDL is deprecated
in 10.5. But the fix is still useful because it reduces the amount of data
copied during backup, since a delta file contains some extra info.
The test case should be removed for 10.5, as it will always pass.
parent cc1646da
......@@ -2537,17 +2537,24 @@ xb_get_copy_action(const char *dflt)
return(action);
}
/* TODO: We may tune the behavior (e.g. by fil_aio)*/
static
my_bool
xtrabackup_copy_datafile(fil_node_t* node, uint thread_n, const char *dest_name=0, ulonglong max_size=ULLONG_MAX)
/** Copy innodb data file to the specified destination.
@param[in] node file node of a tablespace
@param[in] thread_n thread id, used in the text of diagnostic messages
@param[in] dest_name destination file name
@param[in] write_filter write filter used to copy the data; can be the
pass-through filter for a full backup, the pages filter for an incremental
backup, etc.
@return FALSE on success and TRUE on error */
static my_bool xtrabackup_copy_datafile(fil_node_t *node, uint thread_n,
const char *dest_name,
const xb_write_filt_t &write_filter)
{
char dst_name[FN_REFLEN];
ds_file_t *dstfile = NULL;
xb_fil_cur_t cursor;
xb_fil_cur_result_t res;
xb_write_filt_t *write_filter = NULL;
xb_write_filt_ctxt_t write_filt_ctxt;
const char *action;
xb_read_filt_t *read_filter;
......@@ -2587,7 +2594,7 @@ xtrabackup_copy_datafile(fil_node_t* node, uint thread_n, const char *dest_name=
read_filter = &rf_bitmap;
}
res = xb_fil_cur_open(&cursor, read_filter, node, thread_n,max_size);
res = xb_fil_cur_open(&cursor, read_filter, node, thread_n, ULLONG_MAX);
if (res == XB_FIL_CUR_SKIP) {
goto skip;
} else if (res == XB_FIL_CUR_ERROR) {
......@@ -2598,18 +2605,11 @@ xtrabackup_copy_datafile(fil_node_t* node, uint thread_n, const char *dest_name=
sizeof dst_name - 1);
dst_name[sizeof dst_name - 1] = '\0';
/* Setup the page write filter */
if (xtrabackup_incremental) {
write_filter = &wf_incremental;
} else {
write_filter = &wf_write_through;
}
memset(&write_filt_ctxt, 0, sizeof(xb_write_filt_ctxt_t));
ut_a(write_filter->process != NULL);
ut_a(write_filter.process != NULL);
if (write_filter->init != NULL &&
!write_filter->init(&write_filt_ctxt, dst_name, &cursor)) {
if (write_filter.init != NULL &&
!write_filter.init(&write_filt_ctxt, dst_name, &cursor)) {
msg (thread_n, "mariabackup: error: failed to initialize page write filter.");
goto error;
}
......@@ -2630,7 +2630,7 @@ xtrabackup_copy_datafile(fil_node_t* node, uint thread_n, const char *dest_name=
/* The main copy loop */
while ((res = xb_fil_cur_read(&cursor)) == XB_FIL_CUR_SUCCESS) {
if (!write_filter->process(&write_filt_ctxt, dstfile)) {
if (!write_filter.process(&write_filt_ctxt, dstfile)) {
goto error;
}
}
......@@ -2639,8 +2639,8 @@ xtrabackup_copy_datafile(fil_node_t* node, uint thread_n, const char *dest_name=
goto error;
}
if (write_filter->finalize
&& !write_filter->finalize(&write_filt_ctxt, dstfile)) {
if (write_filter.finalize
&& !write_filter.finalize(&write_filt_ctxt, dstfile)) {
goto error;
}
......@@ -2654,8 +2654,8 @@ xtrabackup_copy_datafile(fil_node_t* node, uint thread_n, const char *dest_name=
if (ds_close(dstfile)) {
rc = TRUE;
}
if (write_filter && write_filter->deinit) {
write_filter->deinit(&write_filt_ctxt);
if (write_filter.deinit) {
write_filter.deinit(&write_filt_ctxt);
}
return(rc);
......@@ -2664,8 +2664,8 @@ xtrabackup_copy_datafile(fil_node_t* node, uint thread_n, const char *dest_name=
if (dstfile != NULL) {
ds_close(dstfile);
}
if (write_filter && write_filter->deinit) {
write_filter->deinit(&write_filt_ctxt);;
if (write_filter.deinit) {
write_filter.deinit(&write_filt_ctxt);;
}
msg(thread_n, "mariabackup: xtrabackup_copy_datafile() failed.");
return(TRUE); /*ERROR*/
......@@ -2675,8 +2675,8 @@ xtrabackup_copy_datafile(fil_node_t* node, uint thread_n, const char *dest_name=
if (dstfile != NULL) {
ds_close(dstfile);
}
if (write_filter && write_filter->deinit) {
write_filter->deinit(&write_filt_ctxt);
if (write_filter.deinit) {
write_filter.deinit(&write_filt_ctxt);
}
msg(thread_n,"Warning: We assume the table was dropped during xtrabackup execution and ignore the tablespace %s", node_name);
return(FALSE);
......@@ -2974,9 +2974,9 @@ data_copy_thread_func(
while ((node = datafiles_iter_next(ctxt->it)) != NULL) {
DBUG_MARIABACKUP_EVENT("before_copy", node->space->name);
/* copy the datafile */
if(xtrabackup_copy_datafile(node, num)) {
if (xtrabackup_copy_datafile(node, num, NULL,
xtrabackup_incremental ? wf_incremental : wf_write_through))
die("failed to copy datafile.");
}
DBUG_MARIABACKUP_EVENT("after_copy", node->space->name);
......@@ -4620,7 +4620,7 @@ void backup_fix_ddl(void)
continue;
std::string dest_name(node->space->name);
dest_name.append(".new");
xtrabackup_copy_datafile(node, 0, dest_name.c_str()/*, do_full_copy ? ULONGLONG_MAX:UNIV_PAGE_SIZE */);
xtrabackup_copy_datafile(node, 0, dest_name.c_str(), wf_write_through);
}
datafiles_iter_free(it);
......@@ -5176,22 +5176,66 @@ static void rename_force(const char *from, const char *to) {
rename_file(from,to);
}
/* During prepare phase, rename ".new" files , that were created in backup_fix_ddl(),
to ".ibd".*/
static ibool prepare_handle_new_files(
const char* data_home_dir, /*!<in: path to datadir */
const char* db_name, /*!<in: database name */
const char* file_name, /*!<in: file name with suffix */
void *)
{
/** During prepare phase, rename ".new" files that were created in
backup_fix_ddl() and backup_optimized_ddl_op() to ".ibd". In the case of
incremental backup, i.e. if the arg argument is set, move the ".new" files to
the destination directory, rename them to ".ibd", and remove any existing
".ibd.delta" and ".ibd.meta" files in the incremental directory to avoid
applying a delta to the ".ibd" file.
@param[in] data_home_dir path to datadir
@param[in] db_name database name
@param[in] file_name file name with suffix
@param[in] arg destination path, used in incremental backup to indicate that
the *.new file must be moved to the destination directory
@return TRUE */
static ibool prepare_handle_new_files(const char *data_home_dir,
const char *db_name,
const char *file_name, void *arg)
{
const char *dest_dir = static_cast<const char *>(arg);
std::string src_path = std::string(data_home_dir) + '/' + std::string(db_name) + '/' + file_name;
std::string dest_path = src_path;
/* Copy "*.new" files from incremental to base dir for incremental backup */
std::string dest_path=
dest_dir ? std::string(dest_dir) + '/' + std::string(db_name) +
'/' + file_name : src_path;
size_t index = dest_path.find(".new");
DBUG_ASSERT(index != std::string::npos);
dest_path.replace(index, 4, ".ibd");
dest_path.replace(index, strlen(".ibd"), ".ibd");
rename_force(src_path.c_str(),dest_path.c_str());
if (dest_dir) {
/* remove delta and meta files to avoid applying a delta to the new file */
index = src_path.find(".new");
DBUG_ASSERT(index != std::string::npos);
src_path.replace(index, std::string::npos, ".ibd.delta");
if (access(src_path.c_str(), R_OK) == 0) {
msg("Removing %s", src_path.c_str());
if (my_delete(src_path.c_str(), MYF(MY_WME)))
die("Can't remove %s, errno %d", src_path.c_str(), errno);
}
src_path.replace(index, std::string::npos, ".ibd.meta");
if (access(src_path.c_str(), R_OK) == 0) {
msg("Removing %s", src_path.c_str());
if (my_delete(src_path.c_str(), MYF(MY_WME)))
die("Can't remove %s, errno %d", src_path.c_str(), errno);
}
/* add the table name to the container to avoid its deletion at the end of
prepare */
std::string table_name = std::string(db_name) + '/'
+ std::string(file_name, file_name + strlen(file_name) - strlen(".new"));
xb_filter_entry_t *table = static_cast<xb_filter_entry_t *>
(malloc(sizeof(xb_filter_entry_t) + table_name.size() + 1));
table->name = ((char*)table) + sizeof(xb_filter_entry_t);
strcpy(table->name, table_name.c_str());
HASH_INSERT(xb_filter_entry_t, name_hash, inc_dir_tables_hash,
ut_fold_string(table->name), table);
}
return TRUE;
}
......@@ -5228,17 +5272,18 @@ rm_if_not_found(
return(TRUE);
}
/************************************************************************
Function enumerates files in datadir (provided by path) which are matched
/** Function enumerates files in datadir (provided by path) which are matched
by provided suffix. For each entry callback is called.
@param[in] path datadir path
@param[in] suffix suffix to match against
@param[in] func callback
@param[in] func_arg arguments for the above callback
@return FALSE if callback for some entry returned FALSE */
static
ibool
xb_process_datadir(
const char* path, /*!<in: datadir path */
const char* suffix, /*!<in: suffix to match
against */
handle_datadir_entry_func_t func) /*!<in: callback */
static ibool xb_process_datadir(const char *path, const char *suffix,
handle_datadir_entry_func_t func,
void *func_arg = NULL)
{
ulint ret;
char dbpath[OS_FILE_MAX_PATH];
......@@ -5273,7 +5318,7 @@ xb_process_datadir(
suffix)) {
if (!func(
path, NULL,
fileinfo.name, NULL))
fileinfo.name, func_arg))
{
os_file_closedir(dbdir);
return(FALSE);
......@@ -5337,7 +5382,7 @@ xb_process_datadir(
if (!func(
path,
dbinfo.name,
fileinfo.name, NULL))
fileinfo.name, func_arg))
{
os_file_closedir(dbdir);
os_file_closedir(dir);
......@@ -5497,6 +5542,12 @@ static bool xtrabackup_prepare_func(char** argv)
fil_path_to_mysql_datadir = ".";
ut_ad(xtrabackup_incremental == xtrabackup_incremental_dir);
if (xtrabackup_incremental) {
inc_dir_tables_hash = hash_create(1000);
ut_ad(inc_dir_tables_hash);
}
/* Fix DDL for prepare. Process .del,.ren, and .new files.
The order in which files are processed, is important
(see MDEV-18185, MDEV-18201)
......@@ -5508,6 +5559,8 @@ static bool xtrabackup_prepare_func(char** argv)
if (xtrabackup_incremental_dir) {
xb_process_datadir(xtrabackup_incremental_dir, ".new.meta", prepare_handle_new_files);
xb_process_datadir(xtrabackup_incremental_dir, ".new.delta", prepare_handle_new_files);
xb_process_datadir(xtrabackup_incremental_dir, ".new",
prepare_handle_new_files, (void *)".");
}
else {
xb_process_datadir(".", ".new", prepare_handle_new_files);
......@@ -5592,8 +5645,6 @@ static bool xtrabackup_prepare_func(char** argv)
goto error_cleanup;
}
inc_dir_tables_hash = hash_create(1000);
ok = xtrabackup_apply_deltas();
xb_data_files_close();
......
--plugin-load-add=$FILE_KEY_MANAGEMENT_SO
--innodb-file-per-table
--innodb-encryption-threads=4
--innodb-encrypt-tables
--innodb-encrypt-log
--loose-file-key-management
--loose-file-key-management-filename=$MYSQL_TEST_DIR/std_data/keys.txt
SET @old_innodb_log_optimize_ddl=@@global.innodb_log_optimize_ddl;
SET GLOBAL innodb_log_optimize_ddl=ON;
SET @old_debug_dbug=@@global.debug_dbug;
SET GLOBAL debug_dbug="+d,ib_log_checkpoint_avoid";
SET @old_innodb_page_cleaner_disabled_debug=@@global.innodb_page_cleaner_disabled_debug;
SET GLOBAL innodb_page_cleaner_disabled_debug=ON;
CREATE TABLE t1 (c INT) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
# backup
SET GLOBAL debug_dbug="+d,ib_do_not_log_crypt_data";
INSERT INTO t1 VALUES (2);
# incremental backup
# prepare
# incremental prepare
SET GLOBAL innodb_log_optimize_ddl=@old_innodb_log_optimize_ddl;
SET GLOBAL innodb_page_cleaner_disabled_debug=@old_innodb_page_cleaner_disabled_debug;
SET GLOBAL debug_dbug=@old_debug_dbug;
# Restore and check results
# shutdown server
# remove datadir
# xtrabackup move back
# restart server
SELECT count(*) FROM t1;
count(*)
2
DROP TABLE t1;
#
# If the MDEV-20755 bug is not fixed, incremental prepare will fail.
#
--source include/have_debug.inc
--source include/have_file_key_management.inc
--let $base_dir=$MYSQLTEST_VARDIR/tmp/backup
--let $inc_dir=$MYSQLTEST_VARDIR/tmp/backup_inc
SET @old_innodb_log_optimize_ddl=@@global.innodb_log_optimize_ddl;
SET GLOBAL innodb_log_optimize_ddl=ON;
# Disable page flushing to allow redo log records to be executed on --prepare.
SET @old_debug_dbug=@@global.debug_dbug;
SET GLOBAL debug_dbug="+d,ib_log_checkpoint_avoid";
SET @old_innodb_page_cleaner_disabled_debug=@@global.innodb_page_cleaner_disabled_debug;
SET GLOBAL innodb_page_cleaner_disabled_debug=ON;
CREATE TABLE t1 (c INT) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
--echo # backup
--disable_result_log
--exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$base_dir
--enable_result_log
# Do not log crypt data to avoid executing its redo record on --prepare.
SET GLOBAL debug_dbug="+d,ib_do_not_log_crypt_data";
INSERT INTO t1 VALUES (2);
--disable_result_log
# Execute OPTIMIZE TABLE after the table is copied to trigger the optimized ddl
# callback during backup, which, in turn, will create the t1.new file in the
# backup directory.
--let after_copy_test_t1=OPTIMIZE TABLE test.t1;
--echo # incremental backup
--exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$inc_dir --incremental-basedir=$base_dir --dbug=+d,mariabackup_events
--echo # prepare
--exec $XTRABACKUP --prepare --target-dir=$base_dir
# If the tablespace is created during the delta tablespace open procedure, then
# crypt data will not be written, the page corruption check will not pass
# when some redo log record is applied, and --prepare will fail.
--echo # incremental prepare
--exec $XTRABACKUP --prepare --target-dir=$base_dir --incremental-dir=$inc_dir
--enable_result_log
SET GLOBAL innodb_log_optimize_ddl=@old_innodb_log_optimize_ddl;
SET GLOBAL innodb_page_cleaner_disabled_debug=@old_innodb_page_cleaner_disabled_debug;
SET GLOBAL debug_dbug=@old_debug_dbug;
--echo # Restore and check results
--let $targetdir=$base_dir
--source include/restart_and_restore.inc
--enable_result_log
SELECT count(*) FROM t1;
# Cleanup
DROP TABLE t1;
--rmdir $base_dir
--rmdir $inc_dir
......@@ -407,6 +407,8 @@ fil_space_crypt_t::write_page0(
mlog_write_ulint(page + offset + MAGIC_SZ + 2 + len + 8, encryption,
MLOG_1BYTE, mtr);
DBUG_EXECUTE_IF("ib_do_not_log_crypt_data", return;);
byte* log_ptr = mlog_open(mtr, 11 + 17 + len);
if (log_ptr != NULL) {
......