Commit 4bbd8be4 authored by Marko Mäkelä

Merge 10.1 into 10.2

parents 34b38ad7 b2f76bac
...@@ -189,6 +189,8 @@ INCLUDE(check_compiler_flag) ...@@ -189,6 +189,8 @@ INCLUDE(check_compiler_flag)
OPTION(WITH_ASAN "Enable address sanitizer" OFF) OPTION(WITH_ASAN "Enable address sanitizer" OFF)
IF (WITH_ASAN) IF (WITH_ASAN)
# this flag might be set by default on some OS
MY_CHECK_AND_SET_COMPILER_FLAG("-U_FORTIFY_SOURCE" DEBUG RELWITHDEBINFO)
# gcc 4.8.1 and new versions of clang # gcc 4.8.1 and new versions of clang
MY_CHECK_AND_SET_COMPILER_FLAG("-fsanitize=address -fPIC" MY_CHECK_AND_SET_COMPILER_FLAG("-fsanitize=address -fPIC"
DEBUG RELWITHDEBINFO) DEBUG RELWITHDEBINFO)
...@@ -214,22 +216,22 @@ ENDIF() ...@@ -214,22 +216,22 @@ ENDIF()
OPTION(WITH_UBSAN "Enable undefined behavior sanitizer" OFF) OPTION(WITH_UBSAN "Enable undefined behavior sanitizer" OFF)
IF (WITH_UBSAN) IF (WITH_UBSAN)
IF(SECURITY_HARDENED) MY_CHECK_AND_SET_COMPILER_FLAG("-fsanitize=undefined -fno-sanitize=alignment -U_FORTIFY_SOURCE" DEBUG RELWITHDEBINFO)
MESSAGE(FATAL_ERROR "WITH_UBSAN and SECURITY_HARDENED are mutually exclusive")
ENDIF()
MY_CHECK_AND_SET_COMPILER_FLAG("-fsanitize=undefined -fno-sanitize=alignment" DEBUG RELWITHDEBINFO)
ENDIF() ENDIF()
# enable security hardening features, like most distributions do # enable security hardening features, like most distributions do
# in our benchmarks that costs about ~1% of performance, depending on the load # in our benchmarks that costs about ~1% of performance, depending on the load
IF(CMAKE_C_COMPILER_VERSION VERSION_LESS "4.6") IF(CMAKE_C_COMPILER_VERSION VERSION_LESS "4.6" OR WITH_ASAN OR WITH_UBSAN)
SET(security_default OFF) SET(security_default OFF)
ELSE() ELSE()
SET(security_default ON) SET(security_default ON)
ENDIF() ENDIF()
OPTION(SECURITY_HARDENED "Use security-enhancing compiler features (stack protector, relro, etc)" ${security_default}) OPTION(SECURITY_HARDENED "Use security-enhancing compiler features (stack protector, relro, etc)" ${security_default})
IF(SECURITY_HARDENED) IF(SECURITY_HARDENED)
IF(WITH_ASAN OR WITH_UBSAN)
MESSAGE(FATAL_ERROR "WITH_ASAN/WITH_UBSAN and SECURITY_HARDENED are mutually exclusive")
ENDIF()
# security-enhancing flags # security-enhancing flags
MY_CHECK_AND_SET_COMPILER_FLAG("-pie -fPIC") MY_CHECK_AND_SET_COMPILER_FLAG("-pie -fPIC")
MY_CHECK_AND_SET_COMPILER_FLAG("-Wl,-z,relro,-z,now") MY_CHECK_AND_SET_COMPILER_FLAG("-Wl,-z,relro,-z,now")
......
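Context for the CMake hunks above: _FORTIFY_SOURCE can interfere with the sanitizers' own instrumentation, which is why the merge undefines it for WITH_ASAN/WITH_UBSAN builds and makes the sanitizers mutually exclusive with SECURITY_HARDENED. A minimal illustration (not part of this commit, file name hypothetical) of the kind of defect a WITH_ASAN build reports at run time:

// asan_demo.cc -- illustrative only, not part of the MariaDB sources.
// Build with the flags the WITH_ASAN option adds, e.g.:
//   g++ -fsanitize=address -fPIC -g asan_demo.cc
// Running the binary makes AddressSanitizer report a heap-buffer-overflow
// at the memcpy() below.
#include <cstring>

int main()
{
  char *buf= new char[8];
  memcpy(buf, "0123456789", 11);   // writes 11 bytes into an 8-byte buffer
  delete[] buf;
  return 0;
}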
...@@ -13,7 +13,8 @@ SET(fail_patterns ...@@ -13,7 +13,8 @@ SET(fail_patterns
FAIL_REGEX "warning:.*redefined" FAIL_REGEX "warning:.*redefined"
FAIL_REGEX "[Ww]arning: [Oo]ption" FAIL_REGEX "[Ww]arning: [Oo]ption"
) )
# The regex patterns above are not localized, thus LANG=C
SET(ENV{LANG} C)
MACRO (MY_CHECK_C_COMPILER_FLAG flag) MACRO (MY_CHECK_C_COMPILER_FLAG flag)
STRING(REGEX REPLACE "[-,= +]" "_" result "have_C_${flag}") STRING(REGEX REPLACE "[-,= +]" "_" result "have_C_${flag}")
SET(SAVE_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}") SET(SAVE_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}")
......
...@@ -11,7 +11,7 @@ ...@@ -11,7 +11,7 @@
# executing statement. If difference is >0, then something was # executing statement. If difference is >0, then something was
# written to the binary log on the slave. # written to the binary log on the slave.
connection slave; # On Connection Slave
let $before = query_get_value("SHOW MASTER STATUS", Position, 1); let $before = query_get_value("SHOW MASTER STATUS", Position, 1);
connection master; connection master;
......
# PURPOSE. Test that blackhole works with replication in all three
# modes: STATEMENT, MIXED, and ROW.
#
# METHOD. We start by creating a table on the master and then change
# the engine to use blackhole on the slave.
#
# After insert/update/delete of one or more rows, the test then
# proceeds to check that replication is running after replicating a
# change, that the blackhole engine does not contain anything (which
# is just a check that the correct engine is used), and that something
# is written to the binary log.
#
# We check INSERT, UPDATE, and DELETE statements for tables with no
# key (forcing a range search on the slave), primary keys (using a
# primary key lookup), and index/key with multiple matches (forcing an
# index search).
# We start with no primary key
CREATE TABLE t1 (a INT, b INT, c INT);
CREATE TABLE t2 (a INT, b INT, c INT);
sync_slave_with_master;
ALTER TABLE t1 ENGINE=BLACKHOLE;
connection master;
INSERT INTO t2 VALUES (1,9,1), (2,9,2), (3,9,3), (4,9,4);
sync_slave_with_master;
# Test insert, no primary key
let $statement = INSERT INTO t1 VALUES (1,1,1),(2,1,2),(3,1,3),(4,1,4);
source extra/rpl_tests/rpl_blackhole.test;
# Test update, no primary key
let $statement = UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 1;
source extra/rpl_tests/rpl_blackhole.test;
# Test delete, no primary key
let $statement = DELETE FROM t1 WHERE a % 2 = 0 AND b = 1;
source extra/rpl_tests/rpl_blackhole.test;
# Test INSERT-SELECT into Blackhole, no primary key
let $statement = INSERT INTO t1 SELECT * FROM t2;
source extra/rpl_tests/rpl_blackhole.test;
#
# The MASTER has MyISAM as the engine for both tables. The SLAVE has Blackhole
# on t1 (transactional engine) and MyISAM on t2 (non-transactional engine).
#
# In MIXED mode, the command "INSERT INTO t2 SELECT * FROM t1" is logged as
# statement on the master. On the slave, it is tagged as unsafe because the
# statement mixes both transactional and non-transactional engines and as such
# its changes are logged as rows. However, due to the nature of the blackhole
# engine, no rows are returned and thus any chain replication would make the
# next master on the chain diverge.
#
# For this reason, we have disabled the statement.
#
# Test INSERT-SELECT from Blackhole, no primary key
# let $statement = INSERT INTO t2 SELECT * FROM t1;
# source extra/rpl_tests/rpl_blackhole.test;
#
connection master;
ALTER TABLE t1 ADD PRIMARY KEY pk_t1 (a,b);
sync_slave_with_master;
# Test insert, primary key
let $statement = INSERT INTO t1 VALUES (1,2,1),(2,2,2),(3,2,3),(4,2,4);
source extra/rpl_tests/rpl_blackhole.test;
# Test update, primary key
let $statement = UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 2;
source extra/rpl_tests/rpl_blackhole.test;
# Test delete, primary key
let $statement = DELETE FROM t1 WHERE a % 2 = 0 AND b = 2;
source extra/rpl_tests/rpl_blackhole.test;
connection master;
ALTER TABLE t1 DROP PRIMARY KEY, ADD KEY key_t1 (a);
sync_slave_with_master;
# Test insert, key
let $statement = INSERT INTO t1 VALUES (1,3,1),(2,3,2),(3,3,3),(4,3,4);
source extra/rpl_tests/rpl_blackhole.test;
# Test update, key
let $statement = UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 3;
source extra/rpl_tests/rpl_blackhole.test;
# Test delete, key
let $statement = DELETE FROM t1 WHERE a % 2 = 0 AND b = 3;
source extra/rpl_tests/rpl_blackhole.test;
connection master;
DROP TABLE t1,t2;
sync_slave_with_master;
#
# Start of 10.1 tests
#
#
# MDEV-19675 Wrong charset is chosen when opening a pre-4.1 table
#
# Test with a saved table from 3.23
SELECT @@character_set_database;
@@character_set_database
utf8
SET @@character_set_database="latin1";
SELECT COUNT(*) FROM t1;
ERROR HY000: Got error 190 "Incompatible key or row definition between the MariaDB .frm file and the information in the storage engine. You have to dump an" from storage engine MyISAM
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check Error Got error 190 "Incompatible key or row definition between the MariaDB .frm file and the information in the storage engine. You have to dump an" from storage engine MyISAM
test.t1 check error Corrupt
REPAIR TABLE t1;
Table Op Msg_type Msg_text
test.t1 repair Error Got error 190 "Incompatible key or row definition between the MariaDB .frm file and the information in the storage engine. You have to dump an" from storage engine MyISAM
test.t1 repair error Corrupt
REPAIR TABLE t1 USE_FRM;
Table Op Msg_type Msg_text
test.t1 repair status OK
SELECT COUNT(*) FROM t1;
COUNT(*)
0
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`Host` char(60) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL DEFAULT '',
`Db` char(64) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL DEFAULT '',
`Select_priv` enum('N','Y') NOT NULL DEFAULT 'N',
`Insert_priv` enum('N','Y') NOT NULL DEFAULT 'N',
`Update_priv` enum('N','Y') NOT NULL DEFAULT 'N',
`Delete_priv` enum('N','Y') NOT NULL DEFAULT 'N',
`Create_priv` enum('N','Y') NOT NULL DEFAULT 'N',
`Drop_priv` enum('N','Y') NOT NULL DEFAULT 'N',
`Grant_priv` enum('N','Y') NOT NULL DEFAULT 'N',
`References_priv` enum('N','Y') NOT NULL DEFAULT 'N',
`Index_priv` enum('N','Y') NOT NULL DEFAULT 'N',
`Alter_priv` enum('N','Y') NOT NULL DEFAULT 'N',
PRIMARY KEY (`Host`,`Db`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COMMENT='Host privileges; Merged with database privileges'
DROP TABLE t1;
SET @@character_set_database=DEFAULT;
# Now do the same, but doing 'ALTER DATABASE' to create the db.opt file,
# instead of setting variables directly.
# Emulate a pre-4.1 database without db.opt
SHOW CREATE DATABASE db1;
Database Create Database
db1 CREATE DATABASE `db1` /*!40100 DEFAULT CHARACTER SET utf8 */
USE db1;
SELECT @@character_set_database, 'taken from defaults' AS comment;
@@character_set_database comment
utf8 taken from defaults
USE test;
ALTER DATABASE db1 DEFAULT CHARACTER SET latin1;
USE db1;
SELECT @@character_set_database, 'taken from db.opt' AS comment;
@@character_set_database comment
latin1 taken from db.opt
SELECT COUNT(*) FROM t1;
ERROR HY000: Got error 190 "Incompatible key or row definition between the MariaDB .frm file and the information in the storage engine. You have to dump an" from storage engine MyISAM
REPAIR TABLE t1 USE_FRM;
Table Op Msg_type Msg_text
db1.t1 repair status OK
SELECT COUNT(*) FROM t1;
COUNT(*)
0
CHECK TABLE t1;
Table Op Msg_type Msg_text
db1.t1 check status OK
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`Host` char(60) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL DEFAULT '',
`Db` char(64) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL DEFAULT '',
`Select_priv` enum('N','Y') NOT NULL DEFAULT 'N',
`Insert_priv` enum('N','Y') NOT NULL DEFAULT 'N',
`Update_priv` enum('N','Y') NOT NULL DEFAULT 'N',
`Delete_priv` enum('N','Y') NOT NULL DEFAULT 'N',
`Create_priv` enum('N','Y') NOT NULL DEFAULT 'N',
`Drop_priv` enum('N','Y') NOT NULL DEFAULT 'N',
`Grant_priv` enum('N','Y') NOT NULL DEFAULT 'N',
`References_priv` enum('N','Y') NOT NULL DEFAULT 'N',
`Index_priv` enum('N','Y') NOT NULL DEFAULT 'N',
`Alter_priv` enum('N','Y') NOT NULL DEFAULT 'N',
PRIMARY KEY (`Host`,`Db`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COMMENT='Host privileges; Merged with database privileges'
DROP TABLE t1;
DROP DATABASE db1;
USE test;
#
# End of 10.1 tests
#
...@@ -14,5 +14,4 @@ rpl_spec_variables : BUG#11755836 2009-10-27 jasonh rpl_spec_variables fa ...@@ -14,5 +14,4 @@ rpl_spec_variables : BUG#11755836 2009-10-27 jasonh rpl_spec_variables fa
rpl_get_master_version_and_clock : Bug#11766137 Jan 05 2011 joro Valgrind warnings rpl_get_master_version_and_clock rpl_get_master_version_and_clock : Bug#11766137 Jan 05 2011 joro Valgrind warnings rpl_get_master_version_and_clock
rpl_partition_archive : MDEV-5077 2013-09-27 svoj Cannot exchange partition with archive table rpl_partition_archive : MDEV-5077 2013-09-27 svoj Cannot exchange partition with archive table
rpl_row_binlog_max_cache_size : MDEV-11092 rpl_row_binlog_max_cache_size : MDEV-11092
rpl_blackhole : MDEV-11094
rpl_row_index_choice : MDEV-11666 rpl_row_index_choice : MDEV-11666
...@@ -8,7 +8,6 @@ ALTER TABLE t1 ENGINE=BLACKHOLE; ...@@ -8,7 +8,6 @@ ALTER TABLE t1 ENGINE=BLACKHOLE;
connection master; connection master;
INSERT INTO t2 VALUES (1,9,1), (2,9,2), (3,9,3), (4,9,4); INSERT INTO t2 VALUES (1,9,1), (2,9,2), (3,9,3), (4,9,4);
connection slave; connection slave;
connection slave;
connection master; connection master;
INSERT INTO t1 VALUES (1,1,1),(2,1,2),(3,1,3),(4,1,4); INSERT INTO t1 VALUES (1,1,1),(2,1,2),(3,1,3),(4,1,4);
connection slave; connection slave;
...@@ -17,7 +16,6 @@ SELECT COUNT(*) FROM t1; ...@@ -17,7 +16,6 @@ SELECT COUNT(*) FROM t1;
COUNT(*) COUNT(*)
0 0
>>> Something was written to binary log <<< >>> Something was written to binary log <<<
connection slave;
connection master; connection master;
UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 1; UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 1;
connection slave; connection slave;
...@@ -26,7 +24,6 @@ SELECT COUNT(*) FROM t1; ...@@ -26,7 +24,6 @@ SELECT COUNT(*) FROM t1;
COUNT(*) COUNT(*)
0 0
>>> Something was written to binary log <<< >>> Something was written to binary log <<<
connection slave;
connection master; connection master;
DELETE FROM t1 WHERE a % 2 = 0 AND b = 1; DELETE FROM t1 WHERE a % 2 = 0 AND b = 1;
connection slave; connection slave;
...@@ -35,7 +32,6 @@ SELECT COUNT(*) FROM t1; ...@@ -35,7 +32,6 @@ SELECT COUNT(*) FROM t1;
COUNT(*) COUNT(*)
0 0
>>> Something was written to binary log <<< >>> Something was written to binary log <<<
connection slave;
connection master; connection master;
INSERT INTO t1 SELECT * FROM t2; INSERT INTO t1 SELECT * FROM t2;
connection slave; connection slave;
...@@ -55,7 +51,6 @@ SELECT COUNT(*) FROM t1; ...@@ -55,7 +51,6 @@ SELECT COUNT(*) FROM t1;
COUNT(*) COUNT(*)
0 0
>>> Something was written to binary log <<< >>> Something was written to binary log <<<
connection slave;
connection master; connection master;
UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 2; UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 2;
connection slave; connection slave;
...@@ -64,7 +59,6 @@ SELECT COUNT(*) FROM t1; ...@@ -64,7 +59,6 @@ SELECT COUNT(*) FROM t1;
COUNT(*) COUNT(*)
0 0
>>> Something was written to binary log <<< >>> Something was written to binary log <<<
connection slave;
connection master; connection master;
DELETE FROM t1 WHERE a % 2 = 0 AND b = 2; DELETE FROM t1 WHERE a % 2 = 0 AND b = 2;
connection slave; connection slave;
...@@ -84,7 +78,6 @@ SELECT COUNT(*) FROM t1; ...@@ -84,7 +78,6 @@ SELECT COUNT(*) FROM t1;
COUNT(*) COUNT(*)
0 0
>>> Something was written to binary log <<< >>> Something was written to binary log <<<
connection slave;
connection master; connection master;
UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 3; UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 3;
connection slave; connection slave;
...@@ -93,7 +86,6 @@ SELECT COUNT(*) FROM t1; ...@@ -93,7 +86,6 @@ SELECT COUNT(*) FROM t1;
COUNT(*) COUNT(*)
0 0
>>> Something was written to binary log <<< >>> Something was written to binary log <<<
connection slave;
connection master; connection master;
DELETE FROM t1 WHERE a % 2 = 0 AND b = 3; DELETE FROM t1 WHERE a % 2 = 0 AND b = 3;
connection slave; connection slave;
...@@ -104,4 +96,5 @@ COUNT(*) ...@@ -104,4 +96,5 @@ COUNT(*)
>>> Something was written to binary log <<< >>> Something was written to binary log <<<
connection master; connection master;
DROP TABLE t1,t2; DROP TABLE t1,t2;
connection slave;
include/rpl_end.inc include/rpl_end.inc
...@@ -20,81 +20,6 @@ source include/master-slave.inc; ...@@ -20,81 +20,6 @@ source include/master-slave.inc;
call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT"); call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT");
# We start with no primary key source extra/rpl_tests/rpl_blackhole_basic.test;
CREATE TABLE t1 (a INT, b INT, c INT);
CREATE TABLE t2 (a INT, b INT, c INT);
sync_slave_with_master;
ALTER TABLE t1 ENGINE=BLACKHOLE;
connection master;
INSERT INTO t2 VALUES (1,9,1), (2,9,2), (3,9,3), (4,9,4);
sync_slave_with_master;
# Test insert, no primary key
let $statement = INSERT INTO t1 VALUES (1,1,1),(2,1,2),(3,1,3),(4,1,4);
source extra/rpl_tests/rpl_blackhole.test;
# Test update, no primary key
let $statement = UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 1;
source extra/rpl_tests/rpl_blackhole.test;
# Test delete, no primary key
let $statement = DELETE FROM t1 WHERE a % 2 = 0 AND b = 1;
source extra/rpl_tests/rpl_blackhole.test;
# Test INSERT-SELECT into Blackhole, no primary key
let $statement = INSERT INTO t1 SELECT * FROM t2;
source extra/rpl_tests/rpl_blackhole.test;
#
# The MASTER has MyISAM as the engine for both tables. The SLAVE has Blackhole
# on t1 (transactional engine) and MyISAM on t2 (non-transactional engine).
#
# In MIXED mode, the command "INSERT INTO t2 SELECT * FROM t1" is logged as
# statement on the master. On the slave, it is tagged as unsafe because the
# statement mixes both transactional and non-transactional engines and as such
# its changes are logged as rows. However, due to the nature of the blackhole
# engine, no rows are returned and thus any chain replication would make the
# next master on the chain diverge.
#
# For this reason, we have disabled the statement.
#
# Test INSERT-SELECT from Blackhole, no primary key
# let $statement = INSERT INTO t2 SELECT * FROM t1;
# source extra/rpl_tests/rpl_blackhole.test;
#
connection master;
ALTER TABLE t1 ADD PRIMARY KEY pk_t1 (a,b);
# Test insert, primary key
let $statement = INSERT INTO t1 VALUES (1,2,1),(2,2,2),(3,2,3),(4,2,4);
source extra/rpl_tests/rpl_blackhole.test;
# Test update, primary key
let $statement = UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 2;
source extra/rpl_tests/rpl_blackhole.test;
# Test delete, primary key
let $statement = DELETE FROM t1 WHERE a % 2 = 0 AND b = 2;
source extra/rpl_tests/rpl_blackhole.test;
connection master;
ALTER TABLE t1 DROP PRIMARY KEY, ADD KEY key_t1 (a);
# Test insert, key
let $statement = INSERT INTO t1 VALUES (1,3,1),(2,3,2),(3,3,3),(4,3,4);
source extra/rpl_tests/rpl_blackhole.test;
# Test update, key
let $statement = UPDATE t1 SET c = 2*c WHERE a % 2 = 0 AND b = 3;
source extra/rpl_tests/rpl_blackhole.test;
# Test delete, key
let $statement = DELETE FROM t1 WHERE a % 2 = 0 AND b = 3;
source extra/rpl_tests/rpl_blackhole.test;
connection master;
DROP TABLE t1,t2;
--source include/rpl_end.inc --source include/rpl_end.inc
--binlog_annotate_row_events --timezone=GMT-3
--binlog_annotate_row_events --replicate_annotate_row_events
# ==== Purpose ====
#
# Test verifies that when "replicate_annotate_row_events" is enabled on the slave,
# DML operations on the blackhole engine will be successful. It also ensures
# that Annotate events are logged into the slave's binary log.
#
# ==== Implementation ====
#
# Steps:
# 0 - Enable "replicate_annotate_row_events" on slave and do DML operations
# on master.
# 1 - Slave server will successfully apply the DML operations and it is in
# sync with master.
# 2 - Verify that the "show binlog events" prints all annotate events.
# 3 - Stream the slave's binary log using "mysqlbinlog" tool and verify
# that the Annotate events are being displayed.
#
# ==== References ====
#
# MDEV-11094: Blackhole table updates on slave fail when row annotation is
# enabled
source include/have_blackhole.inc;
source include/have_binlog_format_row.inc;
source include/binlog_start_pos.inc;
source include/master-slave.inc;
SET timestamp=1000000000;
RESET MASTER;
connection slave;
SET timestamp=1000000000;
RESET MASTER;
connection master;
source extra/rpl_tests/rpl_blackhole_basic.test;
# Verify on slave.
connection slave;
FLUSH LOGS;
--replace_column 2 # 5 #
--replace_result $binlog_start_pos <start_pos>
--replace_regex /table_id: [0-9]+/table_id: #/ /\/\* xid=.* \*\//\/* xid= *\//
--eval show binlog events in 'slave-bin.000001' from $binlog_start_pos
let $MYSQLD_DATADIR= `select @@datadir`;
--replace_regex /server id [0-9]*/server id #/ /server v [^ ]*/server v #.##.##/ /exec_time=[0-9]*/exec_time=#/ /thread_id=[0-9]*/thread_id=#/ /table id [0-9]*/table id #/ /mapped to number [0-9]*/mapped to number #/ /end_log_pos [0-9]*/end_log_pos #/ /# at [0-9]*/# at #/ /CRC32 0x[0-9a-f]*/CRC32 XXX/
--exec $MYSQL_BINLOG --base64-output=decode-rows $MYSQLD_DATADIR/slave-bin.000001
source include/rpl_end.inc;
let $MYSQLD_DATADIR= `select @@datadir`;
--echo #
--echo # Start of 10.1 tests
--echo #
--echo #
--echo # MDEV-19675 Wrong charset is chosen when opening a pre-4.1 table
--echo #
--echo # Test with a saved table from 3.23
SELECT @@character_set_database;
SET @@character_set_database="latin1";
--copy_file std_data/host_old.frm $MYSQLD_DATADIR/test/t1.frm
--copy_file std_data/host_old.MYD $MYSQLD_DATADIR/test/t1.MYD
--copy_file std_data/host_old.MYI $MYSQLD_DATADIR/test/t1.MYI
--error ER_GET_ERRNO
SELECT COUNT(*) FROM t1;
CHECK TABLE t1;
REPAIR TABLE t1;
REPAIR TABLE t1 USE_FRM;
SELECT COUNT(*) FROM t1;
CHECK TABLE t1;
SHOW CREATE TABLE t1;
DROP TABLE t1;
SET @@character_set_database=DEFAULT;
--echo # Now do the same, but doing 'ALTER DATABASE' to create the db.opt file,
--echo # instead of setting variables directly.
--echo # Emulate a pre-4.1 database without db.opt
--mkdir $MYSQLD_DATADIR/db1
SHOW CREATE DATABASE db1;
USE db1;
SELECT @@character_set_database, 'taken from defaults' AS comment;
USE test;
ALTER DATABASE db1 DEFAULT CHARACTER SET latin1;
USE db1;
SELECT @@character_set_database, 'taken from db.opt' AS comment;
--copy_file std_data/host_old.frm $MYSQLD_DATADIR/db1/t1.frm
--copy_file std_data/host_old.MYD $MYSQLD_DATADIR/db1/t1.MYD
--copy_file std_data/host_old.MYI $MYSQLD_DATADIR/db1/t1.MYI
--error ER_GET_ERRNO
SELECT COUNT(*) FROM t1;
REPAIR TABLE t1 USE_FRM;
SELECT COUNT(*) FROM t1;
CHECK TABLE t1;
SHOW CREATE TABLE t1;
DROP TABLE t1;
DROP DATABASE db1;
USE test;
--echo #
--echo # End of 10.1 tests
--echo #
...@@ -210,6 +210,40 @@ plugin_ref ha_resolve_by_name(THD *thd, const LEX_STRING *name, bool tmp_table) ...@@ -210,6 +210,40 @@ plugin_ref ha_resolve_by_name(THD *thd, const LEX_STRING *name, bool tmp_table)
} }
bool
Storage_engine_name::resolve_storage_engine_with_error(THD *thd,
handlerton **ha,
bool tmp_table)
{
#if MYSQL_VERSION_ID < 100300
/*
Please remove tmp_name when merging to 10.3 and pass m_storage_engine_name
directly to ha_resolve_by_name().
*/
LEX_STRING tmp_name;
tmp_name.str= const_cast<char*>(m_storage_engine_name.str);
tmp_name.length= m_storage_engine_name.length;
#endif
if (plugin_ref plugin= ha_resolve_by_name(thd, &tmp_name, tmp_table))
{
*ha= plugin_hton(plugin);
return false;
}
*ha= NULL;
if (thd->variables.sql_mode & MODE_NO_ENGINE_SUBSTITUTION)
{
my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), m_storage_engine_name.str);
return true;
}
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_UNKNOWN_STORAGE_ENGINE,
ER_THD(thd, ER_UNKNOWN_STORAGE_ENGINE),
m_storage_engine_name.str);
return false;
}
plugin_ref ha_lock_engine(THD *thd, const handlerton *hton) plugin_ref ha_lock_engine(THD *thd, const handlerton *hton)
{ {
if (hton) if (hton)
......
...@@ -1233,7 +1233,7 @@ bool Master_info_index::init_all_master_info() ...@@ -1233,7 +1233,7 @@ bool Master_info_index::init_all_master_info()
if (!err_num) // No Error on read Master_info if (!err_num) // No Error on read Master_info
{ {
if (global_system_variables.log_warnings > 1) if (global_system_variables.log_warnings > 1)
sql_print_information("Reading of all Master_info entries succeded"); sql_print_information("Reading of all Master_info entries succeeded");
DBUG_RETURN(0); DBUG_RETURN(0);
} }
if (succ_num) // Have some Error and some Success if (succ_num) // Have some Error and some Success
......
...@@ -194,6 +194,18 @@ bool Sql_cmd_alter_table::execute(THD *thd) ...@@ -194,6 +194,18 @@ bool Sql_cmd_alter_table::execute(THD *thd)
SELECT_LEX *select_lex= &lex->select_lex; SELECT_LEX *select_lex= &lex->select_lex;
/* first table of first SELECT_LEX */ /* first table of first SELECT_LEX */
TABLE_LIST *first_table= (TABLE_LIST*) select_lex->table_list.first; TABLE_LIST *first_table= (TABLE_LIST*) select_lex->table_list.first;
const bool used_engine= lex->create_info.used_fields & HA_CREATE_USED_ENGINE;
DBUG_ASSERT((m_storage_engine_name.str != NULL) == used_engine);
if (used_engine)
{
if (resolve_storage_engine_with_error(thd, &lex->create_info.db_type,
lex->create_info.tmp_table()))
return true; // Engine not found, substitution is not allowed
if (!lex->create_info.db_type) // Not found, but substitution is allowed
lex->create_info.used_fields&= ~HA_CREATE_USED_ENGINE;
}
/* /*
Code in mysql_alter_table() may modify its HA_CREATE_INFO argument, Code in mysql_alter_table() may modify its HA_CREATE_INFO argument,
so we have to use a copy of this structure to make execution so we have to use a copy of this structure to make execution
......
...@@ -392,7 +392,8 @@ class Sql_cmd_common_alter_table : public Sql_cmd ...@@ -392,7 +392,8 @@ class Sql_cmd_common_alter_table : public Sql_cmd
Sql_cmd_alter_table represents the generic ALTER TABLE statement. Sql_cmd_alter_table represents the generic ALTER TABLE statement.
@todo move Alter_info and other ALTER specific structures from Lex here. @todo move Alter_info and other ALTER specific structures from Lex here.
*/ */
class Sql_cmd_alter_table : public Sql_cmd_common_alter_table class Sql_cmd_alter_table : public Sql_cmd_common_alter_table,
public Storage_engine_name
{ {
public: public:
/** /**
...@@ -404,6 +405,8 @@ class Sql_cmd_alter_table : public Sql_cmd_common_alter_table ...@@ -404,6 +405,8 @@ class Sql_cmd_alter_table : public Sql_cmd_common_alter_table
~Sql_cmd_alter_table() ~Sql_cmd_alter_table()
{} {}
Storage_engine_name *option_storage_engine_name() { return this; }
bool execute(THD *thd); bool execute(THD *thd);
}; };
......
...@@ -105,6 +105,31 @@ enum enum_sql_command { ...@@ -105,6 +105,31 @@ enum enum_sql_command {
SQLCOM_END SQLCOM_END
}; };
class Storage_engine_name
{
protected:
LEX_CSTRING m_storage_engine_name;
public:
Storage_engine_name()
{
m_storage_engine_name.str= NULL;
m_storage_engine_name.length= 0;
}
Storage_engine_name(const LEX_CSTRING &name)
:m_storage_engine_name(name)
{ }
Storage_engine_name(const LEX_STRING &name)
{
m_storage_engine_name.str= name.str;
m_storage_engine_name.length= name.length;
}
bool resolve_storage_engine_with_error(THD *thd,
handlerton **ha,
bool tmp_table);
};
/** /**
@class Sql_cmd - Representation of an SQL command. @class Sql_cmd - Representation of an SQL command.
...@@ -148,6 +173,11 @@ class Sql_cmd : public Sql_alloc ...@@ -148,6 +173,11 @@ class Sql_cmd : public Sql_alloc
*/ */
virtual bool execute(THD *thd) = 0; virtual bool execute(THD *thd) = 0;
virtual Storage_engine_name *option_storage_engine_name()
{
return NULL;
}
protected: protected:
Sql_cmd() Sql_cmd()
{} {}
...@@ -164,4 +194,15 @@ class Sql_cmd : public Sql_alloc ...@@ -164,4 +194,15 @@ class Sql_cmd : public Sql_alloc
} }
}; };
class Sql_cmd_create_table: public Sql_cmd,
public Storage_engine_name
{
public:
enum_sql_command sql_command_code() const { return SQLCOM_CREATE_TABLE; }
Storage_engine_name *option_storage_engine_name() { return this; }
bool execute(THD *thd);
};
#endif // SQL_CMD_INCLUDED #endif // SQL_CMD_INCLUDED
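A hypothetical sketch (not code from this commit; Sql_cmd_example is invented, and the headers of the tree are assumed to be included) of how a command class plugs into the new deferred-resolution machinery: the parser records only the engine name via Storage_engine_name, and resolve_storage_engine_with_error() runs at execution time, so NO_ENGINE_SUBSTITUTION and the warning path are evaluated when the statement actually executes rather than at parse time:

// Sketch of a command using the deferred engine-name resolution pattern.
// The real users introduced by this merge are Sql_cmd_create_table and
// Sql_cmd_alter_table.
class Sql_cmd_example: public Sql_cmd, public Storage_engine_name
{
public:
  enum_sql_command sql_command_code() const { return SQLCOM_END; }
  Storage_engine_name *option_storage_engine_name() { return this; }
  bool execute(THD *thd)
  {
    handlerton *hton;
    /* Resolution happens here, not in the parser: an unknown engine is an
       error only if NO_ENGINE_SUBSTITUTION is in sql_mode; otherwise a
       warning is pushed and hton is set to NULL. */
    if (resolve_storage_engine_with_error(thd, &hton, false))
      return true;
    return false;
  }
};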
...@@ -2539,6 +2539,8 @@ create: ...@@ -2539,6 +2539,8 @@ create:
create_or_replace opt_temporary TABLE_SYM opt_if_not_exists table_ident create_or_replace opt_temporary TABLE_SYM opt_if_not_exists table_ident
{ {
LEX *lex= thd->lex; LEX *lex= thd->lex;
if (!(lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_create_table()))
MYSQL_YYABORT;
lex->create_info.init(); lex->create_info.init();
if (lex->set_command_with_check(SQLCOM_CREATE_TABLE, $2, $1 | $4)) if (lex->set_command_with_check(SQLCOM_CREATE_TABLE, $2, $1 | $4))
MYSQL_YYABORT; MYSQL_YYABORT;
...@@ -2560,16 +2562,6 @@ create: ...@@ -2560,16 +2562,6 @@ create:
{ {
LEX *lex= thd->lex; LEX *lex= thd->lex;
lex->current_select= &lex->select_lex; lex->current_select= &lex->select_lex;
if ((lex->create_info.used_fields & HA_CREATE_USED_ENGINE) &&
!lex->create_info.db_type)
{
lex->create_info.use_default_db_type(thd);
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_USING_OTHER_HANDLER,
ER_THD(thd, ER_WARN_USING_OTHER_HANDLER),
hton_name(lex->create_info.db_type)->str,
$5->table.str);
}
create_table_set_open_action_and_adjust_tables(lex); create_table_set_open_action_and_adjust_tables(lex);
} }
| create_or_replace opt_unique INDEX_SYM opt_if_not_exists ident | create_or_replace opt_unique INDEX_SYM opt_if_not_exists ident
...@@ -5669,10 +5661,20 @@ create_table_options: ...@@ -5669,10 +5661,20 @@ create_table_options:
; ;
create_table_option: create_table_option:
ENGINE_SYM opt_equal storage_engines ENGINE_SYM opt_equal ident_or_text
{
LEX *lex= Lex;
if (!lex->m_sql_cmd)
{ {
Lex->create_info.db_type= $3; DBUG_ASSERT(lex->sql_command == SQLCOM_ALTER_TABLE);
Lex->create_info.used_fields|= HA_CREATE_USED_ENGINE; if (!(lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_alter_table()))
MYSQL_YYABORT;
}
Storage_engine_name *opt=
lex->m_sql_cmd->option_storage_engine_name();
DBUG_ASSERT(opt); // Expect a proper Sql_cmd
*opt= Storage_engine_name($3);
lex->create_info.used_fields|= HA_CREATE_USED_ENGINE;
} }
| MAX_ROWS opt_equal ulonglong_num | MAX_ROWS opt_equal ulonglong_num
{ {
...@@ -5937,21 +5939,10 @@ default_collation: ...@@ -5937,21 +5939,10 @@ default_collation:
storage_engines: storage_engines:
ident_or_text ident_or_text
{ {
plugin_ref plugin= ha_resolve_by_name(thd, &$1, if (Storage_engine_name($1).
thd->lex->create_info.tmp_table()); resolve_storage_engine_with_error(thd, &$$,
thd->lex->create_info.tmp_table()))
if (plugin) MYSQL_YYABORT;
$$= plugin_hton(plugin);
else
{
if (thd->variables.sql_mode & MODE_NO_ENGINE_SUBSTITUTION)
my_yyabort_error((ER_UNKNOWN_STORAGE_ENGINE, MYF(0), $1.str));
$$= 0;
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_UNKNOWN_STORAGE_ENGINE,
ER_THD(thd, ER_UNKNOWN_STORAGE_ENGINE),
$1.str);
}
} }
; ;
...@@ -7744,11 +7735,6 @@ alter_list_item: ...@@ -7744,11 +7735,6 @@ alter_list_item:
{ {
LEX *lex=Lex; LEX *lex=Lex;
lex->alter_info.flags|= Alter_info::ALTER_OPTIONS; lex->alter_info.flags|= Alter_info::ALTER_OPTIONS;
if ((lex->create_info.used_fields & HA_CREATE_USED_ENGINE) &&
!lex->create_info.db_type)
{
lex->create_info.used_fields&= ~HA_CREATE_USED_ENGINE;
}
} }
| FORCE_SYM | FORCE_SYM
{ {
......
...@@ -1357,8 +1357,9 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, ...@@ -1357,8 +1357,9 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
} }
if (!share->table_charset) if (!share->table_charset)
{ {
const CHARSET_INFO *cs= thd->variables.collation_database;
/* unknown charset in frm_image[38] or pre-3.23 frm */ /* unknown charset in frm_image[38] or pre-3.23 frm */
if (use_mb(default_charset_info)) if (use_mb(cs))
{ {
/* Warn that we may be changing the size of character columns */ /* Warn that we may be changing the size of character columns */
sql_print_warning("'%s' had no or invalid character set, " sql_print_warning("'%s' had no or invalid character set, "
...@@ -1366,7 +1367,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, ...@@ -1366,7 +1367,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
"so character column sizes may have changed", "so character column sizes may have changed",
share->path.str); share->path.str);
} }
share->table_charset= default_charset_info; share->table_charset= cs;
} }
share->db_record_offset= 1; share->db_record_offset= 1;
...@@ -2615,8 +2616,20 @@ static bool sql_unusable_for_discovery(THD *thd, handlerton *engine, ...@@ -2615,8 +2616,20 @@ static bool sql_unusable_for_discovery(THD *thd, handlerton *engine,
if (create_info->data_file_name || create_info->index_file_name) if (create_info->data_file_name || create_info->index_file_name)
return 1; return 1;
// ... engine // ... engine
if (create_info->db_type && create_info->db_type != engine) DBUG_ASSERT(lex->m_sql_cmd);
if (lex->create_info.used_fields & HA_CREATE_USED_ENGINE)
{
/*
TODO: we could just compare engine names here, without resolving.
But this optimization is too late for 10.1.
*/
Storage_engine_name *opt= lex->m_sql_cmd->option_storage_engine_name();
DBUG_ASSERT(opt); // lex->m_sql_cmd must be an Sql_cmd_create_table instance
if (opt->resolve_storage_engine_with_error(thd, &create_info->db_type,
false) ||
(create_info->db_type && create_info->db_type != engine))
return 1; return 1;
}
return 0; return 0;
} }
......
...@@ -25,6 +25,24 @@ ...@@ -25,6 +25,24 @@
#include "ha_blackhole.h" #include "ha_blackhole.h"
#include "sql_class.h" // THD, SYSTEM_THREAD_SLAVE_SQL #include "sql_class.h" // THD, SYSTEM_THREAD_SLAVE_SQL
/**
Checks whether the 'thd' parameter points to a slave applier thread and row-based
replication is in use.
A row event will have its thd->query() == NULL except in cases where
replicate_annotate_row_events is enabled. In the latter case thd->query()
points to the query received through the replicated annotate event
from the master.
@param thd pointer to a THD instance
@return TRUE if thread is slave applier and row based replication is in use
*/
static bool is_row_based_replication(THD *thd)
{
return thd->system_thread == SYSTEM_THREAD_SLAVE_SQL &&
(thd->query() == NULL || thd->variables.binlog_annotate_row_events);
}
/* Static declarations for handlerton */ /* Static declarations for handlerton */
static handler *blackhole_create_handler(handlerton *hton, static handler *blackhole_create_handler(handlerton *hton,
...@@ -109,7 +127,7 @@ int ha_blackhole::update_row(const uchar *old_data, uchar *new_data) ...@@ -109,7 +127,7 @@ int ha_blackhole::update_row(const uchar *old_data, uchar *new_data)
{ {
DBUG_ENTER("ha_blackhole::update_row"); DBUG_ENTER("ha_blackhole::update_row");
THD *thd= ha_thd(); THD *thd= ha_thd();
if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && thd->query() == NULL) if (is_row_based_replication(thd))
DBUG_RETURN(0); DBUG_RETURN(0);
DBUG_RETURN(HA_ERR_WRONG_COMMAND); DBUG_RETURN(HA_ERR_WRONG_COMMAND);
} }
...@@ -118,7 +136,7 @@ int ha_blackhole::delete_row(const uchar *buf) ...@@ -118,7 +136,7 @@ int ha_blackhole::delete_row(const uchar *buf)
{ {
DBUG_ENTER("ha_blackhole::delete_row"); DBUG_ENTER("ha_blackhole::delete_row");
THD *thd= ha_thd(); THD *thd= ha_thd();
if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && thd->query() == NULL) if (is_row_based_replication(thd))
DBUG_RETURN(0); DBUG_RETURN(0);
DBUG_RETURN(HA_ERR_WRONG_COMMAND); DBUG_RETURN(HA_ERR_WRONG_COMMAND);
} }
...@@ -135,7 +153,7 @@ int ha_blackhole::rnd_next(uchar *buf) ...@@ -135,7 +153,7 @@ int ha_blackhole::rnd_next(uchar *buf)
int rc; int rc;
DBUG_ENTER("ha_blackhole::rnd_next"); DBUG_ENTER("ha_blackhole::rnd_next");
THD *thd= ha_thd(); THD *thd= ha_thd();
if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && thd->query() == NULL) if (is_row_based_replication(thd))
rc= 0; rc= 0;
else else
rc= HA_ERR_END_OF_FILE; rc= HA_ERR_END_OF_FILE;
...@@ -220,7 +238,7 @@ int ha_blackhole::index_read_map(uchar * buf, const uchar * key, ...@@ -220,7 +238,7 @@ int ha_blackhole::index_read_map(uchar * buf, const uchar * key,
int rc; int rc;
DBUG_ENTER("ha_blackhole::index_read"); DBUG_ENTER("ha_blackhole::index_read");
THD *thd= ha_thd(); THD *thd= ha_thd();
if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && thd->query() == NULL) if (is_row_based_replication(thd))
rc= 0; rc= 0;
else else
rc= HA_ERR_END_OF_FILE; rc= HA_ERR_END_OF_FILE;
...@@ -235,7 +253,7 @@ int ha_blackhole::index_read_idx_map(uchar * buf, uint idx, const uchar * key, ...@@ -235,7 +253,7 @@ int ha_blackhole::index_read_idx_map(uchar * buf, uint idx, const uchar * key,
int rc; int rc;
DBUG_ENTER("ha_blackhole::index_read_idx"); DBUG_ENTER("ha_blackhole::index_read_idx");
THD *thd= ha_thd(); THD *thd= ha_thd();
if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && thd->query() == NULL) if (is_row_based_replication(thd))
rc= 0; rc= 0;
else else
rc= HA_ERR_END_OF_FILE; rc= HA_ERR_END_OF_FILE;
...@@ -249,7 +267,7 @@ int ha_blackhole::index_read_last_map(uchar * buf, const uchar * key, ...@@ -249,7 +267,7 @@ int ha_blackhole::index_read_last_map(uchar * buf, const uchar * key,
int rc; int rc;
DBUG_ENTER("ha_blackhole::index_read_last"); DBUG_ENTER("ha_blackhole::index_read_last");
THD *thd= ha_thd(); THD *thd= ha_thd();
if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL && thd->query() == NULL) if (is_row_based_replication(thd))
rc= 0; rc= 0;
else else
rc= HA_ERR_END_OF_FILE; rc= HA_ERR_END_OF_FILE;
......
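The predicate added in ha_blackhole.cc is small but central to MDEV-11094, so here is a self-contained restatement (hypothetical FakeThd stand-in rather than the real THD; illustration only) of the decision it encodes, including the new case where an annotated row event carries query text:

// Simplified model of is_row_based_replication(): the handler short-circuits
// when the caller is the slave SQL thread and the event is row-based, which
// since MDEV-11094 includes annotated row events (query text present, but
// binlog_annotate_row_events enabled).
#include <cstdio>

struct FakeThd                      // hypothetical stand-in for THD
{
  bool is_slave_sql_thread;         // thd->system_thread == SYSTEM_THREAD_SLAVE_SQL
  const char *query;                // thd->query()
  bool annotate_row_events;         // thd->variables.binlog_annotate_row_events
};

static bool is_row_based_replication(const FakeThd &thd)
{
  return thd.is_slave_sql_thread &&
         (thd.query == nullptr || thd.annotate_row_events);
}

int main()
{
  FakeThd plain_row_event=     {true, nullptr, false};
  FakeThd annotated_row_event= {true, "INSERT INTO t1 ...", true};
  FakeThd statement_event=     {true, "INSERT INTO t1 ...", false};
  printf("%d %d %d\n",
         is_row_based_replication(plain_row_event),      // 1
         is_row_based_replication(annotated_row_event),  // 1 (MDEV-11094 fix)
         is_row_based_replication(statement_event));     // 0
  return 0;
}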
...@@ -668,12 +668,8 @@ int toku_logger_find_logfiles (const char *directory, char ***resultp, int *n_lo ...@@ -668,12 +668,8 @@ int toku_logger_find_logfiles (const char *directory, char ***resultp, int *n_lo
snprintf(fname, fnamelen, "%s/%s", directory, de->d_name); snprintf(fname, fnamelen, "%s/%s", directory, de->d_name);
result[n_results++] = fname; result[n_results++] = fname;
} }
// Return them in increasing order. Set width to allow for newer log file names ("xxx.tokulog13") // Return them in increasing order.
// which are one character longer than old log file names ("xxx.tokulog2"). The comparison function qsort(result, n_results, sizeof(result[0]), logfilenamecompare);
// won't look beyond the terminating NUL, so an extra character in the comparison string doesn't matter.
// Allow room for terminating NUL after "xxx.tokulog13" even if result[0] is of form "xxx.tokulog2."
int width = sizeof(result[0]+2);
qsort(result, n_results, width, logfilenamecompare);
*resultp = result; *resultp = result;
*n_logfiles = n_results; *n_logfiles = n_results;
result[n_results]=0; // make a trailing null result[n_results]=0; // make a trailing null
......
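One clarification on the TokuDB hunk above (illustration only, not part of the commit): the removed width computation was confusing rather than numerically wrong. result is a char**, so result[0] is a char*, and sizeof(result[0] + 2) is simply the size of a pointer, the same value sizeof(result[0]) yields and exactly the element width qsort() expects:

// sizeof_demo.cc -- standalone illustration of why the simplification is safe.
#include <cstdio>

int main()
{
  char *result[4]= {nullptr, nullptr, nullptr, nullptr};
  // The "+ 2" only offsets the pointer before sizeof is evaluated; both
  // expressions are the size of a char*, i.e. the qsort() element width.
  printf("%zu %zu\n", sizeof(result[0] + 2), sizeof(result[0]));
  return 0;
}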
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc. Copyright (c) 2012, Facebook Inc.
Copyright (c) 2013, 2017, MariaDB Corporation. Copyright (c) 2013, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software the terms of the GNU General Public License as published by the Free Software
...@@ -2720,12 +2720,17 @@ recv_scan_log_recs( ...@@ -2720,12 +2720,17 @@ recv_scan_log_recs(
log_block_convert_lsn_to_no(scanned_lsn)); log_block_convert_lsn_to_no(scanned_lsn));
*/ */
if (no != log_block_convert_lsn_to_no(scanned_lsn) if (no != log_block_convert_lsn_to_no(scanned_lsn)) {
|| !log_block_checksum_is_ok_or_old_format(log_block, true)) { /* Garbage or an incompletely written log block.
We will not report any error, because this can happen
if (no == log_block_convert_lsn_to_no(scanned_lsn) when InnoDB was killed while it was writing
&& !log_block_checksum_is_ok_or_old_format( redo log. We simply treat this as an abrupt end of the
redo log. */
finished = true;
break;
} else if (!log_block_checksum_is_ok_or_old_format(
log_block, true)) { log_block, true)) {
fprintf(stderr, fprintf(stderr,
"InnoDB: Log block no %lu at" "InnoDB: Log block no %lu at"
" lsn " LSN_PF " has\n" " lsn " LSN_PF " has\n"
...@@ -2733,17 +2738,12 @@ recv_scan_log_recs( ...@@ -2733,17 +2738,12 @@ recv_scan_log_recs(
" contains %lu, should be %lu\n", " contains %lu, should be %lu\n",
(ulong) no, (ulong) no,
scanned_lsn, scanned_lsn,
(ulong) log_block_get_checksum( (ulong) log_block_get_checksum(log_block),
log_block), (ulong) log_block_calc_checksum(log_block));
(ulong) log_block_calc_checksum(
log_block));
}
maybe_encrypted = log_crypt_block_maybe_encrypted(log_block, maybe_encrypted = log_crypt_block_maybe_encrypted(log_block,
&log_crypt_err); &log_crypt_err);
/* Garbage or an incompletely written log block */
/* Print checkpoint encryption keys if present */ /* Print checkpoint encryption keys if present */
log_crypt_print_checkpoint_keys(log_block); log_crypt_print_checkpoint_keys(log_block);
finished = TRUE; finished = TRUE;
...@@ -2764,7 +2764,6 @@ recv_scan_log_recs( ...@@ -2764,7 +2764,6 @@ recv_scan_log_recs(
} }
break; break;
} }
if (log_block_get_flush_bit(log_block)) { if (log_block_get_flush_bit(log_block)) {
......