Commit 814205f3 authored by Marko Mäkelä

Merge 10.2 into 10.3

parents 89b463ee 28e713dc
......@@ -213,6 +213,14 @@ IF (WITH_TSAN)
MY_CHECK_AND_SET_COMPILER_FLAG("-fsanitize=thread" DEBUG RELWITHDEBINFO)
ENDIF()
OPTION(WITH_UBSAN "Enable undefined behavior sanitizer" OFF)
IF (WITH_UBSAN)
IF(SECURITY_HARDENED)
MESSAGE(FATAL_ERROR "WITH_UBSAN and SECURITY_HARDENED are mutually exclusive")
ENDIF()
MY_CHECK_AND_SET_COMPILER_FLAG("-fsanitize=undefined" DEBUG RELWITHDEBINFO)
ENDIF()
IF(NOT WITH_TSAN)
# Enable security hardening features, like most distributions do.
# In our benchmarks that costs about 1% of performance, depending on the load.
......
......@@ -34,8 +34,8 @@ ENDFOREACH()
# Ensure we have clean build for shared libraries
# without unresolved symbols
# Not supported with AddressSanitizer and ThreadSanitizer
IF(NOT WITH_ASAN AND NOT WITH_TSAN)
# Not supported with the clang sanitizers
IF(NOT WITH_ASAN AND NOT WITH_TSAN AND NOT WITH_UBSAN)
SET(LINK_FLAG_NO_UNDEFINED "-Wl,--no-undefined")
ENDIF()
......
# Copyright (c) 2009, 2018, Oracle and/or its affiliates.
# Copyright (c) 2011, 2018, MariaDB Corporation
# Copyright (c) 2011, 2019, MariaDB Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
......@@ -209,7 +209,7 @@ MACRO(MYSQL_ADD_PLUGIN)
ELSEIF(NOT CMAKE_SYSTEM_NAME STREQUAL "Linux")
TARGET_LINK_LIBRARIES (${target} mysqld)
ENDIF()
ELSEIF(CMAKE_SYSTEM_NAME STREQUAL "Linux" AND NOT WITH_ASAN AND NOT WITH_TSAN)
ELSEIF(CMAKE_SYSTEM_NAME STREQUAL "Linux" AND NOT WITH_ASAN AND NOT WITH_TSAN AND NOT WITH_UBSAN)
TARGET_LINK_LIBRARIES (${target} "-Wl,--no-undefined")
ENDIF()
......
......@@ -25,8 +25,10 @@ galera_as_slave_preordered : wsrep-preordered feature not merged to MariaDB
galera_as_slave_replication_bundle : MDEV-15785 OPTION_GTID_BEGIN is set in Gtid_log_event::do_apply_event()
galera_binlog_rows_query_log_events: MariaDB does not support binlog_rows_query_log_events
galera_flush : MariaDB does not have global.thread_statistics
galera_gcache_recover_manytrx : MDEV-18834 Galera test failure
galera_gcs_fc_limit : MDEV-17061 Timeout in wait_condition.inc for PROCESSLIST
galera_ist_progress: MDEV-15236 galera_ist_progress fails when trying to read transfer status
galera_ist_mariabackup : MDEV-18829 test leaves port open
galera_ist_progress: MDEV-15236 fails when trying to read transfer status
galera_kill_applier : race condition at the start of the test
galera_kill_ddl : MDEV-17108 Test failure on galera.galera_kill_ddl
galera_migrate : MariaDB does not support START SLAVE USER
......
connection node_1;
connection node_2;
connection node_1;
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
connection node_2;
......
connection node_1;
connection node_2;
connection node_1;
SET SESSION innodb_lock_wait_timeout=600;
SET SESSION lock_wait_timeout=600;
CREATE TABLE ten (f1 INTEGER);
CREATE TABLE ten (f1 INTEGER) engine=InnoDB;
INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, f2 INTEGER) Engine=InnoDB;
INSERT INTO t1 (f2) SELECT a1.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5;
......@@ -9,24 +11,24 @@ connection node_2;
SET SESSION wsrep_sync_wait = 0;
SET SESSION wsrep_sync_wait = 15;
SET GLOBAL wsrep_provider_options = 'repl.causal_read_timeout=PT1H';
SELECT COUNT(*) = 100000 FROM t1;
COUNT(*) = 100000
1
SELECT COUNT(*) FROM t1;
COUNT(*)
100000
INSERT INTO t1 (f2) SELECT a1.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5;
connection node_1;
SELECT COUNT(*) = 200000 FROM t1;
COUNT(*) = 200000
1
SELECT COUNT(*) FROM t1;
COUNT(*)
200000
UPDATE t1 SET f2 = 1;
connection node_2;
SELECT COUNT(*) = 200000 FROM t1 WHERE f2 = 1;
COUNT(*) = 200000
1
SELECT COUNT(*) FROM t1 WHERE f2 = 1;
COUNT(*)
200000
connection node_1;
START TRANSACTION;
SELECT COUNT(*) = 200000 FROM t1;
COUNT(*) = 200000
1
SELECT COUNT(*) FROM t1;
COUNT(*)
200000
UPDATE t1 SET f2 = 3;
connection node_2;
START TRANSACTION;
......
connection node_1;
connection node_2;
connection node_1;
connection node_2;
connection node_2;
CREATE TABLE t1(i INT) ENGINE=INNODB;
INSERT INTO t1 VALUES(1);
......
connection node_1;
SELECT COUNT(DISTINCT uuid) = 2 FROM mtr_wsrep_notify.membership;
COUNT(DISTINCT uuid) = 2
1
......
......@@ -5,7 +5,6 @@
--source include/big_test.inc
--source include/galera_cluster.inc
--source include/have_innodb.inc
--source include/have_mariabackup.inc
--let $node_1=node_1
......@@ -33,11 +32,11 @@ END|
DELIMITER ;|
--send CALL p1();
--sleep 2
--sleep 1
--connection node_2
--send CALL p1();
--sleep 2
--sleep 1
# Kill and restart node #2
......@@ -45,10 +44,7 @@ DELIMITER ;|
--connection node_2a
--source include/kill_galera.inc
--sleep 10
--source include/start_mysqld.inc
--sleep 25
--source include/wait_until_connected_again.inc
INSERT INTO t1 VALUES (DEFAULT);
......@@ -72,8 +68,6 @@ INSERT INTO t1 VALUES (DEFAULT);
--error 2013,2006
--reap
--sleep 10
# Confirm that the count is correct and that the cluster is intact
--connection node_1a
......
......@@ -3,7 +3,11 @@
#
--source include/galera_cluster.inc
--source include/have_innodb.inc
# Save original auto_increment_offset values.
--let $node_1=node_1
--let $node_2=node_2
--source include/auto_increment_offset_save.inc
--connection node_1
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
......@@ -29,4 +33,7 @@ SET SESSION wsrep_sync_wait = DEFAULT;
SELECT COUNT(*) = 1 FROM t1;
SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--let $node_2=node_2a
--source include/auto_increment_offset_restore.inc
DROP TABLE t1;
!include ../galera_2nodes.cnf
[mysqld]
innodb-status-output=ON
innodb-status-output-locks=ON
--source include/big_test.inc
--source include/galera_cluster.inc
--source include/have_innodb.inc
# Save original auto_increment_offset values.
--let $node_1=node_1
--let $node_2=node_2
--source include/auto_increment_offset_save.inc
--connection node_1
SET SESSION innodb_lock_wait_timeout=600;
SET SESSION lock_wait_timeout=600;
CREATE TABLE ten (f1 INTEGER);
CREATE TABLE ten (f1 INTEGER) engine=InnoDB;
INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, f2 INTEGER) Engine=InnoDB;
......@@ -20,19 +23,19 @@ SET SESSION wsrep_sync_wait = 15;
SET GLOBAL wsrep_provider_options = 'repl.causal_read_timeout=PT1H';
SELECT COUNT(*) = 100000 FROM t1;
SELECT COUNT(*) FROM t1;
INSERT INTO t1 (f2) SELECT a1.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5;
--connection node_1
SELECT COUNT(*) = 200000 FROM t1;
SELECT COUNT(*) FROM t1;
UPDATE t1 SET f2 = 1;
--connection node_2
SELECT COUNT(*) = 200000 FROM t1 WHERE f2 = 1;
SELECT COUNT(*) FROM t1 WHERE f2 = 1;
--connection node_1
START TRANSACTION;
SELECT COUNT(*) = 200000 FROM t1;
SELECT COUNT(*) FROM t1;
UPDATE t1 SET f2 = 3;
--connection node_2
......@@ -50,5 +53,7 @@ COMMIT;
--eval SET GLOBAL wsrep_provider_options = '$wsrep_provider_options_node2';
--enable_query_log
--source include/auto_increment_offset_restore.inc
DROP TABLE t1;
DROP TABLE ten;
......@@ -11,11 +11,6 @@
--let $node_2=node_2
--source include/auto_increment_offset_save.inc
# Save original auto_increment_offset values.
--let $node_1=node_1
--let $node_2=node_2
--source include/auto_increment_offset_save.inc
--connection node_2
--let $wsrep_cluster_address_saved = `SELECT @@global.wsrep_cluster_address`
......
SELECT VARIABLE_VALUE LIKE '%[::1]%' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_incoming_addresses';
VARIABLE_VALUE LIKE '%[::1]%'
1
SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
VARIABLE_VALUE = 3
1
SET GLOBAL wsrep_provider_options='gmcast.isolate=1';
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
SET GLOBAL wsrep_provider_options='gmcast.isolate=0';
SELECT COUNT(*) = 1 FROM t1;
COUNT(*) = 1
1
DROP TABLE t1;
include/assert_grep.inc [Streaming the backup to joiner at \[::1\]]
include/assert_grep.inc [async IST sender starting to serve tcp://\[::1\]:]
include/assert_grep.inc [IST receiver addr using tcp://\[::1\]]
include/assert_grep.inc [Prepared IST receiver, listening at: tcp://\[::1\]]
SELECT VARIABLE_VALUE LIKE '%[::1]%' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_incoming_addresses';
VARIABLE_VALUE LIKE '%[::1]%'
1
SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
VARIABLE_VALUE = 3
1
SET GLOBAL wsrep_provider_options='gmcast.isolate=1';
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
SET GLOBAL wsrep_provider_options='gmcast.isolate=0';
SELECT COUNT(*) = 1 FROM t1;
COUNT(*) = 1
1
DROP TABLE t1;
!include ../galera_3nodes.cnf
# decoy value - should not be read by mysqld or sst scripts
[mysqld]
innodb-data-home-dir=/tmp
[galera]
innodb-data-home-dir=
wsrep_sst_method=mariabackup
wsrep_sst_auth="root:"
wsrep_node_address=::1
[galera.1]
wsrep-cluster-address=gcomm://
wsrep_provider_options='base_host=[::1];base_port=@mysqld.1.#galera_port;gmcast.listen_addr=tcp://[::]:@mysqld.1.#galera_port;ist.recv_addr=[::1]:@mysqld.1.#ist_port'
wsrep_sst_receive_address='[::1]:@mysqld.1.#sst_port'
wsrep_node_incoming_address='[::1]:@mysqld.1.port'
wsrep_node_name=node_1
[galera.2]
wsrep_cluster_address='gcomm://[::1]:@mysqld.1.#galera_port'
wsrep_provider_options='base_host=[::1];base_port=@mysqld.2.#galera_port;gmcast.listen_addr=tcp://[::]:@mysqld.2.#galera_port;ist.recv_addr=[::1]:@mysqld.2.#ist_port'
wsrep_sst_receive_address='[::1]:@mysqld.2.#sst_port'
wsrep_node_incoming_address='[::1]:@mysqld.2.port'
wsrep_node_name=node_2
wsrep_sst_donor=node_1
[galera.3]
wsrep_cluster_address='gcomm://[::1]:@mysqld.1.#galera_port'
wsrep_provider_options='base_host=[::1];base_port=@mysqld.3.#galera_port;gmcast.listen_addr=tcp://[::]:@mysqld.3.#galera_port;ist.recv_addr=[::1]:@mysqld.3.#ist_port'
wsrep_sst_receive_address='[::1]:@mysqld.3.#sst_port'
wsrep_node_incoming_address='[::1]:@mysqld.3.port'
wsrep_node_name=node_3
wsrep_sst_donor=node_1
[SST]
transferfmt=@ENV.MTR_GALERA_TFMT
streamfmt=xbstream
sockopt=",pf=ip6"
--source include/galera_cluster.inc
--source include/check_ipv6.inc
--source suite/galera/include/have_mariabackup.inc
# Confirm that initial handshake happened over ipv6
SELECT VARIABLE_VALUE LIKE '%[::1]%' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_incoming_addresses';
SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
# Force IST
--connection node_2
SET GLOBAL wsrep_provider_options='gmcast.isolate=1';
--connection node_1
--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--source include/wait_condition.inc
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
--connection node_2
SET GLOBAL wsrep_provider_options='gmcast.isolate=0';
--let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--source include/wait_condition.inc
--let $wait_condition = SELECT VARIABLE_VALUE = 'ON' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_ready';
--source include/wait_condition.inc
SELECT COUNT(*) = 1 FROM t1;
DROP TABLE t1;
# Confirm that key messages around SST and IST reference IPv6
--connection node_1
--let $assert_file = $MYSQLTEST_VARDIR/log/mysqld.1.err
--let $assert_only_after = CURRENT_TEST
# The SSTs happen when the nodes are started for the first time
--let $assert_count= 2
--let $assert_text = Streaming the backup to joiner at \[::1\]
--let $assert_select = Streaming the backup to joiner at \[::1\]
--source include/assert_grep.inc
# There will be 1 IST donated from node_1 in Galera 3: the one forced
# during the actual test (the initial joins are served by SST).
--let $assert_count= 1
--let $assert_text = async IST sender starting to serve tcp://\[::1\]:
--let $assert_select = async IST sender starting to serve tcp://\[::1\]:
--source include/assert_grep.inc
--connection node_2
--let $assert_file = $MYSQLTEST_VARDIR/log/mysqld.2.err
--let $assert_only_after = CURRENT_TEST
# There is one IST on the joiner in Galera 3.
--let $assert_count= 1
--let $assert_text = IST receiver addr using tcp://\[::1\]
--let $assert_select = IST receiver addr using tcp://\[::1\]
--source include/assert_grep.inc
# There will be only one prepared IST, and in Galera 3 seqnos are not printed
--let $assert_count= 1
--let $assert_text = Prepared IST receiver, listening at: tcp://\[::1\]
--let $assert_select = Prepared IST receiver, listening at: tcp://\[::1\]
--source include/assert_grep.inc
!include ../galera_3nodes.cnf
# decoy value - should not be read by mysqld or sst scripts
[mysqld]
innodb-data-home-dir=/tmp
[mariadb]
innodb-data-home-dir=
wsrep_sst_method=rsync
wsrep_node_address=::1
[mariadb.1]
wsrep-cluster-address=gcomm://
wsrep_provider_options='base_host=[::1];base_port=@mysqld.1.#galera_port;gmcast.listen_addr=tcp://[::]:@mysqld.1.#galera_port;ist.recv_addr=[::1]:@mysqld.1.#ist_port'
wsrep_sst_receive_address='[::1]:@mysqld.1.#sst_port'
wsrep_node_incoming_address='[::1]:@mysqld.1.port'
[mariadb.2]
wsrep_cluster_address='gcomm://[::1]:@mysqld.1.#galera_port'
wsrep_provider_options='base_host=[::1];base_port=@mysqld.2.#galera_port;gmcast.listen_addr=tcp://[::]:@mysqld.2.#galera_port;ist.recv_addr=[::1]:@mysqld.2.#ist_port'
wsrep_sst_receive_address='[::1]:@mysqld.2.#sst_port'
wsrep_node_incoming_address='[::1]:@mysqld.2.port'
[mariadb.3]
wsrep_cluster_address='gcomm://[::1]:@mysqld.1.#galera_port'
wsrep_provider_options='base_host=[::1];base_port=@mysqld.3.#galera_port;gmcast.listen_addr=tcp://[::]:@mysqld.3.#galera_port;ist.recv_addr=[::1]:@mysqld.3.#ist_port'
wsrep_sst_receive_address='[::1]:@mysqld.3.#sst_port'
wsrep_node_incoming_address='[::1]:@mysqld.3.port'
[SST]
sockopt=",pf=ip6"
--source include/galera_cluster.inc
--source include/check_ipv6.inc
# Confirm that initial handshake happened over ipv6
SELECT VARIABLE_VALUE LIKE '%[::1]%' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_incoming_addresses';
SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
# Force IST
--connection node_2
SET GLOBAL wsrep_provider_options='gmcast.isolate=1';
--connection node_1
--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--source include/wait_condition.inc
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
--connection node_2
SET GLOBAL wsrep_provider_options='gmcast.isolate=0';
--let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--source include/wait_condition.inc
--let $wait_condition = SELECT VARIABLE_VALUE = 'ON' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_ready';
--source include/wait_condition.inc
SELECT COUNT(*) = 1 FROM t1;
DROP TABLE t1;
......@@ -7,7 +7,7 @@ if (!$wait_all_purged)
}
let $remaining_expect= `select concat('InnoDB ',$wait_all_purged)`;
let $wait_counter= 300;
let $wait_counter= 600;
while ($wait_counter)
{
--replace_regex /.*History list length ([0-9]+).*/\1/
......
......@@ -28,7 +28,6 @@ WSREP_SST_OPT_PSWD=${WSREP_SST_OPT_PSWD:-}
WSREP_SST_OPT_DEFAULT=""
WSREP_SST_OPT_EXTRA_DEFAULT=""
WSREP_SST_OPT_SUFFIX_DEFAULT=""
WSREP_SST_OPT_SUFFIX_VALUE=""
INNODB_DATA_HOME_DIR_ARG=""
while [ $# -gt 0 ]; do
......@@ -94,7 +93,6 @@ case "$1" in
;;
'--defaults-group-suffix')
readonly WSREP_SST_OPT_SUFFIX_DEFAULT="$1=$2"
readonly WSREP_SST_OPT_SUFFIX_VALUE="$2"
shift
;;
'--host')
......
#!/bin/bash -ue
# Copyright (C) 2013 Percona Inc
# Copyright (C) 2017 MariaDB
# Copyright (C) 2017-2019 MariaDB
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
......@@ -667,7 +667,7 @@ monitor_process()
exit 32
fi
if ! ps -p "${sst_stream_pid}" &>/dev/null; then
if ! ps -p "${sst_stream_pid}" &>/dev/null; then
break
fi
......@@ -707,10 +707,7 @@ if [ ! -z "$INNODB_DATA_HOME_DIR_ARG" ]; then
fi
# if INNODB_DATA_HOME_DIR env. variable is not set, try to get it from my.cnf
if [ -z "$INNODB_DATA_HOME_DIR" ]; then
INNODB_DATA_HOME_DIR=$(parse_cnf mysqld$WSREP_SST_OPT_SUFFIX_VALUE innodb-data-home-dir '')
fi
if [ -z "$INNODB_DATA_HOME_DIR" ]; then
INNODB_DATA_HOME_DIR=$(parse_cnf --mysqld innodb-data-home-dir "")
INNODB_DATA_HOME_DIR=$(parse_cnf --mysqld innodb-data-home-dir '')
fi
if [ ! -z "$INNODB_DATA_HOME_DIR" ]; then
INNOEXTRA+=" --innodb-data-home-dir=$INNODB_DATA_HOME_DIR"
......@@ -753,8 +750,8 @@ else
if [[ "$sstlogarchive" -eq 1 ]]
then
ARCHIVETIMESTAMP=$(date "+%Y.%m.%d-%H.%M.%S")
newfile=""
ARCHIVETIMESTAMP=$(date "+%Y.%m.%d-%H.%M.%S.%N")
newfile=""
if [[ ! -z "$sstlogarchivedir" ]]
then
......@@ -829,7 +826,7 @@ then
exit 93
fi
if [[ -z $(parse_cnf mysqld tmpdir "") && -z $(parse_cnf xtrabackup tmpdir "") ]];then
if [[ -z $(parse_cnf --mysqld tmpdir "") && -z $(parse_cnf xtrabackup tmpdir "") ]];then
xtmpdir=$(mktemp -d)
tmpopts=" --tmpdir=$xtmpdir "
wsrep_log_info "Using $xtmpdir as xtrabackup temporary directory"
......@@ -952,8 +949,8 @@ then
[[ -n $SST_PROGRESS_FILE ]] && touch $SST_PROGRESS_FILE
ib_home_dir=$INNODB_DATA_HOME_DIR
ib_log_dir=$(parse_cnf mysqld innodb-log-group-home-dir "")
ib_undo_dir=$(parse_cnf mysqld innodb-undo-directory "")
ib_log_dir=$(parse_cnf --mysqld innodb-log-group-home-dir "")
ib_undo_dir=$(parse_cnf --mysqld innodb-undo-directory "")
stagemsg="Joiner-Recv"
......@@ -1023,15 +1020,14 @@ then
jpid=$!
wsrep_log_info "Proceeding with SST"
wsrep_log_info "Cleaning the existing datadir and innodb-data/log directories"
if [ "${OS}" = "FreeBSD" ]; then
find -E $ib_home_dir $ib_log_dir $ib_undo_dir $DATA -mindepth 1 -prune -regex $cpat -o -exec rm -rfv {} 1>&2 \+
else
find $ib_home_dir $ib_log_dir $ib_undo_dir $DATA -mindepth 1 -prune -regex $cpat -o -exec rm -rfv {} 1>&2 \+
fi
fi
tempdir=$(parse_cnf mysqld log-bin "")
tempdir=$(parse_cnf --mysqld log-bin "")
if [[ -n ${tempdir:-} ]];then
binlog_dir=$(dirname $tempdir)
binlog_file=$(basename $tempdir)
......
......@@ -170,10 +170,7 @@ if [ ! -z "$INNODB_DATA_HOME_DIR_ARG" ]; then
fi
# if INNODB_DATA_HOME_DIR env. variable is not set, try to get it from my.cnf
if [ -z "$INNODB_DATA_HOME_DIR" ]; then
INNODB_DATA_HOME_DIR=$(parse_cnf mysqld$WSREP_SST_OPT_SUFFIX_VALUE innodb-data-home-dir '')
fi
if [ -z "$INNODB_DATA_HOME_DIR" ]; then
INNODB_DATA_HOME_DIR=$(parse_cnf --mysqld innodb-data-home-dir "")
INNODB_DATA_HOME_DIR=$(parse_cnf --mysqld innodb-data-home-dir '')
fi
if [ -n "$INNODB_DATA_HOME_DIR" ]; then
......
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2018, MariaDB Corporation.
Copyright (c) 2017, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
......@@ -442,39 +442,6 @@ struct que_fork_t{
/* Flag which is ORed to control structure statement node types */
#define QUE_NODE_CONTROL_STAT 1024
/* Query graph node types */
#define QUE_NODE_LOCK 1
#define QUE_NODE_INSERT 2
#define QUE_NODE_UPDATE 4
#define QUE_NODE_CURSOR 5
#define QUE_NODE_SELECT 6
#define QUE_NODE_AGGREGATE 7
#define QUE_NODE_FORK 8
#define QUE_NODE_THR 9
#define QUE_NODE_UNDO 10
#define QUE_NODE_COMMIT 11
#define QUE_NODE_ROLLBACK 12
#define QUE_NODE_PURGE 13
#define QUE_NODE_CREATE_TABLE 14
#define QUE_NODE_CREATE_INDEX 15
#define QUE_NODE_SYMBOL 16
#define QUE_NODE_RES_WORD 17
#define QUE_NODE_FUNC 18
#define QUE_NODE_ORDER 19
#define QUE_NODE_PROC (20 + QUE_NODE_CONTROL_STAT)
#define QUE_NODE_IF (21 + QUE_NODE_CONTROL_STAT)
#define QUE_NODE_WHILE (22 + QUE_NODE_CONTROL_STAT)
#define QUE_NODE_ASSIGNMENT 23
#define QUE_NODE_FETCH 24
#define QUE_NODE_OPEN 25
#define QUE_NODE_COL_ASSIGNMENT 26
#define QUE_NODE_FOR (27 + QUE_NODE_CONTROL_STAT)
#define QUE_NODE_RETURN 28
#define QUE_NODE_ROW_PRINTF 29
#define QUE_NODE_ELSIF 30
#define QUE_NODE_CALL 31
#define QUE_NODE_EXIT 32
#include "que0que.ic"
#endif
/*****************************************************************************
Copyright (c) 1996, 2009, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
......@@ -36,6 +37,39 @@ typedef struct que_fork_t que_t;
struct que_thr_t;
/* Query graph node types */
#define QUE_NODE_LOCK 1
#define QUE_NODE_INSERT 2
#define QUE_NODE_UPDATE 4
#define QUE_NODE_CURSOR 5
#define QUE_NODE_SELECT 6
#define QUE_NODE_AGGREGATE 7
#define QUE_NODE_FORK 8
#define QUE_NODE_THR 9
#define QUE_NODE_UNDO 10
#define QUE_NODE_COMMIT 11
#define QUE_NODE_ROLLBACK 12
#define QUE_NODE_PURGE 13
#define QUE_NODE_CREATE_TABLE 14
#define QUE_NODE_CREATE_INDEX 15
#define QUE_NODE_SYMBOL 16
#define QUE_NODE_RES_WORD 17
#define QUE_NODE_FUNC 18
#define QUE_NODE_ORDER 19
#define QUE_NODE_PROC (20 + QUE_NODE_CONTROL_STAT)
#define QUE_NODE_IF (21 + QUE_NODE_CONTROL_STAT)
#define QUE_NODE_WHILE (22 + QUE_NODE_CONTROL_STAT)
#define QUE_NODE_ASSIGNMENT 23
#define QUE_NODE_FETCH 24
#define QUE_NODE_OPEN 25
#define QUE_NODE_COL_ASSIGNMENT 26
#define QUE_NODE_FOR (27 + QUE_NODE_CONTROL_STAT)
#define QUE_NODE_RETURN 28
#define QUE_NODE_ROW_PRINTF 29
#define QUE_NODE_ELSIF 30
#define QUE_NODE_CALL 31
#define QUE_NODE_EXIT 32
/* Common struct at the beginning of each query graph node; the name of this
substruct must be 'common' */
......@@ -51,6 +85,11 @@ struct que_common_t{
symbol node or a function node, then we
have to free the data field in val
explicitly */
/** Constructor */
que_common_t(ulint type, que_node_t* parent)
: type(type), parent(parent), brother(), val(), val_buf_size()
{}
};
#endif
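The constructor added to the common node header above lets a node type fill in its leading 'common' substruct from its own initializer list, as purge_node_t now does. A minimal, self-contained C++ sketch of that convention follows; the *_sketch names and simplified typedefs are illustration-only stand-ins, not the actual InnoDB declarations.

/* A minimal sketch, assuming simplified stand-ins for que_node_t and ulint. */
#include <cstddef>

typedef void   que_node_t;
typedef size_t ulint;

enum { QUE_NODE_PURGE_SKETCH = 13 };

/* Common header that must be the first member and must be named 'common'. */
struct que_common_sketch_t {
    ulint       type;    /* query graph node type */
    que_node_t* parent;  /* back pointer to the parent node */

    que_common_sketch_t(ulint type, que_node_t* parent)
        : type(type), parent(parent) {}
};

/* A node type simply embeds the header as its first member and
   initializes it through the constructor instead of assigning fields. */
struct purge_node_sketch_t {
    que_common_sketch_t common;

    explicit purge_node_sketch_t(que_node_t* parent)
        : common(QUE_NODE_PURGE_SKETCH, parent) {}
};

int main() {
    purge_node_sketch_t node(nullptr);
    /* que_node_get_type()-style access: the header sits at offset 0. */
    return node.common.type == QUE_NODE_PURGE_SKETCH ? 0 : 1;
}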
/*****************************************************************************
Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2018, MariaDB Corporation.
Copyright (c) 2017, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
......@@ -55,22 +55,13 @@ dberr_t row_import_update_discarded_flag(trx_t* trx, table_id_t table_id,
bool discarded)
MY_ATTRIBUTE((nonnull, warn_unused_result));
/*****************************************************************//**
Update the (space, root page) of a table's indexes from the values
in the data dictionary.
/** Update the root page numbers and tablespace ID of a table.
@param[in,out] trx dictionary transaction
@param[in,out] table persistent table
@param[in] reset whether to reset the fields to FIL_NULL
@return DB_SUCCESS or error code */
dberr_t
row_import_update_index_root(
/*=========================*/
trx_t* trx, /*!< in/out: transaction that
covers the update */
const dict_table_t* table, /*!< in: Table for which we want
to set the root page_no */
bool reset, /*!< in: if true then set to
FIL_NUL */
bool dict_locked) /*!< in: Set to true if the
caller already owns the
dict_sys_t:: mutex. */
row_import_update_index_root(trx_t* trx, dict_table_t* table, bool reset)
MY_ATTRIBUTE((nonnull, warn_unused_result));
#endif /* row0import_h */
/*****************************************************************************
Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, MariaDB Corporation.
Copyright (c) 2017, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
......@@ -35,16 +35,6 @@ Created 3/14/1997 Heikki Tuuri
#include "ut0vec.h"
#include "row0mysql.h"
/** Create a purge node to a query graph.
@param[in] parent parent node, i.e., a thr node
@param[in] heap memory heap where created
@return own: purge node */
purge_node_t*
row_purge_node_create(
que_thr_t* parent,
mem_heap_t* heap)
MY_ATTRIBUTE((warn_unused_result));
/** Determines if it is possible to remove a secondary index entry.
Removal is possible if the secondary index entry does not refer to any
not delete marked version of a clustered index record where DB_TRX_ID
......@@ -102,6 +92,13 @@ struct purge_node_t{
ulint rec_type;/*!< undo log record type: TRX_UNDO_INSERT_REC,
... */
private:
/** latest unavailable table ID (do not bother looking up again) */
table_id_t unavailable_table_id;
/** the latest modification of the table definition identified by
unavailable_table_id, or TRX_ID_MAX */
trx_id_t def_trx_id;
public:
dict_table_t* table; /*!< table where purge is done */
ulint cmpl_info;/* compiler analysis info of an update */
......@@ -118,19 +115,27 @@ struct purge_node_t{
mem_heap_t* heap; /*!< memory heap used as auxiliary storage for
row; this must be emptied after a successful
purge of a row */
ibool found_clust;/* TRUE if the clustered index record
ibool found_clust;/*!< whether the clustered index record
determined by ref was found in the clustered
index, and we were able to position pcur on
it */
btr_pcur_t pcur; /*!< persistent cursor used in searching the
clustered index record */
ibool done; /* Debug flag */
#ifdef UNIV_DEBUG
/** whether the operation is in progress */
bool in_progress;
#endif
trx_id_t trx_id; /*!< trx id for this purging record */
/** Virtual column information about opening of MariaDB table.
It resets after processing each undo log record. */
purge_vcol_info_t vcol_info;
/** Constructor */
explicit purge_node_t(que_thr_t* parent) :
common(QUE_NODE_PURGE, parent), heap(mem_heap_create(256))
{}
#ifdef UNIV_DEBUG
/***********************************************************//**
Validate the persistent cursor. The purge node has two references
......@@ -146,6 +151,52 @@ struct purge_node_t{
computation.
@return true if the table failed to open. */
bool vcol_op_failed() const { return !vcol_info.validate(); }
/** Determine if a table should be skipped in purge.
@param[in] table_id table identifier
@return whether to skip the table lookup and processing */
bool is_skipped(table_id_t id) const
{
return id == unavailable_table_id && trx_id <= def_trx_id;
}
/** Remember that a table should be skipped in purge.
@param[in] id table identifier
@param[in] limit last transaction for which to skip */
void skip(table_id_t id, trx_id_t limit)
{
DBUG_ASSERT(limit >= trx_id);
unavailable_table_id = id;
def_trx_id = limit;
}
/** Start processing an undo log record. */
void start()
{
ut_ad(in_progress);
DBUG_ASSERT(common.type == QUE_NODE_PURGE);
table = NULL;
row = NULL;
ref = NULL;
index = NULL;
update = NULL;
found_clust = FALSE;
rec_type = ULINT_UNDEFINED;
cmpl_info = ULINT_UNDEFINED;
}
/** Reset the state at end
@return the query graph parent */
que_node_t* end()
{
DBUG_ASSERT(common.type == QUE_NODE_PURGE);
undo_recs = NULL;
ut_d(in_progress = false);
vcol_info.reset();
mem_heap_empty(heap);
return common.parent;
}
};
#endif
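The unavailable_table_id/def_trx_id pair above works as a one-entry negative cache: once a table cannot be opened for purge, later undo records for that table (up to the remembered definition transaction) are skipped without another dictionary lookup. Below is a hedged, standalone sketch of just that logic; the types and names are simplified stand-ins rather than the InnoDB definitions.

#include <cstdint>
#include <iostream>

typedef uint64_t table_id_t;
typedef uint64_t trx_id_t;
static const trx_id_t TRX_ID_MAX_SKETCH = UINT64_MAX;

struct skip_cache_sketch {
    table_id_t unavailable_table_id = 0; /* latest unavailable table */
    trx_id_t   def_trx_id = 0;           /* its latest definition change */
    trx_id_t   trx_id = 0;               /* trx id of the current undo record */

    /* Skip if we already failed on this table and no newer DDL could
       have made it available for this record. */
    bool is_skipped(table_id_t id) const {
        return id == unavailable_table_id && trx_id <= def_trx_id;
    }

    /* Remember a table that could not be opened. */
    void skip(table_id_t id, trx_id_t limit) {
        unavailable_table_id = id;
        def_trx_id = limit;
    }
};

int main() {
    skip_cache_sketch node;
    node.trx_id = 100;
    node.skip(42, TRX_ID_MAX_SKETCH);  /* table 42 was dropped */
    std::cout << node.is_skipped(42)   /* 1: skip without a lookup */
              << node.is_skipped(7)    /* 0: a different table */
              << std::endl;
    return 0;
}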
......@@ -3066,23 +3066,13 @@ row_import_read_cfg(
return(err);
}
/*****************************************************************//**
Update the <space, root page> of a table's indexes from the values
in the data dictionary.
/** Update the root page numbers and tablespace ID of a table.
@param[in,out] trx dictionary transaction
@param[in,out] table persistent table
@param[in] reset whether to reset the fields to FIL_NULL
@return DB_SUCCESS or error code */
dberr_t
row_import_update_index_root(
/*=========================*/
trx_t* trx, /*!< in/out: transaction that
covers the update */
const dict_table_t* table, /*!< in: Table for which we want
to set the root page_no */
bool reset, /*!< in: if true then set to
FIL_NUL */
bool dict_locked) /*!< in: Set to true if the
caller already owns the
dict_sys_t:: mutex. */
row_import_update_index_root(trx_t* trx, dict_table_t* table, bool reset)
{
const dict_index_t* index;
que_t* graph = 0;
......@@ -3100,9 +3090,7 @@ row_import_update_index_root(
"WHERE TABLE_ID = :table_id AND ID = :index_id;\n"
"END;\n"};
if (!dict_locked) {
mutex_enter(&dict_sys->mutex);
}
table->def_trx_id = trx->id;
for (index = dict_table_get_first_index(table);
index != 0;
......@@ -3177,10 +3165,6 @@ row_import_update_index_root(
que_graph_free(graph);
if (!dict_locked) {
mutex_exit(&dict_sys->mutex);
}
return(err);
}
......@@ -4115,7 +4099,7 @@ row_import_for_mysql(
row_mysql_lock_data_dictionary(trx);
/* Update the root pages of the table's indexes. */
err = row_import_update_index_root(trx, table, false, true);
err = row_import_update_index_root(trx, table, false);
if (err != DB_SUCCESS) {
return(row_import_error(prebuilt, trx, err));
......
......@@ -3028,7 +3028,7 @@ row_discard_tablespace(
}
/* Update the index root pages in the system tables, on disk */
err = row_import_update_index_root(trx, table, true, true);
err = row_import_update_index_root(trx, table, true);
if (err != DB_SUCCESS) {
return(err);
......
/*****************************************************************************
Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2018, MariaDB Corporation.
Copyright (c) 2017, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
......@@ -57,31 +57,6 @@ check.
If you make a change in this module make sure that no codepath is
introduced where a call to log_free_check() is bypassed. */
/** Create a purge node to a query graph.
@param[in] parent parent node, i.e., a thr node
@param[in] heap memory heap where created
@return own: purge node */
purge_node_t*
row_purge_node_create(
que_thr_t* parent,
mem_heap_t* heap)
{
purge_node_t* node;
ut_ad(parent != NULL);
ut_ad(heap != NULL);
node = static_cast<purge_node_t*>(
mem_heap_zalloc(heap, sizeof(*node)));
node->common.type = QUE_NODE_PURGE;
node->common.parent = parent;
node->done = TRUE;
node->heap = mem_heap_create(256);
return(node);
}
/***********************************************************//**
Repositions the pcur in the purge node on the clustered index record,
if found. If the record is not found, close pcur.
......@@ -1066,6 +1041,10 @@ row_purge_parse_undo_rec(
break;
}
if (node->is_skipped(table_id)) {
return false;
}
/* Prevent DROP TABLE etc. from running when we are doing the purge
for this row */
......@@ -1075,15 +1054,18 @@ row_purge_parse_undo_rec(
node->table = dict_table_open_on_id(
table_id, FALSE, DICT_TABLE_OP_NORMAL);
trx_id_t trx_id;
if (node->table == NULL) {
/* The table has been dropped: no need to do purge */
trx_id = TRX_ID_MAX;
goto err_exit;
}
ut_ad(!node->table->is_temporary());
if (!fil_table_accessible(node->table)) {
goto close_exit;
goto inaccessible;
}
switch (type) {
......@@ -1119,11 +1101,20 @@ row_purge_parse_undo_rec(
/* The table was corrupt in the data dictionary.
dict_set_corrupted() works on an index, and
we do not have an index to call it with. */
close_exit:
inaccessible:
DBUG_ASSERT(table_id == node->table->id);
trx_id = node->table->def_trx_id;
if (!trx_id) {
trx_id = TRX_ID_MAX;
}
dict_table_close(node->table, FALSE, FALSE);
node->table = NULL;
err_exit:
rw_lock_s_unlock(dict_operation_lock);
if (table_id) {
node->skip(table_id, trx_id);
}
return(false);
}
......@@ -1283,25 +1274,11 @@ row_purge_end(
/*==========*/
que_thr_t* thr) /*!< in: query thread */
{
purge_node_t* node;
ut_ad(thr);
node = static_cast<purge_node_t*>(thr->run_node);
ut_ad(que_node_get_type(node) == QUE_NODE_PURGE);
thr->run_node = que_node_get_parent(node);
node->undo_recs = NULL;
node->done = TRUE;
node->vcol_info.reset();
thr->run_node = static_cast<purge_node_t*>(thr->run_node)->end();
ut_a(thr->run_node != NULL);
mem_heap_empty(node->heap);
}
/***********************************************************//**
......@@ -1319,18 +1296,7 @@ row_purge_step(
node = static_cast<purge_node_t*>(thr->run_node);
node->table = NULL;
node->row = NULL;
node->ref = NULL;
node->index = NULL;
node->update = NULL;
node->found_clust = FALSE;
node->rec_type = ULINT_UNDEFINED;
node->cmpl_info = ULINT_UNDEFINED;
ut_a(!node->done);
ut_ad(que_node_get_type(node) == QUE_NODE_PURGE);
node->start();
if (!(node->undo_recs == NULL || ib_vector_is_empty(node->undo_recs))) {
trx_purge_rec_t*purge_rec;
......
/*****************************************************************************
Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2018, MariaDB Corporation.
Copyright (c) 2017, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
......@@ -148,7 +148,8 @@ purge_graph_build()
for (ulint i = 0; i < srv_n_purge_threads; ++i) {
que_thr_t* thr = que_thr_create(fork, heap, NULL);
thr->child = row_purge_node_create(thr, heap);
thr->child = new(mem_heap_zalloc(heap, sizeof(purge_node_t)))
purge_node_t(thr);
}
return(fork);
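Dropping row_purge_node_create() in favour of placement new keeps the node inside the query-graph heap while still running the new purge_node_t constructor. A rough standalone illustration of the allocation pattern follows, using calloc as a stand-in for the InnoDB-specific mem_heap_zalloc().

#include <cstdlib>
#include <new>

struct node_sketch_t {
    void* parent;
    int   type;
    explicit node_sketch_t(void* parent) : parent(parent), type(13) {}
};

int main() {
    /* Zero-filled buffer from an arena-style allocator (calloc here). */
    void* buf = std::calloc(1, sizeof(node_sketch_t));
    if (!buf) return 1;

    /* Construct the node in place; no separate factory function needed. */
    node_sketch_t* node = new (buf) node_sketch_t(nullptr);

    node->~node_sketch_t(); /* an arena would reclaim the memory wholesale */
    std::free(buf);
    return 0;
}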
......@@ -1404,7 +1405,7 @@ ulint
trx_purge_attach_undo_recs(ulint n_purge_threads)
{
que_thr_t* thr;
ulint i = 0;
ulint i;
ulint n_pages_handled = 0;
ulint n_thrs = UT_LIST_GET_LEN(purge_sys.query->thrs);
......@@ -1412,6 +1413,8 @@ trx_purge_attach_undo_recs(ulint n_purge_threads)
purge_sys.head = purge_sys.tail;
#ifdef UNIV_DEBUG
i = 0;
/* Debug code to validate some pre-requisites and reset done flag. */
for (thr = UT_LIST_GET_FIRST(purge_sys.query->thrs);
thr != NULL && i < n_purge_threads;
......@@ -1422,16 +1425,16 @@ trx_purge_attach_undo_recs(ulint n_purge_threads)
/* Get the purge node. */
node = (purge_node_t*) thr->child;
ut_a(que_node_get_type(node) == QUE_NODE_PURGE);
ut_a(node->undo_recs == NULL);
ut_a(node->done);
node->done = FALSE;
ut_ad(que_node_get_type(node) == QUE_NODE_PURGE);
ut_ad(node->undo_recs == NULL);
ut_ad(!node->in_progress);
ut_d(node->in_progress = true);
}
/* There should never be fewer nodes than threads; the inverse,
however, is allowed because we only use purge threads as needed. */
ut_a(i == n_purge_threads);
ut_ad(i == n_purge_threads);
#endif
/* Fetch and parse the UNDO records. The UNDO records are added
to a per purge node vector. */
......