Commit 4ee28865 authored by Nirbhay Choubey

MDEV-5146: Bulk loads into partitioned table not working

When wsrep is enabled, for any update on InnoDB tables the
corresponding keys are appended to Galera's transaction writeset
(wsrep_append_keys()). However, for LOAD DATA this step was skipped
when binary logging was disabled or was not ROW-based.
As a result, LOAD DATA into non-partitioned tables replicated fine,
because wsrep implicitly enables binary logging if it has not been
enabled explicitly, but LOAD DATA into partitioned tables did not,
since binary logging is temporarily disabled for partitioned tables
(ha_partition::write_row()).

Fixed by removing the unwanted conditions from the check.
Also backported some changes from 10.0-galera to ensure that
wsrep_load_data_splitting affects LOAD DATA commands only.
parent ccd39b2d
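A minimal sketch of the scenario this commit addresses, assuming a two-node
Galera cluster and a hypothetical input file /tmp/bulk.dat with one integer
per line; before this fix, the rows loaded on node 1 did not replicate to
node 2:

-- On node 1: bulk-load into a partitioned InnoDB table.
SET GLOBAL wsrep_load_data_splitting = ON;
CREATE TABLE t1 (pk INT PRIMARY KEY)
  ENGINE=InnoDB PARTITION BY HASH(pk) PARTITIONS 2;
LOAD DATA INFILE '/tmp/bulk.dat' INTO TABLE t1;

-- On node 2: before this fix the table stayed empty, because the row keys
-- for partitioned tables were never appended to the transaction writeset.
SELECT COUNT(*) FROM t1;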
@@ -20,4 +20,40 @@ pk i
SELECT * FROM t1;
pk i
DROP TABLE t1;
#
# MDEV-5146: Bulk loads into partitioned table not working
#
# Case 1: wsrep_load_data_splitting = ON & LOAD DATA with 20002
# entries.
SET GLOBAL wsrep_load_data_splitting = ON;
CREATE TABLE t1 (pk INT PRIMARY KEY)
ENGINE=INNODB PARTITION BY HASH(pk) PARTITIONS 2;
SELECT COUNT(*) = 20002 FROM t1;
COUNT(*) = 20002
1
wsrep_last_committed_diff
1
DROP TABLE t1;
# Case 2: wsrep_load_data_splitting = ON & LOAD DATA with 101 entries.
SET GLOBAL wsrep_load_data_splitting = ON;
CREATE TABLE t1 (pk INT PRIMARY KEY)
ENGINE=INNODB PARTITION BY HASH(pk) PARTITIONS 2;
SELECT COUNT(*) = 101 FROM t1;
COUNT(*) = 101
1
wsrep_last_committed_diff
1
DROP TABLE t1;
# Case 3: wsrep_load_data_splitting = OFF & LOAD DATA with 20002
# entries.
SET GLOBAL wsrep_load_data_splitting = OFF;
CREATE TABLE t1 (pk INT PRIMARY KEY)
ENGINE=INNODB PARTITION BY HASH(pk) PARTITIONS 2;
SELECT COUNT(*) = 20002 FROM t1;
COUNT(*) = 20002
1
wsrep_last_committed_diff
1
DROP TABLE t1;
SET GLOBAL wsrep_load_data_splitting = 1;;
# End of test
@@ -27,5 +27,118 @@ SELECT * FROM t1;
# Cleanup
DROP TABLE t1;
--echo #
--echo # MDEV-5146: Bulk loads into partitioned table not working
--echo #
# Create 2 files with 20002 & 101 entries in each.
--perl
open(FILE, ">", "$ENV{'MYSQLTEST_VARDIR'}/tmp/mdev-5146-1.dat") or die;
foreach my $i (1..20002) {
print FILE "$i\n";
}
open(FILE, ">", "$ENV{'MYSQLTEST_VARDIR'}/tmp/mdev-5146-2.dat") or die;
foreach my $i (1..101) {
print FILE "$i\n";
}
EOF
--connection node_1
--let $wsrep_load_data_splitting_orig = `SELECT @@wsrep_load_data_splitting`
--echo # Case 1: wsrep_load_data_splitting = ON & LOAD DATA with 20002
--echo # entries.
SET GLOBAL wsrep_load_data_splitting = ON;
CREATE TABLE t1 (pk INT PRIMARY KEY)
ENGINE=INNODB PARTITION BY HASH(pk) PARTITIONS 2;
# Record wsrep_last_committed as it was before LOAD DATA
--let $wsrep_last_committed_before = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed'`
--disable_query_log
--eval LOAD DATA INFILE '$MYSQLTEST_VARDIR/tmp/mdev-5146-1.dat' INTO TABLE t1;
--enable_query_log
--let $wsrep_last_committed_after = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed'`
--connection node_2
SELECT COUNT(*) = 20002 FROM t1;
# LOAD-ing 20002 rows causes 3 commits to be registered (with splitting ON, a commit is made after every 10000 rows, plus the final commit: 10000 + 10000 + 2)
--disable_query_log
--eval SELECT $wsrep_last_committed_after = $wsrep_last_committed_before + 3 AS wsrep_last_committed_diff;
--enable_query_log
DROP TABLE t1;
--echo # Case 2: wsrep_load_data_splitting = ON & LOAD DATA with 101 entries.
--connection node_1
SET GLOBAL wsrep_load_data_splitting = ON;
CREATE TABLE t1 (pk INT PRIMARY KEY)
ENGINE=INNODB PARTITION BY HASH(pk) PARTITIONS 2;
# Record wsrep_last_committed as it was before LOAD DATA
--let $wsrep_last_committed_before = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed'`
--disable_query_log
--eval LOAD DATA INFILE '$MYSQLTEST_VARDIR/tmp/mdev-5146-2.dat' INTO TABLE t1;
--enable_query_log
--let $wsrep_last_committed_after = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed'`
--connection node_2
SELECT COUNT(*) = 101 FROM t1;
# LOAD-ing 101 rows causes 1 commit to be registered (fewer than 10000 rows, so only the final commit)
--disable_query_log
--eval SELECT $wsrep_last_committed_after = $wsrep_last_committed_before + 1 AS wsrep_last_committed_diff;
--enable_query_log
DROP TABLE t1;
--echo # Case 3: wsrep_load_data_splitting = OFF & LOAD DATA with 20002
--echo # entries.
--connection node_1
SET GLOBAL wsrep_load_data_splitting = OFF;
CREATE TABLE t1 (pk INT PRIMARY KEY)
ENGINE=INNODB PARTITION BY HASH(pk) PARTITIONS 2;
# Record wsrep_last_committed as it was before LOAD DATA
--let $wsrep_last_committed_before = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed'`
--disable_query_log
--eval LOAD DATA INFILE '$MYSQLTEST_VARDIR/tmp/mdev-5146-1.dat' INTO TABLE t1;
--enable_query_log
--let $wsrep_last_committed_after = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed'`
--connection node_2
SELECT COUNT(*) = 20002 FROM t1;
# LOAD-ing 20002 rows causes 1 commit to be registered (splitting is OFF, so the whole load commits as a single transaction)
--disable_query_log
--eval SELECT $wsrep_last_committed_after = $wsrep_last_committed_before + 1 AS wsrep_last_committed_diff;
--enable_query_log
DROP TABLE t1;
--connection node_1
# Restore the original value
--eval SET GLOBAL wsrep_load_data_splitting = $wsrep_load_data_splitting_orig;
# Cleanup
remove_file '$MYSQLTEST_VARDIR/tmp/mdev-5146-1.dat';
remove_file '$MYSQLTEST_VARDIR/tmp/mdev-5146-2.dat';
--source include/galera_end.inc
--echo # End of test
@@ -5753,7 +5753,12 @@ ha_innobase::write_row(
;
} else if (src_table == prebuilt->table) {
#ifdef WITH_WSREP
if (wsrep_on(user_thd)) {
if (wsrep_on(user_thd) &&
wsrep_load_data_splitting &&
sql_command == SQLCOM_LOAD &&
!thd_test_options(user_thd,
OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))
{
switch (wsrep_run_wsrep_commit(user_thd, wsrep_hton, 1))
{
case WSREP_TRX_OK:
@@ -5780,7 +5785,12 @@ ha_innobase::write_row(
prebuilt->sql_stat_start = TRUE;
} else {
#ifdef WITH_WSREP
if (wsrep_on(user_thd)) {
if (wsrep_on(user_thd) &&
wsrep_load_data_splitting &&
sql_command == SQLCOM_LOAD &&
!thd_test_options(user_thd,
OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))
{
switch (wsrep_run_wsrep_commit(user_thd, wsrep_hton, 1))
{
case WSREP_TRX_OK:
@@ -5990,12 +6000,13 @@ ha_innobase::write_row(
prebuilt->table->flags,
user_thd);
#ifdef WITH_WSREP
if (!error_result && wsrep_thd_exec_mode(user_thd) == LOCAL_STATE &&
wsrep_on(user_thd) && !wsrep_consistency_check(user_thd) &&
(sql_command != SQLCOM_LOAD ||
thd_binlog_format(user_thd) == BINLOG_FORMAT_ROW)) {
if (wsrep_append_keys(user_thd, false, record, NULL)) {
if (!error_result &&
wsrep_thd_exec_mode(user_thd) == LOCAL_STATE &&
wsrep_on(user_thd) &&
!wsrep_consistency_check(user_thd))
{
if (wsrep_append_keys(user_thd, false, record, NULL))
{
DBUG_PRINT("wsrep", ("row key failed"));
error_result = HA_ERR_INTERNAL_ERROR;
goto wsrep_error;
......
@@ -6717,7 +6717,12 @@ ha_innobase::write_row(
;
} else if (src_table == prebuilt->table) {
#ifdef WITH_WSREP
if (wsrep_on(user_thd)) {
if (wsrep_on(user_thd) &&
wsrep_load_data_splitting &&
sql_command == SQLCOM_LOAD &&
!thd_test_options(user_thd,
OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))
{
switch (wsrep_run_wsrep_commit(user_thd, wsrep_hton, 1))
{
case WSREP_TRX_OK:
@@ -6744,7 +6749,12 @@ ha_innobase::write_row(
prebuilt->sql_stat_start = TRUE;
} else {
#ifdef WITH_WSREP
if (wsrep_on(user_thd)) {
if (wsrep_on(user_thd) &&
wsrep_load_data_splitting &&
sql_command == SQLCOM_LOAD &&
!thd_test_options(user_thd,
OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))
{
switch (wsrep_run_wsrep_commit(user_thd, wsrep_hton, 1))
{
case WSREP_TRX_OK:
@@ -6960,12 +6970,13 @@ ha_innobase::write_row(
prebuilt->table->flags,
user_thd);
#ifdef WITH_WSREP
if (!error_result && wsrep_thd_exec_mode(user_thd) == LOCAL_STATE &&
wsrep_on(user_thd) && !wsrep_consistency_check(user_thd) &&
(sql_command != SQLCOM_LOAD ||
thd_binlog_format(user_thd) == BINLOG_FORMAT_ROW)) {
if (wsrep_append_keys(user_thd, false, record, NULL)) {
if (!error_result &&
wsrep_thd_exec_mode(user_thd) == LOCAL_STATE &&
wsrep_on(user_thd) &&
!wsrep_consistency_check(user_thd))
{
if (wsrep_append_keys(user_thd, false, record, NULL))
{
DBUG_PRINT("wsrep", ("row key failed"));
error_result = HA_ERR_INTERNAL_ERROR;
goto wsrep_error;
......