Commit 4c798b42 authored by unknown

wl2325 wl2324


mysql-test/include/have_ndb_extra.inc:
  New BitKeeper file ``mysql-test/include/have_ndb_extra.inc''
mysql-test/include/not_ndb.inc:
  New BitKeeper file ``mysql-test/include/not_ndb.inc''
mysql-test/r/have_ndb_extra.require:
  New BitKeeper file ``mysql-test/r/have_ndb_extra.require''
mysql-test/r/ndb_alter_table_row.result:
  New BitKeeper file ``mysql-test/r/ndb_alter_table_row.result''
mysql-test/r/ndb_alter_table_stm.result:
  New BitKeeper file ``mysql-test/r/ndb_alter_table_stm.result''
mysql-test/r/ndb_binlog_basic.result:
  New BitKeeper file ``mysql-test/r/ndb_binlog_basic.result''
mysql-test/r/ndb_binlog_multi.result:
  New BitKeeper file ``mysql-test/r/ndb_binlog_multi.result''
mysql-test/r/ndb_multi_row.result:
  New BitKeeper file ``mysql-test/r/ndb_multi_row.result''
mysql-test/r/not_ndb.require:
  New BitKeeper file ``mysql-test/r/not_ndb.require''
mysql-test/r/rpl_ndb_bank.result:
  New BitKeeper file ``mysql-test/r/rpl_ndb_bank.result''
mysql-test/r/rpl_ndb_basic.result:
  New BitKeeper file ``mysql-test/r/rpl_ndb_basic.result''
mysql-test/r/rpl_ndb_disk.result:
  New BitKeeper file ``mysql-test/r/rpl_ndb_disk.result''
mysql-test/r/rpl_ndb_idempotent.result:
  New BitKeeper file ``mysql-test/r/rpl_ndb_idempotent.result''
mysql-test/r/rpl_ndb_load.result:
  New BitKeeper file ``mysql-test/r/rpl_ndb_load.result''
mysql-test/r/rpl_ndb_multi.result:
  New BitKeeper file ``mysql-test/r/rpl_ndb_multi.result''
mysql-test/r/rpl_ndb_sync.result:
  New BitKeeper file ``mysql-test/r/rpl_ndb_sync.result''
mysql-test/r/rpl_row_basic_7ndb.result:
  New BitKeeper file ``mysql-test/r/rpl_row_basic_7ndb.result''
mysql-test/t/ndb_alter_table_row.test:
  New BitKeeper file ``mysql-test/t/ndb_alter_table_row.test''
mysql-test/t/ndb_alter_table_stm.test:
  New BitKeeper file ``mysql-test/t/ndb_alter_table_stm.test''
mysql-test/t/ndb_binlog_basic.test:
  New BitKeeper file ``mysql-test/t/ndb_binlog_basic.test''
mysql-test/t/ndb_binlog_multi.test:
  New BitKeeper file ``mysql-test/t/ndb_binlog_multi.test''
mysql-test/t/ndb_multi_row.test:
  New BitKeeper file ``mysql-test/t/ndb_multi_row.test''
mysql-test/t/rpl_ndb_bank.test:
  New BitKeeper file ``mysql-test/t/rpl_ndb_bank.test''
mysql-test/t/rpl_ndb_basic.test:
  New BitKeeper file ``mysql-test/t/rpl_ndb_basic.test''
mysql-test/t/rpl_ndb_disk.test:
  New BitKeeper file ``mysql-test/t/rpl_ndb_disk.test''
mysql-test/t/rpl_ndb_idempotent.test:
  New BitKeeper file ``mysql-test/t/rpl_ndb_idempotent.test''
mysql-test/t/rpl_ndb_load.test:
  New BitKeeper file ``mysql-test/t/rpl_ndb_load.test''
mysql-test/t/rpl_ndb_multi.test:
  New BitKeeper file ``mysql-test/t/rpl_ndb_multi.test''
mysql-test/t/rpl_ndb_sync.test:
  New BitKeeper file ``mysql-test/t/rpl_ndb_sync.test''
mysql-test/t/rpl_row_basic_7ndb.test:
  New BitKeeper file ``mysql-test/t/rpl_row_basic_7ndb.test''
sql/ha_ndbcluster_binlog.cc:
  New BitKeeper file ``sql/ha_ndbcluster_binlog.cc''
sql/ha_ndbcluster_binlog.h:
  New BitKeeper file ``sql/ha_ndbcluster_binlog.h''
sql/ha_ndbcluster_tables.h:
  New BitKeeper file ``sql/ha_ndbcluster_tables.h''
sql/rpl_injector.cc:
  New BitKeeper file ``sql/rpl_injector.cc''
sql/rpl_injector.h:
  New BitKeeper file ``sql/rpl_injector.h''
storage/ndb/include/kernel/signaldata/DihFragCount.hpp:
  New BitKeeper file ``storage/ndb/include/kernel/signaldata/DihFragCount.hpp''
parent 5872e5ae
......@@ -852,10 +852,15 @@ static VAR *var_obtain(const char *name, int len)
return v;
}
/*
- if variable starts with a $ it is regarded as a local test variable
- if not, it is treated as an environment variable, and the corresponding
environment variable will be updated
*/
int var_set(const char *var_name, const char *var_name_end,
const char *var_val, const char *var_val_end)
{
int digit;
int digit, result, env_var= 0;
VAR* v;
DBUG_ENTER("var_set");
DBUG_PRINT("enter", ("var_name: '%.*s' = '%.*s' (length: %d)",
......@@ -863,11 +868,11 @@ int var_set(const char *var_name, const char *var_name_end,
(int) (var_val_end - var_val), var_val,
(int) (var_val_end - var_val)));
if (*var_name++ != '$')
{
var_name--;
die("Variable name in %s does not start with '$'", var_name);
}
if (*var_name != '$')
env_var= 1;
else
var_name++;
digit = *var_name - '0';
if (!(digit < 10 && digit >= 0))
{
......@@ -875,7 +880,23 @@ int var_set(const char *var_name, const char *var_name_end,
}
else
v = var_reg + digit;
DBUG_RETURN(eval_expr(v, var_val, (const char**)&var_val_end));
result= eval_expr(v, var_val, (const char**) &var_val_end);
if (env_var)
{
char buf[1024];
memcpy(buf, v->name, v->name_len);
buf[v->name_len]= 0;
if (v->int_dirty)
{
sprintf(v->str_val, "%d", v->int_val);
v->int_dirty= 0;
v->str_val_len= strlen(v->str_val);
}
setenv(buf, v->str_val, 1);
}
DBUG_RETURN(result);
}
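
In mysqltest terms, the behaviour added above can be sketched as follows (hypothetical test-script lines; the variable names are invented for illustration and are not part of this change):

  let $local_var= 1;       # starts with '$': stays a mysqltest-internal variable
  let MY_EXPORTED_VAR= 2;  # no '$': additionally exported via setenv("MY_EXPORTED_VAR", "2", 1)
  --exec env | grep MY_EXPORTED_VAR

Because the value is placed in mysqltest's own environment, child processes started with --exec inherit it, which a plain $-variable does not provide.
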
......@@ -1483,6 +1504,10 @@ int do_sync_with_master(struct st_query *query)
return do_sync_with_master2(offset);
}
/*
when the ndb binlog is on, this call will wait until the last updated epoch
(locally in the mysqld) has been received into the binlog
*/
int do_save_master_pos()
{
MYSQL_RES* res;
......@@ -1494,6 +1519,89 @@ int do_save_master_pos()
rpl_parse = mysql_rpl_parse_enabled(mysql);
mysql_disable_rpl_parse(mysql);
#ifdef HAVE_NDB_BINLOG
/*
Wait for ndb binlog to be up-to-date with all changes
done on the local mysql server
*/
{
ulong have_ndbcluster;
if (mysql_query(mysql, query= "show variables like 'have_ndbcluster'"))
die("At line %u: failed in %s: %d: %s", start_lineno, query,
mysql_errno(mysql), mysql_error(mysql));
if (!(res= mysql_store_result(mysql)))
die("line %u: mysql_store_result() retuned NULL for '%s'", start_lineno,
query);
if (!(row= mysql_fetch_row(res)))
die("line %u: empty result in %s", start_lineno, query);
have_ndbcluster= strcmp("YES", row[1]) == 0;
mysql_free_result(res);
if (have_ndbcluster)
{
ulonglong epoch, tmp_epoch= 0;
int count= 0;
do
{
const char binlog[]= "binlog";
const char latest_trans_epoch[]=
"latest_trans_epoch=";
const char latest_applied_binlog_epoch[]=
"latest_applied_binlog_epoch=";
if (count)
sleep(1);
if (mysql_query(mysql, query= "show engine ndb status"))
die("At line %u: failed in '%s': %d: %s", start_lineno, query,
mysql_errno(mysql), mysql_error(mysql));
if (!(res= mysql_store_result(mysql)))
die("line %u: mysql_store_result() retuned NULL for '%s'",
start_lineno, query);
while ((row= mysql_fetch_row(res)))
{
if (strcmp(row[1], binlog) == 0)
{
const char *status= row[2];
/* latest_trans_epoch */
if (count == 0)
{
while (*status && strncmp(status, latest_trans_epoch,
sizeof(latest_trans_epoch)-1))
status++;
if (*status)
{
status+= sizeof(latest_trans_epoch)-1;
epoch= strtoull(status, (char**) 0, 10);
}
else
die("line %u: result does not contain '%s' in '%s'",
start_lineno, latest_trans_epoch, query);
}
/* latest_applied_binlog_epoch */
while (*status && strncmp(status, latest_applied_binlog_epoch,
sizeof(latest_applied_binlog_epoch)-1))
status++;
if (*status)
{
status+= sizeof(latest_applied_binlog_epoch)-1;
tmp_epoch= strtoull(status, (char**) 0, 10);
}
else
die("line %u: result does not contain '%s' in '%s'",
start_lineno, latest_applied_binlog_epoch, query);
break;
}
}
mysql_free_result(res);
if (!row)
die("line %u: result does not contain '%s' in '%s'",
start_lineno, binlog, query);
count++;
} while (tmp_epoch < epoch && count <= 3);
}
}
#endif
if (mysql_query(mysql, query= "show master status"))
die("failed in show master status: %d: %s",
mysql_errno(mysql), mysql_error(mysql));
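
For orientation, the #ifdef HAVE_NDB_BINLOG block added above is roughly equivalent to the following manual SQL session (illustrative only; <E> and <A> are placeholders, not literal output):

  SHOW VARIABLES LIKE 'have_ndbcluster';  -- only poll when the value is YES
  SHOW ENGINE NDB STATUS;                 -- find the row whose Name is 'binlog'
  -- read latest_trans_epoch=<E> and latest_applied_binlog_epoch=<A> from its Status text,
  -- re-issue SHOW ENGINE NDB STATUS (with a one-second sleep, a few retries at most)
  -- until <A> has caught up with <E>, and only then:
  SHOW MASTER STATUS;
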
......@@ -1544,7 +1652,8 @@ int do_let(struct st_query *query)
while (*p && (*p != '=') && !my_isspace(charset_info,*p))
p++;
var_name_end= p;
if (var_name+1 == var_name_end)
if (var_name == var_name_end ||
(var_name+1 == var_name_end && *var_name == '$'))
die("Missing variable name in let");
while (my_isspace(charset_info,*p))
p++;
......
......@@ -87,6 +87,11 @@ AC_DEFUN([MYSQL_CHECK_NDB_OPTIONS], [
[Extra CFLAGS for ndb compile]),
[ndb_ccflags=${withval}],
[ndb_ccflags=""])
AC_ARG_WITH([ndb-binlog],
[
--without-ndb-binlog Disable ndb binlog],
[ndb_binlog="$withval"],
[ndb_binlog="default"])
case "$ndb_ccflags" in
"yes")
......@@ -185,6 +190,7 @@ AC_DEFUN([MYSQL_SETUP_NDBCLUSTER], [
ndbcluster_libs="\$(top_builddir)/storage/ndb/src/.libs/libndbclient.a"
ndbcluster_system_libs=""
ndb_mgmclient_libs="\$(top_builddir)/storage/ndb/src/mgmclient/libndbmgmclient.la"
mysql_se_objs="$mysql_se_objs ha_ndbcluster_binlog.o"
MYSQL_CHECK_NDB_OPTIONS
NDBCLUSTER_WORKAROUNDS
......@@ -219,6 +225,25 @@ AC_DEFUN([MYSQL_SETUP_NDBCLUSTER], [
ndb_port="1186"
fi
have_ndb_binlog="no"
if test X"$ndb_binlog" = Xdefault ||
test X"$ndb_binlog" = Xyes
then
if test X"$have_row_based" = Xyes
then
have_ndb_binlog="yes"
fi
fi
if test X"$have_ndb_binlog" = Xyes
then
AC_DEFINE([HAVE_NDB_BINLOG], [1],
[Including Ndb Cluster Binlog])
AC_MSG_RESULT([Including Ndb Cluster Binlog])
else
AC_MSG_RESULT([Not including Ndb Cluster Binlog])
fi
ndb_transporter_opt_objs=""
if test "$ac_cv_func_shmget" = "yes" &&
test "$ac_cv_func_shmat" = "yes" &&
......
......@@ -154,7 +154,14 @@ enum ha_extra_function {
to overwrite entire row.
*/
HA_EXTRA_KEYREAD_PRESERVE_FIELDS,
HA_EXTRA_MMAP
HA_EXTRA_MMAP,
/*
Ignore if a tuple is not found, continue processing the
transaction and ignore that 'row'. Needed for idempotency
handling on the slave
*/
HA_EXTRA_IGNORE_NO_KEY,
HA_EXTRA_NO_IGNORE_NO_KEY
};
/* The following is parameter to ha_panic() */
......
......@@ -49,7 +49,7 @@ enum enum_server_command
COM_TIME, COM_DELAYED_INSERT, COM_CHANGE_USER, COM_BINLOG_DUMP,
COM_TABLE_DUMP, COM_CONNECT_OUT, COM_REGISTER_SLAVE,
COM_STMT_PREPARE, COM_STMT_EXECUTE, COM_STMT_SEND_LONG_DATA, COM_STMT_CLOSE,
COM_STMT_RESET, COM_SET_OPTION, COM_STMT_FETCH,
COM_STMT_RESET, COM_SET_OPTION, COM_STMT_FETCH, COM_DAEMON,
/* don't forget to update const char *command_name[] in sql_parse.cc */
/* Must be last */
......
......@@ -66,11 +66,13 @@ sqlsources = derror.cc field.cc field_conv.cc strfunc.cc filesort.cc \
parse_file.cc sql_view.cc sql_trigger.cc my_decimal.cc \
event_executor.cc event.cc event_timed.cc \
rpl_filter.cc sql_partition.cc handlerton.cc sql_plugin.cc \
sql_tablespace.cc
sql_tablespace.cc \
rpl_injector.cc
libmysqld_int_a_SOURCES= $(libmysqld_sources) $(libmysqlsources) $(sqlsources)
EXTRA_libmysqld_a_SOURCES = ha_innodb.cc ha_berkeley.cc ha_archive.cc \
ha_blackhole.cc ha_federated.cc ha_ndbcluster.cc \
ha_ndbcluster_binlog.cc \
ha_partition.cc
libmysqld_a_DEPENDENCIES= @mysql_se_objs@
libmysqld_a_SOURCES=
......@@ -102,6 +104,9 @@ ha_berkeley.o: ha_berkeley.cc
ha_ndbcluster.o:ha_ndbcluster.cc
$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<
ha_ndbcluster_binlog.o: ha_ndbcluster_binlog.cc
$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<
# Until we can remove dependency on ha_ndbcluster.h
handler.o: handler.cc
$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<
......
-- require r/have_ndb_extra.require
eval select $NDB_EXTRA_TEST;
-- require r/not_ndb.require
disable_query_log;
# so that both DISABLED and NO are output as NO
-- replace_result DISABLED NO
show variables like "have_ndbcluster";
enable_query_log;
......@@ -594,3 +594,6 @@ CREATE TABLE event (
comment varchar(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '',
PRIMARY KEY (db,name)
) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT 'Events';
CREATE DATABASE IF NOT EXISTS cluster_replication;
CREATE TABLE IF NOT EXISTS cluster_replication.binlog_index (Position BIGINT UNSIGNED NOT NULL, File VARCHAR(255) NOT NULL, epoch BIGINT UNSIGNED NOT NULL, inserts BIGINT UNSIGNED NOT NULL, updates BIGINT UNSIGNED NOT NULL, deletes BIGINT UNSIGNED NOT NULL, schemaops BIGINT UNSIGNED NOT NULL, PRIMARY KEY(epoch)) ENGINE=MYISAM;
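
The cluster_replication.binlog_index table added here maps each NDB epoch to a binlog file and position, together with per-epoch insert/update/delete/schema-op counts. A typical lookup, mirroring what the new rpl_ndb_idempotent test does, looks roughly like this (@the_epoch and the other @-names are ordinary user variables chosen by the test, not fixed identifiers):

  SELECT @the_pos:=Position, @the_file:=SUBSTRING_INDEX(File, '/', -1)
  FROM cluster_replication.binlog_index WHERE epoch = @the_epoch;
  -- @the_epoch is obtained from cluster_replication.apply_status on the slave;
  -- the resulting file/position can then be fed to CHANGE MASTER TO on the slave.
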
......@@ -140,6 +140,7 @@ our $glob_scriptname= undef;
our $glob_timers= undef;
our $glob_use_running_server= 0;
our $glob_use_running_ndbcluster= 0;
our $glob_use_running_ndbcluster_slave= 0;
our $glob_use_embedded_server= 0;
our @glob_test_mode;
......@@ -233,6 +234,8 @@ our $instance_manager;
our $opt_ndbcluster_port;
our $opt_ndbconnectstring;
our $opt_ndbcluster_port_slave;
our $opt_ndbconnectstring_slave;
our $opt_no_manager; # Does nothing now, we never use manager
our $opt_manager_port; # Does nothing now, we never use manager
......@@ -300,12 +303,16 @@ our $opt_udiff;
our $opt_skip_ndbcluster;
our $opt_with_ndbcluster;
our $opt_skip_ndbcluster_slave;
our $opt_with_ndbcluster_slave;
our $opt_ndb_extra_test;
our $exe_ndb_mgm;
our $path_ndb_tools_dir;
our $path_ndb_backup_dir;
our $file_ndb_testrun_log;
our $flag_ndb_status_ok= 1;
our $flag_ndb_slave_status_ok= 1;
######################################################################
#
......@@ -322,9 +329,13 @@ sub kill_running_server ();
sub kill_and_cleanup ();
sub check_ssl_support ();
sub check_ndbcluster_support ();
sub rm_ndbcluster_tables ($);
sub ndbcluster_install ();
sub ndbcluster_start ();
sub ndbcluster_stop ();
sub ndbcluster_install_slave ();
sub ndbcluster_start_slave ();
sub ndbcluster_stop_slave ();
sub run_benchmarks ($);
sub run_tests ();
sub mysql_install_db ();
......@@ -493,6 +504,7 @@ sub command_line_setup () {
my $opt_master_myport= 9306;
my $opt_slave_myport= 9308;
$opt_ndbcluster_port= 9350;
$opt_ndbcluster_port_slave= 9358;
my $im_port= 9310;
my $im_mysqld1_port= 9312;
my $im_mysqld2_port= 9314;
......@@ -529,6 +541,10 @@ sub command_line_setup () {
'force' => \$opt_force,
'with-ndbcluster' => \$opt_with_ndbcluster,
'skip-ndbcluster|skip-ndb' => \$opt_skip_ndbcluster,
'with-ndbcluster-slave' => \$opt_with_ndbcluster_slave,
'skip-ndbcluster-slave|skip-ndb-slave'
=> \$opt_skip_ndbcluster_slave,
'ndb-extra-test' => \$opt_ndb_extra_test,
'do-test=s' => \$opt_do_test,
'suite=s' => \$opt_suite,
'skip-rpl' => \$opt_skip_rpl,
......@@ -539,6 +555,7 @@ sub command_line_setup () {
'master_port=i' => \$opt_master_myport,
'slave_port=i' => \$opt_slave_myport,
'ndbcluster_port=i' => \$opt_ndbcluster_port,
'ndbcluster_port_slave=i' => \$opt_ndbcluster_port_slave,
'manager-port=i' => \$opt_manager_port, # Currently not used
'im-port=i' => \$im_port, # Instance Manager port.
'im-mysqld1-port=i' => \$im_mysqld1_port, # Port of mysqld, controlled by IM
......@@ -553,6 +570,7 @@ sub command_line_setup () {
# Run test on running server
'extern' => \$opt_extern,
'ndbconnectstring=s' => \$opt_ndbconnectstring,
'ndbconnectstring-slave=s' => \$opt_ndbconnectstring_slave,
# Debugging
'gdb' => \$opt_gdb,
......@@ -759,9 +777,25 @@ sub command_line_setup () {
$opt_ndbconnectstring= "host=localhost:$opt_ndbcluster_port";
}
if ( $opt_ndbconnectstring_slave )
{
$glob_use_running_ndbcluster_slave= 1;
$opt_with_ndbcluster_slave= 1;
}
else
{
$opt_ndbconnectstring_slave= "host=localhost:$opt_ndbcluster_port_slave";
}
if ( $opt_skip_ndbcluster )
{
$opt_with_ndbcluster= 0;
$opt_skip_ndbcluster_slave= 1;
}
if ( $opt_skip_ndbcluster_slave )
{
$opt_with_ndbcluster_slave= 0;
}
# The ":s" in the argument spec, means we have three different cases
......@@ -855,6 +889,8 @@ sub command_line_setup () {
path_mysock => "$sockdir/slave.sock",
path_myport => $opt_slave_myport,
start_timeout => 400,
ndbcluster => 1, # ndbcluster not started
};
$slave->[1]=
......@@ -1188,6 +1224,8 @@ sub kill_running_server () {
ndbcluster_stop();
$master->[0]->{'ndbcluster'}= 1;
ndbcluster_stop_slave();
$slave->[0]->{'ndbcluster'}= 1;
}
}
......@@ -1358,6 +1396,15 @@ sub ndbcluster_start () {
return 0;
}
sub rm_ndbcluster_tables ($) {
my $dir= shift;
foreach my $bin ( glob("$dir/cluster_replication/apply_status*"),
glob("$dir/cluster_replication/schema*") )
{
unlink($bin);
}
}
sub ndbcluster_stop () {
if ( ! $opt_with_ndbcluster or $glob_use_running_ndbcluster )
......@@ -1371,6 +1418,71 @@ sub ndbcluster_stop () {
"--stop"],
"", "/dev/null", "", "");
rm_ndbcluster_tables ($master->[0]->{'path_myddir'});
rm_ndbcluster_tables ($master->[1]->{'path_myddir'});
return;
}
sub ndbcluster_install_slave () {
if ( ! $opt_with_ndbcluster_slave or $glob_use_running_ndbcluster_slave )
{
return 0;
}
mtr_report("Install ndbcluster slave");
if ( mtr_run("$glob_mysql_test_dir/ndb/ndbcluster",
["--port=$opt_ndbcluster_port_slave",
"--data-dir=$opt_vardir",
"--small",
"--ndbd-nodes=1",
"--initial"],
"", "", "", "") )
{
mtr_error("Error ndbcluster_install_slave");
return 1;
}
ndbcluster_stop_slave();
$slave->[0]->{'ndbcluster'}= 1;
return 0;
}
sub ndbcluster_start_slave () {
if ( ! $opt_with_ndbcluster_slave or $glob_use_running_ndbcluster_slave )
{
return 0;
}
# FIXME, we want to _append_ output to file $file_ndb_testrun_log instead of /dev/null
if ( mtr_run("$glob_mysql_test_dir/ndb/ndbcluster",
["--port=$opt_ndbcluster_port_slave",
"--data-dir=$opt_vardir",
"--ndbd-nodes=1"],
"", "/dev/null", "", "") )
{
mtr_error("Error ndbcluster_start_slave");
return 1;
}
return 0;
}
sub ndbcluster_stop_slave () {
if ( ! $opt_with_ndbcluster_slave or $glob_use_running_ndbcluster_slave )
{
return;
}
# FIXME, we want to _append_ output to file $file_ndb_testrun_log instead of /dev/null
mtr_run("$glob_mysql_test_dir/ndb/ndbcluster",
["--port=$opt_ndbcluster_port_slave",
"--data-dir=$opt_vardir",
"--stop"],
"", "/dev/null", "", "");
rm_ndbcluster_tables ($slave->[0]->{'path_myddir'});
return;
}
......@@ -1525,6 +1637,13 @@ sub mysql_install_db () {
$flag_ndb_status_ok= 0;
}
if ( ndbcluster_install_slave() )
{
# failed to install, disable usage but flag that it's not ok
$opt_with_ndbcluster_slave= 0;
$flag_ndb_slave_status_ok= 0;
}
return 0;
}
......@@ -1848,6 +1967,18 @@ sub run_testcase ($) {
{
if ( ! $slave->[$idx]->{'pid'} )
{
if ( $idx == 0)
{
if ( $slave->[0]->{'ndbcluster'} )
{
$slave->[0]->{'ndbcluster'}= ndbcluster_start_slave();
if ( $slave->[0]->{'ndbcluster'} )
{
report_failure_and_restart($tinfo);
return;
}
}
}
$slave->[$idx]->{'pid'}=
mysqld_start('slave',$idx,
$tinfo->{'slave_opt'}, $tinfo->{'slave_mi'});
......@@ -2115,6 +2246,12 @@ sub mysqld_arguments ($$$$$) {
{
mtr_add_arg($args, "%s--skip-ndbcluster", $prefix);
}
if ( $opt_with_ndbcluster )
{
mtr_add_arg($args, "%s--ndbcluster", $prefix);
mtr_add_arg($args, "%s--ndb-connectstring=%s", $prefix,
$opt_ndbconnectstring);
}
}
if ( $type eq 'slave' )
......@@ -2171,6 +2308,17 @@ sub mysqld_arguments ($$$$$) {
mtr_add_arg($args, "%s--server-id=%d", $prefix, $slave_server_id);
mtr_add_arg($args, "%s--rpl-recovery-rank=%d", $prefix, $slave_rpl_rank);
}
if ( $opt_skip_ndbcluster_slave )
{
mtr_add_arg($args, "%s--skip-ndbcluster", $prefix);
}
if ( $idx == 0 and $opt_with_ndbcluster_slave )
{
mtr_add_arg($args, "%s--ndbcluster", $prefix);
mtr_add_arg($args, "%s--ndb-connectstring=%s", $prefix,
$opt_ndbconnectstring_slave);
}
} # end slave
if ( $opt_debug )
......@@ -2187,13 +2335,6 @@ sub mysqld_arguments ($$$$$) {
}
}
if ( $opt_with_ndbcluster )
{
mtr_add_arg($args, "%s--ndbcluster", $prefix);
mtr_add_arg($args, "%s--ndb-connectstring=%s", $prefix,
$opt_ndbconnectstring);
}
# FIXME always set nowdays??? SMALL_SERVER
mtr_add_arg($args, "%s--key_buffer_size=1M", $prefix);
mtr_add_arg($args, "%s--sort_buffer=256K", $prefix);
......@@ -2408,6 +2549,12 @@ sub stop_slaves () {
}
}
if ( ! $slave->[0]->{'ndbcluster'} )
{
ndbcluster_stop_slave();
$slave->[0]->{'ndbcluster'}= 1;
}
mtr_stop_mysqld_servers(\@args);
}
......@@ -2617,6 +2764,8 @@ sub run_mysqltest ($) {
$ENV{'MYSQL_MY_PRINT_DEFAULTS'}= $exe_my_print_defaults;
$ENV{'NDB_STATUS_OK'}= $flag_ndb_status_ok;
$ENV{'NDB_SLAVE_STATUS_OK'}= $flag_ndb_slave_status_ok;
$ENV{'NDB_EXTRA_TEST'}= $opt_ndb_extra_test;
$ENV{'NDB_MGM'}= $exe_ndb_mgm;
$ENV{'NDB_BACKUP_DIR'}= $path_ndb_backup_dir;
$ENV{'NDB_TOOLS_DIR'}= $path_ndb_tools_dir;
......
This diff is collapsed.
......@@ -6,7 +6,7 @@ test_SCRIPTS = ndbcluster
noinst_HEADERS = ndbcluster.sh
dist_test_DATA = ndb_config_2_node.ini
dist_test_DATA = ndb_config_2_node.ini ndb_config_1_node.ini
SUFFIXES = .sh
......
......@@ -47,6 +47,7 @@ create database mysqltest;
show databases;
Database
information_schema
cluster_replication
mysql
mysqltest
test
......@@ -58,6 +59,7 @@ drop database mysqltest;
show databases;
Database
information_schema
cluster_replication
mysql
test
drop database mysqltest;
......
......@@ -14,6 +14,7 @@ NULL test latin1 latin1_swedish_ci NULL
select schema_name from information_schema.schemata;
schema_name
information_schema
cluster_replication
mysql
test
show databases like 't%';
......@@ -22,6 +23,7 @@ test
show databases;
Database
information_schema
cluster_replication
mysql
test
show databases where `database` = 't%';
......@@ -55,6 +57,7 @@ TABLE_PRIVILEGES
TRIGGERS
VIEWS
USER_PRIVILEGES
binlog_index
columns_priv
db
event
......@@ -331,6 +334,7 @@ create view v0 (c) as select schema_name from information_schema.schemata;
select * from v0;
c
information_schema
cluster_replication
mysql
test
explain select * from v0;
......@@ -728,7 +732,7 @@ CREATE TABLE t_crashme ( f1 BIGINT);
CREATE VIEW a1 (t_CRASHME) AS SELECT f1 FROM t_crashme GROUP BY f1;
CREATE VIEW a2 AS SELECT t_CRASHME FROM a1;
count(*)
106
107
drop view a2, a1;
drop table t_crashme;
select table_schema,table_name, column_name from
......@@ -810,6 +814,7 @@ delete from mysql.db where user='mysqltest_4';
flush privileges;
SELECT table_schema, count(*) FROM information_schema.TABLES GROUP BY TABLE_SCHEMA;
table_schema count(*)
cluster_replication 1
information_schema 19
mysql 19
create table t1 (i int, j int);
......
cluster_replication.binlog_index OK
mysql.columns_priv OK
mysql.db OK
mysql.event OK
......
......@@ -215,12 +215,11 @@ a long $where variable content
mysqltest: At line 1: Missing arguments to let
mysqltest: At line 1: Missing variable name in let
mysqltest: At line 1: Variable name in hi=hi does not start with '$'
mysqltest: At line 1: Missing assignment operator in let
mysqltest: At line 1: Missing assignment operator in let
mysqltest: At line 1: Missing arguments to let
mysqltest: At line 1: Missing variable name in let
mysqltest: At line 1: Variable name in =hi does not start with '$'
mysqltest: At line 1: Missing variable name in let
mysqltest: At line 1: Missing assignment operator in let
mysqltest: At line 1: Missing file name in source
mysqltest: At line 1: Could not open file ./non_existingFile
......
......@@ -170,35 +170,6 @@ c
4
5
drop table t1;
create table t1 ( a int primary key, b varchar(10), c varchar(10), index (b) )
engine=ndb;
insert into t1 values (1,'one','one'), (2,'two','two'), (3,'three','three');
create index c on t1(c);
select * from t1 where b = 'two';
a b c
2 two two
alter table t1 drop index c;
select * from t1 where b = 'two';
ERROR HY000: Can't lock file (errno: 159)
select * from t1 where b = 'two';
a b c
2 two two
drop table t1;
create table t3 (a int primary key) engine=ndbcluster;
begin;
insert into t3 values (1);
alter table t3 rename t4;
delete from t3;
insert into t3 values (1);
commit;
select * from t3;
ERROR HY000: Can't lock file (errno: 155)
select * from t4;
a
1
drop table t4;
show tables;
Tables_in_test
create table t1 (
ai bigint auto_increment,
c001 int(11) not null,
......
DROP TABLE IF EXISTS t1;
create table t1 ( a int primary key, b varchar(10), c varchar(10), index (b) )
engine=ndb;
insert into t1 values (1,'one','one'), (2,'two','two'), (3,'three','three');
create index c on t1(c);
select * from t1 where c = 'two';
a b c
2 two two
alter table t1 drop index c;
select * from t1 where c = 'two';
a b c
2 two two
drop table t1;
create table t3 (a int primary key) engine=ndbcluster;
begin;
insert into t3 values (1);
alter table t3 rename t4;
commit;
select * from t3;
ERROR 42S02: Table 'test.t3' doesn't exist
select * from t4;
a
1
drop table t4;
show tables;
Tables_in_test
DROP TABLE IF EXISTS t1;
create table t1 ( a int primary key, b varchar(10), c varchar(10), index (b) )
engine=ndb;
insert into t1 values (1,'one','one'), (2,'two','two'), (3,'three','three');
create index c on t1(c);
select * from t1 where c = 'two';
a b c
2 two two
alter table t1 drop index c;
select * from t1 where c = 'two';
ERROR HY000: Can't lock file (errno: 159)
select * from t1 where c = 'two';
a b c
2 two two
drop table t1;
create table t3 (a int primary key) engine=ndbcluster;
begin;
insert into t3 values (1);
alter table t3 rename t4;
delete from t3;
insert into t3 values (1);
commit;
select * from t3;
ERROR HY000: Can't lock file (errno: 155)
select * from t4;
a
1
drop table t4;
show tables;
Tables_in_test
......@@ -6,6 +6,13 @@ attr1 INT NOT NULL,
attr2 INT,
attr3 VARCHAR(10)
) ENGINE=ndbcluster;
drop table t1;
CREATE TABLE t1 (
pk1 INT NOT NULL PRIMARY KEY,
attr1 INT NOT NULL,
attr2 INT,
attr3 VARCHAR(10)
) ENGINE=ndbcluster;
SHOW INDEX FROM t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment
t1 0 PRIMARY 1 pk1 A 0 NULL NULL BTREE
......
drop table if exists t1, t2;
drop database if exists mysqltest;
create database mysqltest;
use mysqltest;
drop database mysqltest;
use test;
create table t1 (a int primary key) engine=ndb;
insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
select @max_epoch:=max(epoch)-1 from cluster_replication.binlog_index;
@max_epoch:=max(epoch)-1
#
delete from t1;
alter table t1 add (b int);
insert into t1 values (3,3),(4,4);
alter table t1 rename t2;
begin;
insert into t2 values (1,1),(2,2);
update t2 set b=1 where a=3;
delete from t2 where a=4;
commit;
drop table t2;
select inserts from cluster_replication.binlog_index where epoch > @max_epoch and inserts > 5;
inserts
10
select deletes from cluster_replication.binlog_index where epoch > @max_epoch and deletes > 5;
deletes
10
select inserts,updates,deletes from
cluster_replication.binlog_index where epoch > @max_epoch and updates > 0;
inserts updates deletes
2 1 1
select schemaops from
cluster_replication.binlog_index where epoch > @max_epoch and schemaops > 0;
schemaops
1
1
1
flush logs;
purge master logs before now();
select count(*) from cluster_replication.binlog_index;
count(*)
0
create table t1 (a int primary key, b int) engine=ndb;
create database mysqltest;
use mysqltest;
create table t1 (c int, d int primary key) engine=ndb;
use test;
insert into mysqltest.t1 values (2,1),(2,2);
select @max_epoch:=max(epoch)-1 from cluster_replication.binlog_index;
@max_epoch:=max(epoch)-1
#
drop table t1;
drop database mysqltest;
select inserts,updates,deletes from
cluster_replication.binlog_index where epoch > @max_epoch and inserts > 0;
inserts updates deletes
2 0 0
select schemaops from
cluster_replication.binlog_index where epoch > @max_epoch and schemaops > 0;
schemaops
1
1
drop table if exists t1,t2;
drop table if exists t1,t2;
SHOW TABLES;
Tables_in_test
CREATE TABLE t2 (a INT PRIMARY KEY, b int) ENGINE = NDB;
show tables;
Tables_in_test
t2
INSERT INTO t2 VALUES (1,1),(2,2);
select * from t2 order by a;
a b
1 1
2 2
SELECT @the_epoch:=epoch,inserts,updates,deletes,schemaops FROM
cluster_replication.binlog_index ORDER BY epoch DESC LIMIT 1;
@the_epoch:=epoch inserts updates deletes schemaops
<the_epoch> 2 0 0 0
SELECT * FROM t2 ORDER BY a;
a b
1 1
2 2
SELECT inserts,updates,deletes,schemaops FROM
cluster_replication.binlog_index WHERE epoch=<the_epoch>;
inserts updates deletes schemaops
2 0 0 0
DROP TABLE t2;
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE = NDB;
INSERT INTO t1 VALUES (1),(2);
SELECT @the_epoch2:=epoch,inserts,updates,deletes,schemaops FROM
cluster_replication.binlog_index ORDER BY epoch DESC LIMIT 1;
@the_epoch2:=epoch inserts updates deletes schemaops
<the_epoch2> 2 0 0 0
SELECT inserts,updates,deletes,schemaops FROM
cluster_replication.binlog_index WHERE epoch > <the_epoch> AND epoch < <the_epoch2>;
inserts updates deletes schemaops
0 0 0 1
drop table t1;
SHOW TABLES;
Tables_in_test
SELECT inserts,updates,deletes,schemaops FROM
cluster_replication.binlog_index WHERE epoch > <the_epoch> AND epoch < <the_epoch2>;
inserts updates deletes schemaops
0 0 0 1
drop table if exists t1, t2, t3, t4;
drop table if exists t1, t2, t3, t4;
flush status;
create table t1 (a int) engine=ndbcluster;
create table t2 (a int) engine=ndbcluster;
......
drop table if exists t1, t2, t3, t4;
drop table if exists t1, t2, t3, t4;
flush status;
create table t1 (a int) engine=ndbcluster;
create table t2 (a int) engine=ndbcluster;
insert into t1 value (2);
insert into t2 value (3);
select * from t1;
a
2
select * from t2;
a
3
show status like 'handler_discover%';
Variable_name Value
Handler_discover 0
select * from t1;
a
2
drop table t1;
create table t1 (a int) engine=ndbcluster;
insert into t1 value (2);
select * from t1;
a
2
show status like 'handler_discover%';
Variable_name Value
Handler_discover 0
drop table t1;
create table t1 (a int) engine=ndbcluster;
insert into t1 value (2);
select * from t1;
a
2
flush status;
select * from t1;
a
2
update t1 set a=3 where a=2;
show status like 'handler_discover%';
Variable_name Value
Handler_discover 0
create table t3 (a int not null primary key, b varchar(22),
c int, last_col text) engine=ndb;
insert into t3 values(1, 'Hi!', 89, 'Longtext column');
create table t4 (pk int primary key, b int) engine=ndb;
select * from t1;
a
3
select * from t3;
a b c last_col
1 Hi! 89 Longtext column
show status like 'handler_discover%';
Variable_name Value
Handler_discover 1
show tables like 't4';
Tables_in_test (t4)
t4
show status like 'handler_discover%';
Variable_name Value
Handler_discover 2
show tables;
Tables_in_test
t1
t2
t3
t4
drop table t1, t2, t3, t4;
drop table t1, t3, t4;
Variable_name Value
have_ndbcluster NO
......@@ -259,6 +259,7 @@ prepare stmt4 from ' show databases ';
execute stmt4;
Database
information_schema
cluster_replication
mysql
test
prepare stmt4 from ' show tables from test like ''t2%'' ';
......
......@@ -33,6 +33,7 @@ create database mysqltest;
show databases;
Database
information_schema
cluster_replication
mysql
mysqltest
mysqltest2
......@@ -50,6 +51,7 @@ set sql_log_bin = 1;
show databases;
Database
information_schema
cluster_replication
mysql
test
create database mysqltest2;
......@@ -69,6 +71,7 @@ load data from master;
show databases;
Database
information_schema
cluster_replication
mysql
mysqltest
mysqltest2
......
......@@ -23,6 +23,7 @@ ALTER DATABASE mysqltest_bob CHARACTER SET latin1;
SHOW DATABASES;
Database
information_schema
cluster_replication
mysql
mysqltest_bob
mysqltest_prometheus
......@@ -31,6 +32,7 @@ test
SHOW DATABASES;
Database
information_schema
cluster_replication
mysql
mysqltest_prometheus
mysqltest_sisyfos
......@@ -45,6 +47,7 @@ CREATE TABLE t2 (a INT);
SHOW DATABASES;
Database
information_schema
cluster_replication
mysql
mysqltest_bob
mysqltest_prometheus
......@@ -53,6 +56,7 @@ test
SHOW DATABASES;
Database
information_schema
cluster_replication
mysql
mysqltest_prometheus
mysqltest_sisyfos
......
......@@ -21,6 +21,7 @@ COUNT(*)
SHOW DATABASES;
Database
information_schema
cluster_replication
mysql
mysqltest
test
......
This diff is collapsed.
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
CREATE TABLE `t1` ( `nid` int(11) NOT NULL default '0',
`nom` char(4) default NULL,
`prenom` char(4) default NULL,
PRIMARY KEY (`nid`))
ENGINE=ndbcluster DEFAULT CHARSET=latin1;
INSERT INTO t1 VALUES(1,"XYZ1","ABC1");
select * from t1 order by nid;
nid nom prenom
1 XYZ1 ABC1
select * from t1 order by nid;
nid nom prenom
1 XYZ1 ABC1
delete from t1;
INSERT INTO t1 VALUES(1,"XYZ2","ABC2");
select * from t1 order by nid;
nid nom prenom
1 XYZ2 ABC2
select * from t1 order by nid;
nid nom prenom
1 XYZ2 ABC2
DROP table t1;
CREATE TABLE `t1` ( `nid` int(11) NOT NULL default '0',
`nom` char(4) default NULL,
`prenom` char(4) default NULL)
ENGINE=ndbcluster DEFAULT CHARSET=latin1;
INSERT INTO t1 VALUES(1,"XYZ1","ABC1"),(2,"AAA","BBB"),(3,"CCC","DDD");
select * from t1 order by nid;
nid nom prenom
1 XYZ1 ABC1
2 AAA BBB
3 CCC DDD
select * from t1 order by nid;
nid nom prenom
1 XYZ1 ABC1
2 AAA BBB
3 CCC DDD
delete from t1 where nid = 2;
INSERT INTO t1 VALUES(4,"EEE","FFF");
select * from t1 order by nid;
nid nom prenom
1 XYZ1 ABC1
3 CCC DDD
4 EEE FFF
select * from t1 order by nid;
nid nom prenom
1 XYZ1 ABC1
3 CCC DDD
4 EEE FFF
UPDATE t1 set nid=nid+1;
UPDATE t1 set nom="CCP" where nid = 4;
select * from t1 order by nid;
nid nom prenom
2 XYZ1 ABC1
4 CCP DDD
5 EEE FFF
select * from t1 order by nid;
nid nom prenom
2 XYZ1 ABC1
4 CCP DDD
5 EEE FFF
DROP table t1;
CREATE TABLE `t1` ( `nid` int(11) NOT NULL default '0',
`nom` char(4) default NULL,
`prenom` char(4) default NULL,
PRIMARY KEY USING HASH (`nid`))
ENGINE=ndbcluster DEFAULT CHARSET=latin1;
INSERT INTO t1 VALUES(1,"XYZ1","ABC1");
BEGIN;
UPDATE t1 SET `nom`="LOCK" WHERE `nid`=1;
set GLOBAL slave_transaction_retries=1;
UPDATE t1 SET `nom`="DEAD" WHERE `nid`=1;
SHOW SLAVE STATUS;
Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master
<Slave_IO_State> 127.0.0.1 root MASTER_PORT 1 master-bin.000001 <Read_Master_Log_Pos> <Relay_Log_File> <Relay_Log_Pos> master-bin.000001 Yes No <Replicate_Ignore_Table> 146 Error in Write_rows event: error during transaction execution on table test.t1 0 <Exec_Master_Log_Pos> <Relay_Log_Space> None 0 No <Seconds_Behind_Master>
set GLOBAL slave_transaction_retries=10;
START SLAVE;
select * from t1 order by nid;
nid nom prenom
1 LOCK ABC1
COMMIT;
select * from t1 order by nid;
nid nom prenom
1 DEAD ABC1
DROP TABLE t1;
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
DROP TABLE IF EXISTS t1;
CREATE LOGFILE GROUP lg1
ADD UNDOFILE 'undofile.dat'
INITIAL_SIZE 16M
UNDO_BUFFER_SIZE = 1M
ENGINE=NDB;
alter logfile group lg1
add undofile 'undofile02.dat'
initial_size 4M engine=ndb;
CREATE TABLESPACE ts1
ADD DATAFILE 'datafile.dat'
USE LOGFILE GROUP lg1
INITIAL_SIZE 12M
ENGINE NDB;
alter tablespace ts1
add datafile 'datafile02.dat'
initial_size 4M engine=ndb;
CREATE TABLE t1
(pk1 int not null primary key, b int not null, c int not null)
tablespace ts1 storage disk
engine ndb;
insert into t1 values (1,2,3);
select * from t1 order by pk1;
pk1 b c
1 2 3
select * from t1 order by pk1;
pk1 b c
1 2 3
show binlog events;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 4 Format_desc 1 102 Server ver: VERSION, Binlog ver: 4
master-bin.000001 102 Query 1 188 use `test`; DROP TABLE IF EXISTS t1
master-bin.000001 188 Query 1 353 CREATE LOGFILE GROUP lg1
ADD UNDOFILE 'undofile.dat'
INITIAL_SIZE 16M
UNDO_BUFFER_SIZE = 1M
ENGINE=NDB
master-bin.000001 353 Query 1 496 alter logfile group lg1
add undofile 'undofile02.dat'
initial_size 4M engine=ndb
master-bin.000001 496 Query 1 658 CREATE TABLESPACE ts1
ADD DATAFILE 'datafile.dat'
USE LOGFILE GROUP lg1
INITIAL_SIZE 12M
ENGINE NDB
master-bin.000001 658 Query 1 798 alter tablespace ts1
add datafile 'datafile02.dat'
initial_size 4M engine=ndb
master-bin.000001 798 Query 1 978 use `test`; CREATE TABLE t1
(pk1 int not null primary key, b int not null, c int not null)
tablespace ts1 storage disk
engine ndb
master-bin.000001 978 Query 1 1042 BEGIN
master-bin.000001 1042 Table_map 1 65 cluster_replication.apply_status
master-bin.000001 1107 Write_rows 1 107
master-bin.000001 1149 Table_map 1 148 test.t1
master-bin.000001 1190 Write_rows 1 190
master-bin.000001 1232 Query 1 1297 COMMIT
drop table t1;
alter tablespace ts1
drop datafile 'datafile.dat'
engine=ndb;
alter tablespace ts1
drop datafile 'datafile02.dat'
engine=ndb;
DROP TABLESPACE ts1 ENGINE=NDB;
DROP LOGFILE GROUP lg1 ENGINE=NDB;
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
CREATE TABLE t1 (c1 CHAR(15), c2 CHAR(15), c3 INT, PRIMARY KEY (c3)) ENGINE = NDB ;
INSERT INTO t1 VALUES ("row1","will go away",1);
SELECT * FROM t1 ORDER BY c3;
c1 c2 c3
row1 will go away 1
SELECT @the_epoch:=MAX(epoch) FROM cluster_replication.apply_status;
@the_epoch:=MAX(epoch)
<the_epoch>
SELECT * FROM t1 ORDER BY c3;
c1 c2 c3
row1 will go away 1
SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1)
FROM cluster_replication.binlog_index WHERE epoch = <the_epoch> ;
@the_pos:=Position @the_file:=SUBSTRING_INDEX(FILE, '/', -1)
<the_pos> master-bin.000001
INSERT INTO t1 VALUES ("row2","will go away",2),("row3","will change",3),("row4","D",4);
DELETE FROM t1 WHERE c3 = 1;
UPDATE t1 SET c2="should go away" WHERE c3 = 2;
UPDATE t1 SET c2="C" WHERE c3 = 3;
DELETE FROM t1 WHERE c3 = 2;
SELECT * FROM t1 ORDER BY c3;
c1 c2 c3
row3 C 3
row4 D 4
SELECT * FROM t1 ORDER BY c3;
c1 c2 c3
row3 C 3
row4 D 4
SHOW SLAVE STATUS;
Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master
<Slave_IO_State> 127.0.0.1 root MASTER_PORT 1 master-bin.000001 <Read_Master_Log_Pos> <Relay_Log_File> <Relay_Log_Pos> master-bin.000001 Yes Yes <Replicate_Ignore_Table> 0 0 <Exec_Master_Log_Pos> <Relay_Log_Space> None 0 No <Seconds_Behind_Master>
STOP SLAVE;
CHANGE MASTER TO
master_log_file = 'master-bin.000001',
master_log_pos = <the_pos> ;
SHOW SLAVE STATUS;
Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master
<Slave_IO_State> 127.0.0.1 root MASTER_PORT 1 master-bin.000001 <Read_Master_Log_Pos> <Relay_Log_File> <Relay_Log_Pos> master-bin.000001 No No <Replicate_Ignore_Table> 0 0 <Exec_Master_Log_Pos> <Relay_Log_Space> None 0 No <Seconds_Behind_Master>
START SLAVE;
SELECT * FROM t1 ORDER BY c3;
c1 c2 c3
row3 C 3
row4 D 4
SELECT * FROM t1 ORDER BY c3;
c1 c2 c3
row3 C 3
row4 D 4
STOP SLAVE;
DROP TABLE t1;
RESET master;
DROP TABLE t1;
RESET slave;
START SLAVE;
CREATE TABLE t1 (c1 CHAR(15) NOT NULL, c2 CHAR(15) NOT NULL, c3 INT NOT NULL, PRIMARY KEY (c3)) ENGINE = NDB ;
INSERT INTO t1 VALUES ("row1","remove on slave",1);
DELETE FROM t1;
BEGIN;
UPDATE t1 SET c2="does not exist" WHERE c3=1;
INSERT INTO t1 VALUES ("row2","new on slave",2);
COMMIT;
SELECT * FROM t1;
c1 c2 c3
row2 new on slave 2
SHOW SLAVE STATUS;
Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master
<Slave_IO_State> 127.0.0.1 root MASTER_PORT 1 master-bin.000001 <Read_Master_Log_Pos> <Relay_Log_File> <Relay_Log_Pos> master-bin.000001 Yes Yes <Replicate_Ignore_Table> 0 0 <Exec_Master_Log_Pos> <Relay_Log_Space> None 0 No <Seconds_Behind_Master>
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
DROP DATABASE IF EXISTS TEST_DB;
CREATE DATABASE TEST_DB;
USE TEST_DB;
CREATE TABLE SUBSCRIBER
( NUMBER CHAR(12) BINARY NOT NULL,
NAME CHAR(32) BINARY NOT NULL,
GROUP_ID INT UNSIGNED NOT NULL,
LOCATION INT UNSIGNED NOT NULL,
SESSIONS INT UNSIGNED NOT NULL,
CHANGED_BY CHAR(32) BINARY NOT NULL,
CHANGED_TIME CHAR(32) BINARY NOT NULL,
PRIMARY KEY USING HASH (NUMBER))
ENGINE = NDB;
CREATE TABLE GROUP2
( GROUP_ID INT UNSIGNED NOT NULL,
GROUP_NAME CHAR(32) BINARY NOT NULL,
ALLOW_READ CHAR(1) BINARY NOT NULL,
ALLOW_INSERT INT UNSIGNED NOT NULL,
ALLOW_DELETE INT UNSIGNED NOT NULL,
PRIMARY KEY USING HASH (GROUP_ID))
ENGINE = NDB;
CREATE TABLE SESSION
( NUMBER CHAR(12) BINARY NOT NULL,
SERVER_ID INT UNSIGNED NOT NULL,
DATA BINARY(2000) NOT NULL,
PRIMARY KEY USING HASH (NUMBER,SERVER_ID))
ENGINE = NDB;
CREATE TABLE SERVER
( SUFFIX CHAR(2) BINARY NOT NULL,
SERVER_ID INT UNSIGNED NOT NULL,
NAME CHAR(32) BINARY NOT NULL,
NO_OF_READ INT UNSIGNED NOT NULL,
NO_OF_INSERT INT UNSIGNED NOT NULL,
NO_OF_DELETE INT UNSIGNED NOT NULL,
PRIMARY KEY USING HASH (SUFFIX, SERVER_ID))
ENGINE = NDB;
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
CREATE TABLE t1 (c1 CHAR(15), c2 CHAR(15), c3 INT, PRIMARY KEY (c3)) ENGINE = NDB ;
reset master;
SHOW TABLES;
Tables_in_test
t1
INSERT INTO t1 VALUES ("row1","will go away",1);
SELECT * FROM t1 ORDER BY c3;
c1 c2 c3
row1 will go away 1
SELECT * FROM t1 ORDER BY c3;
c1 c2 c3
row1 will go away 1
SELECT @the_epoch:=MAX(epoch) FROM cluster_replication.apply_status;
@the_epoch:=MAX(epoch)
<the_epoch>
SELECT * FROM t1 ORDER BY c3;
c1 c2 c3
row1 will go away 1
stop slave;
SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1)
FROM cluster_replication.binlog_index WHERE epoch = <the_epoch> ;
@the_pos:=Position @the_file:=SUBSTRING_INDEX(FILE, '/', -1)
102 master-bin1.000001
CHANGE MASTER TO
master_port=<MASTER_PORT1>,
master_log_file = 'master-bin1.000001',
master_log_pos = 102 ;
start slave;
INSERT INTO t1 VALUES ("row2","will go away",2),("row3","will change",3),("row4","D",4);
DELETE FROM t1 WHERE c3 = 1;
UPDATE t1 SET c2="should go away" WHERE c3 = 2;
UPDATE t1 SET c2="C" WHERE c3 = 3;
DELETE FROM t1 WHERE c3 = 2;
SELECT * FROM t1 ORDER BY c3;
c1 c2 c3
row3 C 3
row4 D 4
INSERT INTO t1 VALUES ("row5","E",5);
SELECT * FROM t1 ORDER BY c3;
c1 c2 c3
row3 C 3
row4 D 4
row5 E 5
SELECT * FROM t1 ORDER BY c3;
c1 c2 c3
row3 C 3
row4 D 4
row5 E 5
STOP SLAVE;
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
STOP SLAVE;
CREATE DATABASE ndbsynctest;
USE ndbsynctest;
CREATE DATABASE ndbsynctest;
USE ndbsynctest;
CREATE TABLE t1 (c1 BIT(1) NOT NULL, c2 BIT(1) NOT NULL, c3 CHAR(15), PRIMARY KEY(c3)) ENGINE = NDB ;
INSERT INTO t1 VALUES (1,1,"row1"),(0,1,"row2"),(1,0,"row3"),(0,0,"row4");
CREATE TABLE t2 (c1 CHAR(15), c2 BIT(1) NOT NULL, c3 BIT(1) NOT NULL, PRIMARY KEY(c1)) ENGINE = NDB ;
INSERT INTO t2 VALUES ("ABC",1,1),("BCDEF",0,1),("CD",1,0),("DEFGHIJKL",0,0);
SELECT hex(c1),hex(c2),c3 FROM t1 ORDER BY c3;
hex(c1) hex(c2) c3
1 1 row1
0 1 row2
1 0 row3
0 0 row4
SELECT hex(c2),hex(c3),c1 FROM t2 ORDER BY c1;
hex(c2) hex(c3) c1
1 1 ABC
0 1 BCDEF
1 0 CD
0 0 DEFGHIJKL
CREATE TABLE IF NOT EXISTS cluster_replication.backup_info (id INT, backup_id INT);
DELETE FROM cluster_replication.backup_info;
LOAD DATA INFILE '../../var/tmp.dat' INTO TABLE cluster_replication.backup_info FIELDS TERMINATED BY ',';
SELECT @the_backup_id:=backup_id FROM cluster_replication.backup_info;
@the_backup_id:=backup_id
<the_backup_id>
UPDATE t1 SET c2=0 WHERE c3="row2";
SELECT hex(c1),hex(c2),c3 FROM t1 ORDER BY c3;
hex(c1) hex(c2) c3
1 1 row1
0 0 row2
1 0 row3
0 0 row4
SHOW TABLES;
Tables_in_ndbsynctest
DROP DATABASE ndbsynctest;
CREATE DATABASE ndbsynctest;
USE ndbsynctest;
SHOW TABLES;
Tables_in_ndbsynctest
t1
t2
SELECT hex(c1),hex(c2),c3 FROM t1 ORDER BY c3;
hex(c1) hex(c2) c3
1 1 row1
0 1 row2
1 0 row3
0 0 row4
SELECT hex(c2),hex(c3),c1 FROM t2 ORDER BY c1;
hex(c2) hex(c3) c1
1 1 ABC
0 1 BCDEF
1 0 CD
0 0 DEFGHIJKL
SELECT @the_epoch:=MAX(epoch) FROM cluster_replication.apply_status;
@the_epoch:=MAX(epoch)
<the_epoch>
SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1)
FROM cluster_replication.binlog_index WHERE epoch > <the_epoch> ORDER BY epoch ASC LIMIT 1;
@the_pos:=Position @the_file:=SUBSTRING_INDEX(FILE, '/', -1)
<the_pos> master-bin.000001
CHANGE MASTER TO
master_log_file = 'master-bin.000001',
master_log_pos = <the_pos> ;
START SLAVE;
SHOW SLAVE STATUS;
Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master
<Slave_IO_State> 127.0.0.1 root MASTER_PORT 1 master-bin.000001 <Read_Master_Log_Pos> <Relay_Log_File> <Relay_Log_Pos> master-bin.000001 Yes Yes <Replicate_Ignore_Table> 0 0 <Exec_Master_Log_Pos> <Relay_Log_Space> None 0 No <Seconds_Behind_Master>
SELECT hex(c1),hex(c2),c3 FROM t1 ORDER BY c3;
hex(c1) hex(c2) c3
1 1 row1
0 0 row2
1 0 row3
0 0 row4
SELECT hex(c2),hex(c3),c1 FROM t2 ORDER BY c1;
hex(c2) hex(c3) c1
1 1 ABC
0 1 BCDEF
1 0 CD
0 0 DEFGHIJKL
DROP DATABASE ndbsynctest;
STOP SLAVE;
reset master;
select * from cluster_replication.binlog_index;
Position File epoch inserts updates deletes schemaops
reset slave;
select * from cluster_replication.apply_status;
server_id epoch
......@@ -9,6 +9,7 @@ CREATE DATABASE test_ignore;
SHOW DATABASES;
Database
information_schema
cluster_replication
mysql
test
test_ignore
......@@ -33,6 +34,7 @@ master-bin.000001 235 Write_rows 1 282
SHOW DATABASES;
Database
information_schema
cluster_replication
mysql
test
USE test;
......
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
CREATE TABLE t1 (C1 CHAR(1), C2 CHAR(1), INDEX (C1)) ENGINE = 'NDB' ;
SELECT * FROM t1;
C1 C2
SELECT * FROM t1;
C1 C2
INSERT INTO t1 VALUES ('A','B'), ('X','Y'), ('X','X');
INSERT INTO t1 VALUES ('A','C'), ('X','Z'), ('A','A');
SELECT * FROM t1 ORDER BY C1,C2;
C1 C2
A A
A B
A C
X X
X Y
X Z
SELECT * FROM t1 ORDER BY C1,C2;
C1 C2
A A
A B
A C
X X
X Y
X Z
DELETE FROM t1 WHERE C1 = C2;
SELECT * FROM t1 ORDER BY C1,C2;
C1 C2
A B
A C
X Y
X Z
SELECT * FROM t1 ORDER BY C1,C2;
C1 C2
A B
A C
X Y
X Z
UPDATE t1 SET C2 = 'I' WHERE C1 = 'A' AND C2 = 'C';
SELECT * FROM t1 ORDER BY C1,C2;
C1 C2
A B
A I
X Y
X Z
SELECT * FROM t1 ORDER BY C1,C2;
C1 C2
A B
A I
X Y
X Z
UPDATE t1 SET c2 = 'Q' WHERE c1 = 'A' AND c2 = 'N';
SELECT * FROM t1 ORDER BY c1,c2;
C1 C2
A B
A I
X Y
X Z
SELECT * FROM t1 ORDER BY c1,c2;
C1 C2
A B
A I
X Y
X Z
CREATE TABLE t2 (c1 INT, c12 char(1), c2 INT, PRIMARY KEY (c1)) ENGINE = 'NDB' ;
INSERT INTO t2
VALUES (1,'A',2), (2,'A',4), (3,'A',9), (4,'A',15), (5,'A',25),
(6,'A',35), (7,'A',50), (8,'A',64), (9,'A',81);
SELECT * FROM t2 ORDER BY c1,c2;
c1 c12 c2
1 A 2
2 A 4
3 A 9
4 A 15
5 A 25
6 A 35
7 A 50
8 A 64
9 A 81
SELECT * FROM t2 WHERE c2 = c1 * c1 ORDER BY c1,c2;
c1 c12 c2
2 A 4
3 A 9
5 A 25
8 A 64
9 A 81
SELECT * FROM t2 ORDER BY c1,c2;
c1 c12 c2
1 A 2
2 A 4
3 A 9
4 A 15
5 A 25
6 A 35
7 A 50
8 A 64
9 A 81
SELECT * FROM t2 WHERE c2 = c1 * c1 ORDER BY c1,c2;
c1 c12 c2
2 A 4
3 A 9
5 A 25
8 A 64
9 A 81
UPDATE t2 SET c2 = c1*c1 WHERE c2 != c1*c1;
SELECT * FROM t2 WHERE c2 = c1 * c1 ORDER BY c1,c2;
c1 c12 c2
1 A 1
2 A 4
3 A 9
4 A 16
5 A 25
6 A 36
7 A 49
8 A 64
9 A 81
SELECT * FROM t2 WHERE c2 = c1 * c1 ORDER BY c1,c2;
c1 c12 c2
1 A 1
2 A 4
3 A 9
4 A 16
5 A 25
6 A 36
7 A 49
8 A 64
9 A 81
UPDATE t2 SET c12 = 'Q' WHERE c1 = 1 AND c2 = 999;
SELECT * FROM t2 ORDER BY c1,c2;
c1 c12 c2
1 A 1
2 A 4
3 A 9
4 A 16
5 A 25
6 A 36
7 A 49
8 A 64
9 A 81
SELECT * FROM t2 ORDER BY c1,c2;
c1 c12 c2
1 A 1
2 A 4
3 A 9
4 A 16
5 A 25
6 A 36
7 A 49
8 A 64
9 A 81
DELETE FROM t2 WHERE c1 % 4 = 0;
SELECT * FROM t2 ORDER BY c1,c2;
c1 c12 c2
1 A 1
2 A 4
3 A 9
5 A 25
6 A 36
7 A 49
9 A 81
SELECT * FROM t2 ORDER BY c1,c2;
c1 c12 c2
1 A 1
2 A 4
3 A 9
5 A 25
6 A 36
7 A 49
9 A 81
UPDATE t2 SET c12='X';
CREATE TABLE t3 (C1 CHAR(1), C2 CHAR(1), pk1 INT, C3 CHAR(1), pk2 INT, PRIMARY KEY (pk1,pk2)) ENGINE = 'NDB' ;
INSERT INTO t3 VALUES ('A','B',1,'B',1), ('X','Y',2,'B',1), ('X','X',3,'B',1);
INSERT INTO t3 VALUES ('A','C',1,'B',2), ('X','Z',2,'B',2), ('A','A',3,'B',2);
SELECT * FROM t3 ORDER BY C1,C2;
C1 C2 pk1 C3 pk2
A A 3 B 2
A B 1 B 1
A C 1 B 2
X X 3 B 1
X Y 2 B 1
X Z 2 B 2
SELECT * FROM t3 ORDER BY C1,C2;
C1 C2 pk1 C3 pk2
A A 3 B 2
A B 1 B 1
A C 1 B 2
X X 3 B 1
X Y 2 B 1
X Z 2 B 2
DELETE FROM t3 WHERE C1 = C2;
SELECT * FROM t3 ORDER BY C1,C2;
C1 C2 pk1 C3 pk2
A B 1 B 1
A C 1 B 2
X Y 2 B 1
X Z 2 B 2
SELECT * FROM t3 ORDER BY C1,C2;
C1 C2 pk1 C3 pk2
A B 1 B 1
A C 1 B 2
X Y 2 B 1
X Z 2 B 2
UPDATE t3 SET C2 = 'I' WHERE C1 = 'A' AND C2 = 'C';
SELECT * FROM t3 ORDER BY C1,C2;
C1 C2 pk1 C3 pk2
A B 1 B 1
A I 1 B 2
X Y 2 B 1
X Z 2 B 2
SELECT * FROM t3 ORDER BY C1,C2;
C1 C2 pk1 C3 pk2
A B 1 B 1
A I 1 B 2
X Y 2 B 1
X Z 2 B 2
CREATE TABLE t6 (C1 CHAR(1), C2 CHAR(1), C3 INT) ENGINE = 'NDB' ;
INSERT INTO t6 VALUES ('A','B',1), ('X','Y',2), ('X','X',3);
INSERT INTO t6 VALUES ('A','C',4), ('X','Z',5), ('A','A',6);
SELECT * FROM t6 ORDER BY C3;
C1 C2 C3
A B 1
X Y 2
X X 3
A C 4
X Z 5
A A 6
SELECT * FROM t6 ORDER BY C3;
C1 C2 C3
A B 1
X Y 2
X X 3
A C 4
X Z 5
A A 6
DELETE FROM t6 WHERE C1 = C2;
SELECT * FROM t6 ORDER BY C3;
C1 C2 C3
A B 1
X Y 2
A C 4
X Z 5
SELECT * FROM t6 ORDER BY C3;
C1 C2 C3
A B 1
X Y 2
A C 4
X Z 5
UPDATE t6 SET C2 = 'I' WHERE C1 = 'A' AND C2 = 'C';
SELECT * FROM t6 ORDER BY C3;
C1 C2 C3
A B 1
X Y 2
A I 4
X Z 5
SELECT * FROM t6 ORDER BY C3;
C1 C2 C3
A B 1
X Y 2
A I 4
X Z 5
CREATE TABLE t5 (C1 CHAR(1), C2 CHAR(1), C3 INT PRIMARY KEY) ENGINE = 'NDB' ;
INSERT INTO t5 VALUES ('A','B',1), ('X','Y',2), ('X','X',3);
INSERT INTO t5 VALUES ('A','C',4), ('X','Z',5), ('A','A',6);
UPDATE t5,t2,t3 SET t5.C2='Q', t2.c12='R', t3.C3 ='S' WHERE t5.C1 = t2.c12 AND t5.C1 = t3.C1;
SELECT * FROM t5,t2,t3 WHERE t5.C2='Q' AND t2.c12='R' AND t3.C3 ='S' ORDER BY t5.C3,t2.c1,t3.pk1,t3.pk2;
C1 C2 C3 c1 c12 c2 C1 C2 pk1 C3 pk2
X Q 2 1 R 1 X Y 2 S 1
X Q 2 1 R 1 X Z 2 S 2
X Q 2 2 R 4 X Y 2 S 1
X Q 2 2 R 4 X Z 2 S 2
X Q 2 3 R 9 X Y 2 S 1
X Q 2 3 R 9 X Z 2 S 2
X Q 2 5 R 25 X Y 2 S 1
X Q 2 5 R 25 X Z 2 S 2
X Q 2 6 R 36 X Y 2 S 1
X Q 2 6 R 36 X Z 2 S 2
X Q 2 7 R 49 X Y 2 S 1
X Q 2 7 R 49 X Z 2 S 2
X Q 2 9 R 81 X Y 2 S 1
X Q 2 9 R 81 X Z 2 S 2
X Q 3 1 R 1 X Y 2 S 1
X Q 3 1 R 1 X Z 2 S 2
X Q 3 2 R 4 X Y 2 S 1
X Q 3 2 R 4 X Z 2 S 2
X Q 3 3 R 9 X Y 2 S 1
X Q 3 3 R 9 X Z 2 S 2
X Q 3 5 R 25 X Y 2 S 1
X Q 3 5 R 25 X Z 2 S 2
X Q 3 6 R 36 X Y 2 S 1
X Q 3 6 R 36 X Z 2 S 2
X Q 3 7 R 49 X Y 2 S 1
X Q 3 7 R 49 X Z 2 S 2
X Q 3 9 R 81 X Y 2 S 1
X Q 3 9 R 81 X Z 2 S 2
X Q 5 1 R 1 X Y 2 S 1
X Q 5 1 R 1 X Z 2 S 2
X Q 5 2 R 4 X Y 2 S 1
X Q 5 2 R 4 X Z 2 S 2
X Q 5 3 R 9 X Y 2 S 1
X Q 5 3 R 9 X Z 2 S 2
X Q 5 5 R 25 X Y 2 S 1
X Q 5 5 R 25 X Z 2 S 2
X Q 5 6 R 36 X Y 2 S 1
X Q 5 6 R 36 X Z 2 S 2
X Q 5 7 R 49 X Y 2 S 1
X Q 5 7 R 49 X Z 2 S 2
X Q 5 9 R 81 X Y 2 S 1
X Q 5 9 R 81 X Z 2 S 2
SELECT * FROM t5,t2,t3 WHERE t5.C2='Q' AND t2.c12='R' AND t3.C3 ='S' ORDER BY t5.C3,t2.c1,t3.pk1,t3.pk2;
C1 C2 C3 c1 c12 c2 C1 C2 pk1 C3 pk2
X Q 2 1 R 1 X Y 2 S 1
X Q 2 1 R 1 X Z 2 S 2
X Q 2 2 R 4 X Y 2 S 1
X Q 2 2 R 4 X Z 2 S 2
X Q 2 3 R 9 X Y 2 S 1
X Q 2 3 R 9 X Z 2 S 2
X Q 2 5 R 25 X Y 2 S 1
X Q 2 5 R 25 X Z 2 S 2
X Q 2 6 R 36 X Y 2 S 1
X Q 2 6 R 36 X Z 2 S 2
X Q 2 7 R 49 X Y 2 S 1
X Q 2 7 R 49 X Z 2 S 2
X Q 2 9 R 81 X Y 2 S 1
X Q 2 9 R 81 X Z 2 S 2
X Q 3 1 R 1 X Y 2 S 1
X Q 3 1 R 1 X Z 2 S 2
X Q 3 2 R 4 X Y 2 S 1
X Q 3 2 R 4 X Z 2 S 2
X Q 3 3 R 9 X Y 2 S 1
X Q 3 3 R 9 X Z 2 S 2
X Q 3 5 R 25 X Y 2 S 1
X Q 3 5 R 25 X Z 2 S 2
X Q 3 6 R 36 X Y 2 S 1
X Q 3 6 R 36 X Z 2 S 2
X Q 3 7 R 49 X Y 2 S 1
X Q 3 7 R 49 X Z 2 S 2
X Q 3 9 R 81 X Y 2 S 1
X Q 3 9 R 81 X Z 2 S 2
X Q 5 1 R 1 X Y 2 S 1
X Q 5 1 R 1 X Z 2 S 2
X Q 5 2 R 4 X Y 2 S 1
X Q 5 2 R 4 X Z 2 S 2
X Q 5 3 R 9 X Y 2 S 1
X Q 5 3 R 9 X Z 2 S 2
X Q 5 5 R 25 X Y 2 S 1
X Q 5 5 R 25 X Z 2 S 2
X Q 5 6 R 36 X Y 2 S 1
X Q 5 6 R 36 X Z 2 S 2
X Q 5 7 R 49 X Y 2 S 1
X Q 5 7 R 49 X Z 2 S 2
X Q 5 9 R 81 X Y 2 S 1
X Q 5 9 R 81 X Z 2 S 2
CREATE TABLE t4 (C1 CHAR(1) PRIMARY KEY, B1 BIT(1), B2 BIT(1) NOT NULL DEFAULT 0, C2 CHAR(1) NOT NULL DEFAULT 'A') ENGINE = 'NDB' ;
INSERT INTO t4 SET C1 = 1;
SELECT C1,HEX(B1),HEX(B2) FROM t4 ORDER BY C1;
C1 HEX(B1) HEX(B2)
1 NULL 0
SELECT C1,HEX(B1),HEX(B2) FROM t4 ORDER BY C1;
C1 HEX(B1) HEX(B2)
1 NULL 0
CREATE TABLE t7 (C1 INT PRIMARY KEY, C2 INT) ENGINE = 'NDB' ;
--- on slave: original values ---
INSERT INTO t7 VALUES (1,3), (2,6), (3,9);
SELECT * FROM t7 ORDER BY C1;
C1 C2
1 3
2 6
3 9
--- on master: new values inserted ---
INSERT INTO t7 VALUES (1,2), (2,4), (3,6);
SELECT * FROM t7 ORDER BY C1;
C1 C2
1 2
2 4
3 6
--- on slave: old values should be overwritten by replicated values ---
SELECT * FROM t7 ORDER BY C1;
C1 C2
1 2
2 4
3 6
--- on master ---
DROP TABLE t7;
CREATE TABLE t7 (a INT PRIMARY KEY, b INT UNIQUE, c INT UNIQUE) ENGINE = 'NDB' ;
INSERT INTO t7 VALUES (99,99,99);
INSERT INTO t7 VALUES (99,22,33);
ERROR 23000: Duplicate entry '99' for key 1
INSERT INTO t7 VALUES (11,99,33);
ERROR 23000: Duplicate entry '11' for key 1
INSERT INTO t7 VALUES (11,22,99);
ERROR 23000: Duplicate entry '11' for key 1
SELECT * FROM t7 ORDER BY a;
a b c
99 99 99
--- on slave ---
SELECT * FROM t7 ORDER BY a;
a b c
99 99 99
INSERT INTO t7 VALUES (1,2,3), (2,4,6), (3,6,9);
SELECT * FROM t7 ORDER BY a;
a b c
1 2 3
2 4 6
3 6 9
99 99 99
--- on master ---
INSERT INTO t7 VALUES (2,4,8);
--- on slave ---
SELECT * FROM t7 ORDER BY a;
a b c
1 2 3
2 4 8
3 6 9
99 99 99
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
......@@ -6,6 +6,7 @@ foo CREATE DATABASE `foo` /*!40100 DEFAULT CHARACTER SET latin1 */
show schemas;
Database
information_schema
cluster_replication
foo
mysql
test
......
......@@ -53,6 +53,7 @@ Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length I
show databases;
Database
information_schema
cluster_replication
mysql
test
show databases like "test%";
......
......@@ -20,6 +20,11 @@ subselect : Bug#15706
type_time : Bug#15805
#rpl000002 : Bug#15920 Temporary tables are not binlogged in SBR
#ps_7ndb : Bug#15923 Core dump in RBR mode when executing test suite
ps_7ndb : dbug assert in RBR mode when executing test suite
rpl_ddl : Bug#15963 SBR does not show "Definer" correctly
mysqlslap : Bug#16167
events : Affects flush test case. A table lock not released somewhere
ndb_autodiscover : TBF with CR
ndb_autodiscover2 : TBF with CR
ndb_binlog_basic : Results are not deterministic, Tomas will fix
rpl_ndb_basic : Bug#16228
......@@ -518,9 +518,6 @@ echo $novar1;
--error 1
--exec echo "let $=hi;" | $MYSQL_TEST 2>&1
--error 1
--exec echo "let hi=hi;" | $MYSQL_TEST 2>&1
--error 1
--exec echo "let $1 hi;" | $MYSQL_TEST 2>&1
......
......@@ -142,23 +142,19 @@ INSERT INTO t1 VALUES (1,2,0),(18,19,4),(20,21,0);
select c from t1 order by c;
drop table t1;
--disable_ps_protocol
create table t1 ( a int primary key, b varchar(10), c varchar(10), index (b) )
engine=ndb;
insert into t1 values (1,'one','one'), (2,'two','two'), (3,'three','three');
create index c on t1(c);
connection server2;
select * from t1 where b = 'two';
connection server1;
alter table t1 drop index c;
connection server2;
# This should fail since index information is not automatically refreshed
--error 1015
select * from t1 where b = 'two';
select * from t1 where b = 'two';
connection server1;
drop table t1;
--enable_ps_protocol
## Test moved to ndb_alter_table_row|stmt respectively as behaviour differs
#create table t1 ( a int primary key, b varchar(10), c varchar(10), index (b) )
#engine=ndb;
#insert into t1 values (1,'one','one'), (2,'two','two'), (3,'three','three');
#create index c on t1(c);
#connection server2;
#select * from t1 where c = 'two';
#connection server1;
#alter table t1 drop index c;
#connection server2;
#select * from t1 where c = 'two';
#connection server1;
#drop table t1;
#--disable_warnings
#DROP TABLE IF EXISTS t2;
......@@ -183,29 +179,32 @@ drop table t1;
#select count(*) from t2;
#drop table t2;
connection server1;
create table t3 (a int primary key) engine=ndbcluster;
connection server2;
begin;
insert into t3 values (1);
connection server1;
alter table t3 rename t4;
connection server2;
# This should work as transaction is ongoing...
delete from t3;
insert into t3 values (1);
commit;
# This should fail as it's a new transaction
--error 1015
select * from t3;
select * from t4;
drop table t4;
show tables;
connection server1;
## Test moved to ndb_alter_table_row|stmt respectively as behaviour differs
#connection server1;
#create table t3 (a int primary key) engine=ndbcluster;
#connection server2;
#begin;
#insert into t3 values (1);
#connection server1;
#alter table t3 rename t4;
#connection server2;
## with rbr the below will not work as the "alter" event
## explicitly invalidates the dictionary cache.
### This should work as transaction is ongoing...
##delete from t3;
##insert into t3 values (1);
#commit;
## This should fail as it's a new transaction
#--error 1146
#select * from t3;
#select * from t4;
#drop table t4;
#show tables;
#connection server1;
create table t1 (
ai bigint auto_increment,
......
-- source include/have_ndb.inc
-- source include/have_multi_ndb.inc
-- source include/not_embedded.inc
-- source include/have_binlog_format_row.inc
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
connection server1;
create table t1 ( a int primary key, b varchar(10), c varchar(10), index (b) )
engine=ndb;
insert into t1 values (1,'one','one'), (2,'two','two'), (3,'three','three');
create index c on t1(c);
connection server2;
select * from t1 where c = 'two';
connection server1;
alter table t1 drop index c;
connection server2;
select * from t1 where c = 'two';
connection server1;
drop table t1;
connection server1;
create table t3 (a int primary key) engine=ndbcluster;
connection server2;
begin;
insert into t3 values (1);
connection server1;
alter table t3 rename t4;
connection server2;
# with rbr the below will not work as the "alter" event
# explicitly invalidates the dictionary cache.
## This should work as transaction is ongoing...
#delete from t3;
#insert into t3 values (1);
commit;
# This should fail as it's a new transaction
--error 1146
select * from t3;
select * from t4;
drop table t4;
show tables;
connection server1;
-- source include/have_ndb.inc
-- source include/have_multi_ndb.inc
-- source include/not_embedded.inc
-- source include/have_binlog_format_statement.inc
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
connection server1;
create table t1 ( a int primary key, b varchar(10), c varchar(10), index (b) )
engine=ndb;
insert into t1 values (1,'one','one'), (2,'two','two'), (3,'three','three');
create index c on t1(c);
connection server2;
select * from t1 where c = 'two';
connection server1;
alter table t1 drop index c;
connection server2;
-- error 1015
select * from t1 where c = 'two';
select * from t1 where c = 'two';
connection server1;
drop table t1;
connection server1;
create table t3 (a int primary key) engine=ndbcluster;
connection server2;
begin;
insert into t3 values (1);
connection server1;
alter table t3 rename t4;
connection server2;
# with rbr the below will not work as the "alter" event
# explicitly invalidates the dictionary cache.
# This should work as transaction is ongoing...
delete from t3;
insert into t3 values (1);
commit;
# This should fail as it's a new transaction
--error 1015
select * from t3;
select * from t4;
drop table t4;
show tables;
connection server1;
......@@ -6,6 +6,17 @@ DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
drop database if exists mysqltest;
--enable_warnings
# workaround for bug#16445
# remove this to reproduce the bug and run the tests from ndb start
# and with ndb_autodiscover disabled
CREATE TABLE t1 (
pk1 INT NOT NULL PRIMARY KEY,
attr1 INT NOT NULL,
attr2 INT,
attr3 VARCHAR(10)
) ENGINE=ndbcluster;
drop table t1;
#
# Basic test to show that the NDB
# table handler is working
......
-- source include/have_ndb.inc
-- source include/have_binlog_format_row.inc
--disable_warnings
drop table if exists t1, t2;
drop database if exists mysqltest;
create database mysqltest;
use mysqltest;
drop database mysqltest;
use test;
--enable_warnings
#
# basic insert, update, delete test, alter, rename, drop
# check that binlog_index gets the right info
#
create table t1 (a int primary key) engine=ndb;
insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
save_master_pos;
--replace_column 1 #
select @max_epoch:=max(epoch)-1 from cluster_replication.binlog_index;
delete from t1;
alter table t1 add (b int);
insert into t1 values (3,3),(4,4);
alter table t1 rename t2;
# get all in one epoch
begin;
insert into t2 values (1,1),(2,2);
update t2 set b=1 where a=3;
delete from t2 where a=4;
commit;
drop table t2;
# check that above is ok
# (save_master_pos waits for last gcp to complete, ensuring that we have
# the expected data in the binlog)
save_master_pos;
select inserts from cluster_replication.binlog_index where epoch > @max_epoch and inserts > 5;
select deletes from cluster_replication.binlog_index where epoch > @max_epoch and deletes > 5;
select inserts,updates,deletes from
cluster_replication.binlog_index where epoch > @max_epoch and updates > 0;
select schemaops from
cluster_replication.binlog_index where epoch > @max_epoch and schemaops > 0;
#
# check that purge clears the binlog_index
#
flush logs;
--sleep 1
purge master logs before now();
select count(*) from cluster_replication.binlog_index;
#
# several tables in different databases
# check that the same table name in different databases doesn't get mixed up
#
create table t1 (a int primary key, b int) engine=ndb;
create database mysqltest;
use mysqltest;
create table t1 (c int, d int primary key) engine=ndb;
use test;
insert into mysqltest.t1 values (2,1),(2,2);
save_master_pos;
--replace_column 1 #
select @max_epoch:=max(epoch)-1 from cluster_replication.binlog_index;
drop table t1;
drop database mysqltest;
select inserts,updates,deletes from
cluster_replication.binlog_index where epoch > @max_epoch and inserts > 0;
select schemaops from
cluster_replication.binlog_index where epoch > @max_epoch and schemaops > 0;
-- source include/have_ndb.inc
-- source include/have_multi_ndb.inc
-- source include/have_binlog_format_row.inc
--disable_warnings
connection server2;
drop table if exists t1,t2;
connection server1;
drop table if exists t1,t2;
--enable_warnings
#
# basic test to see if one server sees the table from the other
# and sets up the replication correctly
#
# no tables and nothing in cluster_replication.binlog_index;
connection server1;
SHOW TABLES;
# create table on the other server
connection server2;
CREATE TABLE t2 (a INT PRIMARY KEY, b int) ENGINE = NDB;
# make sure the first mysql server knows about this table
connection server1;
show tables;
# insert something on server2
connection server2;
INSERT INTO t2 VALUES (1,1),(2,2);
select * from t2 order by a;
save_master_pos;
--replace_column 1 <the_epoch>
SELECT @the_epoch:=epoch,inserts,updates,deletes,schemaops FROM
cluster_replication.binlog_index ORDER BY epoch DESC LIMIT 1;
let $the_epoch= `SELECT @the_epoch`;
# see if we got something on server1
connection server1;
SELECT * FROM t2 ORDER BY a;
--replace_result $the_epoch <the_epoch>
eval SELECT inserts,updates,deletes,schemaops FROM
cluster_replication.binlog_index WHERE epoch=$the_epoch;
# drop the table on server1
DROP TABLE t2;
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE = NDB;
INSERT INTO t1 VALUES (1),(2);
save_master_pos;
--replace_column 1 <the_epoch2>
SELECT @the_epoch2:=epoch,inserts,updates,deletes,schemaops FROM
cluster_replication.binlog_index ORDER BY epoch DESC LIMIT 1;
let $the_epoch2= `SELECT @the_epoch2`;
--replace_result $the_epoch <the_epoch> $the_epoch2 <the_epoch2>
eval SELECT inserts,updates,deletes,schemaops FROM
cluster_replication.binlog_index WHERE epoch > $the_epoch AND epoch < $the_epoch2;
drop table t1;
# flush on server2
connection server2;
SHOW TABLES;
--replace_result $the_epoch <the_epoch> $the_epoch2 <the_epoch2>
eval SELECT inserts,updates,deletes,schemaops FROM
cluster_replication.binlog_index WHERE epoch > $the_epoch AND epoch < $the_epoch2;
# reset
connection server1;
-- source include/have_ndb.inc
-- source include/have_multi_ndb.inc
-- source include/not_embedded.inc
-- source include/have_binlog_format_statement.inc
--disable_warnings
connection server2;
drop table if exists t1, t2, t3, t4;
connection server1;
drop table if exists t1, t2, t3, t4;
--enable_warnings
......
-- source include/have_ndb.inc
-- source include/have_multi_ndb.inc
-- source include/not_embedded.inc
-- source include/have_binlog_format_row.inc
--disable_warnings
connection server2;
drop table if exists t1, t2, t3, t4;
connection server1;
drop table if exists t1, t2, t3, t4;
--enable_warnings
flush status;
# Create test tables on server1
create table t1 (a int) engine=ndbcluster;
create table t2 (a int) engine=ndbcluster;
insert into t1 value (2);
insert into t2 value (3);
select * from t1;
select * from t2;
show status like 'handler_discover%';
# Check dropping and recreating table on same server
connect (con1,localhost,,,test);
connect (con2,localhost,,,test);
connection con1;
select * from t1;
connection con2;
drop table t1;
create table t1 (a int) engine=ndbcluster;
insert into t1 value (2);
connection con1;
select * from t1;
# Check dropping and recreating table on different server
connection server2;
show status like 'handler_discover%';
drop table t1;
create table t1 (a int) engine=ndbcluster;
insert into t1 value (2);
connection server1;
## Currently a retry is required remotely
#--error 1412
#select * from t1;
#show warnings;
#flush table t1;
# Table definition change should be propagated automatically
select * from t1;
# Connect to server2 and use the tables from there
connection server2;
flush status;
select * from t1;
update t1 set a=3 where a=2;
show status like 'handler_discover%';
# Create a new table on server2
create table t3 (a int not null primary key, b varchar(22),
c int, last_col text) engine=ndb;
insert into t3 values(1, 'Hi!', 89, 'Longtext column');
create table t4 (pk int primary key, b int) engine=ndb;
# Check that the tables are accessible from server1
connection server1;
select * from t1;
select * from t3;
show status like 'handler_discover%';
show tables like 't4';
show status like 'handler_discover%';
show tables;
drop table t1, t2, t3, t4;
connection server2;
drop table t1, t3, t4;
# End of 4.1 tests
#
# Currently this test only runs in the source tree with the
# ndb/test programs compiled.
# invoke with: ./mysql-test-run --ndb-extra-test --do-test=rpl_ndb_bank
#
# 1. start a "bank" application running on the master cluster
# 2. perform online sync of slave
# 3. periodically check consistency of slave
# 4. stop the bank application
# 5. check that the slave and master BANK databases are the same
#
# kill any leftover processes from previous runs
--system killall lt-bankTransactionMaker lt-bankTimer lt-bankMakeGL || true
--source include/have_ndb.inc
--source include/have_ndb_extra.inc
--source include/have_binlog_format_row.inc
--source include/master-slave.inc
--disable_warnings
# initialize master
--connection master
CREATE DATABASE IF NOT EXISTS BANK;
DROP DATABASE BANK;
CREATE DATABASE BANK default charset=latin1 default collate=latin1_bin;
--enable_warnings
#
# These tables should correspond to the table definitions in
# storage/ndb/test/src/NDBT_Tables.cpp
#
--connection master
USE BANK;
CREATE TABLE GL ( TIME BIGINT UNSIGNED NOT NULL,
ACCOUNT_TYPE INT UNSIGNED NOT NULL,
BALANCE INT UNSIGNED NOT NULL,
DEPOSIT_COUNT INT UNSIGNED NOT NULL,
DEPOSIT_SUM INT UNSIGNED NOT NULL,
WITHDRAWAL_COUNT INT UNSIGNED NOT NULL,
WITHDRAWAL_SUM INT UNSIGNED NOT NULL,
PURGED INT UNSIGNED NOT NULL,
PRIMARY KEY USING HASH (TIME,ACCOUNT_TYPE))
ENGINE = NDB;
CREATE TABLE ACCOUNT ( ACCOUNT_ID INT UNSIGNED NOT NULL,
OWNER INT UNSIGNED NOT NULL,
BALANCE INT UNSIGNED NOT NULL,
ACCOUNT_TYPE INT UNSIGNED NOT NULL,
PRIMARY KEY USING HASH (ACCOUNT_ID))
ENGINE = NDB;
CREATE TABLE TRANSACTION ( TRANSACTION_ID BIGINT UNSIGNED NOT NULL,
ACCOUNT INT UNSIGNED NOT NULL,
ACCOUNT_TYPE INT UNSIGNED NOT NULL,
OTHER_ACCOUNT INT UNSIGNED NOT NULL,
TRANSACTION_TYPE INT UNSIGNED NOT NULL,
TIME BIGINT UNSIGNED NOT NULL,
AMOUNT INT UNSIGNED NOT NULL,
PRIMARY KEY USING HASH (TRANSACTION_ID,ACCOUNT))
ENGINE = NDB;
CREATE TABLE SYSTEM_VALUES ( SYSTEM_VALUES_ID INT UNSIGNED NOT NULL,
VALUE BIGINT UNSIGNED NOT NULL,
PRIMARY KEY USING HASH (SYSTEM_VALUES_ID))
ENGINE = NDB;
CREATE TABLE ACCOUNT_TYPE ( ACCOUNT_TYPE_ID INT UNSIGNED NOT NULL,
DESCRIPTION CHAR(64) NOT NULL,
PRIMARY KEY USING HASH (ACCOUNT_TYPE_ID))
ENGINE = NDB;
#
# create "BANK" application
#
--exec NDB_CONNECTSTRING=localhost:$NDBCLUSTER_PORT ../storage/ndb/test/ndbapi/bank/bankCreator >> $NDB_TOOLS_OUTPUT
#
# start main loop
# repeat backup-restore-check
#
# set this high if testing to run many syncs in loop
--let $2=1
while ($2)
{
#
# start "BANK" application
#
--exec NDB_CONNECTSTRING=localhost:$NDBCLUSTER_PORT ../storage/ndb/test/ndbapi/bank/bankTimer -w 5 >> $NDB_TOOLS_OUTPUT &
--exec NDB_CONNECTSTRING=localhost:$NDBCLUSTER_PORT ../storage/ndb/test/ndbapi/bank/bankMakeGL >> $NDB_TOOLS_OUTPUT &
--exec NDB_CONNECTSTRING=localhost:$NDBCLUSTER_PORT ../storage/ndb/test/ndbapi/bank/bankTransactionMaker >> $NDB_TOOLS_OUTPUT &
#
# let the "BANK" run for a while
#
--sleep 5
--disable_warnings
# initialize slave for sync
--connection slave
STOP SLAVE;
RESET SLAVE;
# to make sure we drop any ndbcluster tables
CREATE DATABASE IF NOT EXISTS BANK;
DROP DATABASE BANK;
# create database
CREATE DATABASE BANK;
--enable_warnings
#
# Time to sync the slave:
# start by taking a backup on master
--connection master
RESET MASTER;
--exec $NDB_MGM --no-defaults --ndb-connectstring=localhost:$NDBCLUSTER_PORT -e "start backup" >> $NDB_TOOLS_OUTPUT
# there is no neat way to find the backup id; this is a hack to find it...
--exec $NDB_TOOLS_DIR/ndb_select_all --ndb-connectstring=localhost:$NDBCLUSTER_PORT -d sys -D , SYSTAB_0 | grep 520093696 > var/tmp.dat
CREATE TABLE IF NOT EXISTS cluster_replication.backup_info (id INT, backup_id INT) ENGINE = HEAP;
DELETE FROM cluster_replication.backup_info;
LOAD DATA INFILE '../../var/tmp.dat' INTO TABLE cluster_replication.backup_info FIELDS TERMINATED BY ',';
--replace_column 1 <the_backup_id>
SELECT @the_backup_id:=backup_id FROM cluster_replication.backup_info;
let the_backup_id=`select @the_backup_id`;
# restore on slave, first check that nothing is there
--connection slave
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT_SLAVE" -p 8 -b $the_backup_id -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT_SLAVE" -p 8 -b $the_backup_id -n 2 -r -e --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT
#
# now setup replication to continue from last epoch
# 1. get apply_status epoch from slave
# 2. get corresponding _next_ binlog position from master
# 3. change master on slave
# 4. start the replication
# 1.
--connection slave
--replace_column 1 <the_epoch>
SELECT @the_epoch:=MAX(epoch) FROM cluster_replication.apply_status;
--let $the_epoch= `select @the_epoch`
# 2.
--connection master
--replace_result $the_epoch <the_epoch>
--replace_column 1 <the_pos>
eval SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1)
FROM cluster_replication.binlog_index WHERE epoch > $the_epoch ORDER BY epoch ASC LIMIT 1;
--let $the_pos= `SELECT @the_pos`
--let $the_file= `SELECT @the_file`
# 3.
--connection slave
--replace_result $the_pos <the_pos>
eval CHANGE MASTER TO
master_log_file = '$the_file',
master_log_pos = $the_pos;
# 4.
--connection slave
START SLAVE;
#
# Now loop and check consistency every 2 seconds on slave
#
--connection slave
--let $1=10
while ($1)
{
--sleep 2
--replace_result $MASTER_MYPORT MASTER_PORT
--replace_column 1 <Slave_IO_State> 7 <Read_Master_Log_Pos> 8 <Relay_Log_File> 9 <Relay_Log_Pos> 16 <Replicate_Ignore_Table> 22 <Exec_Master_Log_Pos> 23 <Relay_Log_Space> 33 <Seconds_Behind_Master>
SHOW SLAVE STATUS;
STOP SLAVE;
--exec NDB_CONNECTSTRING=localhost:$NDBCLUSTER_PORT_SLAVE ../storage/ndb/test/ndbapi/bank/bankValidateAllGLs >> $NDB_TOOLS_OUTPUT
START SLAVE;
--dec $1
}
#
# Stop transactions
#
--exec killall lt-bankTransactionMaker lt-bankTimer lt-bankMakeGL
#
# Check that the databases are the same on slave and master
# 1. dump database BANK on both master and slave
# 2. compare, there should be no difference
#
--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info BANK ACCOUNT_TYPE ACCOUNT GL TRANSACTION > ./var/tmp/master_BANK.sql
--connection master
use test;
create table t1 (a int primary key) engine=ndb;
insert into t1 values (1);
--sync_slave_with_master
--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info BANK ACCOUNT_TYPE ACCOUNT GL TRANSACTION > ./var/tmp/slave_BANK.sql
--connection master
drop table t1;
--exec diff ./var/tmp/master_BANK.sql ./var/tmp/slave_BANK.sql
--dec $2
}
--source include/have_ndb.inc
--source include/have_binlog_format_row.inc
--source include/master-slave.inc
#
# Bug #11087
#
# connect to the master and create table t1
--connection master
CREATE TABLE `t1` ( `nid` int(11) NOT NULL default '0',
`nom` char(4) default NULL,
`prenom` char(4) default NULL,
PRIMARY KEY (`nid`))
ENGINE=ndbcluster DEFAULT CHARSET=latin1;
INSERT INTO t1 VALUES(1,"XYZ1","ABC1");
select * from t1 order by nid;
--sync_slave_with_master
# connect to the slave and ensure the data is there.
--connection slave
select * from t1 order by nid;
--connection master
delete from t1;
INSERT INTO t1 VALUES(1,"XYZ2","ABC2");
# Make sure all rows are on the master
select * from t1 order by nid;
# make sure all rows are on the slave.
--sync_slave_with_master
--connection slave
# Bug #11087 would have row with nid 2 missing
select * from t1 order by nid;
--connection master
DROP table t1;
#
# Test replication of table with no primary key
#
--connection master
CREATE TABLE `t1` ( `nid` int(11) NOT NULL default '0',
`nom` char(4) default NULL,
`prenom` char(4) default NULL)
ENGINE=ndbcluster DEFAULT CHARSET=latin1;
INSERT INTO t1 VALUES(1,"XYZ1","ABC1"),(2,"AAA","BBB"),(3,"CCC","DDD");
select * from t1 order by nid;
--sync_slave_with_master
# connect to the slave and ensure the data is there.
--connection slave
select * from t1 order by nid;
--connection master
delete from t1 where nid = 2;
INSERT INTO t1 VALUES(4,"EEE","FFF");
# Make sure all rows are on the master
select * from t1 order by nid;
# make sure all rows are on the slave.
--sync_slave_with_master
--connection slave
select * from t1 order by nid;
--connection master
UPDATE t1 set nid=nid+1;
UPDATE t1 set nom="CCP" where nid = 4;
select * from t1 order by nid;
# make sure all rows are on the slave.
--sync_slave_with_master
--connection slave
select * from t1 order by nid;
--connection master
DROP table t1;
##################################################################
#
# Check that retries are made on the slave on some temporary errors
#
#
# 1. Deadlock
#
--connection master
CREATE TABLE `t1` ( `nid` int(11) NOT NULL default '0',
`nom` char(4) default NULL,
`prenom` char(4) default NULL,
PRIMARY KEY USING HASH (`nid`))
ENGINE=ndbcluster DEFAULT CHARSET=latin1;
INSERT INTO t1 VALUES(1,"XYZ1","ABC1");
# cause a lock on that row on the slave
--sync_slave_with_master
--connection slave
BEGIN;
UPDATE t1 SET `nom`="LOCK" WHERE `nid`=1;
# set the number of retries low so that the retries fail
set GLOBAL slave_transaction_retries=1;
# now change this row on the master;
# it will deadlock on the slave because of the lock taken above
--connection master
UPDATE t1 SET `nom`="DEAD" WHERE `nid`=1;
# wait for the deadlock to be detected
# sleep longer than the deadlock detection timeout in the config
# we do this 2 times, once with few retries to verify that we
# get a failure with the set sleep, and once with the _same_
# sleep, but with more retries to get it to succeed
--sleep 5
# replication should have stopped, since the max retries were not enough
# verify with show slave status
--connection slave
--replace_result $MASTER_MYPORT MASTER_PORT
--replace_column 1 <Slave_IO_State> 7 <Read_Master_Log_Pos> 8 <Relay_Log_File> 9 <Relay_Log_Pos> 16 <Replicate_Ignore_Table> 22 <Exec_Master_Log_Pos> 23 <Relay_Log_Space> 33 <Seconds_Behind_Master>
SHOW SLAVE STATUS;
# now set max retries high enough to succeed, and start slave again
set GLOBAL slave_transaction_retries=10;
START SLAVE;
# wait for deadlock to be detected and retried
# should be the same sleep as above for test to be valid
--sleep 5
# commit transaction to release lock on row and let replication succeed
select * from t1 order by nid;
COMMIT;
# verify that the row was successfully applied on the slave
--connection master
--sync_slave_with_master
--connection slave
select * from t1 order by nid;
# cleanup
--connection master
DROP TABLE t1;
--source include/have_ndb.inc
--source include/have_binlog_format_row.inc
--source include/master-slave.inc
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
#
# Basic test of disk tables for NDB
#
#
# Start by creating a logfile group
#
CREATE LOGFILE GROUP lg1
ADD UNDOFILE 'undofile.dat'
INITIAL_SIZE 16M
UNDO_BUFFER_SIZE = 1M
ENGINE=NDB;
alter logfile group lg1
add undofile 'undofile02.dat'
initial_size 4M engine=ndb;
#
# Create a tablespace connected to the logfile group
#
CREATE TABLESPACE ts1
ADD DATAFILE 'datafile.dat'
USE LOGFILE GROUP lg1
INITIAL_SIZE 12M
ENGINE NDB;
alter tablespace ts1
add datafile 'datafile02.dat'
initial_size 4M engine=ndb;
#
# Create a table using this tablespace
#
CREATE TABLE t1
(pk1 int not null primary key, b int not null, c int not null)
tablespace ts1 storage disk
engine ndb;
#
# insert some data
#
insert into t1 values (1,2,3);
select * from t1 order by pk1;
#
# check that the data is also on the slave
#
--sync_slave_with_master
--connection slave
select * from t1 order by pk1;
#
# view the binlog
#
--connection master
let $VERSION=`select version()`;
--replace_result $VERSION VERSION
show binlog events;
#
# cleanup
#
drop table t1;
alter tablespace ts1
drop datafile 'datafile.dat'
engine=ndb;
alter tablespace ts1
drop datafile 'datafile02.dat'
engine=ndb;
DROP TABLESPACE ts1 ENGINE=NDB;
DROP LOGFILE GROUP lg1 ENGINE=NDB;
--sync_slave_with_master
--source include/have_ndb.inc
--source include/have_binlog_format_row.inc
--source include/master-slave.inc
#
# Currently this test only works with ndb since it retrieves "old"
# binlog positions with cluster_replication.binlog_index and apply_status;
#
# create a table with one row
CREATE TABLE t1 (c1 CHAR(15), c2 CHAR(15), c3 INT, PRIMARY KEY (c3)) ENGINE = NDB ;
INSERT INTO t1 VALUES ("row1","will go away",1);
SELECT * FROM t1 ORDER BY c3;
# sync slave and retrieve epoch
sync_slave_with_master;
--replace_column 1 <the_epoch>
SELECT @the_epoch:=MAX(epoch) FROM cluster_replication.apply_status;
let $the_epoch= `select @the_epoch` ;
SELECT * FROM t1 ORDER BY c3;
# get the master binlog pos from the epoch
connection master;
--replace_result $the_epoch <the_epoch>
--replace_column 1 <the_pos>
eval SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1)
FROM cluster_replication.binlog_index WHERE epoch = $the_epoch ;
let $the_pos= `SELECT @the_pos` ;
let $the_file= `SELECT @the_file` ;
# insert some more values
INSERT INTO t1 VALUES ("row2","will go away",2),("row3","will change",3),("row4","D",4);
DELETE FROM t1 WHERE c3 = 1;
UPDATE t1 SET c2="should go away" WHERE c3 = 2;
UPDATE t1 SET c2="C" WHERE c3 = 3;
DELETE FROM t1 WHERE c3 = 2;
SELECT * FROM t1 ORDER BY c3;
# check that we have it on the slave
--sync_slave_with_master
--connection slave
SELECT * FROM t1 ORDER BY c3;
--replace_result $MASTER_MYPORT MASTER_PORT
--replace_column 1 <Slave_IO_State> 7 <Read_Master_Log_Pos> 8 <Relay_Log_File> 9 <Relay_Log_Pos> 16 <Replicate_Ignore_Table> 22 <Exec_Master_Log_Pos> 23 <Relay_Log_Space> 33 <Seconds_Behind_Master>
SHOW SLAVE STATUS;
# stop slave and reset position to before the last changes
STOP SLAVE;
--replace_result $the_pos <the_pos>
eval CHANGE MASTER TO
master_log_file = '$the_file',
master_log_pos = $the_pos ;
--replace_result $MASTER_MYPORT MASTER_PORT
--replace_column 1 <Slave_IO_State> 7 <Read_Master_Log_Pos> 8 <Relay_Log_File> 9 <Relay_Log_Pos> 16 <Replicate_Ignore_Table> 22 <Exec_Master_Log_Pos> 23 <Relay_Log_Space> 33 <Seconds_Behind_Master>
SHOW SLAVE STATUS;
# start the slave again
# -> same events should have been applied again
# e.g. inserting rows that are already there
# deleting a row which is not there
# updating a row which is not there
START SLAVE;
--connection master
SELECT * FROM t1 ORDER BY c3;
--sync_slave_with_master
--connection slave
SELECT * FROM t1 ORDER BY c3;
STOP SLAVE;
#
# cleanup
#
--connection master
DROP TABLE t1;
RESET master;
--connection slave
DROP TABLE t1;
RESET slave;
START SLAVE;
#
# Test that we can handle an update of a row that does not exist on the slave.
# This triggers use of AO_IgnoreError on the slave side so that the INSERT
# still succeeds even if the replication of the UPDATE generates an error.
#
--connection master
CREATE TABLE t1 (c1 CHAR(15) NOT NULL, c2 CHAR(15) NOT NULL, c3 INT NOT NULL, PRIMARY KEY (c3)) ENGINE = NDB ;
INSERT INTO t1 VALUES ("row1","remove on slave",1);
--sync_slave_with_master
--connection slave
DELETE FROM t1;
--connection master
BEGIN;
UPDATE t1 SET c2="does not exist" WHERE c3=1;
INSERT INTO t1 VALUES ("row2","new on slave",2);
COMMIT;
--sync_slave_with_master
--connection slave
SELECT * FROM t1;
--replace_result $MASTER_MYPORT MASTER_PORT
--replace_column 1 <Slave_IO_State> 7 <Read_Master_Log_Pos> 8 <Relay_Log_File> 9 <Relay_Log_Pos> 16 <Replicate_Ignore_Table> 22 <Exec_Master_Log_Pos> 23 <Relay_Log_Space> 33 <Seconds_Behind_Master>
SHOW SLAVE STATUS;
#
# Currently this test only runs in the source tree with the
# ndb/test programs compiled.
# invoke with: ./mysql-test-run --ndb-extra-test --do-test=rpl_ndb_load
#
--source include/have_ndb.inc
--source include/have_ndb_extra.inc
--source include/have_binlog_format_row.inc
--source include/master-slave.inc
--disable_warnings
# initialize master
connection master;
DROP DATABASE IF EXISTS TEST_DB;
CREATE DATABASE TEST_DB;
--enable_warnings
#
# These tables should correspond to the table definitions in
# storage/ndb/test/ndbapi/bench/
#
connection master;
USE TEST_DB;
CREATE TABLE SUBSCRIBER
( NUMBER CHAR(12) BINARY NOT NULL,
NAME CHAR(32) BINARY NOT NULL,
GROUP_ID INT UNSIGNED NOT NULL,
LOCATION INT UNSIGNED NOT NULL,
SESSIONS INT UNSIGNED NOT NULL,
CHANGED_BY CHAR(32) BINARY NOT NULL,
CHANGED_TIME CHAR(32) BINARY NOT NULL,
PRIMARY KEY USING HASH (NUMBER))
ENGINE = NDB;
CREATE TABLE GROUP_T
( GROUP_ID INT UNSIGNED NOT NULL,
GROUP_NAME CHAR(32) BINARY NOT NULL,
ALLOW_READ CHAR(1) BINARY NOT NULL,
ALLOW_INSERT INT UNSIGNED NOT NULL,
ALLOW_DELETE INT UNSIGNED NOT NULL,
PRIMARY KEY USING HASH (GROUP_ID))
ENGINE = NDB;
CREATE TABLE SESSION
( NUMBER CHAR(12) BINARY NOT NULL,
SERVER_ID INT UNSIGNED NOT NULL,
DATA VARBINARY(1998) NOT NULL,
PRIMARY KEY USING HASH (NUMBER,SERVER_ID))
ENGINE = NDB;
CREATE TABLE SERVER
( SUFFIX CHAR(2) BINARY NOT NULL,
SERVER_ID INT UNSIGNED NOT NULL,
NAME CHAR(32) BINARY NOT NULL,
NO_OF_READ INT UNSIGNED NOT NULL,
NO_OF_INSERT INT UNSIGNED NOT NULL,
NO_OF_DELETE INT UNSIGNED NOT NULL,
PRIMARY KEY USING HASH (SUFFIX, SERVER_ID))
ENGINE = NDB;
#
# start "load" application
#
--exec NDB_CONNECTSTRING=localhost:$NDBCLUSTER_PORT ../storage/ndb/test/ndbapi/DbCreate >> $NDB_TOOLS_OUTPUT
--exec NDB_CONNECTSTRING=localhost:$NDBCLUSTER_PORT ../storage/ndb/test/ndbapi/DbAsyncGenerator >> $NDB_TOOLS_OUTPUT
--source include/have_ndb.inc
--source include/have_multi_ndb.inc
--source include/have_binlog_format_row.inc
--source include/master-slave.inc
# note: server2 is another "master" connected to the master cluster
#
# Currently this test only works with ndb since it retrieves "old"
# binlog positions with cluster_replication.binlog_index and apply_status;
#
# create a table with one row, and make sure the other "master" gets it
CREATE TABLE t1 (c1 CHAR(15), c2 CHAR(15), c3 INT, PRIMARY KEY (c3)) ENGINE = NDB ;
connection server2;
reset master;
SHOW TABLES;
connection master;
INSERT INTO t1 VALUES ("row1","will go away",1);
SELECT * FROM t1 ORDER BY c3;
connection server2;
SELECT * FROM t1 ORDER BY c3;
# sync slave and retrieve epoch and stop the slave
connection master;
sync_slave_with_master;
--replace_column 1 <the_epoch>
SELECT @the_epoch:=MAX(epoch) FROM cluster_replication.apply_status;
let $the_epoch= `select @the_epoch` ;
SELECT * FROM t1 ORDER BY c3;
stop slave;
# get the master binlog pos from the epoch, from the _other_ "master", server2
connection server2;
--replace_result $the_epoch <the_epoch>
eval SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1)
FROM cluster_replication.binlog_index WHERE epoch = $the_epoch ;
let $the_pos= `SELECT @the_pos` ;
let $the_file= `SELECT @the_file` ;
# now connect the slave to the _other_ "master"
connection slave;
--replace_result $MASTER_MYPORT1 <MASTER_PORT1>
eval CHANGE MASTER TO
master_port=$MASTER_MYPORT1,
master_log_file = '$the_file',
master_log_pos = $the_pos ;
start slave;
# insert some more values on the first master
connection master;
INSERT INTO t1 VALUES ("row2","will go away",2),("row3","will change",3),("row4","D",4);
DELETE FROM t1 WHERE c3 = 1;
UPDATE t1 SET c2="should go away" WHERE c3 = 2;
UPDATE t1 SET c2="C" WHERE c3 = 3;
DELETE FROM t1 WHERE c3 = 2;
SELECT * FROM t1 ORDER BY c3;
save_master_pos;
# insert another row, and check that we have it on the slave
connection server2;
INSERT INTO t1 VALUES ("row5","E",5);
SELECT * FROM t1 ORDER BY c3;
#sync_slave_with_master;
connection slave;
--sleep 2
SELECT * FROM t1 ORDER BY c3;
STOP SLAVE;
--source include/have_ndb.inc
--source include/have_binlog_format_row.inc
--source include/master-slave.inc
#
# Currently this test only works with ndb since it retrieves "old"
# binlog positions with cluster_replication.binlog_index and apply_status;
#
# stop the slave
connection slave;
STOP SLAVE;
CREATE DATABASE ndbsynctest;
USE ndbsynctest;
# get some data on the master
connection master;
CREATE DATABASE ndbsynctest;
USE ndbsynctest;
CREATE TABLE t1 (c1 BIT(1) NOT NULL, c2 BIT(1) NOT NULL, c3 CHAR(15), PRIMARY KEY(c3)) ENGINE = NDB ;
INSERT INTO t1 VALUES (1,1,"row1"),(0,1,"row2"),(1,0,"row3"),(0,0,"row4");
CREATE TABLE t2 (c1 CHAR(15), c2 BIT(1) NOT NULL, c3 BIT(1) NOT NULL, PRIMARY KEY(c1)) ENGINE = NDB ;
INSERT INTO t2 VALUES ("ABC",1,1),("BCDEF",0,1),("CD",1,0),("DEFGHIJKL",0,0);
SELECT hex(c1),hex(c2),c3 FROM t1 ORDER BY c3;
SELECT hex(c2),hex(c3),c1 FROM t2 ORDER BY c1;
# take a backup on master
--exec $NDB_MGM --no-defaults --ndb-connectstring=localhost:$NDBCLUSTER_PORT -e "start backup" >> $NDB_TOOLS_OUTPUT
--exec $NDB_TOOLS_DIR/ndb_select_all --no-defaults --ndb-connectstring=localhost:$NDBCLUSTER_PORT -d sys -D , SYSTAB_0 | grep 520093696 > var/tmp.dat
CREATE TABLE IF NOT EXISTS cluster_replication.backup_info (id INT, backup_id INT);
DELETE FROM cluster_replication.backup_info;
LOAD DATA INFILE '../../var/tmp.dat' INTO TABLE cluster_replication.backup_info FIELDS TERMINATED BY ',';
--replace_column 1 <the_backup_id>
SELECT @the_backup_id:=backup_id FROM cluster_replication.backup_info;
let the_backup_id=`select @the_backup_id` ;
# update a row
UPDATE t1 SET c2=0 WHERE c3="row2";
SELECT hex(c1),hex(c2),c3 FROM t1 ORDER BY c3;
# restore on slave, first check that nothing is there
connection slave;
# we should have no tables
SHOW TABLES;
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT_SLAVE" -b $the_backup_id -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT_SLAVE" -b $the_backup_id -n 2 -r -e --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT
#
# BUG#11960
# prior to bugfix "DROP DATABASE" would give a warning since
# the events were not created by ndb_restore
#
DROP DATABASE ndbsynctest;
CREATE DATABASE ndbsynctest;
USE ndbsynctest;
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT_SLAVE" -b $the_backup_id -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT
--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT_SLAVE" -b $the_backup_id -n 2 -r -e --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT
# continue test
SHOW TABLES;
SELECT hex(c1),hex(c2),c3 FROM t1 ORDER BY c3;
SELECT hex(c2),hex(c3),c1 FROM t2 ORDER BY c1;
#
# now setup replication to continue from last epoch
# 1. get apply_status epoch from slave
# 2. get corresponding _next_ binlog position from master
# 3. change master on slave
# 1.
connection slave;
--replace_column 1 <the_epoch>
SELECT @the_epoch:=MAX(epoch) FROM cluster_replication.apply_status;
let $the_epoch= `select @the_epoch` ;
# 2.
connection master;
--replace_result $the_epoch <the_epoch>
--replace_column 1 <the_pos>
eval SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1)
FROM cluster_replication.binlog_index WHERE epoch > $the_epoch ORDER BY epoch ASC LIMIT 1;
let $the_pos= `SELECT @the_pos` ;
let $the_file= `SELECT @the_file` ;
# 3.
connection slave;
--replace_result $the_pos <the_pos>
eval CHANGE MASTER TO
master_log_file = '$the_file',
master_log_pos = $the_pos ;
START SLAVE;
#
#
#
connection master;
#sync_slave_with_master;
--sleep 2
connection slave;
--replace_result $MASTER_MYPORT MASTER_PORT
--replace_column 1 <Slave_IO_State> 7 <Read_Master_Log_Pos> 8 <Relay_Log_File> 9 <Relay_Log_Pos> 16 <Replicate_Ignore_Table> 22 <Exec_Master_Log_Pos> 23 <Relay_Log_Space> 33 <Seconds_Behind_Master>
SHOW SLAVE STATUS;
SELECT hex(c1),hex(c2),c3 FROM t1 ORDER BY c3;
SELECT hex(c2),hex(c3),c1 FROM t2 ORDER BY c1;
#
# Cleanup
#
connection master;
DROP DATABASE ndbsynctest;
#sync_slave_with_master;
--sleep 2
connection slave;
STOP SLAVE;
#
# Test some replication commands
#
connection master;
reset master;
# should now contain nothing
select * from cluster_replication.binlog_index;
connection slave;
reset slave;
# should now contain nothing
select * from cluster_replication.apply_status;
-- source include/have_ndb.inc
let $type= 'NDB' ;
let $extra_index= ;
-- source include/rpl_row_basic.inc
......@@ -813,6 +813,8 @@ $c_p
$c_pp
$c_ev
CREATE DATABASE IF NOT EXISTS cluster_replication;
CREATE TABLE IF NOT EXISTS cluster_replication.binlog_index (Position BIGINT UNSIGNED NOT NULL, File VARCHAR(255) NOT NULL, epoch BIGINT UNSIGNED NOT NULL, inserts BIGINT UNSIGNED NOT NULL, updates BIGINT UNSIGNED NOT NULL, deletes BIGINT UNSIGNED NOT NULL, schemaops BIGINT UNSIGNED NOT NULL, PRIMARY KEY(epoch)) ENGINE=MYISAM;
END_OF_DATA
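The binlog_index table created above is what the rpl_ndb_* tests in this change query to map an NDB epoch back to a binlog file and position. A minimal sketch of that master-side lookup, mirroring the pattern used in the tests (the epoch value 42 is only a placeholder for the epoch read from the slave's apply_status):
-- hedged illustration only; 42 stands in for the slave's last applied epoch
SELECT @the_pos:=Position, @the_file:=SUBSTRING_INDEX(File, '/', -1)
FROM cluster_replication.binlog_index
WHERE epoch > 42 ORDER BY epoch ASC LIMIT 1;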
......@@ -58,6 +58,7 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \
sql_select.h structs.h table.h sql_udf.h hash_filo.h\
lex.h lex_symbol.h sql_acl.h sql_crypt.h \
log_event.h sql_repl.h slave.h rpl_filter.h \
rpl_injector.h \
stacktrace.h sql_sort.h sql_cache.h set_var.h \
spatial.h gstream.h client_settings.h tzfile.h \
tztime.h my_decimal.h\
......@@ -89,6 +90,7 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc sql_partition.cc \
sql_load.cc mf_iocache.cc field_conv.cc sql_show.cc \
sql_udf.cc sql_analyse.cc sql_analyse.h sql_cache.cc \
slave.cc sql_repl.cc rpl_filter.cc rpl_tblmap.cc \
rpl_injector.cc \
sql_union.cc sql_derived.cc \
client.c sql_client.cc mini_client_errors.c pack.c\
stacktrace.c repl_failsafe.h repl_failsafe.cc \
......@@ -104,6 +106,8 @@ EXTRA_mysqld_SOURCES = ha_innodb.cc ha_berkeley.cc ha_archive.cc \
ha_innodb.h ha_berkeley.h ha_archive.h \
ha_blackhole.cc ha_federated.cc ha_ndbcluster.cc \
ha_blackhole.h ha_federated.h ha_ndbcluster.h \
ha_ndbcluster_binlog.cc ha_ndbcluster_binlog.h \
ha_ndbcluster_tables.h \
ha_partition.cc ha_partition.h
mysqld_DEPENDENCIES = @mysql_se_objs@
gen_lex_hash_SOURCES = gen_lex_hash.cc
......@@ -160,6 +164,9 @@ ha_berkeley.o: ha_berkeley.cc ha_berkeley.h
ha_ndbcluster.o:ha_ndbcluster.cc ha_ndbcluster.h
$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<
ha_ndbcluster_binlog.o:ha_ndbcluster_binlog.cc ha_ndbcluster_binlog.h
$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<
#Until we can get rid of dependencies on ha_ndbcluster.h
handler.o: handler.cc ha_ndbcluster.h
$(CXXCOMPILE) @ndbcluster_includes@ $(CXXFLAGS) -c $<
......
/* Copyright (C) 2000-2003 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define NDB_REP_DB "cluster_replication"
#define NDB_REP_TABLE "binlog_index"
#define NDB_APPLY_TABLE "apply_status"
#define NDB_SCHEMA_TABLE "schema"
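These macros name the NDB replication support objects used by the tests and server code above: the cluster_replication database, its binlog_index and schema tables, and the apply_status table read on the slave side. A hedged illustration of the slave-side query the rpl_ndb_* tests perform against them:
-- read the last epoch the slave has applied (illustration only)
SELECT @the_epoch:=MAX(epoch) FROM cluster_replication.apply_status;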
......@@ -988,6 +988,7 @@ bool MYSQL_LOG::reset_logs(THD* thd)
enum_log_type save_log_type;
DBUG_ENTER("reset_logs");
ha_reset_logs(thd);
/*
We need to get both locks to be sure that no one is trying to
write to the index log file.
......@@ -1237,6 +1238,9 @@ int MYSQL_LOG::purge_logs(const char *to_log,
DBUG_PRINT("info",("purging %s",log_info.log_file_name));
if (!my_delete(log_info.log_file_name, MYF(0)) && decrease_log_space)
*decrease_log_space-= file_size;
ha_binlog_index_purge_file(current_thd, log_info.log_file_name);
if (find_next_log(&log_info, 0) || exit_loop)
break;
}
......@@ -1297,6 +1301,9 @@ int MYSQL_LOG::purge_logs_before_date(time_t purge_time)
stat_area.st_mtime >= purge_time)
break;
my_delete(log_info.log_file_name, MYF(0));
ha_binlog_index_purge_file(current_thd, log_info.log_file_name);
if (find_next_log(&log_info, 0))
break;
}
......
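The ha_binlog_index_purge_file() call added to both purge paths above lets the NDB binlog code discard binlog_index rows for each purged log file, which keeps cluster_replication.binlog_index in step with PURGE MASTER LOGS (see the ndb_binlog_basic test above). A hedged sketch of the net effect only, not the actual server code; the file name is a placeholder:
-- hypothetical purged binlog file name
DELETE FROM cluster_replication.binlog_index WHERE File = './binlog.000001';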
......@@ -1383,6 +1383,7 @@ public:
#define SYSTEM_THREAD_DELAYED_INSERT 1
#define SYSTEM_THREAD_SLAVE_IO 2
#define SYSTEM_THREAD_SLAVE_SQL 4
#define SYSTEM_THREAD_NDBCLUSTER_BINLOG 8
/*
Used to hold information about file and file structure in exchange
......
......@@ -146,6 +146,12 @@ public:
void setMonitorAllAttributes(bool val) {
BitmaskImpl::setField(1, &m_triggerInfo, 25, 1, val);
}
bool getReportAllMonitoredAttributes() const {
return BitmaskImpl::getField(1, &m_triggerInfo, 26, 1);
}
void setReportAllMonitoredAttributes(bool val) {
BitmaskImpl::setField(1, &m_triggerInfo, 26, 1, val);
}
Uint32 getOnline() const {
return m_online;
}
......