Commit 277e783d authored by unknown

Merge magare.gmz:/home/kgeorge/mysql/work/B36011-take2-5.0-bugteam

into  magare.gmz:/home/kgeorge/mysql/work/B36011-5.1-bugteam


sql/sql_select.cc:
  Auto merged
mysql-test/r/subselect.result:
  merge of bug 36011 to 5.1-bugteam
mysql-test/t/subselect.test:
  merge of bug 36011 to 5.1-bugteam
parents db84d000 0fb1527e
###################################################
#Author: Mats (based on file written by Jeb)
#Date: 2008-05-06
#Purpose: To wait for slave SQL thread to start
#Details:
# 1) Fill in and setup variables
# 2) Loop, checking SHOW SLAVE STATUS until the
#    SQL thread reports running
# 3) If the loop runs too long, die.
####################################################
connection slave;
let $row_number= 1;
let $run= 1;
let $counter= 300;
while ($run)
{
  let $sql_result= query_get_value("SHOW SLAVE STATUS", Slave_SQL_Running, $row_number);
  if (`SELECT '$sql_result' = 'Yes'`){
    let $run= 0;
  }
  sleep 0.1;
  if (!$counter){
    --echo "Failed while waiting for slave SQL to start"
    query_vertical SHOW SLAVE STATUS;
    exit;
  }
  dec $counter;
}
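A minimal usage sketch (hypothetical caller, not part of this file): a replication test that has just issued START SLAVE would source this include to block until Slave_SQL_Running reports Yes, mirroring the call made later in this commit:

connection slave;
START SLAVE;
--source include/wait_for_slave_sql_to_start.inc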
@@ -4346,6 +4346,21 @@ t1.a= (select b from t2 limit 1) and not
t1.a= (select a from t2 limit 1) ;
a
drop table t1, t2;
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1),(2);
EXPLAIN EXTENDED SELECT 1 FROM t1 WHERE 1 IN (SELECT 1 FROM t1 GROUP BY a);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2
2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 2 Using temporary; Using filesort
Warnings:
Note 1003 select 1 AS `1` from `test`.`t1` where <in_optimizer>(1,<exists>(select 1 AS `1` from `test`.`t1` group by `test`.`t1`.`a` having (<cache>(1) = <ref_null_helper>(1))))
EXPLAIN EXTENDED SELECT 1 FROM t1 WHERE 1 IN (SELECT 1 FROM t1 WHERE a > 3 GROUP BY a);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 2 Using where; Using temporary; Using filesort
Warnings:
Note 1003 select 1 AS `1` from `test`.`t1` where <in_optimizer>(1,<exists>(select 1 AS `1` from `test`.`t1` where (`test`.`t1`.`a` > 3) group by `test`.`t1`.`a` having (<cache>(1) = <ref_null_helper>(1))))
DROP TABLE t1;
End of 5.0 tests.
CREATE TABLE t1 (a int, b int);
INSERT INTO t1 VALUES (2,22),(1,11),(2,22);
......
################################################################################
# inc/partition_alter1_1.inc #
# #
# Purpose: #
# ADD/DROP PRIMARY KEYs and/or UNIQUE INDEXes tests on partitioned tables #
# This routine is only useful for the partition_<feature>_<engine> tests. #
# #
#------------------------------------------------------------------------------#
# Original Author: mleich #
# Original Date: 2006-03-05 #
# Change Author: #
# Change Date: #
# Change: #
################################################################################
--echo
--echo #========================================================================
--echo # 1. ALTER TABLE ADD PRIMARY KEY and/or UNIQUE INDEX
--echo #========================================================================
# Rule: The table does not have a PRIMARY KEY or UNIQUE INDEX.
# ---> $unique must be empty
# ---> The PRIMARY KEY or UNIQUE INDEX to be created must contain
# the columns used for partitioning.
--echo #------------------------------------------------------------------------
--echo # 1.1 ADD PRIMARY KEY or UNIQUE INDEX to table with one column (f_int1)
--echo # within the partitioning function
--echo #------------------------------------------------------------------------
# Rule: Only f_int1 is used within the partitioning function
# ---> inc/partition_alter_11.inc
if ($do_pk_tests)
{
# The value of the following test may be covered by 1.1.3.
if ($more_pk_ui_tests)
{
--echo # 1.1.1 PRIMARY KEY consisting of one column
let $alter= ALTER TABLE t1 ADD PRIMARY KEY(f_int1);
--source suite/parts/inc/partition_alter_11.inc
}
# This must fail, because PRIMARY KEY does not contain f_int1
let $alter= ALTER TABLE t1 ADD PRIMARY KEY(f_int2);
--source suite/parts/inc/partition_alter_11.inc
}
# The value of the following test may be covered by 1.1.4.
if ($more_pk_ui_tests)
{
--echo # 1.1.2 UNIQUE INDEX consisting of one column
let $alter= ALTER TABLE t1 ADD UNIQUE INDEX uidx1 (f_int1);
--source suite/parts/inc/partition_alter_11.inc
}
# This must fail, because UNIQUE INDEX does not contain f_int1
let $alter= ALTER TABLE t1 ADD UNIQUE INDEX uidx1 (f_int2);
--source suite/parts/inc/partition_alter_11.inc
if ($do_pk_tests)
{
--echo # 1.1.3 PRIMARY KEY consisting of two columns
let $alter= ALTER TABLE t1 ADD PRIMARY KEY(f_int1,f_int2);
--source suite/parts/inc/partition_alter_11.inc
let $alter= ALTER TABLE t1 ADD PRIMARY KEY(f_int2,f_int1);
--source suite/parts/inc/partition_alter_11.inc
}
--echo # 1.1.4 UNIQUE INDEX consisting of two columns
let $alter= ALTER TABLE t1 ADD UNIQUE INDEX uidx1 (f_int1,f_int2);
--source suite/parts/inc/partition_alter_11.inc
let $alter= ALTER TABLE t1 ADD UNIQUE INDEX uidx1 (f_int2,f_int1);
--source suite/parts/inc/partition_alter_11.inc
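The rule stated above (any PRIMARY KEY or UNIQUE INDEX on a partitioned table must include the columns of the partitioning function) can be illustrated with a small standalone sketch; the table, partitioning clause, and error symbol below are illustrative assumptions, not part of this test suite:

CREATE TABLE tp (f_int1 INT, f_int2 INT)
PARTITION BY HASH (f_int1) PARTITIONS 2;
# Expected to fail: the unique key does not include the partitioning column f_int1
--error ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF
ALTER TABLE tp ADD UNIQUE INDEX uidx1 (f_int2);
# Expected to succeed: f_int1 is part of the key
ALTER TABLE tp ADD UNIQUE INDEX uidx1 (f_int1,f_int2);
DROP TABLE tp;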
################################################################################
# inc/partition_alter1_1_2.inc #
# #
# Purpose: #
# ADD/DROP PRIMARY KEYs and/or UNIQUE INDEXes tests on partitioned tables #
# This routine is only useful for the partition_<feature>_<engine> tests. #
# #
#------------------------------------------------------------------------------#
# Original Author: mleich #
# Original Date: 2006-03-05 #
# Change Author: #
# Change Date: #
# Change: #
################################################################################
--echo
--echo #========================================================================
--echo # 1. ALTER TABLE ADD PRIMARY KEY and/or UNIQUE INDEX
--echo #========================================================================
# Rule: The table does not have a PRIMARY KEY or UNIQUE INDEX.
# ---> $unique must be empty
# ---> The PRIMARY KEY or UNIQUE INDEX to be created must contain
# the columns used for partitioning.
#
--echo #------------------------------------------------------------------------
--echo # 1.2 ADD PRIMARY KEY or UNIQUE INDEX to table with two columns
--echo # (f_int1 and f_int2) within the partitioning function
--echo #------------------------------------------------------------------------
# Rule: f_int1 and f_int2 are used within the partitioning function
# ---> inc/partition_alter_13.inc
if ($do_pk_tests)
{
--echo # 1.2.1 PRIMARY KEY consisting of two columns
let $alter= ALTER TABLE t1 ADD PRIMARY KEY(f_int1,f_int2);
--source suite/parts/inc/partition_alter_13.inc
let $alter= ALTER TABLE t1 ADD PRIMARY KEY(f_int2,f_int1);
--source suite/parts/inc/partition_alter_13.inc
}
--echo # 1.2.2 UNIQUE INDEX consisting of two columns
let $alter= ALTER TABLE t1 ADD UNIQUE INDEX uidx1 (f_int1,f_int2);
--source suite/parts/inc/partition_alter_13.inc
let $alter= ALTER TABLE t1 ADD UNIQUE INDEX uidx1 (f_int2,f_int1);
--source suite/parts/inc/partition_alter_13.inc
if ($do_pk_tests)
{
--echo # 1.2.3 PRIMARY KEY and UNIQUE INDEX consisting of two columns
let $alter= ALTER TABLE t1 ADD UNIQUE INDEX uidx1 (f_int1,f_int2), ADD PRIMARY KEY(f_int2,f_int1);
--source suite/parts/inc/partition_alter_13.inc
let $alter= ALTER TABLE t1 ADD UNIQUE INDEX uidx1 (f_int2,f_int1), ADD PRIMARY KEY(f_int1,f_int2);
--source suite/parts/inc/partition_alter_13.inc
let $unique= ;
--source suite/parts/inc/partition_alter_13.inc
}
################################################################################
# inc/partition_alter1_2.inc #
# #
# Purpose: #
# ADD/DROP PRIMARY KEYs and/or UNIQUE INDEXes tests on partitioned tables #
# This routine is only useful for the partition_<feature>_<engine> tests. #
# #
#------------------------------------------------------------------------------#
# Original Author: mleich #
# Original Date: 2006-03-05 #
# Change Author: #
# Change Date: #
# Change: #
################################################################################
#
--echo
--echo #========================================================================
--echo # 2 DROP PRIMARY KEY or UNIQUE INDEX
--echo #========================================================================
# Rule: The table must have a PRIMARY KEY or UNIQUE INDEX.
# ---> $unique must not be empty
# ---> The PRIMARY KEY or UNIQUE INDEX to be dropped must contain
# the columns used for partitioning.
--echo #------------------------------------------------------------------------
--echo # 2.1 Partitioning function contains one column (f_int1)
--echo #------------------------------------------------------------------------
# Rule: Only f_int1 is used within the partitioning function
# ---> inc/partition_alter_11.inc
# The value of the following test may be covered by 2.1.5.
if ($more_pk_ui_tests)
{
if ($do_pk_tests)
{
--echo # 2.1.1 DROP PRIMARY KEY consisting of one column
let $unique= , PRIMARY KEY(f_int1);
let $alter= ALTER TABLE t1 DROP PRIMARY KEY;
--source suite/parts/inc/partition_alter_11.inc
}
#
--echo # 2.1.2 DROP UNIQUE INDEX consisting of one column
let $unique= , UNIQUE INDEX uidx1 (f_int1);
let $alter= ALTER TABLE t1 DROP INDEX uidx1;
--source suite/parts/inc/partition_alter_11.inc
#
if ($do_pk_tests)
{
--echo # 2.1.3 DROP PRIMARY KEY consisting of two columns
let $alter= ALTER TABLE t1 DROP PRIMARY KEY;
let $unique= , PRIMARY KEY(f_int1,f_int2);
--source suite/parts/inc/partition_alter_11.inc
let $unique= , PRIMARY KEY(f_int2,f_int1);
--source suite/parts/inc/partition_alter_11.inc
}
#
--echo # 2.1.4 DROP UNIQUE INDEX consisting of two columns
let $alter= ALTER TABLE t1 DROP INDEX uidx1;
let $unique= , UNIQUE INDEX uidx1 (f_int1,f_int2);
--source suite/parts/inc/partition_alter_11.inc
let $unique= , UNIQUE INDEX uidx1 (f_int2,f_int1);
--source suite/parts/inc/partition_alter_11.inc
}
#
if ($do_pk_tests)
{
--echo # 2.1.5 DROP PRIMARY KEY + UNIQUE INDEX consisting of two columns
let $unique= , UNIQUE INDEX uidx1 (f_int1,f_int2), PRIMARY KEY(f_int2,f_int1);
let $alter= ALTER TABLE t1 DROP PRIMARY KEY, DROP INDEX uidx1;
--source suite/parts/inc/partition_alter_11.inc
let $unique= , UNIQUE INDEX uidx1 (f_int2,f_int1), PRIMARY KEY(f_int1,f_int2);
let $alter= ALTER TABLE t1 DROP PRIMARY KEY, DROP INDEX uidx1;
--source suite/parts/inc/partition_alter_11.inc
}
let $unique= , UNIQUE INDEX uidx1 (f_int1,f_int2), UNIQUE INDEX uidx2 (f_int2,f_int1);
let $alter= ALTER TABLE t1 DROP INDEX uidx1, DROP INDEX uidx2;
--source suite/parts/inc/partition_alter_11.inc
#
--echo #------------------------------------------------------------------------
--echo # 2.2 Partitioning function contains two columns (f_int1,f_int2)
--echo #------------------------------------------------------------------------
# Rule: f_int1 and f_int2 are used within the partitioning function
# ---> inc/partition_alter_13.inc
if ($do_pk_tests)
{
--echo # 2.2.1 DROP PRIMARY KEY consisting of two columns
let $alter= ALTER TABLE t1 DROP PRIMARY KEY;
let $unique= , PRIMARY KEY(f_int1,f_int2);
--source suite/parts/inc/partition_alter_13.inc
let $unique= , PRIMARY KEY(f_int2,f_int1);
--source suite/parts/inc/partition_alter_13.inc
}
#
--echo # 2.2.2 DROP UNIQUE INDEX consisting of two columns
let $alter= ALTER TABLE t1 DROP INDEX uidx1;
let $unique= , UNIQUE INDEX uidx1 (f_int1,f_int2);
--source suite/parts/inc/partition_alter_13.inc
let $unique= , UNIQUE INDEX uidx1 (f_int2,f_int1);
--source suite/parts/inc/partition_alter_13.inc
#
if ($do_pk_tests)
{
--echo # 2.2.3 DROP PRIMARY KEY + UNIQUE INDEX consisting of two columns
let $unique= , UNIQUE INDEX uidx1 (f_int1,f_int2), PRIMARY KEY(f_int2,f_int1);
let $alter= ALTER TABLE t1 DROP PRIMARY KEY, DROP INDEX uidx1;
--source suite/parts/inc/partition_alter_13.inc
let $unique= , UNIQUE INDEX uidx1 (f_int2,f_int1), PRIMARY KEY(f_int1,f_int2);
let $alter= ALTER TABLE t1 DROP PRIMARY KEY, DROP INDEX uidx1;
--source suite/parts/inc/partition_alter_13.inc
}
let $unique= , UNIQUE INDEX uidx1 (f_int1,f_int2), UNIQUE INDEX uidx2 (f_int2,f_int1);
let $alter= ALTER TABLE t1 DROP INDEX uidx1, DROP INDEX uidx2;
--source suite/parts/inc/partition_alter_13.inc
if (0)
{
--echo
--echo #========================================================================
--echo # 3. ALTER TABLE "ALTER" PRIMARY KEY
--echo # mleich: I think that an ALTER TABLE statement where a PRIMARY
--echo # KEY is dropped and recreated (with different layout) might
--echo # be of interest, if the tree containing the table data has
--echo # to be reorganized during this operation.
--echo # To be implemented
--echo #========================================================================
--echo
}
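For orientation, a hedged sketch of how the $unique and $alter variables set above are presumably consumed by the sourced partition_alter_* includes (those includes are not shown in this commit, so the skeleton below is an assumption for illustration only): $unique, with its leading comma, is appended to the column list of the CREATE TABLE, and $alter is the statement under test.

# Hypothetical skeleton of inc/partition_alter_11.inc (assumption)
eval CREATE TABLE t1 (f_int1 INT, f_int2 INT, f_char1 CHAR(20) $unique)
PARTITION BY HASH (f_int1) PARTITIONS 2;
eval $alter;
DROP TABLE t1;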
ndb_blob_partition : cannot create t1
ndb_dd_backuprestore : cannot create t1
ndb_partition_error : cannot create t1
ndb_partition_list : cannot create t1
ndb_partition_range : cannot create t1
part_supported_sql_func_ndb : cannot create t1
partition_alter1_1_ndb : test works, but runs too long on PB
partition_alter1_1_2_ndb : test works, but runs too long on PB
partition_alter1_2_ndb : test works, but runs too long on PB
partition_alter2_ndb : cannot create t1
partition_basic_ndb : cannot create t1
partition_bit_ndb : cannot create t1
partition_engine_ndb : cannot create t1
partition_int_ndb : cannot create t1
partition_sessions : needs system_3_init.inc
partition_syntax_ndb : cannot create t1
partition_value_innodb : Bug#30581 partition_value tests use disallowed CAST() function
partition_value_myisam : Bug#30581 partition_value tests use disallowed CAST() function
partition_value_ndb : cannot create t1
rpl_ndb_dd_partitions : cannot create t1
partition_alter4_myisam : Bug#20129 / WL#4176
partition_alter4_innodb : Bug#20129 / WL#4176
################################################################################
# t/partition_alter1_1_2_innodb.test #
# #
# Purpose: #
# Tests around ADD/DROP PRIMARY KEY and/or UNIQUE INDEX #
# InnoDB branch #
# #
#------------------------------------------------------------------------------#
# Original Author: mleich #
# Original Date: 2006-03-05 #
# Change Author: pcrews #
# Change Date: 2008-05-05 #
# Change: Split up original partition_alter1.test file to better accommodate #
# PushBuild machines' workloads. Total run time for all components #
# is essentially the same, but max. single run time is significantly #
# reduced #
################################################################################
#
# NOTE: PLEASE DO NOT ADD NON-INNODB-SPECIFIC TESTCASES HERE!
# TESTCASES WHICH MUST BE APPLIED TO ALL STORAGE ENGINES MUST BE ADDED IN
# THE SOURCED FILES ONLY.
#
# Please read the README at the end of inc/partition.pre before changing
# any of the variables.
#
#------------------------------------------------------------------------------#
# General not engine specific settings and requirements
##### Options, for debugging support #####
let $debug= 0;
let $with_partitioning= 1;
##### Option, for displaying files #####
let $ls= 1;
##### Number of rows for the INSERT/UPDATE/DELETE/SELECT experiments #####
# on partitioned tables
SET @max_row = 20;
##### Execute more tests #####
let $more_trigger_tests= 0;
let $more_pk_ui_tests= 0;
# The server must support partitioning.
--source include/have_partition.inc
#------------------------------------------------------------------------------#
# Engine specific settings and requirements
##### Storage engine to be tested
--source include/have_innodb.inc
let $engine= 'InnoDB';
##### Execute the test of "table" files
# InnoDB has no files per PK, UI, ...
let $do_file_tests= 0;
##### Execute PRIMARY KEY tests #####
# AFAIK InnoDB clusters the table around PRIMARY KEYs.
let $do_pk_tests= 1;
##### Assign a big number smaller than the maximum value for partitions #####
# and smaller than the maximum value of SIGNED INTEGER
let $MAX_VALUE= (2147483646);
# Generate the prerequisites ($variables, @variables, tables) needed
--source suite/parts/inc/partition.pre
##### Workarounds for known open engine specific bugs
# none
#------------------------------------------------------------------------------#
# Execute the tests to be applied to all storage engines
--source suite/parts/inc/partition_alter1_1_2.inc
#------------------------------------------------------------------------------#
# Execute storage engine specific tests
#------------------------------------------------------------------------------#
# Cleanup
--source suite/parts/inc/partition_cleanup.inc
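The wrapper above only sets mysqltest variables and then sources the shared includes. A hedged sketch of how the quoted $engine value is presumably substituted inside suite/parts/inc/partition.pre (assumed; that include is not part of this commit):

# Hypothetical fragment of suite/parts/inc/partition.pre (assumption)
eval CREATE TABLE t0_template (f_int1 INT, f_int2 INT, f_char1 CHAR(20))
ENGINE = $engine;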
################################################################################
# t/partition_alter1_1_2_myisam.test #
# #
# Purpose: #
# Tests around ADD/DROP PRIMARY KEY and/or UNIQUE INDEX #
# MyISAM branch #
# #
#------------------------------------------------------------------------------#
# Original Author: mleich #
# Original Date: 2006-03-05 #
# Change Author: pcrews #
# Change Date: 2008-05-05 #
# Change: Split up original partition_alter1.test file to better accommodate #
# PushBuild machines' workloads. Total run time for all components #
# is essentially the same, but max. single run time is significantly #
# reduced #
################################################################################
#
# NOTE: PLEASE DO NOT ADD NON-MYISAM-SPECIFIC TESTCASES HERE!
# TESTCASES WHICH MUST BE APPLIED TO ALL STORAGE ENGINES MUST BE ADDED IN
# THE SOURCED FILES ONLY.
#
# Please read the README at the end of inc/partition.pre before changing
# any of the variables.
#
#------------------------------------------------------------------------------#
# General not engine specific settings and requirements
##### Options, for debugging support #####
let $debug= 0;
let $with_partitioning= 1;
##### Option, for displaying files #####
let $ls= 1;
##### Number of rows for the INSERT/UPDATE/DELETE/SELECT experiments #####
# on partitioned tables
SET @max_row = 20;
##### Execute more tests #####
let $more_trigger_tests= 0;
let $more_pk_ui_tests= 0;
# The server must support partitioning.
--source include/have_partition.inc
#------------------------------------------------------------------------------#
# Engine specific settings and requirements
##### Storage engine to be tested
let $engine= 'MyISAM';
##### Execute the test of "table" files
# MyISAM has files per PK, UI, ...
let $do_file_tests= 1;
##### Execute PRIMARY KEY tests #####
# AFAIK MyISAM treats PRIMARY KEYs like UNIQUE INDEXes
let $do_pk_tests= 0;
##### Assign a big number smaller than the maximum value for partitions #####
# and smaller than the maximum value of SIGNED INTEGER
let $MAX_VALUE= (2147483646);
# Generate the prerequisites ($variables, @variables, tables) needed
--source suite/parts/inc/partition.pre
##### Workarounds for known open engine specific bugs
# none
#------------------------------------------------------------------------------#
# Execute the tests to be applied to all storage engines
--source suite/parts/inc/partition_alter1_1_2.inc
#------------------------------------------------------------------------------#
# Execute storage engine specific tests
#------------------------------------------------------------------------------#
# Cleanup
--source suite/parts/inc/partition_cleanup.inc
################################################################################
# t/partition_alter1_1_2_ndb.test #
# #
# Purpose: #
# Tests around ADD/DROP PRIMARY KEY and/or UNIQUE INDEX #
# NDB branch #
# #
#------------------------------------------------------------------------------#
# Original Author: mleich #
# Original Date: 2006-03-05 #
# Change Author: pcrews #
# Change Date: 2008-05-05 #
# Change: Split up original partition_alter1.test file to better accommodate #
# PushBuild machines' workloads. Total run time for all components #
# is essentially the same, but max. single run time is significantly #
# reduced #
################################################################################
#
# NOTE: PLEASE DO NOT ADD NON-NDB-SPECIFIC TESTCASES HERE!
# TESTCASES WHICH MUST BE APPLIED TO ALL STORAGE ENGINES MUST BE ADDED IN
# THE SOURCED FILES ONLY.
#
# Please read the README at the end of inc/partition.pre before changing
# any of the variables.
#
#------------------------------------------------------------------------------#
# General not engine specific settings and requirements
##### Options, for debugging support #####
let $debug= 0;
##### Option, for displaying files #####
let $ls= 1;
##### Number of rows for the INSERT/UPDATE/DELETE/SELECT experiments #####
# on partitioned tables
SET @max_row = 20;
##### Execute more tests #####
let $more_trigger_tests= 0;
let $more_pk_ui_tests= 0;
# The server must support partitioning. But NDB is partitioned from the start.
# That's why the next line is commented out.
# --source include/have_partition.inc
#------------------------------------------------------------------------------#
# Engine specific settings and requirements
##### Storage engine to be tested
--source include/have_ndb.inc
let $engine= 'ndbcluster';
connection default;
##### Execute the test of "table" files
# NDB has no files per PK, UI, ...
let $do_file_tests= 0;
##### Execute PRIMARY KEY tests #####
# AFAIK NDB is always partitioned using the explicitly defined PRIMARY KEY
# or uses an internal one.
let $do_pk_tests= 1;
##### Assign a big number smaller than the maximum value for partitions #####
# and smaller than the maximum value of SIGNED INTEGER
# The NDB handler only supports 32 bit integers in VALUES
# 2147483647 seems to be too big.
let $MAX_VALUE= (2147483646);
# Generate the prerequisites ($variables, @variables, tables) needed
--source suite/parts/inc/partition.pre
##### Workarounds for known open engine specific bugs
# Bug#18735: Partitions: NDB, UNIQUE INDEX, UPDATE, strange server response
let $fixed_bug18735= 1;
#------------------------------------------------------------------------------#
# Execute the tests to be applied to all storage engines
--source suite/parts/inc/partition_alter1_1_2.inc
#------------------------------------------------------------------------------#
# Execute storage engine specific tests
#------------------------------------------------------------------------------#
# Cleanup
--source suite/parts/inc/partition_cleanup.inc
################################################################################
# t/partition_alter1_1_innodb.test #
# #
# Purpose: #
# Tests around ADD/DROP PRIMARY KEY and/or UNIQUE INDEX #
# InnoDB branch #
# #
#------------------------------------------------------------------------------#
# Original Author: mleich #
# Original Date: 2006-03-05 #
# Change Author: pcrews #
# Change Date: 2008-05-05 #
# Change: Split up original partition_alter1.test file to better accommodate #
# PushBuild machines' workloads. Total run time for all components #
# is essentially the same, but max. single run time is significantly #
# reduced #
################################################################################
#
# NOTE: PLEASE DO NOT ADD NON-INNODB-SPECIFIC TESTCASES HERE!
# TESTCASES WHICH MUST BE APPLIED TO ALL STORAGE ENGINES MUST BE ADDED IN
# THE SOURCED FILES ONLY.
#
# Please read the README at the end of inc/partition.pre before changing
# any of the variables.
#
#------------------------------------------------------------------------------#
# General not engine specific settings and requirements
##### Options, for debugging support #####
let $debug= 0;
let $with_partitioning= 1;
##### Option, for displaying files #####
let $ls= 1;
##### Number of rows for the INSERT/UPDATE/DELETE/SELECT experiments #####
# on partitioned tables
SET @max_row = 20;
##### Execute more tests #####
let $more_trigger_tests= 0;
let $more_pk_ui_tests= 0;
# The server must support partitioning.
--source include/have_partition.inc
#------------------------------------------------------------------------------#
# Engine specific settings and requirements
##### Storage engine to be tested
--source include/have_innodb.inc
let $engine= 'InnoDB';
##### Execute the test of "table" files
# InnoDB has no files per PK, UI, ...
let $do_file_tests= 0;
##### Execute PRIMARY KEY tests #####
# AFAIK InnoDB clusters the table around PRIMARY KEYs.
let $do_pk_tests= 1;
##### Assign a big number smaller than the maximum value for partitions #####
# and smaller than the maximum value of SIGNED INTEGER
let $MAX_VALUE= (2147483646);
# Generate the prerequisites ($variables, @variables, tables) needed
--source suite/parts/inc/partition.pre
##### Workarounds for known open engine specific bugs
# none
#------------------------------------------------------------------------------#
# Execute the tests to be applied to all storage engines
--source suite/parts/inc/partition_alter1_1.inc
#------------------------------------------------------------------------------#
# Execute storage engine specific tests
#------------------------------------------------------------------------------#
# Cleanup
--source suite/parts/inc/partition_cleanup.inc
################################################################################
# t/partition_alter1_1_myisam.test #
# #
# Purpose: #
# Tests around ADD/DROP PRIMARY KEY and/or UNIQUE INDEX #
# MyISAM branch #
# #
#------------------------------------------------------------------------------#
# Original Author: mleich #
# Original Date: 2006-03-05 #
# Change Author: pcrews #
# Change Date: 2008-05-05 #
# Change: Split up original partition_alter1.test file to better accommodate #
# PushBuild machines' workloads. Total run time for all components #
# is essentially the same, but max. single run time is significantly #
# reduced #
################################################################################
#
# NOTE: PLEASE DO NOT ADD NON-MYISAM-SPECIFIC TESTCASES HERE!
# TESTCASES WHICH MUST BE APPLIED TO ALL STORAGE ENGINES MUST BE ADDED IN
# THE SOURCED FILES ONLY.
#
# Please read the README at the end of inc/partition.pre before changing
# any of the variables.
#
#------------------------------------------------------------------------------#
# General not engine specific settings and requirements
##### Options, for debugging support #####
let $debug= 0;
let $with_partitioning= 1;
##### Option, for displaying files #####
let $ls= 1;
##### Number of rows for the INSERT/UPDATE/DELETE/SELECT experiments #####
# on partitioned tables
SET @max_row = 20;
##### Execute more tests #####
let $more_trigger_tests= 0;
let $more_pk_ui_tests= 0;
# The server must support partitioning.
--source include/have_partition.inc
#------------------------------------------------------------------------------#
# Engine specific settings and requirements
##### Storage engine to be tested
let $engine= 'MyISAM';
##### Execute the test of "table" files
# MyISAM has files per PK, UI, ...
let $do_file_tests= 1;
##### Execute PRIMARY KEY tests #####
# AFAIK MyISAM treats PRIMARY KEYs like UNIQUE INDEXes
let $do_pk_tests= 0;
##### Assign a big number smaller than the maximum value for partitions #####
# and smaller than the maximum value of SIGNED INTEGER
let $MAX_VALUE= (2147483646);
# Generate the prerequisites ($variables, @variables, tables) needed
--source suite/parts/inc/partition.pre
##### Workarounds for known open engine specific bugs
# none
#------------------------------------------------------------------------------#
# Execute the tests to be applied to all storage engines
--source suite/parts/inc/partition_alter1_1.inc
#------------------------------------------------------------------------------#
# Execute storage engine specific tests
#------------------------------------------------------------------------------#
# Cleanup
--source suite/parts/inc/partition_cleanup.inc
################################################################################
# t/partition_alter1_1_ndb.test #
# #
# Purpose: #
# Tests around ADD/DROP PRIMARY KEY and/or UNIQUE INDEX #
# NDB branch #
# #
#------------------------------------------------------------------------------#
# Original Author: mleich #
# Original Date: 2006-03-05 #
# Change Author: pcrews #
# Change Date: 2008-05-05 #
# Change: Split up original partition_alter1.test file to better accommodate #
# PushBuild machines' workloads. Total run time for all components #
# is essentially the same, but max. single run time is significantly #
# reduced #
################################################################################
#
# NOTE: PLEASE DO NOT ADD NON-NDB-SPECIFIC TESTCASES HERE!
# TESTCASES WHICH MUST BE APPLIED TO ALL STORAGE ENGINES MUST BE ADDED IN
# THE SOURCED FILES ONLY.
#
# Please read the README at the end of inc/partition.pre before changing
# any of the variables.
#
#------------------------------------------------------------------------------#
# General not engine specific settings and requirements
##### Options, for debugging support #####
let $debug= 0;
##### Option, for displaying files #####
let $ls= 1;
##### Number of rows for the INSERT/UPDATE/DELETE/SELECT experiments #####
# on partitioned tables
SET @max_row = 20;
##### Execute more tests #####
let $more_trigger_tests= 0;
let $more_pk_ui_tests= 0;
# The server must support partitioning. But NDB is partitioned from the start.
# That's why the next line is commented out.
# --source include/have_partition.inc
#------------------------------------------------------------------------------#
# Engine specific settings and requirements
##### Storage engine to be tested
--source include/have_ndb.inc
let $engine= 'ndbcluster';
connection default;
##### Execute the test of "table" files
# NDB has no files per PK, UI, ...
let $do_file_tests= 0;
##### Execute PRIMARY KEY tests #####
# AFAIK NDB is always partitioned using the explicitly defined PRIMARY KEY
# or uses an internal one.
let $do_pk_tests= 1;
##### Assign a big number smaller than the maximum value for partitions #####
# and smaller than the maximum value of SIGNED INTEGER
# The NDB handler only supports 32 bit integers in VALUES
# 2147483647 seems to be too big.
let $MAX_VALUE= (2147483646);
# Generate the prerequisites ($variables, @variables, tables) needed
--source suite/parts/inc/partition.pre
##### Workarounds for known open engine specific bugs
# Bug#18735: Partitions: NDB, UNIQUE INDEX, UPDATE, strange server response
let $fixed_bug18735= 1;
#------------------------------------------------------------------------------#
# Execute the tests to be applied to all storage engines
--source suite/parts/inc/partition_alter1_1.inc
#------------------------------------------------------------------------------#
# Execute storage engine specific tests
#------------------------------------------------------------------------------#
# Cleanup
--source suite/parts/inc/partition_cleanup.inc
################################################################################
# t/partition_alter1_2_innodb.test #
# #
# Purpose: #
# Tests around ADD/DROP PRIMARY KEY and/or UNIQUE INDEX #
# InnoDB branch #
# #
#------------------------------------------------------------------------------#
# Original Author: mleich #
# Original Date: 2006-03-05 #
# Change Author: pcrews #
# Change Date: 2008-05-05 #
# Change: Split up original partition_alter1.test file to better accommodate #
# PushBuild machines' workloads. Total run time for all components #
# is essentially the same, but max. single run time is significantly #
# reduced #
################################################################################
#
# NOTE: PLEASE DO NOT ADD NON-INNODB-SPECIFIC TESTCASES HERE!
# TESTCASES WHICH MUST BE APPLIED TO ALL STORAGE ENGINES MUST BE ADDED IN
# THE SOURCED FILES ONLY.
#
# Please read the README at the end of inc/partition.pre before changing
# any of the variables.
#
#------------------------------------------------------------------------------#
# General not engine specific settings and requirements
##### Options, for debugging support #####
let $debug= 0;
let $with_partitioning= 1;
##### Option, for displaying files #####
let $ls= 1;
##### Number of rows for the INSERT/UPDATE/DELETE/SELECT experiments #####
# on partitioned tables
SET @max_row = 20;
##### Execute more tests #####
let $more_trigger_tests= 0;
let $more_pk_ui_tests= 0;
# The server must support partitioning.
--source include/have_partition.inc
#------------------------------------------------------------------------------#
# Engine specific settings and requirements
##### Storage engine to be tested
--source include/have_innodb.inc
let $engine= 'InnoDB';
##### Execute the test of "table" files
# InnoDB has no files per PK, UI, ...
let $do_file_tests= 0;
##### Execute PRIMARY KEY tests #####
# AFAIK InnoDB clusters the table around PRIMARY KEYs.
let $do_pk_tests= 1;
##### Assign a big number smaller than the maximum value for partitions #####
# and smaller than the maximum value of SIGNED INTEGER
let $MAX_VALUE= (2147483646);
# Generate the prerequisites ($variables, @variables, tables) needed
--source suite/parts/inc/partition.pre
##### Workarounds for known open engine specific bugs
# none
#------------------------------------------------------------------------------#
# Execute the tests to be applied to all storage engines
--source suite/parts/inc/partition_alter1_2.inc
#------------------------------------------------------------------------------#
# Execute storage engine specific tests
#------------------------------------------------------------------------------#
# Cleanup
--source suite/parts/inc/partition_cleanup.inc
################################################################################
# t/partition_alter1_2_myisam.test #
# #
# Purpose: #
# Tests around ADD/DROP PRIMARY KEY and/or UNIQUE INDEX #
# MyISAM branch #
# #
#------------------------------------------------------------------------------#
# Original Author: mleich #
# Original Date: 2006-03-05 #
# Change Author: pcrews #
# Change Date: 2008-05-05 #
# Change: Split up original partition_alter1.test file to better accommodate #
# PushBuild machines' workloads. Total run time for all components #
# is essentially the same, but max. single run time is significantly #
# reduced #
################################################################################
#
# NOTE: PLEASE DO NOT ADD NON-MYISAM-SPECIFIC TESTCASES HERE!
# TESTCASES WHICH MUST BE APPLIED TO ALL STORAGE ENGINES MUST BE ADDED IN
# THE SOURCED FILES ONLY.
#
# Please read the README at the end of inc/partition.pre before changing
# any of the variables.
#
#------------------------------------------------------------------------------#
# General not engine specific settings and requirements
##### Options, for debugging support #####
let $debug= 0;
let $with_partitioning= 1;
##### Option, for displaying files #####
let $ls= 1;
##### Number of rows for the INSERT/UPDATE/DELETE/SELECT experiments #####
# on partitioned tables
SET @max_row = 20;
##### Execute more tests #####
let $more_trigger_tests= 0;
let $more_pk_ui_tests= 0;
# The server must support partitioning.
--source include/have_partition.inc
#------------------------------------------------------------------------------#
# Engine specific settings and requirements
##### Storage engine to be tested
let $engine= 'MyISAM';
##### Execute the test of "table" files
# MyISAM has files per PK, UI, ...
let $do_file_tests= 1;
##### Execute PRIMARY KEY tests #####
# AFAIK MyISAM treats PRIMARY KEYs like UNIQUE INDEXes
let $do_pk_tests= 0;
##### Assign a big number smaller than the maximum value for partitions #####
# and smaller than the maximum value of SIGNED INTEGER
let $MAX_VALUE= (2147483646);
# Generate the prerequisites ($variables, @variables, tables) needed
--source suite/parts/inc/partition.pre
##### Workarounds for known open engine specific bugs
# none
#------------------------------------------------------------------------------#
# Execute the tests to be applied to all storage engines
--source suite/parts/inc/partition_alter1_2.inc
#------------------------------------------------------------------------------#
# Execute storage engine specific tests
#------------------------------------------------------------------------------#
# Cleanup
--source suite/parts/inc/partition_cleanup.inc
################################################################################
# t/partition_alter1_2_ndb.test #
# #
# Purpose: #
# Tests around ADD/DROP PRIMARY KEY and/or UNIQUE INDEX #
# NDB branch #
# #
#------------------------------------------------------------------------------#
# Original Author: mleich #
# Original Date: 2006-03-05 #
# Change Author: pcrews #
# Change Date: 2008-05-05 #
# Change: Split up original partition_alter1.test file to better accommodate #
# PushBuild machines' workloads. Total run time for all components #
# is essentially the same, but max. single run time is significantly #
# reduced #
################################################################################
#
# NOTE: PLEASE DO NOT ADD NON-NDB-SPECIFIC TESTCASES HERE!
# TESTCASES WHICH MUST BE APPLIED TO ALL STORAGE ENGINES MUST BE ADDED IN
# THE SOURCED FILES ONLY.
#
# Please read the README at the end of inc/partition.pre before changing
# any of the variables.
#
#------------------------------------------------------------------------------#
# General not engine specific settings and requirements
##### Options, for debugging support #####
let $debug= 0;
##### Option, for displaying files #####
let $ls= 1;
##### Number of rows for the INSERT/UPDATE/DELETE/SELECT experiments #####
# on partitioned tables
SET @max_row = 20;
##### Execute more tests #####
let $more_trigger_tests= 0;
let $more_pk_ui_tests= 0;
# The server must support partitioning. But NDB is partitioned from the start.
# That's why the next line is commented out.
# --source include/have_partition.inc
#------------------------------------------------------------------------------#
# Engine specific settings and requirements
##### Storage engine to be tested
--source include/have_ndb.inc
let $engine= 'ndbcluster';
connection default;
##### Execute the test of "table" files
# NDB has no files per PK, UI, ...
let $do_file_tests= 0;
##### Execute PRIMARY KEY tests #####
# AFAIK NDB is always partitioned using the explicitly defined PRIMARY KEY
# or uses an internal one.
let $do_pk_tests= 1;
##### Assign a big number smaller than the maximum value for partitions #####
# and smaller than the maximum value of SIGNED INTEGER
# The NDB handler only supports 32 bit integers in VALUES
# 2147483647 seems to be too big.
let $MAX_VALUE= (2147483646);
# Generate the prerequisites ($variables, @variables, tables) needed
--source suite/parts/inc/partition.pre
##### Workarounds for known open engine specific bugs
# Bug#18735: Partitions: NDB, UNIQUE INDEX, UPDATE, strange server response
let $fixed_bug18735= 1;
#------------------------------------------------------------------------------#
# Execute the tests to be applied to all storage engines
--source suite/parts/inc/partition_alter1_2.inc
#------------------------------------------------------------------------------#
# Execute storage engine specific tests
#------------------------------------------------------------------------------#
# Cleanup
--source suite/parts/inc/partition_cleanup.inc
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
==== 0. Setting it all up ====
SET BINLOG_FORMAT=STATEMENT;
**** On Master ****
CREATE TABLE t1 (a INT);
CREATE TABLE logtbl (sect INT, test INT, count INT);
INSERT INTO t1 VALUES (1),(2),(3);
INSERT INTO t1 SELECT 2*a+3 FROM t1;
INSERT INTO t1 SELECT 2*a+3 FROM t1;
INSERT INTO t1 SELECT 2*a+3 FROM t1;
INSERT INTO t1 SELECT 2*a+3 FROM t1;
INSERT INTO t1 SELECT 2*a+3 FROM t1;
INSERT INTO t1 SELECT 2*a+3 FROM t1;
#### 1. Using statement mode ####
==== 1.1. Simple test ====
SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE a > 5 ORDER BY a LIMIT 1;
a
7
SELECT FOUND_ROWS() INTO @a;
INSERT INTO logtbl VALUES(1,1,@a);
SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE a < 5 ORDER BY a LIMIT 1;
a
1
SELECT FOUND_ROWS() INTO @a;
INSERT INTO logtbl VALUES(1,2,@a);
SELECT * FROM logtbl WHERE sect = 1 ORDER BY sect,test;
sect test count
1 1 183
1 2 3
**** On Slave ****
SELECT * FROM logtbl WHERE sect = 1 ORDER BY sect,test;
sect test count
1 1 183
1 2 3
==== 1.2. Stored procedure ====
**** On Master ****
CREATE PROCEDURE calc_and_log(sect INT, test INT) BEGIN
DECLARE cnt INT;
SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE a < 5 ORDER BY a LIMIT 1;
SELECT FOUND_ROWS() INTO cnt;
INSERT INTO logtbl VALUES(sect,test,cnt);
SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE a > 5 ORDER BY a LIMIT 1;
SELECT FOUND_ROWS() INTO cnt;
INSERT INTO logtbl VALUES(sect,test+1,cnt);
END $$
CALL calc_and_log(2,1);
a
1
a
7
CREATE PROCEDURE just_log(sect INT, test INT, found_rows INT) BEGIN
INSERT INTO logtbl VALUES (sect,test,found_rows);
END $$
SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE a > 5 ORDER BY a LIMIT 1;
a
7
SELECT FOUND_ROWS() INTO @found_rows;
CALL just_log(2,3,@found_rows);
SELECT * FROM logtbl WHERE sect = 2 ORDER BY sect,test;
sect test count
2 1 3
2 2 183
2 3 183
**** On Slave ****
SELECT * FROM logtbl WHERE sect = 2 ORDER BY sect,test;
sect test count
2 1 3
2 2 183
2 3 183
==== 1.3. Stored functions ====
**** On Master ****
CREATE FUNCTION log_rows(sect INT, test INT, found_rows INT)
RETURNS INT
BEGIN
INSERT INTO logtbl VALUES(sect,test,found_rows);
RETURN found_rows;
END $$
SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE a > 5 ORDER BY a LIMIT 1;
a
7
SELECT FOUND_ROWS() INTO @found_rows;
SELECT log_rows(3,1,@found_rows), log_rows(3,2,@found_rows);
log_rows(3,1,@found_rows) log_rows(3,2,@found_rows)
183 183
SELECT * FROM logtbl WHERE sect = 3 ORDER BY sect,test;
sect test count
3 1 183
3 2 183
**** On Slave ****
SELECT * FROM logtbl WHERE sect = 3 ORDER BY sect,test;
sect test count
3 1 183
3 2 183
==== 1.9. Cleanup ====
**** On Master ****
DELETE FROM logtbl;
DROP PROCEDURE just_log;
DROP PROCEDURE calc_and_log;
DROP FUNCTION log_rows;
**** Resetting master and slave ****
STOP SLAVE;
RESET SLAVE;
RESET MASTER;
START SLAVE;
#### 2. Using mixed mode ####
==== 2.1. Checking a procedure ====
**** On Master ****
SET BINLOG_FORMAT=MIXED;
CREATE PROCEDURE just_log(sect INT, test INT) BEGIN
INSERT INTO logtbl VALUES (sect,test,FOUND_ROWS());
END $$
**** On Master 1 ****
SET BINLOG_FORMAT=MIXED;
SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE a > 5 ORDER BY a LIMIT 1;
a
7
CALL just_log(1,1);
**** On Master ****
SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE a > 5 ORDER BY a LIMIT 1;
a
7
CALL just_log(1,2);
**** On Master 1 ****
SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE a < 5 ORDER BY a LIMIT 1;
a
1
CALL just_log(1,3);
**** On Master ****
SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE a > 5 ORDER BY a LIMIT 1;
a
7
CALL just_log(1,4);
SELECT * FROM logtbl WHERE sect = 1 ORDER BY sect,test;
sect test count
1 1 183
1 2 183
1 3 3
1 4 183
**** On Slave ****
SELECT * FROM logtbl WHERE sect = 1 ORDER BY sect,test;
sect test count
1 1 183
1 2 183
1 3 3
1 4 183
==== 2.1. Checking a stored function ====
**** On Master ****
CREATE FUNCTION log_rows(sect INT, test INT)
RETURNS INT
BEGIN
DECLARE found_rows INT;
SELECT FOUND_ROWS() INTO found_rows;
INSERT INTO logtbl VALUES(sect,test,found_rows);
RETURN found_rows;
END $$
SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE a < 5 ORDER BY a LIMIT 1;
a
1
SELECT log_rows(2,1), log_rows(2,2);
log_rows(2,1) log_rows(2,2)
3 3
CREATE TABLE t2 (a INT, b INT);
CREATE TRIGGER t2_tr BEFORE INSERT ON t2 FOR EACH ROW
BEGIN
INSERT INTO logtbl VALUES (NEW.a, NEW.b, FOUND_ROWS());
END $$
SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE a < 5 ORDER BY a LIMIT 1;
a
1
INSERT INTO t2 VALUES (2,3), (2,4);
DROP TRIGGER t2_tr;
CREATE TRIGGER t2_tr BEFORE INSERT ON t2 FOR EACH ROW
BEGIN
DECLARE dummy INT;
SELECT log_rows(NEW.a, NEW.b) INTO dummy;
END $$
SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE a > 5 ORDER BY a LIMIT 1;
a
7
INSERT INTO t2 VALUES (2,5), (2,6);
DROP TRIGGER t2_tr;
CREATE PROCEDURE log_me_inner(sect INT, test INT)
BEGIN
DECLARE dummy INT;
SELECT log_rows(sect, test) INTO dummy;
SELECT log_rows(sect, test+1) INTO dummy;
END $$
CREATE PROCEDURE log_me(sect INT, test INT)
BEGIN
CALL log_me_inner(sect,test);
END $$
CREATE TRIGGER t2_tr BEFORE INSERT ON t2 FOR EACH ROW
BEGIN
CALL log_me(NEW.a, NEW.b);
END $$
SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE a > 5 ORDER BY a LIMIT 1;
a
7
INSERT INTO t2 VALUES (2,5), (2,6);
SELECT * FROM logtbl WHERE sect = 2 ORDER BY sect,test;
sect test count
2 1 3
2 2 3
2 3 3
2 4 3
2 5 183
2 5 183
2 6 183
2 6 0
2 6 183
2 7 0
SELECT * FROM logtbl WHERE sect = 2 ORDER BY sect,test;
sect test count
2 1 3
2 2 3
2 3 3
2 4 3
2 5 183
2 5 183
2 6 183
2 6 0
2 6 183
2 7 0
DROP TABLE t1, t2, logtbl;
DROP PROCEDURE just_log;
DROP PROCEDURE log_me;
DROP PROCEDURE log_me_inner;
DROP FUNCTION log_rows;
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
create table t1 (a int not null primary key);
insert into t1 values (1);
create table t2 (a int);
insert into t2 values (1);
update t1, t2 set t1.a = 0 where t1.a = t2.a;
show tables;
Tables_in_test
t1
select * from t1;
a
0
drop table t1;
insert into t1 values (1);
SHOW SLAVE STATUS;
Slave_IO_State #
Master_Host 127.0.0.1
Master_User root
Master_Port MASTER_PORT
Connect_Retry 1
Master_Log_File master-bin.000001
Read_Master_Log_Pos 1153
Relay_Log_File #
Relay_Log_Pos #
Relay_Master_Log_File master-bin.000001
Slave_IO_Running Yes
Slave_SQL_Running No
Replicate_Do_DB
Replicate_Ignore_DB
Replicate_Do_Table
Replicate_Ignore_Table #
Replicate_Wild_Do_Table
Replicate_Wild_Ignore_Table
Last_Errno 1146
Last_Error Error 'Table 'test.t1' doesn't exist' on opening tables
Skip_Counter 0
Exec_Master_Log_Pos 941
Relay_Log_Space #
Until_Condition None
Until_Log_File
Until_Log_Pos 0
Master_SSL_Allowed No
Master_SSL_CA_File
Master_SSL_CA_Path
Master_SSL_Cert
Master_SSL_Cipher
Master_SSL_Key
Seconds_Behind_Master #
Master_SSL_Verify_Server_Cert No
Last_IO_Errno #
Last_IO_Error #
Last_SQL_Errno 1146
Last_SQL_Error Error 'Table 'test.t1' doesn't exist' on opening tables
drop table t1, t2;
source include/master-slave.inc;
# It is not possible to replicate FOUND_ROWS() using statement-based
# replication, but there is a workaround that stores the result of
# FOUND_ROWS() into a user variable and then replicates this instead.
# The purpose of this test case is to test that the workaround
# functions properly even when used inside stored programs (i.e., stored
# routines and triggers).
--echo ==== 0. Setting it all up ====
SET BINLOG_FORMAT=STATEMENT;
--echo **** On Master ****
connection master;
CREATE TABLE t1 (a INT);
CREATE TABLE logtbl (sect INT, test INT, count INT);
INSERT INTO t1 VALUES (1),(2),(3);
INSERT INTO t1 SELECT 2*a+3 FROM t1;
INSERT INTO t1 SELECT 2*a+3 FROM t1;
INSERT INTO t1 SELECT 2*a+3 FROM t1;
INSERT INTO t1 SELECT 2*a+3 FROM t1;
INSERT INTO t1 SELECT 2*a+3 FROM t1;
INSERT INTO t1 SELECT 2*a+3 FROM t1;
--echo #### 1. Using statement mode ####
--echo ==== 1.1. Simple test ====
SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE a > 5 ORDER BY a LIMIT 1;
# Instead of
# INSERT INTO logtbl VALUES(1, 1, FOUND_ROWS());
# we write
SELECT FOUND_ROWS() INTO @a;
INSERT INTO logtbl VALUES(1,1,@a);
SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE a < 5 ORDER BY a LIMIT 1;
# Instead of
# INSERT INTO logtbl VALUES(1, 2, FOUND_ROWS());
# we write
SELECT FOUND_ROWS() INTO @a;
INSERT INTO logtbl VALUES(1,2,@a);
SELECT * FROM logtbl WHERE sect = 1 ORDER BY sect,test;
--echo **** On Slave ****
sync_slave_with_master;
SELECT * FROM logtbl WHERE sect = 1 ORDER BY sect,test;
--echo ==== 1.2. Stored procedure ====
# Here we do both the calculation and the logging. We also do it twice
# to make sure that there are no limitations on how many times it can
# be used.
--echo **** On Master ****
connection master;
--delimiter $$
CREATE PROCEDURE calc_and_log(sect INT, test INT) BEGIN
DECLARE cnt INT;
SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE a < 5 ORDER BY a LIMIT 1;
SELECT FOUND_ROWS() INTO cnt;
INSERT INTO logtbl VALUES(sect,test,cnt);
SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE a > 5 ORDER BY a LIMIT 1;
SELECT FOUND_ROWS() INTO cnt;
INSERT INTO logtbl VALUES(sect,test+1,cnt);
END $$
--delimiter ;
CALL calc_and_log(2,1);
--delimiter $$
CREATE PROCEDURE just_log(sect INT, test INT, found_rows INT) BEGIN
INSERT INTO logtbl VALUES (sect,test,found_rows);
END $$
--delimiter ;
SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE a > 5 ORDER BY a LIMIT 1;
SELECT FOUND_ROWS() INTO @found_rows;
CALL just_log(2,3,@found_rows);
SELECT * FROM logtbl WHERE sect = 2 ORDER BY sect,test;
--echo **** On Slave ****
sync_slave_with_master;
SELECT * FROM logtbl WHERE sect = 2 ORDER BY sect,test;
--echo ==== 1.3. Stored functions ====
--echo **** On Master ****
connection master;
--delimiter $$
CREATE FUNCTION log_rows(sect INT, test INT, found_rows INT)
RETURNS INT
BEGIN
INSERT INTO logtbl VALUES(sect,test,found_rows);
RETURN found_rows;
END $$
--delimiter ;
SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE a > 5 ORDER BY a LIMIT 1;
SELECT FOUND_ROWS() INTO @found_rows;
SELECT log_rows(3,1,@found_rows), log_rows(3,2,@found_rows);
SELECT * FROM logtbl WHERE sect = 3 ORDER BY sect,test;
--echo **** On Slave ****
sync_slave_with_master;
SELECT * FROM logtbl WHERE sect = 3 ORDER BY sect,test;
--echo ==== 1.9. Cleanup ====
--echo **** On Master ****
connection master;
DELETE FROM logtbl;
DROP PROCEDURE just_log;
DROP PROCEDURE calc_and_log;
DROP FUNCTION log_rows;
sync_slave_with_master;
source include/reset_master_and_slave.inc;
--echo #### 2. Using mixed mode ####
--echo ==== 2.1. Checking a procedure ====
--echo **** On Master ****
connection master;
SET BINLOG_FORMAT=MIXED;
# We will now check some constructs that will not work in statement-based
# replication, but which should cause the binary log to switch to
# row-based logging.
--delimiter $$
CREATE PROCEDURE just_log(sect INT, test INT) BEGIN
INSERT INTO logtbl VALUES (sect,test,FOUND_ROWS());
END $$
--delimiter ;
sync_slave_with_master;
--echo **** On Master 1 ****
connection master1;
SET BINLOG_FORMAT=MIXED;
SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE a > 5 ORDER BY a LIMIT 1;
CALL just_log(1,1);
--echo **** On Master ****
connection master;
SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE a > 5 ORDER BY a LIMIT 1;
CALL just_log(1,2);
--echo **** On Master 1 ****
connection master1;
SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE a < 5 ORDER BY a LIMIT 1;
CALL just_log(1,3);
sync_slave_with_master;
--echo **** On Master ****
connection master;
SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE a > 5 ORDER BY a LIMIT 1;
CALL just_log(1,4);
sync_slave_with_master;
connection master;
SELECT * FROM logtbl WHERE sect = 1 ORDER BY sect,test;
--echo **** On Slave ****
sync_slave_with_master;
SELECT * FROM logtbl WHERE sect = 1 ORDER BY sect,test;
--echo ==== 2.1. Checking a stored function ====
--echo **** On Master ****
connection master;
--delimiter $$
CREATE FUNCTION log_rows(sect INT, test INT)
RETURNS INT
BEGIN
DECLARE found_rows INT;
SELECT FOUND_ROWS() INTO found_rows;
INSERT INTO logtbl VALUES(sect,test,found_rows);
RETURN found_rows;
END $$
--delimiter ;
SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE a < 5 ORDER BY a LIMIT 1;
SELECT log_rows(2,1), log_rows(2,2);
CREATE TABLE t2 (a INT, b INT);
# Trying with referencing FOUND_ROWS() directly in the trigger.
--delimiter $$
CREATE TRIGGER t2_tr BEFORE INSERT ON t2 FOR EACH ROW
BEGIN
INSERT INTO logtbl VALUES (NEW.a, NEW.b, FOUND_ROWS());
END $$
--delimiter ;
SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE a < 5 ORDER BY a LIMIT 1;
INSERT INTO t2 VALUES (2,3), (2,4);
# Referencing FOUND_ROWS() indirectly.
DROP TRIGGER t2_tr;
--delimiter $$
CREATE TRIGGER t2_tr BEFORE INSERT ON t2 FOR EACH ROW
BEGIN
DECLARE dummy INT;
SELECT log_rows(NEW.a, NEW.b) INTO dummy;
END $$
--delimiter ;
SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE a > 5 ORDER BY a LIMIT 1;
INSERT INTO t2 VALUES (2,5), (2,6);
# Putting FOUND_ROWS() even lower in the call chain.
connection master;
DROP TRIGGER t2_tr;
--delimiter $$
CREATE PROCEDURE log_me_inner(sect INT, test INT)
BEGIN
DECLARE dummy INT;
SELECT log_rows(sect, test) INTO dummy;
SELECT log_rows(sect, test+1) INTO dummy;
END $$
CREATE PROCEDURE log_me(sect INT, test INT)
BEGIN
CALL log_me_inner(sect,test);
END $$
--delimiter ;
--delimiter $$
CREATE TRIGGER t2_tr BEFORE INSERT ON t2 FOR EACH ROW
BEGIN
CALL log_me(NEW.a, NEW.b);
END $$
--delimiter ;
SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE a > 5 ORDER BY a LIMIT 1;
INSERT INTO t2 VALUES (2,5), (2,6);
SELECT * FROM logtbl WHERE sect = 2 ORDER BY sect,test;
sync_slave_with_master;
SELECT * FROM logtbl WHERE sect = 2 ORDER BY sect,test;
connection master;
DROP TABLE t1, t2, logtbl;
DROP PROCEDURE just_log;
DROP PROCEDURE log_me;
DROP PROCEDURE log_me_inner;
DROP FUNCTION log_rows;
sync_slave_with_master;
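As the comment at the start of section 2 notes, referencing FOUND_ROWS() inside stored programs is unsafe for statement-based logging and should make the server fall back to row events under BINLOG_FORMAT=MIXED. A hedged manual check (an assumption, not part of this test) is to inspect the binary log and look for Table_map/Write_rows events, rather than Query events, for the affected INSERTs:

# Hypothetical manual verification (assumption)
SHOW BINLOG EVENTS IN 'master-bin.000001';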
#################################
# Wrapper for rpl_insert_id.test#
#################################
########################################################
# By JBM 2005-02-15 Wrapped to allow reuse of test code#
# Added to skip if ndb is default #
########################################################
-- source include/not_ndb_default.inc
-- source include/have_innodb.inc
let $engine_type=myisam;
-- source extra/rpl_tests/rpl_insert_id.test
--source include/master-slave.inc
############################################################################
# Test case for BUG#10780
#
# REQUIREMENT
# A slave without replication privileges should have Slave_IO_Running = No
# 1. Create new replication user
connection master;
grant replication slave on *.* to rpl@127.0.0.1 identified by 'rpl';
connection slave;
stop slave;
change master to master_user='rpl',master_password='rpl';
start slave;
# 2. Do replication as new user
connection master;
--disable_warnings
drop table if exists t1;
--enable_warnings
create table t1 (n int);
insert into t1 values (1);
save_master_pos;
connection slave;
sync_with_master;
select * from t1;
# 3. Delete new replication user
connection master;
delete from mysql.user where user='rpl';
flush privileges;
connection slave;
# 4. Restart slave without privileges
# (slave.err will contain access denied error for this START SLAVE command)
stop slave;
source include/wait_for_slave_to_stop.inc;
start slave;
source include/wait_for_slave_sql_to_start.inc;
# 5. Make sure Slave_IO_Running = No
--replace_result $MASTER_MYPORT MASTER_MYPORT
# Column 1 is replaced, since the output can be either
# "Connecting to master" or "Waiting for master update"
--replace_column 1 # 7 # 8 # 9 # 22 # 23 # 35 # 36 #
query_vertical show slave status;
# Cleanup (Note that slave IO thread is not running)
connection slave;
drop table t1;
delete from mysql.user where user='rpl';
# cleanup: slave io thread has been stopped "irrecoverably"
# so we clean up the mess manually
connection master;
drop table t1;
# end of 4.1 tests
@@ -3225,6 +3225,18 @@ select t1.a from t1 where
drop table t1, t2;
#
# Bug #36011: Server crash with explain extended on query with dependent
# subqueries
#
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1),(2);
EXPLAIN EXTENDED SELECT 1 FROM t1 WHERE 1 IN (SELECT 1 FROM t1 GROUP BY a);
EXPLAIN EXTENDED SELECT 1 FROM t1 WHERE 1 IN (SELECT 1 FROM t1 WHERE a > 3 GROUP BY a);
DROP TABLE t1;
--echo End of 5.0 tests.
#
......
@@ -2188,11 +2188,12 @@ JOIN::exec()
   /*
     With EXPLAIN EXTENDED we have to restore original ref_array
     for a derived table which is always materialized.
-    Otherwise we would not be able to print the query correctly.
+    We also need to do this when we have temp table(s).
+    Otherwise we would not be able to print the query correctly.
   */
-  if (items0 &&
-      (thd->lex->describe & DESCRIBE_EXTENDED) &&
-      select_lex->linkage == DERIVED_TABLE_TYPE)
+  if (items0 && (thd->lex->describe & DESCRIBE_EXTENDED) &&
+      (select_lex->linkage == DERIVED_TABLE_TYPE ||
+       exec_tmp_table1 || exec_tmp_table2))
     set_items_ref_array(items0);
   DBUG_VOID_RETURN;
......