Commit a74d0467 authored by Michael Widenius

Fixed warnings and build failures after last push

Speed up some PBXT tests by adding begin...commit around the creation of the test tables.
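
The begin...commit wrapping matters because the slow tests populate their tables one row per statement inside a mysqltest while-loop; with autocommit enabled, every INSERT becomes its own transaction in a transactional engine such as PBXT, so wrapping the loop in one transaction turns thousands of commits into a single one. A rough sketch of the same batching idea via the MySQL C API (connection parameters are placeholders, not part of this commit):

  #include <mysql.h>
  #include <cstdio>

  int main()
  {
    MYSQL *conn= mysql_init(NULL);
    if (!mysql_real_connect(conn, "localhost", "root", "", "test", 0, NULL, 0))
      return 1;

    mysql_query(conn, "CREATE TABLE t1 (n INT NOT NULL, KEY(n))");
    mysql_query(conn, "BEGIN");                  /* one transaction for the whole batch */
    for (int i= 10000; i > 0; i--)
    {
      char stmt[64];
      std::snprintf(stmt, sizeof(stmt), "INSERT INTO t1 VALUES (%d)", i);
      mysql_query(conn, stmt);
    }
    mysql_query(conn, "COMMIT");                 /* one commit instead of 10000 */
    mysql_close(conn);
    return 0;
  }

The begin;/commit; lines added to check.test, count_distinct2.test and derived.test below are the mysqltest form of exactly this pattern.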

include/my_base.h:
  Fixed wrong constant
mysql-test/mysql-test-run.pl:
  Print MariaDB instead of MySQL
mysql-test/r/range.result:
  Moved the test that requires partitioning to parts.optimizer
mysql-test/suite/innodb_plugin/t/disabled.def:
  Disabled a test that causes a valgrind warning about unreleased memory in XtraDB
mysql-test/suite/parts/r/optimizer.result:
  Moved from range.result
mysql-test/suite/parts/t/optimizer.test:
  Moved from range.test
mysql-test/suite/pbxt/r/join_nested.result:
  Updated results after optimizer changes
mysql-test/suite/pbxt/r/renamedb.result:
  Updated test for new error message
mysql-test/suite/pbxt/t/check.test:
  Speed up test
mysql-test/suite/pbxt/t/count_distinct2.test:
  Speed up test
mysql-test/suite/pbxt/t/derived.test:
  Speed up test
mysql-test/suite/pbxt/t/renamedb.test:
  Updated test for new error message
mysql-test/suite/rpl/r/rpl_log_pos.result:
  Updated results
mysql-test/suite/rpl/t/rpl_log_pos.test:
  Updated the test to read from a position that contains 'known wrong' data.
  The original test read a timestamp, so the error message could differ between runs.
mysql-test/suite/rpl/t/rpl_temporary_errors.test:
  Sync to slave to make test predictable
mysql-test/t/events_time_zone.test:
  Extend wait to make test predictable
mysql-test/t/range.test:
  Moved the test that requires partitioning to parts.optimizer
sql/sql_list.h:
  Fixed compiler warning
sql/sql_load.cc:
  Fixed a buffer that was not freed in some error conditions
tests/mysql_client_test.c:
  Fixed compiler warning
parent 80204e05
......@@ -272,7 +272,7 @@ enum ha_base_keytype {
#define HA_USES_PARSER 16384 /* Fulltext index uses [pre]parser */
#define HA_USES_BLOCK_SIZE ((uint) 32768)
#define HA_SORT_ALLOWS_SAME 512 /* Intern bit when sorting records */
-#if MYSQL_VERSION_ID < 0x50200
+#if MYSQL_VERSION_ID < 50200
/*
Key has a part that can have end space. If this is an unique key
we have to handle it differently from other unique keys as we can find
......
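
For context on the 'wrong constant': MYSQL_VERSION_ID is a plain decimal encoding, major*10000 + minor*100 + patch (the same formula mysql-test-run.pl uses below), so version 5.2.0 is 50200. The removed hex literal 0x50200 equals 328192, far above any real version id, so the old guard could never expire. A small self-contained check, assuming only that formula:

  #include <cstdio>

  static int mysql_version_id(int major, int minor, int patch)
  {
    return major * 10000 + minor * 100 + patch;   /* same as $1*10000 + $2*100 + $3 */
  }

  int main()
  {
    std::printf("5.1.50 -> %d\n", mysql_version_id(5, 1, 50));  /* 50150, below the guard */
    std::printf("5.2.0  -> %d\n", mysql_version_id(5, 2, 0));   /* 50200, where it should expire */
    std::printf("0x50200 = %d\n", 0x50200);                     /* 328192, never reached */
    return 0;
  }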
......@@ -1601,7 +1601,7 @@ sub collect_mysqld_features {
#print "Major: $1 Minor: $2 Build: $3\n";
$mysql_version_id= $1*10000 + $2*100 + $3;
#print "mysql_version_id: $mysql_version_id\n";
mtr_report("MySQL Version $1.$2.$3");
mtr_report("MariaDB Version $1.$2.$3");
}
}
else
......
......@@ -1653,48 +1653,4 @@ a b
0 0
1 1
DROP TABLE t1;
#
# Bug#50939: Loose Index Scan unduly relies on engine to remember range
# endpoints
#
CREATE TABLE t1 (
a INT,
b INT,
KEY ( a, b )
) PARTITION BY HASH (a) PARTITIONS 1;
CREATE TABLE t2 (
a INT,
b INT,
KEY ( a, b )
);
INSERT INTO t1 VALUES (1, 1), (2, 2), (3, 3), (4, 4), (5, 5);
INSERT INTO t1 SELECT a + 5, b + 5 FROM t1;
INSERT INTO t1 SELECT a + 10, b + 10 FROM t1;
INSERT INTO t1 SELECT a + 20, b + 20 FROM t1;
INSERT INTO t1 SELECT a + 40, b + 40 FROM t1;
INSERT INTO t2 SELECT * FROM t1;
# plans should be identical
EXPLAIN SELECT a, MAX(b) FROM t1 WHERE a IN (10,100) GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range a a 5 NULL 1 Using where; Using index for group-by
EXPLAIN SELECT a, MAX(b) FROM t2 WHERE a IN (10,100) GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 range a a 5 NULL 2 Using where; Using index for group-by
FLUSH status;
SELECT a, MAX(b) FROM t1 WHERE a IN (10, 100) GROUP BY a;
a MAX(b)
10 10
# Should be no more than 4 reads.
SHOW status LIKE 'handler_read_key';
Variable_name Value
Handler_read_key 4
FLUSH status;
SELECT a, MAX(b) FROM t2 WHERE a IN (10, 100) GROUP BY a;
a MAX(b)
10 10
# Should be no more than 4 reads.
SHOW status LIKE 'handler_read_key';
Variable_name Value
Handler_read_key 4
DROP TABLE t1, t2;
End of 5.1 tests
......@@ -9,3 +9,5 @@
# Do not use any TAB characters for whitespace.
#
##############################################################################
+innodb-use-sys-malloc : Waiting for bugfix from Percona for LP#612600
drop table if exists t1,t2;
#
# Bug#50939: Loose Index Scan unduly relies on engine to remember range
# endpoints
#
CREATE TABLE t1 (
a INT,
b INT,
KEY ( a, b )
) PARTITION BY HASH (a) PARTITIONS 1;
CREATE TABLE t2 (
a INT,
b INT,
KEY ( a, b )
);
INSERT INTO t1 VALUES (1, 1), (2, 2), (3, 3), (4, 4), (5, 5);
INSERT INTO t1 SELECT a + 5, b + 5 FROM t1;
INSERT INTO t1 SELECT a + 10, b + 10 FROM t1;
INSERT INTO t1 SELECT a + 20, b + 20 FROM t1;
INSERT INTO t1 SELECT a + 40, b + 40 FROM t1;
INSERT INTO t2 SELECT * FROM t1;
# plans should be identical
EXPLAIN SELECT a, MAX(b) FROM t1 WHERE a IN (10,100) GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range a a 5 NULL 1 Using where; Using index for group-by
EXPLAIN SELECT a, MAX(b) FROM t2 WHERE a IN (10,100) GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 range a a 5 NULL 2 Using where; Using index for group-by
FLUSH status;
SELECT a, MAX(b) FROM t1 WHERE a IN (10, 100) GROUP BY a;
a MAX(b)
10 10
# Should be no more than 4 reads.
SHOW status LIKE 'handler_read_key';
Variable_name Value
Handler_read_key 4
FLUSH status;
SELECT a, MAX(b) FROM t2 WHERE a IN (10, 100) GROUP BY a;
a MAX(b)
10 10
# Should be no more than 4 reads.
SHOW status LIKE 'handler_read_key';
Variable_name Value
Handler_read_key 4
DROP TABLE t1, t2;
# The server must support partitioning.
--source include/have_partition.inc
--disable_warnings
drop table if exists t1,t2;
--enable_warnings
--echo #
--echo # Bug#50939: Loose Index Scan unduly relies on engine to remember range
--echo # endpoints
--echo #
CREATE TABLE t1 (
a INT,
b INT,
KEY ( a, b )
) PARTITION BY HASH (a) PARTITIONS 1;
CREATE TABLE t2 (
a INT,
b INT,
KEY ( a, b )
);
INSERT INTO t1 VALUES (1, 1), (2, 2), (3, 3), (4, 4), (5, 5);
INSERT INTO t1 SELECT a + 5, b + 5 FROM t1;
INSERT INTO t1 SELECT a + 10, b + 10 FROM t1;
INSERT INTO t1 SELECT a + 20, b + 20 FROM t1;
INSERT INTO t1 SELECT a + 40, b + 40 FROM t1;
INSERT INTO t2 SELECT * FROM t1;
--echo # plans should be identical
EXPLAIN SELECT a, MAX(b) FROM t1 WHERE a IN (10,100) GROUP BY a;
EXPLAIN SELECT a, MAX(b) FROM t2 WHERE a IN (10,100) GROUP BY a;
FLUSH status;
SELECT a, MAX(b) FROM t1 WHERE a IN (10, 100) GROUP BY a;
--echo # Should be no more than 4 reads.
SHOW status LIKE 'handler_read_key';
FLUSH status;
SELECT a, MAX(b) FROM t2 WHERE a IN (10, 100) GROUP BY a;
--echo # Should be no more than 4 reads.
SHOW status LIKE 'handler_read_key';
DROP TABLE t1, t2;
......@@ -958,15 +958,15 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t0 ALL NULL NULL NULL NULL 3 100.00 Using where
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join buffer
1 SIMPLE t2 ALL NULL NULL NULL NULL 3 100.00 Using where
-1 SIMPLE t4 ref idx_b idx_b 5 test.t2.b 1 100.00
1 SIMPLE t3 ALL NULL NULL NULL NULL 2 100.00 Using where
+1 SIMPLE t4 ref idx_b idx_b 5 test.t2.b 1 100.00 Using where
1 SIMPLE t5 ALL idx_b NULL NULL NULL 3 100.00 Using where
1 SIMPLE t7 ALL NULL NULL NULL NULL 2 100.00 Using where
1 SIMPLE t6 ALL NULL NULL NULL NULL 3 100.00 Using where
1 SIMPLE t8 ALL NULL NULL NULL NULL 2 100.00 Using where
1 SIMPLE t9 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join buffer
Warnings:
Note 1003 select `test`.`t0`.`a` AS `a`,`test`.`t0`.`b` AS `b`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS `b`,`test`.`t4`.`a` AS `a`,`test`.`t4`.`b` AS `b`,`test`.`t5`.`a` AS `a`,`test`.`t5`.`b` AS `b`,`test`.`t6`.`a` AS `a`,`test`.`t6`.`b` AS `b`,`test`.`t7`.`a` AS `a`,`test`.`t7`.`b` AS `b`,`test`.`t8`.`a` AS `a`,`test`.`t8`.`b` AS `b`,`test`.`t9`.`a` AS `a`,`test`.`t9`.`b` AS `b` from `test`.`t0` join `test`.`t1` left join (`test`.`t2` left join (`test`.`t3` join `test`.`t4`) on(((`test`.`t4`.`b` = `test`.`t2`.`b`) and (`test`.`t3`.`a` = 1))) join `test`.`t5` left join (`test`.`t6` join `test`.`t7` left join `test`.`t8` on(((`test`.`t8`.`b` = `test`.`t5`.`b`) and (`test`.`t6`.`b` < 10)))) on(((`test`.`t7`.`b` = `test`.`t5`.`b`) and (`test`.`t6`.`b` >= 2)))) on((((`test`.`t3`.`b` = 2) or isnull(`test`.`t3`.`c`)) and ((`test`.`t6`.`b` = 2) or isnull(`test`.`t6`.`c`)) and ((`test`.`t5`.`b` = `test`.`t0`.`b`) or isnull(`test`.`t3`.`c`) or isnull(`test`.`t6`.`c`) or isnull(`test`.`t8`.`c`)) and (`test`.`t1`.`a` <> 2))) join `test`.`t9` where ((`test`.`t9`.`a` = 1) and (`test`.`t1`.`b` = `test`.`t0`.`b`) and (`test`.`t0`.`a` = 1) and ((`test`.`t2`.`a` >= 4) or isnull(`test`.`t2`.`c`)) and ((`test`.`t3`.`a` < 5) or isnull(`test`.`t3`.`c`)) and ((`test`.`t4`.`b` = `test`.`t3`.`b`) or isnull(`test`.`t3`.`c`) or isnull(`test`.`t4`.`c`)) and ((`test`.`t5`.`a` >= 2) or isnull(`test`.`t5`.`c`)) and ((`test`.`t6`.`a` >= 4) or isnull(`test`.`t6`.`c`)) and ((`test`.`t7`.`a` <= 2) or isnull(`test`.`t7`.`c`)) and ((`test`.`t8`.`a` < 1) or isnull(`test`.`t8`.`c`)) and ((`test`.`t9`.`b` = `test`.`t8`.`b`) or isnull(`test`.`t8`.`c`)))
Note 1003 select `test`.`t0`.`a` AS `a`,`test`.`t0`.`b` AS `b`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS `b`,`test`.`t4`.`a` AS `a`,`test`.`t4`.`b` AS `b`,`test`.`t5`.`a` AS `a`,`test`.`t5`.`b` AS `b`,`test`.`t6`.`a` AS `a`,`test`.`t6`.`b` AS `b`,`test`.`t7`.`a` AS `a`,`test`.`t7`.`b` AS `b`,`test`.`t8`.`a` AS `a`,`test`.`t8`.`b` AS `b`,`test`.`t9`.`a` AS `a`,`test`.`t9`.`b` AS `b` from `test`.`t0` join `test`.`t1` left join (`test`.`t2` left join (`test`.`t3` join `test`.`t4`) on(((`test`.`t4`.`b` = `test`.`t2`.`b`) and (`test`.`t3`.`a` = 1))) join `test`.`t5` left join (`test`.`t6` join `test`.`t7` left join `test`.`t8` on(((`test`.`t8`.`b` = `test`.`t5`.`b`) and (`test`.`t6`.`b` < 10)))) on(((`test`.`t7`.`b` = `test`.`t5`.`b`) and (`test`.`t6`.`b` >= 2)))) on((((`test`.`t3`.`b` = 2) or isnull(`test`.`t3`.`c`)) and ((`test`.`t6`.`b` = 2) or isnull(`test`.`t6`.`c`)) and ((`test`.`t5`.`b` = `test`.`t0`.`b`) or isnull(`test`.`t3`.`c`) or isnull(`test`.`t6`.`c`) or isnull(`test`.`t8`.`c`)) and (`test`.`t1`.`a` <> 2))) join `test`.`t9` where ((`test`.`t9`.`a` = 1) and (`test`.`t1`.`b` = `test`.`t0`.`b`) and (`test`.`t0`.`a` = 1) and ((`test`.`t2`.`a` >= 4) or isnull(`test`.`t2`.`c`)) and ((`test`.`t3`.`a` < 5) or isnull(`test`.`t3`.`c`)) and ((`test`.`t3`.`b` = `test`.`t4`.`b`) or isnull(`test`.`t3`.`c`) or isnull(`test`.`t4`.`c`)) and ((`test`.`t5`.`a` >= 2) or isnull(`test`.`t5`.`c`)) and ((`test`.`t6`.`a` >= 4) or isnull(`test`.`t6`.`c`)) and ((`test`.`t7`.`a` <= 2) or isnull(`test`.`t7`.`c`)) and ((`test`.`t8`.`a` < 1) or isnull(`test`.`t8`.`c`)) and ((`test`.`t9`.`b` = `test`.`t8`.`b`) or isnull(`test`.`t8`.`c`)))
CREATE INDEX idx_b ON t8(b);
EXPLAIN
SELECT t0.a,t0.b,t1.a,t1.b,t2.a,t2.b,t3.a,t3.b,t4.a,t4.b,
......
......@@ -7,6 +7,6 @@ ERROR HY000: Incorrect usage of ALTER DATABASE UPGRADE DATA DIRECTORY NAME and n
ALTER DATABASE `#mysql51#not-yet` UPGRADE DATA DIRECTORY NAME;
ERROR HY000: Incorrect usage of ALTER DATABASE UPGRADE DATA DIRECTORY NAME and name
ALTER DATABASE `#mysql50#` UPGRADE DATA DIRECTORY NAME;
-ERROR HY000: Incorrect usage of ALTER DATABASE UPGRADE DATA DIRECTORY NAME and name
+ERROR 42000: Incorrect database name '#mysql50#'
ALTER DATABASE `#mysql50#upgrade-me` UPGRADE DATA DIRECTORY NAME;
ERROR 42000: Unknown database '#mysql50#upgrade-me'
......@@ -9,11 +9,13 @@ drop table if exists t1;
create table t1(n int not null, key(n), key(n), key(n), key(n));
let $1=10000;
disable_query_log;
+begin;
while ($1)
{
eval insert into t1 values ($1);
dec $1;
}
+commit;
enable_query_log;
send check table t1 extended;
connection con2;
......
......@@ -52,11 +52,13 @@ drop table t1;
create table t1 (n int default NULL);
let $1=5000;
disable_query_log;
+begin;
while ($1)
{
eval insert into t1 values($1);
dec $1;
}
+commit;
enable_query_log;
flush status;
......@@ -68,11 +70,13 @@ drop table t1;
create table t1 (s text);
let $1=5000;
disable_query_log;
+begin;
while ($1)
{
eval insert into t1 values('$1');
dec $1;
}
+commit;
enable_query_log;
flush status;
select count(distinct s) from t1;
......
......@@ -46,12 +46,14 @@ explain select * from (select t1.*, t2.a as t2a from t1,t2 where t1.a=t2.a) t1;
drop table t1, t2;
create table t1(a int not null, t char(8), index(a));
disable_query_log;
+begin;
let $1 = 10000;
while ($1)
{
eval insert into t1 values ($1,'$1');
dec $1;
}
+commit;
enable_query_log;
SELECT * FROM (SELECT * FROM t1) as b ORDER BY a ASC LIMIT 0,20;
explain select count(*) from t1 as tt1, (select * from t1) as tt2;
......
......@@ -44,7 +44,7 @@ ALTER DATABASE `#mysql41#not-supported` UPGRADE DATA DIRECTORY NAME;
--error ER_WRONG_USAGE
ALTER DATABASE `#mysql51#not-yet` UPGRADE DATA DIRECTORY NAME;
---error ER_WRONG_USAGE
+--error ER_WRONG_DB_NAME
ALTER DATABASE `#mysql50#` UPGRADE DATA DIRECTORY NAME;
--error ER_BAD_DB_ERROR
......
......@@ -4,13 +4,15 @@ reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
create table if not exists t1 (n int);
drop table t1;
call mtr.add_suppression ("Slave I/O: Got fatal error 1236 from master when reading data from binary");
call mtr.add_suppression ("Error in Log_event::read_log_event");
show master status;
File Position Binlog_Do_DB Binlog_Ignore_DB
master-bin.000001 # <Binlog_Do_DB> <Binlog_Ignore_DB>
include/stop_slave.inc
change master to master_log_pos=MASTER_LOG_POS;
Read_Master_Log_Pos 75
start slave;
Last_IO_Error = Got fatal error 1236 from master when reading data from binary log: 'log event entry exceeded max_allowed_packet; Increase max_allowed_packet on master'
include/stop_slave.inc
......
......@@ -11,20 +11,30 @@
# Passes with rbr no problem, removed statement include [jbm]
source include/master-slave.inc;
#
# Add an event to get some information into the log we can try to parse
#
let $read_pos= query_get_value(SHOW MASTER STATUS, Position, 1);
create table if not exists t1 (n int);
drop table t1;
call mtr.add_suppression ("Slave I/O: Got fatal error 1236 from master when reading data from binary");
call mtr.add_suppression ("Error in Log_event::read_log_event");
source include/show_master_status.inc;
sync_slave_with_master;
source include/stop_slave.inc;
--replace_result 75 MASTER_LOG_POS
change master to master_log_pos=75;
let $status_items= Read_Master_Log_Pos;
source include/show_slave_status.inc;
let $wrong_log_pos= `SELECT $read_pos+2`;
--replace_result $wrong_log_pos MASTER_LOG_POS
eval change master to master_log_pos=$wrong_log_pos;
start slave;
let $slave_io_errno= 1236;
let $show_slave_io_error= 1;
source include/wait_for_slave_io_error.inc;
--disable_warnings
source include/stop_slave.inc;
--enable_warnings
connection master;
source include/show_master_status.inc;
......
......@@ -77,6 +77,7 @@ DROP TABLE t_myisam, t_innodb;
# the error log only during shutdown, and currently the suppression of
# "Deadlock found" set in this test case is not effective during server
# shutdown.
+--sync_slave_with_master
connection slave;
STOP SLAVE;
--source include/wait_for_slave_to_stop.inc
......@@ -174,13 +174,13 @@ CREATE EVENT e2 ON SCHEDULE EVERY @step SECOND
# We want to start at the beginning of the DST cycle, so we wait
# untill current time divides by @step6.
-let $wait_timeout= `SELECT @step6 + 1`;
+let $wait_timeout= `SELECT @step6*2 + 1`;
let $wait_condition= SELECT UNIX_TIMESTAMP() % @step6 = @step6 - 1;
--source include/wait_condition.inc
# The second wait is needed because after the first wait we may end up
# on the ending edge of a second. Second wait will bring us to the
# beginning edge.
-let $wait_timeout= `SELECT @step + 1`;
+let $wait_timeout= `SELECT @step*2 + 1`;
let $wait_condition= SELECT UNIX_TIMESTAMP() % @step6 = 0;
--source include/wait_condition.inc
......
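
The doubled timeouts address poll granularity: wait_condition.inc re-checks its condition in a loop, and a condition such as UNIX_TIMESTAMP() % @step6 = 0 is only true for one second per period, so under load a budget of one period plus a second can run out before a boundary is observed, while two periods always span at least one. A rough standalone sketch of the same wait-for-a-time-boundary idea (the function name and the 5-second step are illustrative, not part of the test suite):

  #include <chrono>
  #include <thread>
  #include <ctime>

  /* Poll until the wall clock sits on a multiple of `step` seconds,
     or give up after `timeout_seconds`. */
  static bool wait_for_boundary(int step, int timeout_seconds)
  {
    namespace chr= std::chrono;
    const auto deadline= chr::steady_clock::now() + chr::seconds(timeout_seconds);
    while (chr::steady_clock::now() < deadline)
    {
      if (std::time(nullptr) % step == 0)          /* on the boundary second */
        return true;
      std::this_thread::sleep_for(chr::milliseconds(100));
    }
    return false;                                  /* timed out */
  }

  int main()
  {
    const int step= 5;
    /* step + 1 (the old budget) can expire just before the next boundary;
       step*2 + 1 always covers at least one full boundary. */
    return wait_for_boundary(step, step * 2 + 1) ? 0 : 1;
  }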
......@@ -1313,45 +1313,4 @@ SELECT * FROM t1 FORCE INDEX (PRIMARY)
DROP TABLE t1;
--echo #
--echo # Bug#50939: Loose Index Scan unduly relies on engine to remember range
--echo # endpoints
--echo #
CREATE TABLE t1 (
a INT,
b INT,
KEY ( a, b )
) PARTITION BY HASH (a) PARTITIONS 1;
CREATE TABLE t2 (
a INT,
b INT,
KEY ( a, b )
);
INSERT INTO t1 VALUES (1, 1), (2, 2), (3, 3), (4, 4), (5, 5);
INSERT INTO t1 SELECT a + 5, b + 5 FROM t1;
INSERT INTO t1 SELECT a + 10, b + 10 FROM t1;
INSERT INTO t1 SELECT a + 20, b + 20 FROM t1;
INSERT INTO t1 SELECT a + 40, b + 40 FROM t1;
INSERT INTO t2 SELECT * FROM t1;
--echo # plans should be identical
EXPLAIN SELECT a, MAX(b) FROM t1 WHERE a IN (10,100) GROUP BY a;
EXPLAIN SELECT a, MAX(b) FROM t2 WHERE a IN (10,100) GROUP BY a;
FLUSH status;
SELECT a, MAX(b) FROM t1 WHERE a IN (10, 100) GROUP BY a;
--echo # Should be no more than 4 reads.
SHOW status LIKE 'handler_read_key';
FLUSH status;
SELECT a, MAX(b) FROM t2 WHERE a IN (10, 100) GROUP BY a;
--echo # Should be no more than 4 reads.
SHOW status LIKE 'handler_read_key';
DROP TABLE t1, t2;
--echo End of 5.1 tests
......@@ -74,7 +74,7 @@ public:
SQL_I_List() { empty(); }
-SQL_I_List(const SQL_I_List &tmp)
+SQL_I_List(const SQL_I_List &tmp) :Sql_alloc()
{
elements= tmp.elements;
first= tmp.first;
......
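
The sql_list.h hunk adds an explicit base-class initializer to the copy constructor, the usual way to silence the g++ -Wextra diagnostic that a base class should be explicitly initialized in a copy constructor. A stripped-down illustration (Base and List stand in for Sql_alloc and SQL_I_List; they are not the server classes):

  struct Base
  {
    Base() {}
  };

  struct List : public Base
  {
    int elements;
    List() : elements(0) {}
    /* Without the explicit Base(), g++ -Wextra warns that the base class
       is not initialized in the copy constructor. */
    List(const List &tmp) : Base(), elements(tmp.elements) {}
  };

  int main()
  {
    List a;
    List b(a);          /* exercises the copy constructor */
    return b.elements;
  }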
......@@ -1054,7 +1054,7 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs,
String &field_term, String &line_start, String &line_term,
String &enclosed_par, int escape, bool get_it_from_net,
bool is_fifo)
-:file(file_par),escape_char(escape)
+:file(file_par),buffer(0),escape_char(escape)
{
read_charset= cs;
field_term_ptr=(char*) field_term.ptr();
......@@ -1103,6 +1103,7 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs,
MYF(MY_WME)))
{
my_free((uchar*) buffer,MYF(0)); /* purecov: inspected */
+buffer= 0;
error=1;
}
else
......@@ -1133,9 +1134,8 @@ READ_INFO::~READ_INFO()
{
if (need_end_io_cache)
::end_io_cache(&cache);
-my_free((uchar*) buffer,MYF(0));
-error=1;
}
+my_free((uchar*) buffer,MYF(MY_ALLOW_ZERO_PTR));
}
......
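
Taken together, the three sql_load.cc hunks apply a standard ownership pattern: initialize the buffer pointer in the constructor, null it after the error-path free, and make the destructor's free unconditional and null-safe (MYF(MY_ALLOW_ZERO_PTR)), so the buffer is released exactly once on every path. A minimal sketch of that pattern with illustrative names (Reader and setup() are not the server's READ_INFO code):

  #include <cstddef>
  #include <cstdlib>

  class Reader
  {
    unsigned char *buffer;
    int error;

  public:
    Reader(std::size_t size) : buffer(0), error(0)   /* pointer always initialized */
    {
      buffer= static_cast<unsigned char*>(std::malloc(size));
      if (!buffer || !setup())
      {
        std::free(buffer);                           /* error path: release now ... */
        buffer= 0;                                   /* ... and forget the pointer  */
        error= 1;
      }
    }
    ~Reader()
    {
      std::free(buffer);    /* free(NULL) is a no-op, like MYF(MY_ALLOW_ZERO_PTR) */
    }

  private:
    bool setup() { return true; }                    /* stand-in for init_io_cache() */
  };

  int main()
  {
    Reader r(1024);
    return 0;
  }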
......@@ -18159,7 +18159,7 @@ static void test_bug53907()
rc= mysql_change_user(mysql, "testbug", NULL, "bug53907");
myquery(rc);
-rc= simple_command(mysql, COM_TABLE_DUMP, buf, sizeof(buf), 0);
+rc= simple_command(mysql, COM_TABLE_DUMP, (uchar*) buf, sizeof(buf), 0);
fprintf(stderr, ">>>>>>>>> %d\n", mysql_errno(mysql));
DIE_UNLESS(mysql_errno(mysql) == 1103); /* ER_WRONG_TABLE_NAME */
......
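
The mysql_client_test.c warning is presumably a pointer-signedness mismatch: buf is a plain char array while the command call expects unsigned bytes, so an explicit cast is required. A generic illustration of the same fix (send_bytes is a made-up stand-in, not the client library's simple_command):

  #include <cstring>

  /* Hypothetical API that, like the real call, takes raw unsigned bytes. */
  static int send_bytes(const unsigned char *buf, unsigned long len)
  {
    return (buf && len) ? 0 : 1;
  }

  int main()
  {
    char buf[32];
    std::strcpy(buf, "bug53907");
    /* Passing buf directly is a signedness mismatch; the cast states the
       intent explicitly, exactly as the (uchar*) cast does above. */
    return send_bytes(reinterpret_cast<const unsigned char*>(buf), sizeof(buf));
  }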