Commit 29d514b0 authored by Satya B

merge to 5.0-bugteam

parents 2005f53c 1aee9058
......@@ -6665,7 +6665,7 @@ void init_re_comp(my_regex_t *re, const char* str)
char erbuf[100];
size_t len= my_regerror(err, re, erbuf, sizeof(erbuf));
die("error %s, %d/%d `%s'\n",
re_eprint(err), len, (int)sizeof(erbuf), erbuf);
re_eprint(err), (int)len, (int)sizeof(erbuf), erbuf);
}
}
......@@ -6721,7 +6721,7 @@ int match_re(my_regex_t *re, char *str)
char erbuf[100];
size_t len= my_regerror(err, re, erbuf, sizeof(erbuf));
die("error %s, %d/%d `%s'\n",
re_eprint(err), len, (int)sizeof(erbuf), erbuf);
re_eprint(err), (int)len, (int)sizeof(erbuf), erbuf);
}
return 0;
}
......
#
# Include this script to wait until the connection to the
# server has been dropped
--disable_result_log
--disable_query_log
let $counter= 500;
let $mysql_errno= 0;
while (!$mysql_errno)
{
--error 0,1053,2002,2006,2013
show status;
dec $counter;
if (!$counter)
{
--die Server failed to disappear
}
--sleep 0.1
}
--enable_query_log
--enable_result_log
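#
# Usage sketch (assumption: this is the include/wait_until_disconnected.inc
# file that is sourced later in this commit). A test drops its current
# connection and then sources this file to block until the server has
# really closed it:
#   disconnect con1;
#   --source include/wait_until_disconnected.inc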
DROP TABLE IF EXISTS t1;
CREATE TABLE t1( a INT, b INT );
INSERT INTO t1 VALUES (1, 1), (2, 2), (3, 3), (4, 4);
# 1. test regular tables
# 1.1. test altering of columns that multiupdate doesn't use
# 1.1.1. normal mode
# 1.1.2. PS mode
# 1.2. test altering of columns that multiupdate uses
# 1.2.1. normal mode
# 1.2.2. PS mode
ALTER TABLE t1 ADD COLUMN a INT;
# 2. test UNIONs
# 2.1. test altering of columns that multiupdate doesn't use
# 2.1.1. normal mode
# 2.1.2. PS mode
# 2.2. test altering of columns that multiupdate uses
# 2.2.1. normal mode
# 2.2.2. PS mode
DROP TABLE t1;
DROP TABLE IF EXISTS t1,t2,t3;
CREATE TABLE t1 (
a int(11) unsigned default NULL,
b varchar(255) default NULL,
UNIQUE KEY a (a),
KEY b (b)
);
INSERT INTO t1 VALUES (1, 1), (2, 2), (3, 3);
CREATE TABLE t2 SELECT * FROM t1;
CREATE TABLE t3 SELECT * FROM t1;
# test altering of columns that multiupdate doesn't use
# normal mode
# PS mode
# test altering of columns that multiupdate uses
# normal mode
# PS mode
DROP TABLE t1, t2, t3;
DROP TABLE IF EXISTS t1;
SET @old_max_allowed_packet= @@global.max_allowed_packet;
SET @@global.max_allowed_packet = 2 * 1024 * 1024 + 1024;
CREATE TABLE t1(data LONGBLOB);
INSERT INTO t1 SELECT REPEAT('1', 2*1024*1024);
SELECT LENGTH(data) FROM t1;
LENGTH(data)
2097152
DROP TABLE t1;
SET @@global.max_allowed_packet = @old_max_allowed_packet;
......@@ -192,13 +192,4 @@ delimiter
1
1
1
set @old_max_allowed_packet = @@global.max_allowed_packet;
set @@global.max_allowed_packet = 2 * 1024 * 1024 + 1024;
CREATE TABLE t1(data LONGBLOB);
INSERT INTO t1 SELECT REPEAT('1', 2*1024*1024);
SELECT LENGTH(data) FROM t1;
LENGTH(data)
2097152
DROP TABLE t1;
set @@global.max_allowed_packet = @old_max_allowed_packet;
End of 5.0 tests
......@@ -153,4 +153,23 @@ a b
SET @@session.time_zone = default;
DROP TABLE t1;
SET @@session.time_zone = default;
reset master;
CREATE TABLE t1 (date timestamp NOT NULL default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP, a int(11) default NULL);
SET @@session.time_zone='+01:00';
insert into t1 values('2008-12-23 19:39:39',1);
SET @@session.time_zone='+02:00';
insert delayed into t1 values ('2008-12-23 19:39:39',2);
flush table t1;
flush logs;
select * from t1;
date a
2008-12-23 20:39:39 1
2008-12-23 19:39:39 2
DROP TABLE t1;
select * from t1 order by a;
date a
2008-12-23 20:39:39 1
2008-12-23 19:39:39 2
DROP TABLE t1;
SET @@session.time_zone = default;
End of 5.0 tests
......@@ -84,8 +84,6 @@ insert into t1 values(2,2);
insert into t2 values(1,2);
lock table t1 read;
connection writer;
# mleich: IMHO the 'send' is not necessary because the update should not block,
#         but it saves some runtime in case we block because of an error.
send
update t1,t2 set c=a where b=d;
connection reader;
......@@ -191,8 +189,8 @@ connection locker;
USE mysql;
LOCK TABLES columns_priv WRITE, db WRITE, host WRITE, user WRITE;
FLUSH TABLES;
--sleep 1
#
connection reader;
USE mysql;
# Note: This must be a multi-table select, otherwise the deadlock will not occur
......
# Bug38499 flush tables and multitable table update with derived table cause crash
# MySQL >= 5.0
#
# Save the initial number of concurrent sessions
--source include/count_sessions.inc
connect (locker,localhost,root,,);
connect (writer,localhost,root,,);
--connection default
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
CREATE TABLE t1( a INT, b INT );
INSERT INTO t1 VALUES (1, 1), (2, 2), (3, 3), (4, 4);
--echo # 1. test regular tables
--echo # 1.1. test altering of columns that multiupdate doesn't use
--echo # 1.1.1. normal mode
--disable_query_log
let $i = 100;
while ($i) {
--dec $i
--connection writer
send UPDATE t1, (SELECT 1 FROM t1 t1i) d SET a = 0 WHERE 1=0;
--connection locker
ALTER TABLE t1 ADD COLUMN (c INT);
ALTER TABLE t1 DROP COLUMN c;
--connection writer
--reap
}
--echo # 1.1.2. PS mode
--connection writer
PREPARE stmt FROM 'UPDATE t1, (SELECT 1 FROM t1 t1i) d SET a = 0 WHERE 1=0';
let $i = 100;
while ($i) {
--dec $i
--connection writer
--send EXECUTE stmt
--connection locker
ALTER TABLE t1 ADD COLUMN (c INT);
ALTER TABLE t1 DROP COLUMN c;
--connection writer
--reap
}
--enable_query_log
--echo # 1.2. test altering of columns that multiupdate uses
--echo # 1.2.1. normal mode
--connection default
--disable_query_log
let $i = 100;
while ($i) {
dec $i;
--connection locker
--error 0,ER_DUP_FIELDNAME
ALTER TABLE t1 ADD COLUMN a int(11) unsigned default NULL;
UPDATE t1 SET a=b;
--connection writer
--send UPDATE t1, (SELECT 1 FROM t1 t1i) d SET a = 0 WHERE 1=0;
--connection locker
--error 0,ER_CANT_DROP_FIELD_OR_KEY
ALTER TABLE t1 DROP COLUMN a;
--connection writer
--error 0,ER_BAD_FIELD_ERROR # unknown column error
--reap
}
--enable_query_log
--echo # 1.2.2. PS mode
--disable_query_log
let $i = 100;
while ($i) {
dec $i;
--connection locker
--error 0,ER_DUP_FIELDNAME
ALTER TABLE t1 ADD COLUMN a INT;
UPDATE t1 SET a=b;
--connection writer
PREPARE stmt FROM 'UPDATE t1, (SELECT 1 FROM t1 t1i) d SET a = 0 WHERE 1=0';
--send EXECUTE stmt
--connection locker
--error 0,ER_CANT_DROP_FIELD_OR_KEY
ALTER TABLE t1 DROP COLUMN a;
--connection writer
--error 0,ER_BAD_FIELD_ERROR # Unknown column 'a' in 'field list'
--reap
}
--enable_query_log
--connection default
ALTER TABLE t1 ADD COLUMN a INT;
--echo # 2. test UNIONs
--echo # 2.1. test altering of columns that multiupdate doesn't use
--echo # 2.1.1. normal mode
--disable_query_log
let $i = 100;
while ($i) {
--dec $i
--connection writer
send UPDATE t1, ((SELECT 1 FROM t1 t1i) UNION (SELECT 2 FROM t1 t1ii)) e SET a = 0 WHERE 1=0;
--connection locker
ALTER TABLE t1 ADD COLUMN (c INT);
ALTER TABLE t1 DROP COLUMN c;
--connection writer
--reap
}
--echo # 2.1.2. PS mode
--connection writer
PREPARE stmt FROM 'UPDATE t1, ((SELECT 1 FROM t1 t1i) UNION (SELECT 2 FROM t1 t1ii)) e SET a = 0 WHERE 1=0';
let $i = 100;
while ($i) {
--dec $i
--connection writer
--send EXECUTE stmt
--connection locker
ALTER TABLE t1 ADD COLUMN (c INT);
ALTER TABLE t1 DROP COLUMN c;
--connection writer
--reap
}
--enable_query_log
--echo # 2.2. test altering of columns that multiupdate uses
--echo # 2.2.1. normal mode
--connection default
--disable_query_log
let $i = 100;
while ($i) {
dec $i;
--connection locker
--error 0,ER_DUP_FIELDNAME
ALTER TABLE t1 ADD COLUMN a int(11) unsigned default NULL;
UPDATE t1 SET a=b;
--connection writer
--send UPDATE t1, ((SELECT 1 FROM t1 t1i) UNION (SELECT 2 FROM t1 t1ii)) e SET a = 0 WHERE 1=0;
--connection locker
--error 0,ER_CANT_DROP_FIELD_OR_KEY
ALTER TABLE t1 DROP COLUMN a;
--connection writer
--error 0,ER_BAD_FIELD_ERROR # Unknown column 'a' in 'field list'
--reap
}
--enable_query_log
--echo # 2.2.2. PS mode
--disable_query_log
let $i = 100;
while ($i) {
dec $i;
--connection locker
--error 0,ER_DUP_FIELDNAME
ALTER TABLE t1 ADD COLUMN a INT;
UPDATE t1 SET a=b;
--connection writer
PREPARE stmt FROM 'UPDATE t1, ((SELECT 1 FROM t1 t1i) UNION (SELECT 2 FROM t1 t1ii)) e SET a = 0 WHERE 1=0';
--send EXECUTE stmt
--connection locker
--error 0,ER_CANT_DROP_FIELD_OR_KEY
ALTER TABLE t1 DROP COLUMN a;
--connection writer
--error 0,ER_BAD_FIELD_ERROR # Unknown column 'a' in 'field list'
--reap
}
--enable_query_log
--connection default
DROP TABLE t1;
# Close connections
--disconnect locker
--disconnect writer
# End of 5.0 tests
# Wait till all disconnects are completed
--source include/wait_until_count_sessions.inc
#
# Bug#38691 segfault/abort in ``UPDATE ...JOIN'' while
# ``FLUSH TABLES WITH READ LOCK''
# MySQL >= 5.0
#
# Save the initial number of concurrent sessions
--source include/count_sessions.inc
# Test to see if select will get the lock ahead of low priority update
connect (locker,localhost,root,,);
connect (writer,localhost,root,,);
--connection default
--disable_warnings
DROP TABLE IF EXISTS t1,t2,t3;
--enable_warnings
CREATE TABLE t1 (
a int(11) unsigned default NULL,
b varchar(255) default NULL,
UNIQUE KEY a (a),
KEY b (b)
);
INSERT INTO t1 VALUES (1, 1), (2, 2), (3, 3);
CREATE TABLE t2 SELECT * FROM t1;
CREATE TABLE t3 SELECT * FROM t1;
--echo # test altering of columns that multiupdate doesn't use
--echo # normal mode
--disable_query_log
let $i = 100;
while ($i) {
--dec $i
--connection writer
send UPDATE t2 INNER JOIN (t1 JOIN t3 USING(a)) USING(a)
SET a = NULL WHERE t1.b <> t2.b;
--connection locker
ALTER TABLE t2 ADD COLUMN (c INT);
ALTER TABLE t2 DROP COLUMN c;
--connection writer
--reap
}
--echo # PS mode
--connection writer
PREPARE stmt FROM 'UPDATE t2 INNER JOIN (t1 JOIN t3 USING(a)) USING(a)
SET a = NULL WHERE t1.b <> t2.b';
let $i = 100;
while ($i) {
--dec $i
--connection writer
--send EXECUTE stmt
--connection locker
ALTER TABLE t2 ADD COLUMN (c INT);
ALTER TABLE t2 DROP COLUMN c;
--connection writer
--reap
}
--enable_query_log
--echo # test altering of columns that multiupdate uses
--echo # normal mode
--connection default
--disable_query_log
let $i = 100;
while ($i) {
dec $i;
--connection locker
--error 0,ER_DUP_FIELDNAME
ALTER TABLE t2 ADD COLUMN a int(11) unsigned default NULL;
UPDATE t2 SET a=b;
--connection writer
--send UPDATE t2 INNER JOIN (t1 JOIN t3 USING(a)) USING(a) SET a = NULL WHERE t1.b <> t2.b
--connection locker
--error 0,ER_CANT_DROP_FIELD_OR_KEY
ALTER TABLE t2 DROP COLUMN a;
--connection writer
--error 0,ER_BAD_FIELD_ERROR
--reap
}
--enable_query_log
--echo # PS mode
--disable_query_log
let $i = 100;
while ($i) {
dec $i;
--connection locker
--error 0,ER_DUP_FIELDNAME
ALTER TABLE t2 ADD COLUMN a int(11) unsigned default NULL;
UPDATE t2 SET a=b;
--connection writer
PREPARE stmt FROM 'UPDATE t2 INNER JOIN (t1 JOIN t3 USING(a)) USING(a) SET a = NULL WHERE t1.b <> t2.b';
--send EXECUTE stmt
--connection locker
--error 0,ER_CANT_DROP_FIELD_OR_KEY
ALTER TABLE t2 DROP COLUMN a;
--connection writer
--error 0,ER_BAD_FIELD_ERROR
--reap
}
--enable_query_log
--connection default
DROP TABLE t1, t2, t3;
# Close connections
--disconnect locker
--disconnect writer
# Wait till all disconnects are completed
--source include/wait_until_count_sessions.inc
#
# Bug#41486 extra character appears in BLOB for every ~40Mb after
# mysqldump/import
#
# This test consumes a significant amount of resources.
# Therefore it should be kept separate from other tests.
# Otherwise we might suffer from problems like
# Bug#43801 mysql.test takes too long, fails due to expired timeout
# on debx86-b in PB
#
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
# Have to change the global variable as the session variable is
# read-only.
SET @old_max_allowed_packet= @@global.max_allowed_packet;
# 2 MB blob length + some space for the rest of INSERT query
SET @@global.max_allowed_packet = 2 * 1024 * 1024 + 1024;
# Create a new connection since the global max_allowed_packet
# has no effect for the current connection
connect (con1, localhost, root,,);
CREATE TABLE t1(data LONGBLOB);
INSERT INTO t1 SELECT REPEAT('1', 2*1024*1024);
let $outfile= $MYSQLTEST_VARDIR/tmp/bug41486.sql;
--error 0,1
remove_file $outfile;
--exec $MYSQL_DUMP test t1 > $outfile
# Check that the mysql client does not insert extra newlines when loading
# strings longer than client's max_allowed_packet
--exec $MYSQL --max_allowed_packet=1M test < $outfile 2>&1
SELECT LENGTH(data) FROM t1;
DROP TABLE t1;
# Cleanup
disconnect con1;
--source include/wait_until_disconnected.inc
remove_file $outfile;
connection default;
SET @@global.max_allowed_packet = @old_max_allowed_packet;
......@@ -341,37 +341,4 @@ EOF
remove_file $MYSQLTEST_VARDIR/tmp/bug31060.sql;
#
# Bug #41486: extra character appears in BLOB for every ~40Mb after
# mysqldump/import
#
# Have to change the global variable as the session variable is
# read-only.
set @old_max_allowed_packet = @@global.max_allowed_packet;
# 2 MB blob length + some space for the rest of INSERT query
set @@global.max_allowed_packet = 2 * 1024 * 1024 + 1024;
# Create a new connection since the global max_allowed_packet
# has no effect for the current connection
connect (con1, localhost, root,,);
connection con1;
CREATE TABLE t1(data LONGBLOB);
INSERT INTO t1 SELECT REPEAT('1', 2*1024*1024);
--exec $MYSQL_DUMP test t1 >$MYSQLTEST_VARDIR/tmp/bug41486.sql
# Check that the mysql client does not insert extra newlines when loading
# strings longer than client's max_allowed_packet
--exec $MYSQL --max_allowed_packet=1M test < $MYSQLTEST_VARDIR/tmp/bug41486.sql 2>&1
SELECT LENGTH(data) FROM t1;
remove_file $MYSQLTEST_VARDIR/tmp/bug41486.sql;
DROP TABLE t1;
connection default;
disconnect con1;
set @@global.max_allowed_packet = @old_max_allowed_packet;
--echo End of 5.0 tests
......@@ -154,5 +154,31 @@ connection master;
DROP TABLE t1;
SET @@session.time_zone = default;
# Bug#41719 delayed INSERT into timestamp col needs set time_zone for concurrent binlogging
# Test that the time zone is correctly binlogged for an 'insert delayed' statement:
# insert 2 values into a timestamp col under different time_zone settings and check the result.
--connection master
reset master;
CREATE TABLE t1 (date timestamp NOT NULL default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP, a int(11) default NULL);
SET @@session.time_zone='+01:00';
insert into t1 values('2008-12-23 19:39:39',1);
--connection master1
SET @@session.time_zone='+02:00';
insert delayed into t1 values ('2008-12-23 19:39:39',2);
# Force table t1 to be closed and flush the query cache.
# This makes sure that the 'delayed insert' is executed before the next statement.
flush table t1;
flush logs;
select * from t1;
DROP TABLE t1;
--exec $MYSQL_BINLOG $MYSQLTEST_VARDIR/log/master-bin.000001 | $MYSQL
--connection master1
select * from t1 order by a;
DROP TABLE t1;
SET @@session.time_zone = default;
--echo End of 5.0 tests
......@@ -1605,10 +1605,11 @@ class delayed_row :public ilink {
ulong auto_increment_increment;
ulong auto_increment_offset;
timestamp_auto_set_type timestamp_field_type;
Time_zone *time_zone;
uint query_length;
delayed_row(enum_duplicates dup_arg, bool ignore_arg, bool log_query_arg)
:record(0), query(0), dup(dup_arg), ignore(ignore_arg), log_query(log_query_arg) {}
:record(0), query(0), time_zone(0), dup(dup_arg), ignore(ignore_arg), log_query(log_query_arg) {}
~delayed_row()
{
x_free(record);
......@@ -2062,6 +2063,19 @@ int write_delayed(THD *thd,TABLE *table,enum_duplicates duplic, bool ignore,
row->last_insert_id= thd->last_insert_id;
row->timestamp_field_type= table->timestamp_field_type;
/*
  Add the session time zone.
  The Time_zone object is not freed even after its thread has ended, so the
  time_zone object can safely be taken from the thread that is handling the
  delayed statement. See the comment of my_tz_find() for details.
*/
if (thd->time_zone_used)
{
row->time_zone = thd->variables.time_zone;
}
else
{
row->time_zone = NULL;
}
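/*
  A NULL time_zone marks rows whose statement did not depend on the session
  time zone; handle_inserts() then binlogs such rows with the handler
  thread's own time zone settings (see below).
*/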
/* The session variable settings can always be copied. */
row->auto_increment_increment= thd->variables.auto_increment_increment;
row->auto_increment_offset= thd->variables.auto_increment_offset;
......@@ -2515,8 +2529,19 @@ bool Delayed_insert::handle_inserts(void)
}
if (row->query && row->log_query && using_bin_log)
{
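/*
  Temporarily switch to the time zone that was in effect when the row was
  queued, so that the statement is written to the binary log with the
  correct time zone; the handler thread's own settings are restored below.
*/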
bool backup_time_zone_used = thd.time_zone_used;
Time_zone *backup_time_zone = thd.variables.time_zone;
if (row->time_zone != NULL)
{
thd.time_zone_used = true;
thd.variables.time_zone = row->time_zone;
}
Query_log_event qinfo(&thd, row->query, row->query_length, 0, FALSE);
mysql_bin_log.write(&qinfo);
thd.time_zone_used = backup_time_zone_used;
thd.variables.time_zone = backup_time_zone;
}
if (table->s->blob_fields)
free_delayed_insert_blobs(table);
......