Commit f71f07f2 authored by unknown

Merge gkodinov@bk-internal.mysql.com:/home/bk/mysql-5.1-opt

into  magare.gmz:/home/kgeorge/mysql/autopush/B31562-5.1-opt

parents c3a4dfd6 d68b313d
@@ -900,6 +900,7 @@ libmysql_r/.libs/libmysqlclient_r.lai
 libmysql_r/.libs/libmysqlclient_r.so.15
 libmysql_r/.libs/libmysqlclient_r.so.15.0.0
 libmysql_r/acconfig.h
+libmysql_r/client_settings.h
 libmysql_r/conf_to_src
 libmysql_r/my_static.h
 libmysql_r/mysys_priv.h
...
drop table if exists t1;
set global myisam_data_pointer_size=2;
CREATE TABLE t1 (a int auto_increment primary key not null, b longtext) ENGINE=MyISAM;
DELETE FROM t1 WHERE a=1 or a=5;
INSERT INTO t1 SET b=repeat('a',600);
ERROR HY000: The table 't1' is full
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
test.t1 check warning Datafile is almost full, 65448 of 65534 used
test.t1 check status OK
UPDATE t1 SET b=repeat('a', 800) where a=10;
ERROR HY000: The table 't1' is full
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
test.t1 check warning Datafile is almost full, 65448 of 65534 used
test.t1 check status OK
INSERT INTO t1 SET b=repeat('a',400);
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
test.t1 check warning Datafile is almost full, 65448 of 65534 used
test.t1 check status OK
DELETE FROM t1 WHERE a=2 or a=6;
UPDATE t1 SET b=repeat('a', 600) where a=11;
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
test.t1 check warning Datafile is almost full, 65448 of 65534 used
test.t1 check status OK
drop table t1;
set global myisam_data_pointer_size=default;
@@ -1291,4 +1291,9 @@ t1 CREATE TABLE `t1` (
   `b` int(11) DEFAULT NULL
 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (b) (PARTITION p1 VALUES LESS THAN (10) ENGINE = MyISAM, PARTITION p2 VALUES LESS THAN (20) ENGINE = MyISAM) */
 drop table t1, t2;
+create table t1
+(s1 timestamp on update current_timestamp, s2 int)
+partition by key(s1) partitions 3;
+insert into t1 values (null,null);
+drop table t1;
 End of 5.1 tests
#
# Some special cases with empty tables
#
--disable_warnings
drop table if exists t1;
--enable_warnings
set global myisam_data_pointer_size=2;
CREATE TABLE t1 (a int auto_increment primary key not null, b longtext) ENGINE=MyISAM;
--disable_query_log
let $1= 303;
while ($1)
{
INSERT INTO t1 SET b=repeat('a',200);
dec $1;
}
--enable_query_log
DELETE FROM t1 WHERE a=1 or a=5;
--error 1114
INSERT INTO t1 SET b=repeat('a',600);
CHECK TABLE t1 EXTENDED;
--error 1114
UPDATE t1 SET b=repeat('a', 800) where a=10;
CHECK TABLE t1 EXTENDED;
INSERT INTO t1 SET b=repeat('a',400);
CHECK TABLE t1 EXTENDED;
DELETE FROM t1 WHERE a=2 or a=6;
UPDATE t1 SET b=repeat('a', 600) where a=11;
CHECK TABLE t1 EXTENDED;
drop table t1;
set global myisam_data_pointer_size=default;
# End of 4.1 tests
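
As a rough illustration of why the test above behaves the way it does: with myisam_data_pointer_size=2 the data file is capped at 65534 bytes, the 303-row insert loop brings the data file length to 65448, and the two deleted ~200-byte rows are the only reusable space. The sketch below plugs those numbers into the same kind of check the server now performs; the block-header constant (20) and the size credited per deleted block are illustrative assumptions, not the exact MyISAM values.

#include <stdio.h>

/* Assumed value for illustration; the real constant is MI_MAX_DYN_BLOCK_HEADER
   in myisamdef.h and may differ. */
#define DYN_BLOCK_HEADER 20

int main(void)
{
  /* Numbers reported by CHECK TABLE ... EXTENDED in the result file above. */
  unsigned long max_data_file_length= 65534;  /* cap with a 2-byte data pointer */
  unsigned long data_file_length=     65448;  /* used after the 303 insert loop */
  unsigned long empty=               2 * 220; /* assumed: two deleted ~200-byte rows */
  unsigned long del=                 2;       /* number of deleted blocks */
  unsigned long reclengths[]= { 600, 400 };   /* the failing and the succeeding INSERT */

  for (int i= 0; i < 2; i++)
  {
    /* Same shape as the precise check added to write_dynamic_record(). */
    unsigned long usable= max_data_file_length - data_file_length +
                          empty - del * DYN_BLOCK_HEADER;
    int full= usable < reclengths[i] + DYN_BLOCK_HEADER;
    printf("reclength=%lu usable=%lu -> %s\n", reclengths[i], usable,
           full ? "ERROR 1114: The table 't1' is full" : "fits");
  }
  return 0;
}
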
@@ -1528,4 +1528,25 @@ PARTITION BY RANGE (b) (
 show create table t1;
 drop table t1, t2;
+#
+# Bug #32067 Partitions: crash with timestamp column
+# this bug occurs randomly on some UPDATE statement
+# with the '1032: Can't find record in 't1'' error
+create table t1
+(s1 timestamp on update current_timestamp, s2 int)
+partition by key(s1) partitions 3;
+insert into t1 values (null,null);
+--disable_query_log
+let $cnt= 1000;
+while ($cnt)
+{
+  update t1 set s2 = 1;
+  update t1 set s2 = 2;
+  dec $cnt;
+}
+--enable_query_log
+drop table t1;
 --echo End of 5.1 tests
@@ -2783,16 +2783,28 @@ int ha_partition::write_row(uchar * buf)
 int ha_partition::update_row(const uchar *old_data, uchar *new_data)
 {
   uint32 new_part_id, old_part_id;
-  int error;
+  int error= 0;
   longlong func_value;
+  timestamp_auto_set_type orig_timestamp_type= table->timestamp_field_type;
   DBUG_ENTER("ha_partition::update_row");
+  /*
+    We need to set timestamp field once before we calculate
+    the partition. Then we disable timestamp calculations
+    inside m_file[*]->update_row() methods
+  */
+  if (orig_timestamp_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
+  {
+    table->timestamp_field->set_time();
+    table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
+  }
   if ((error= get_parts_for_update(old_data, new_data, table->record[0],
                                    m_part_info, &old_part_id, &new_part_id,
                                    &func_value)))
   {
     m_part_info->err_value= func_value;
-    DBUG_RETURN(error);
+    goto exit;
   }
   /*
@@ -2804,23 +2816,27 @@ int ha_partition::update_row(const uchar *old_data, uchar *new_data)
   if (new_part_id == old_part_id)
   {
     DBUG_PRINT("info", ("Update in partition %d", new_part_id));
-    DBUG_RETURN(m_file[new_part_id]->update_row(old_data, new_data));
+    error= m_file[new_part_id]->update_row(old_data, new_data);
+    goto exit;
   }
   else
   {
     DBUG_PRINT("info", ("Update from partition %d to partition %d",
                         old_part_id, new_part_id));
     if ((error= m_file[new_part_id]->write_row(new_data)))
-      DBUG_RETURN(error);
+      goto exit;
     if ((error= m_file[old_part_id]->delete_row(old_data)))
     {
 #ifdef IN_THE_FUTURE
       (void) m_file[new_part_id]->delete_last_inserted_row(new_data);
 #endif
-      DBUG_RETURN(error);
+      goto exit;
     }
   }
-  DBUG_RETURN(0);
+exit:
+  table->timestamp_field_type= orig_timestamp_type;
+  DBUG_RETURN(error);
 }
...
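
The idea behind the ha_partition::update_row() change, stripped to its essentials: the ON UPDATE timestamp must be computed exactly once, before the target partition is derived from it, and the original auto-set mode must be restored on every exit path. Otherwise each underlying handler re-stamps the row after the partition has already been chosen from an older value, so the row can land in a partition that no longer matches its key (the random "Can't find record" of bug #32067). The sketch below is a simplified stand-alone rendering of that save/set/restore pattern; the type, struct and function names are illustrative, not the server's.

#include <time.h>

/* Simplified stand-ins for the server-side types; names are hypothetical. */
enum auto_set_mode { AUTO_SET_NONE, AUTO_SET_ON_UPDATE };

struct fake_table
{
  enum auto_set_mode timestamp_mode;
  time_t timestamp_value;
};

/* Hypothetical per-partition update; it must NOT touch the timestamp again. */
static int partition_update_row(struct fake_table *t, unsigned part_id)
{
  (void) t; (void) part_id;
  return 0;
}

static unsigned partition_for(time_t key, unsigned parts)
{
  return (unsigned) (key % parts);
}

/*
  Mirrors the shape of the fix: freeze the ON UPDATE timestamp once,
  compute the target partition from that frozen value, do the row
  operation, then restore the original mode before returning.
*/
int update_row_sketch(struct fake_table *t, unsigned parts)
{
  int error= 0;
  enum auto_set_mode orig_mode= t->timestamp_mode;
  unsigned part_id;

  if (orig_mode == AUTO_SET_ON_UPDATE)
  {
    t->timestamp_value= time(NULL);   /* set the value exactly once */
    t->timestamp_mode= AUTO_SET_NONE; /* keep inner handlers from re-stamping */
  }

  part_id= partition_for(t->timestamp_value, parts);
  error= partition_update_row(t, part_id);

  t->timestamp_mode= orig_mode;       /* restore, as the exit label does */
  return error;
}
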
@@ -326,6 +326,29 @@ static int write_dynamic_record(MI_INFO *info, const uchar *record,
   DBUG_ENTER("write_dynamic_record");
   flag=0;
+  /*
+    Check if we have enough room for the new record.
+    First we do simplified check to make usual case faster.
+    Then we do more precise check for the space left.
+    Though it still is not absolutely precise, as
+    we always use MI_MAX_DYN_BLOCK_HEADER while it can be
+    less in the most of the cases.
+  */
+  if (unlikely(info->s->base.max_data_file_length -
+               info->state->data_file_length <
+               reclength + MI_MAX_DYN_BLOCK_HEADER))
+  {
+    if (info->s->base.max_data_file_length - info->state->data_file_length +
+        info->state->empty - info->state->del * MI_MAX_DYN_BLOCK_HEADER <
+        reclength + MI_MAX_DYN_BLOCK_HEADER)
+    {
+      my_errno=HA_ERR_RECORD_FILE_FULL;
+      DBUG_RETURN(1);
+    }
+  }
   do
   {
     if (_mi_find_writepos(info,reclength,&filepos,&length))
@@ -762,6 +785,51 @@ static int update_dynamic_record(MI_INFO *info, my_off_t filepos, uchar *record,
   DBUG_ENTER("update_dynamic_record");
   flag=block_info.second_read=0;
+  /*
+    Check if we have enough room for the record.
+    First we do simplified check to make usual case faster.
+    Then we do more precise check for the space left.
+    Though it still is not absolutely precise, as
+    we always use MI_MAX_DYN_BLOCK_HEADER while it can be
+    less in the most of the cases.
+  */
+  /*
+    compare with just the reclength as we're going
+    to get some space from the old replaced record
+  */
+  if (unlikely(info->s->base.max_data_file_length -
+               info->state->data_file_length < reclength))
+  {
+    /*
+      let's read the old record's block to find out the length of the
+      old record
+    */
+    if ((error=_mi_get_block_info(&block_info,info->dfile,filepos))
+        & (BLOCK_DELETED | BLOCK_ERROR | BLOCK_SYNC_ERROR | BLOCK_FATAL_ERROR))
+    {
+      DBUG_PRINT("error",("Got wrong block info"));
+      if (!(error & BLOCK_FATAL_ERROR))
+        my_errno=HA_ERR_WRONG_IN_RECORD;
+      goto err;
+    }
+    /*
+      if new record isn't longer, we can go on safely
+    */
+    if (block_info.rec_len < reclength)
+    {
+      if (info->s->base.max_data_file_length - info->state->data_file_length +
+          info->state->empty - info->state->del * MI_MAX_DYN_BLOCK_HEADER <
+          reclength - block_info.rec_len + MI_MAX_DYN_BLOCK_HEADER)
+      {
+        my_errno=HA_ERR_RECORD_FILE_FULL;
+        goto err;
+      }
+    }
+    block_info.second_read=0;
+  }
   while (reclength > 0)
   {
     if (filepos != info->s->state.dellink)
...
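
Both mi_dynrec.c hunks follow the same two-stage shape: a cheap comparison against the free tail of the data file first, and only when that looks tight, a more careful (still pessimistic) estimate that credits space held in deleted blocks while charging MI_MAX_DYN_BLOCK_HEADER per block; the UPDATE variant additionally credits the length of the record being replaced. Below is a condensed restatement of that logic with placeholder names and an assumed header constant rather than the real MI_INFO fields; it is a sketch, not the server code.

#include <stdbool.h>

/* Assumed constant and field names; the real ones live in myisamdef.h / MI_INFO. */
#define MAX_DYN_BLOCK_HEADER 20

struct dyn_state
{
  unsigned long long max_data_file_length; /* hard cap from the data pointer size */
  unsigned long long data_file_length;     /* bytes currently used at the end     */
  unsigned long long empty;                /* bytes sitting in deleted blocks     */
  unsigned long long del;                  /* number of deleted blocks            */
};

/*
  Two-stage check in the spirit of write_dynamic_record() and
  update_dynamic_record(): a cheap comparison against the tail of the file
  first, and only if that fails, the pessimistic estimate that also counts
  space reusable from deleted blocks.
*/
static bool record_fits(const struct dyn_state *s, unsigned long reclength,
                        unsigned long old_rec_len /* 0 for INSERT */)
{
  unsigned long long need= reclength + MAX_DYN_BLOCK_HEADER;

  /* Fast path: plenty of room at the end of the data file.
     (The real UPDATE path compares against reclength alone here.) */
  if (s->max_data_file_length - s->data_file_length >= need)
    return true;

  /* UPDATE can reuse the old record's block, so only growth matters. */
  if (old_rec_len >= reclength)
    return true;

  /*
    Slow path: also count deleted space, charging the worst-case block
    header for every deleted block that might have to be split.
  */
  return s->max_data_file_length - s->data_file_length +
         s->empty - s->del * MAX_DYN_BLOCK_HEADER >=
         (reclength - old_rec_len) + MAX_DYN_BLOCK_HEADER;
}
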