Commit f60d9a3e authored by unknown

Merge bk-internal.mysql.com:/home/bk/mysql-5.0

into  bodhi.local:/opt/local/work/mysql-5.0-runtime

parents 3ff08ab8 a7dddd3b
......@@ -183,3 +183,47 @@ a
32
42
drop table t1;
create table t1 (a tinyint not null auto_increment primary key) engine=myisam;
insert into t1 values(103);
set auto_increment_increment=11;
set auto_increment_offset=4;
insert into t1 values(null);
insert into t1 values(null);
insert into t1 values(null);
ERROR 23000: Duplicate entry '125' for key 1
select a, mod(a-@@auto_increment_offset,@@auto_increment_increment) from t1 order by a;
a mod(a-@@auto_increment_offset,@@auto_increment_increment)
103 0
114 0
125 0
create table t2 (a tinyint unsigned not null auto_increment primary key) engine=myisam;
set auto_increment_increment=10;
set auto_increment_offset=1;
set insert_id=1000;
insert into t2 values(null);
Warnings:
Warning 1264 Out of range value adjusted for column 'a' at row 1
select a, mod(a-@@auto_increment_offset,@@auto_increment_increment) from t2 order by a;
a mod(a-@@auto_increment_offset,@@auto_increment_increment)
251 0
create table t3 like t1;
set auto_increment_increment=1000;
set auto_increment_offset=700;
insert into t3 values(null);
Warnings:
Warning 1264 Out of range value adjusted for column 'a' at row 1
select * from t3 order by a;
a
127
select * from t1 order by a;
a
103
114
125
select * from t2 order by a;
a
251
select * from t3 order by a;
a
127
drop table t1,t2,t3;
......@@ -132,3 +132,68 @@ id last_id
drop function bug15728;
drop function bug15728_insert;
drop table t1, t2;
create table t1 (n int primary key auto_increment not null,
b int, unique(b));
set sql_log_bin=0;
insert into t1 values(null,100);
replace into t1 values(null,50),(null,100),(null,150);
select * from t1 order by n;
n b
2 50
3 100
4 150
truncate table t1;
set sql_log_bin=1;
insert into t1 values(null,100);
select * from t1 order by n;
n b
1 100
insert into t1 values(null,200),(null,300);
delete from t1 where b <> 100;
select * from t1 order by n;
n b
1 100
replace into t1 values(null,100),(null,350);
select * from t1 order by n;
n b
2 100
3 350
select * from t1 order by n;
n b
2 100
3 350
insert into t1 values (NULL,400),(3,500),(NULL,600) on duplicate key UPDATE n=1000;
select * from t1 order by n;
n b
2 100
4 400
1000 350
1001 600
select * from t1 order by n;
n b
2 100
4 400
1000 350
1001 600
drop table t1;
create table t1 (n int primary key auto_increment not null,
b int, unique(b));
insert into t1 values(null,100);
select * from t1 order by n;
n b
1 100
insert into t1 values(null,200),(null,300);
delete from t1 where b <> 100;
select * from t1 order by n;
n b
1 100
insert into t1 values(null,100),(null,350) on duplicate key update n=2;
select * from t1 order by n;
n b
2 100
3 350
select * from t1 order by n;
n b
2 100
3 350
drop table t1;
......@@ -96,9 +96,47 @@ select * from t1;
sync_slave_with_master;
select * from t1;
connection master;
# Test for BUG#20524 "auto_increment_* not observed when inserting
# a too large value". When an autogenerated value was bigger than the
# maximum possible value of the field, it was truncated to that max
# possible value, without being "rounded down" to still honour
# auto_increment_* variables.
connection master;
drop table t1;
create table t1 (a tinyint not null auto_increment primary key) engine=myisam;
insert into t1 values(103);
set auto_increment_increment=11;
set auto_increment_offset=4;
insert into t1 values(null);
insert into t1 values(null);
--error 1062
insert into t1 values(null);
select a, mod(a-@@auto_increment_offset,@@auto_increment_increment) from t1 order by a;
# same but with a larger value
create table t2 (a tinyint unsigned not null auto_increment primary key) engine=myisam;
set auto_increment_increment=10;
set auto_increment_offset=1;
set insert_id=1000;
insert into t2 values(null);
select a, mod(a-@@auto_increment_offset,@@auto_increment_increment) from t2 order by a;
# An offset so big that even first value does not fit
create table t3 like t1;
set auto_increment_increment=1000;
set auto_increment_offset=700;
insert into t3 values(null);
select * from t3 order by a;
sync_slave_with_master;
select * from t1 order by a;
select * from t2 order by a;
select * from t3 order by a;
connection master;
drop table t1,t2,t3;
# End cleanup
sync_slave_with_master;
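
Editor's note: the expected values in this test follow directly from the auto_increment_increment/auto_increment_offset arithmetic described in the comment above. A minimal standalone sketch (plain C++, not the server code; column limits and edge cases such as nr < offset are simplified) of the t1 scenario:

    #include <cstdio>

    int main()
    {
      const unsigned long long increment = 11, offset = 4, tinyint_max = 127;
      unsigned long long last = 103;          // the explicitly inserted row
      for (int i = 1; i <= 3; i++)
      {
        // next value of the form offset + N*increment above `last`
        unsigned long long nr =
          ((last + increment - offset) / increment) * increment + offset;
        if (nr > tinyint_max)
          // too large for the column: clamp to the maximum, then round down
          // to the last in-sequence value (the behaviour this fix adds)
          nr = ((tinyint_max - offset) / increment) * increment + offset;
        std::printf("insert %d -> %llu\n", i, nr);
        last = nr;                            // prints 114, 125, 125
      }
      return 0;
    }

The third generated value would be 136, which does not fit in a signed tinyint (maximum 127); before the fix it was silently truncated to 127, after the fix it is rounded down to the last in-sequence value 125, which collides with the existing row and produces the duplicate-key error recorded in the result file.
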
......@@ -147,6 +147,69 @@ drop function bug15728;
drop function bug15728_insert;
drop table t1, t2;
# test of BUG#20188 REPLACE or ON DUPLICATE KEY UPDATE in
# auto_increment breaks binlog
create table t1 (n int primary key auto_increment not null,
b int, unique(b));
# First, test that we do not call restore_auto_increment() too early
# in write_record():
set sql_log_bin=0;
insert into t1 values(null,100);
replace into t1 values(null,50),(null,100),(null,150);
select * from t1 order by n;
truncate table t1;
set sql_log_bin=1;
insert into t1 values(null,100);
select * from t1 order by n;
sync_slave_with_master;
# make slave's table autoinc counter bigger
insert into t1 values(null,200),(null,300);
delete from t1 where b <> 100;
# check that slave's table content is identical to master
select * from t1 order by n;
# only the auto_inc counter differs.
connection master;
replace into t1 values(null,100),(null,350);
select * from t1 order by n;
sync_slave_with_master;
select * from t1 order by n;
# Same test as for REPLACE, but for ON DUPLICATE KEY UPDATE
# We first check that if we update a row using a value larger than the
# table's counter, the counter for next row is bigger than the
# after-value of the updated row.
connection master;
insert into t1 values (NULL,400),(3,500),(NULL,600) on duplicate key UPDATE n=1000;
select * from t1 order by n;
sync_slave_with_master;
select * from t1 order by n;
# and now test for the bug:
connection master;
drop table t1;
create table t1 (n int primary key auto_increment not null,
b int, unique(b));
insert into t1 values(null,100);
select * from t1 order by n;
sync_slave_with_master;
insert into t1 values(null,200),(null,300);
delete from t1 where b <> 100;
select * from t1 order by n;
connection master;
insert into t1 values(null,100),(null,350) on duplicate key update n=2;
select * from t1 order by n;
sync_slave_with_master;
select * from t1 order by n;
connection master;
drop table t1;
# End of 5.0 tests
sync_slave_with_master;
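
Editor's note: the comments in this test describe the ordering that the BUG#20188 fix establishes in write_record(): the saved auto-increment value is restored only on paths that discard the row, and after an ON DUPLICATE KEY UPDATE the counter is pushed past the explicitly assigned key value. A condensed sketch of that control flow (hypothetical FakeHandler type and write_record_sketch name; not the server's write_record()):

    #include <cassert>

    // Hypothetical stand-in for the handler's auto-increment state.
    struct FakeHandler
    {
      unsigned long long next_id;            // next value to hand out
      unsigned long long saved_id;           // value remembered for this row

      void save()                        { saved_id = next_id; }
      void restore_auto_increment()      { next_id = saved_id; }
      void adjust_next_insert_id_after_explicit_value(unsigned long long nr)
      {
        if (nr >= next_id)                   // explicit value passed the counter:
          next_id = nr + 1;                  // move past it (increment==1 case)
      }
    };

    // Returns true if the row ends up in the table.
    static bool write_record_sketch(FakeHandler &h, bool duplicate_key,
                                    bool ignore_dup,
                                    unsigned long long dup_key_update_value)
    {
      h.save();
      if (!duplicate_key)
        return true;                         // generated value is kept
      if (ignore_dup)
      {
        h.restore_auto_increment();          // row discarded: hand the value back
        return false;
      }
      // ON DUPLICATE KEY UPDATE: the surviving row carries an explicit key
      // value, so the counter must advance past it rather than be rolled back.
      h.adjust_next_insert_id_after_explicit_value(dup_key_update_value);
      return true;
    }

    int main()
    {
      FakeHandler h = {5, 0};                // counter after (NULL,400) took n=4
      write_record_sketch(h, /*duplicate_key=*/true, /*ignore_dup=*/false,
                          /*dup_key_update_value=*/1000);
      assert(h.next_id == 1001);             // matches the "1001 600" result row
      return 0;
    }

This mirrors the "on duplicate key UPDATE n=1000" statement above: after the duplicate row is updated to n=1000, the next generated value is 1001, as shown in the result file.
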
......@@ -1471,6 +1471,66 @@ next_insert_id(ulonglong nr,struct system_variables *variables)
}
void handler::adjust_next_insert_id_after_explicit_value(ulonglong nr)
{
/*
If we have set THD::next_insert_id previously and plan to insert an
explicitly-specified value larger than this, we need to increase
THD::next_insert_id to be greater than the explicit value.
*/
THD *thd= table->in_use;
if (thd->clear_next_insert_id && (nr >= thd->next_insert_id))
{
if (thd->variables.auto_increment_increment != 1)
nr= next_insert_id(nr, &thd->variables);
else
nr++;
thd->next_insert_id= nr;
DBUG_PRINT("info",("next_insert_id: %lu", (ulong) nr));
}
}
/*
Computes the largest number X:
- smaller than or equal to "nr"
- of the form: auto_increment_offset + N * auto_increment_increment
where N>=0.
SYNOPSIS
prev_insert_id
nr Number to "round down"
variables variables struct containing auto_increment_increment and
auto_increment_offset
RETURN
The number X if it exists, "nr" otherwise.
*/
inline ulonglong
prev_insert_id(ulonglong nr, struct system_variables *variables)
{
if (unlikely(nr < variables->auto_increment_offset))
{
/*
There's nothing good we can do here. That is a pathological case, where
the offset is larger than the column's max possible value, i.e. not even
the first sequence value may be inserted. User will receive warning.
*/
DBUG_PRINT("info",("auto_increment: nr: %lu cannot honour "
"auto_increment_offset: %lu",
nr, variables->auto_increment_offset));
return nr;
}
if (variables->auto_increment_increment == 1)
return nr; // optimization of the formula below
nr= (((nr - variables->auto_increment_offset)) /
(ulonglong) variables->auto_increment_increment);
return (nr * (ulonglong) variables->auto_increment_increment +
variables->auto_increment_offset);
}
/*
Update the auto_increment field if necessary
......@@ -1547,17 +1607,7 @@ bool handler::update_auto_increment()
/* Clear flag for next row */
/* Mark that we didn't generate a new value **/
auto_increment_column_changed=0;
/* Update next_insert_id if we have already generated a value */
if (thd->clear_next_insert_id && nr >= thd->next_insert_id)
{
if (variables->auto_increment_increment != 1)
nr= next_insert_id(nr, variables);
else
nr++;
thd->next_insert_id= nr;
DBUG_PRINT("info",("next_insert_id: %lu", (ulong) nr));
}
adjust_next_insert_id_after_explicit_value(nr);
DBUG_RETURN(0);
}
if (!(nr= thd->next_insert_id))
......@@ -1580,10 +1630,19 @@ bool handler::update_auto_increment()
/* Mark that we should clear next_insert_id before next stmt */
thd->clear_next_insert_id= 1;
if (!table->next_number_field->store((longlong) nr, TRUE))
if (likely(!table->next_number_field->store((longlong) nr, TRUE)))
thd->insert_id((ulonglong) nr);
else
thd->insert_id(table->next_number_field->val_int());
{
/*
overflow of the field; we'll use the max value, however we try to
decrease it to honour auto_increment_* variables:
*/
nr= prev_insert_id(table->next_number_field->val_int(), variables);
thd->insert_id(nr);
if (unlikely(table->next_number_field->store((longlong) nr, TRUE)))
thd->insert_id(nr= table->next_number_field->val_int());
}
/*
We can't set next_insert_id if the auto-increment key is not the
......
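
Editor's note: a worked example of the new prev_insert_id() above, using the numbers from the t2 and t3 test cases (standalone sketch with a hypothetical prev_in_sequence name; the real function takes struct system_variables):

    #include <cstdio>

    // Largest value of the form offset + N*increment (N >= 0) that is <= nr;
    // returns nr unchanged if no such value exists or if increment == 1.
    static unsigned long long prev_in_sequence(unsigned long long nr,
                                               unsigned long long increment,
                                               unsigned long long offset)
    {
      if (nr < offset || increment == 1)
        return nr;
      return ((nr - offset) / increment) * increment + offset;
    }

    int main()
    {
      // t2: tinyint unsigned maximum is 255, increment=10, offset=1 -> 251 stored
      std::printf("%llu\n", prev_in_sequence(255, 10, 1));
      // t3: tinyint maximum is 127, offset=700 cannot be honoured -> 127 stored
      std::printf("%llu\n", prev_in_sequence(127, 1000, 700));
      return 0;
    }

These are exactly the values recorded in the result file: 251 for t2, and 127 (with the out-of-range warning) for t3, where the offset exceeds the column maximum and cannot be honoured.
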
......@@ -564,6 +564,7 @@ class handler :public Sql_alloc
{}
virtual ~handler(void) { /* TODO: DBUG_ASSERT(inited == NONE); */ }
int ha_open(const char *name, int mode, int test_if_locked);
void adjust_next_insert_id_after_explicit_value(ulonglong nr);
bool update_auto_increment();
virtual void print_error(int error, myf errflag);
virtual bool get_error_message(int error, String *buf);
......
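
Editor's note: the declaration above exposes adjust_next_insert_id_after_explicit_value() on the handler so that sql_insert.cc can call it after an ON DUPLICATE KEY UPDATE. Its effect, per the handler.cc comment, is to move the cached next_insert_id past an explicitly supplied value. A minimal sketch of that invariant (hypothetical names, increment/offset passed directly, underflow edge cases ignored):

    #include <cassert>

    // Smallest value of the form offset + N*increment strictly above nr;
    // simply nr + 1 when increment == 1.
    static unsigned long long bump_past(unsigned long long nr,
                                        unsigned long long increment,
                                        unsigned long long offset)
    {
      if (increment == 1)
        return nr + 1;
      return ((nr + increment - offset) / increment) * increment + offset;
    }

    int main()
    {
      unsigned long long next_insert_id = 4;  // cached counter (offset=4, inc=11)
      unsigned long long explicit_value = 103;
      if (explicit_value >= next_insert_id)   // same guard as in the new helper
        next_insert_id = bump_past(explicit_value, 11, 4);
      assert(next_insert_id == 114);          // first in-sequence value above 103
      return 0;
    }
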
......@@ -492,10 +492,11 @@ bool make_date_time(DATE_TIME_FORMAT *format, TIME *l_time,
const char *ptr, *end;
MY_LOCALE *locale;
THD *thd= current_thd;
char buf[128];
String tmp(buf, thd->variables.character_set_results);
char buf[STRING_BUFFER_USUAL_SIZE];
String tmp(buf, sizeof(buf), thd->variables.character_set_results);
uint errors= 0;
tmp.length(0);
str->length(0);
str->set_charset(&my_charset_bin);
locale = thd->variables.lc_time_names;
......
......@@ -624,8 +624,10 @@ void close_temporary_tables(THD *thd)
if (!mysql_bin_log.is_open())
{
for (table= thd->temporary_tables; table; table= table->next)
TABLE *next;
for (table= thd->temporary_tables; table; table= next)
{
next= table->next;
close_temporary(table, 1);
}
thd->temporary_tables= 0;
......@@ -648,7 +650,6 @@ void close_temporary_tables(THD *thd)
insertion sort of temp tables by pseudo_thread_id to build ordered list
of sublists of equal pseudo_thread_id
*/
for (prev_table= thd->temporary_tables, table= prev_table->next;
table;
prev_table= table, table= table->next)
......
......@@ -1035,7 +1035,6 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
uint key_nr;
if (error != HA_WRITE_SKIP)
goto err;
table->file->restore_auto_increment();
if ((int) (key_nr = table->file->get_dup_key(error)) < 0)
{
error=HA_WRITE_SKIP; /* Database can't find key */
......@@ -1108,20 +1107,20 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
if (res == VIEW_CHECK_ERROR)
goto before_trg_err;
if (thd->clear_next_insert_id)
{
/* Reset auto-increment cacheing if we do an update */
thd->clear_next_insert_id= 0;
thd->next_insert_id= 0;
}
if ((error=table->file->update_row(table->record[1],table->record[0])))
{
if ((error == HA_ERR_FOUND_DUPP_KEY) && info->ignore)
{
table->file->restore_auto_increment();
goto ok_or_after_trg_err;
}
goto err;
}
info->updated++;
if (table->next_number_field)
table->file->adjust_next_insert_id_after_explicit_value(table->next_number_field->val_int());
trg_error= (table->triggers &&
table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
TRG_ACTION_AFTER, TRUE));
......@@ -1150,12 +1149,6 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_BOTH) &&
(!table->triggers || !table->triggers->has_delete_triggers()))
{
if (thd->clear_next_insert_id)
{
/* Reset auto-increment cacheing if we do an update */
thd->clear_next_insert_id= 0;
thd->next_insert_id= 0;
}
if ((error=table->file->update_row(table->record[1],
table->record[0])))
goto err;
......@@ -1219,6 +1212,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
table->file->print_error(error,MYF(0));
before_trg_err:
table->file->restore_auto_increment();
if (key)
my_safe_afree(key, table->s->max_unique_length, MAX_KEY_LENGTH);
DBUG_RETURN(1);
......