Commit afa07926 authored by unknown

Allow optimization of multi-table-update also for InnoDB tables

MEMORY is now accepted as an alias for HEAP, so CREATE TABLE ... TYPE=MEMORY creates a HEAP table
Fixed a bug in multi-table-update where a row could be updated several times
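
A minimal sketch of the new alias (illustrative only, not taken from the test suite; table and column names are arbitrary):

create table t1 (a int not null, primary key (a)) type=MEMORY;
show create table t1;  # expected to report TYPE=HEAP, since MEMORY maps to DB_TYPE_HEAP
insert into t1 values (1),(2);
select * from t1;
drop table t1;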


mysql-test/r/heap.result:
  Test of CREATE TABLE ... type=MEMORY
mysql-test/r/innodb.result:
  Added multi-update-test
mysql-test/r/multi_update.result:
  Added multi-update-test
mysql-test/t/heap.test:
  Test of CREATE TABLE ... type=MEMORY
mysql-test/t/innodb.test:
  Added multi-update-test
mysql-test/t/multi_update.test:
  Added multi-update-test
sql/ha_innodb.h:
  Allow optimization of multi-table-update also for InnoDB tables
sql/handler.h:
  Allow optimization of multi-update also for InnoDB tables
sql/key.cc:
  After merge fix
sql/lex.h:
  MEMORY is an alias for HEAP
sql/sql_test.cc:
  Fixed wrong printf format
sql/sql_update.cc:
  Fixed bug in multi-table-update where a row could be updated several times
sql/sql_yacc.yy:
  MEMORY is an alias for HEAP
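
As a rough illustration of the multi-table-update fix (mirroring the added tests; the exact access method chosen depends on the optimizer):

create table t1 (a int not null primary key, b int not null, key (b)) type=innodb;
create table t2 (a int not null primary key, b int not null, key (b)) type=innodb;
insert into t1 values (1,1),(2,2),(3,3);
insert into t2 values (1,1),(2,2),(3,3);
# Every t1 row matches several t2 rows in the join; before the fix the
# matching t1 row could get the +10 applied more than once.
update t1,t2 set t1.b=t1.b+10 where t1.b=2;
select * from t1;  # exactly one row should now have b=12
drop table t1,t2;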
parent f3a5ccf5
......@@ -23,7 +23,7 @@ a b
4 6
alter table t1 add c int not null, add key (c,a);
drop table t1;
create table t1 (a int not null,b int not null, primary key (a)) type=heap comment="testing heaps";
create table t1 (a int not null,b int not null, primary key (a)) type=memory comment="testing heaps";
insert into t1 values(1,1),(2,2),(3,3),(4,4);
delete from t1 where a > 0;
select * from t1;
......
......@@ -1122,3 +1122,82 @@ my-test-1 my-test-2
COMMIT;
set autocommit=1;
DROP TABLE t1,t2,t3;
CREATE TABLE t1 (a int not null primary key, b int not null, unique (b)) type=innodb;
INSERT INTO t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9);
UPDATE t1 set a=a+100 where b between 2 and 3 and a < 1000;
SELECT * from t1;
a b
1 1
102 2
103 3
4 4
5 5
6 6
7 7
8 8
9 9
drop table t1;
CREATE TABLE t1 (a int not null primary key, b int not null, key (b)) type=innodb;
CREATE TABLE t2 (a int not null primary key, b int not null, key (b)) type=innodb;
INSERT INTO t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9);
INSERT INTO t2 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9);
update t1,t2 set t1.a=t1.a+100;
select * from t1;
a b
101 1
102 2
103 3
104 4
105 5
106 6
107 7
108 8
109 9
update t1,t2 set t1.a=t1.a+100 where t1.a=101;
select * from t1;
a b
201 1
102 2
103 3
104 4
105 5
106 6
107 7
108 8
109 9
update t1,t2 set t1.b=t1.b+10 where t1.b=2;
select * from t1;
a b
201 1
103 3
104 4
105 5
106 6
107 7
108 8
109 9
102 12
update t1,t2 set t1.b=t1.b+2,t2.b=t1.b where t1.b between 3 and 5;
select * from t1;
a b
201 1
103 5
104 6
106 6
105 7
107 7
108 8
109 9
102 12
select * from t2;
a b
1 5
2 5
3 5
4 5
5 5
6 5
7 5
8 5
9 5
drop table t1,t2;
......@@ -246,3 +246,67 @@ INSERT INTO t3 VALUES (1,'jedan'),(2,'dva');
update t1,t2 set t1.naziv="aaaa" where t1.broj=t2.broj;
update t1,t2,t3 set t1.naziv="bbbb", t2.naziv="aaaa" where t1.broj=t2.broj and t2.broj=t3.broj;
drop table if exists t1,t2,t3;
CREATE TABLE t1 (a int not null primary key, b int not null, key (b));
CREATE TABLE t2 (a int not null primary key, b int not null, key (b));
INSERT INTO t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9);
INSERT INTO t2 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9);
update t1,t2 set t1.a=t1.a+100;
select * from t1;
a b
101 1
102 2
103 3
104 4
105 5
106 6
107 7
108 8
109 9
update t1,t2 set t1.a=t1.a+100 where t1.a=101;
select * from t1;
a b
201 1
102 2
103 3
104 4
105 5
106 6
107 7
108 8
109 9
update t1,t2 set t1.b=t1.b+10 where t1.b=2;
select * from t1;
a b
201 1
102 12
103 3
104 4
105 5
106 6
107 7
108 8
109 9
update t1,t2 set t1.b=t1.b+2,t2.b=t1.b where t1.b between 3 and 5;
select * from t1;
a b
201 1
102 12
103 5
104 6
105 7
106 6
107 7
108 8
109 9
select * from t2;
a b
1 3
2 3
3 3
4 3
5 3
6 3
7 3
8 3
9 3
drop table t1,t2;
......@@ -17,7 +17,7 @@ select * from t1;
alter table t1 add c int not null, add key (c,a);
drop table t1;
create table t1 (a int not null,b int not null, primary key (a)) type=heap comment="testing heaps";
create table t1 (a int not null,b int not null, primary key (a)) type=memory comment="testing heaps";
insert into t1 values(1,1),(2,2),(3,3),(4,4);
delete from t1 where a > 0;
select * from t1;
......
......@@ -769,3 +769,31 @@ INSERT INTO t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9);
UPDATE t1 set a=a+100 where b between 2 and 3 and a < 1000;
SELECT * from t1;
drop table t1;
#
# Test multi update with different join methods
#
CREATE TABLE t1 (a int not null primary key, b int not null, key (b)) type=innodb;
CREATE TABLE t2 (a int not null primary key, b int not null, key (b)) type=innodb;
INSERT INTO t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9);
INSERT INTO t2 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9);
# Full join, without key
update t1,t2 set t1.a=t1.a+100;
select * from t1;
# unique key
update t1,t2 set t1.a=t1.a+100 where t1.a=101;
select * from t1;
# ref key
update t1,t2 set t1.b=t1.b+10 where t1.b=2;
select * from t1;
# Range key (in t1)
update t1,t2 set t1.b=t1.b+2,t2.b=t1.b where t1.b between 3 and 5;
select * from t1;
select * from t2;
drop table t1,t2;
......@@ -224,3 +224,31 @@ INSERT INTO t3 VALUES (1,'jedan'),(2,'dva');
update t1,t2 set t1.naziv="aaaa" where t1.broj=t2.broj;
update t1,t2,t3 set t1.naziv="bbbb", t2.naziv="aaaa" where t1.broj=t2.broj and t2.broj=t3.broj;
drop table if exists t1,t2,t3;
#
# Test multi update with different join methods
#
CREATE TABLE t1 (a int not null primary key, b int not null, key (b));
CREATE TABLE t2 (a int not null primary key, b int not null, key (b));
INSERT INTO t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9);
INSERT INTO t2 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9);
# Full join, without key
update t1,t2 set t1.a=t1.a+100;
select * from t1;
# unique key
update t1,t2 set t1.a=t1.a+100 where t1.a=101;
select * from t1;
# ref key
update t1,t2 set t1.b=t1.b+10 where t1.b=2;
select * from t1;
# Range key (in t1)
update t1,t2 set t1.b=t1.b+2,t2.b=t1.b where t1.b between 3 and 5;
select * from t1;
select * from t2;
drop table t1,t2;
......@@ -82,8 +82,7 @@ class ha_innobase: public handler
HA_PRIMARY_KEY_IN_READ_INDEX |
HA_DROP_BEFORE_CREATE |
HA_NO_PREFIX_CHAR_KEYS |
HA_TABLE_SCAN_ON_INDEX |
HA_NOT_MULTI_UPDATE),
HA_TABLE_SCAN_ON_INDEX),
last_dup_key((uint) -1),
start_of_scan(0)
{
......
......@@ -67,7 +67,6 @@
#define HA_CAN_FULLTEXT (HA_NO_PREFIX_CHAR_KEYS*2)
#define HA_CAN_SQL_HANDLER (HA_CAN_FULLTEXT*2)
#define HA_NO_AUTO_INCREMENT (HA_CAN_SQL_HANDLER*2)
#define HA_NOT_MULTI_UPDATE (HA_NO_AUTO_INCREMENT*2)
/*
Next record gives next record according last record read (even
......
......@@ -275,7 +275,7 @@ bool check_if_key_used(TABLE *table, uint idx, List<Item> &fields)
key is not updated
*/
if (idx != table->primary_key && table->primary_key < MAX_KEY &&
(table->file->option_flag() & HA_PRIMARY_KEY_IN_READ_INDEX))
(table->file->table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX))
return check_if_key_used(table, table->primary_key, fields);
return 0;
}
......@@ -240,6 +240,7 @@ static SYMBOL symbols[] = {
{ "MEDIUMINT", SYM(MEDIUMINT),0,0},
{ "MERGE", SYM(MERGE_SYM),0,0},
{ "MEDIUM", SYM(MEDIUM_SYM),0,0},
{ "MEMORY", SYM(MEMORY_SYM),0,0},
{ "MIDDLEINT", SYM(MEDIUMINT),0,0}, /* For powerbuilder */
{ "MIN_ROWS", SYM(MIN_ROWS),0,0},
{ "MINUTE", SYM(MINUTE_SYM),0,0},
......
......@@ -131,7 +131,7 @@ void TEST_filesort(SORT_FIELD *sortorder,uint s_length, ha_rows special)
DBUG_LOCK_FILE;
VOID(fputs("\nInfo about FILESORT\n",DBUG_FILE));
if (special)
fprintf(DBUG_FILE,"Records to sort: %ld\n",special);
fprintf(DBUG_FILE,"Records to sort: %lu\n",(ulong) special);
fprintf(DBUG_FILE,"Sortorder: %s\n",out.ptr());
DBUG_UNLOCK_FILE;
DBUG_VOID_RETURN;
......
......@@ -23,6 +23,8 @@
#include "sql_acl.h"
#include "sql_select.h"
static bool safe_update_on_fly(JOIN_TAB *join_tab, List<Item> *fields);
/* Return 0 if row hasn't changed */
static bool compare_record(TABLE *table, ulong query_id)
......@@ -520,11 +522,12 @@ int multi_update::prepare(List<Item> &not_used_values)
/*
Store first used table in main_table as this should be updated first
This is because we know that no row in this table will be read twice.
Initialize table for multi table
Create temporary tables to store changed values for all other tables
that are updated.
IMPLEMENTATION
- Update first table in join on the fly, if possible
- Create temporary tables to store changed values for all other tables
that are updated (and main_table if the above doesn't hold).
*/
bool
......@@ -538,52 +541,113 @@ multi_update::initialize_tables(JOIN *join)
main_table=join->join_tab->table;
trans_safe= transactional_tables= main_table->file->has_transactions();
log_delayed= trans_safe || main_table->tmp_table != NO_TMP_TABLE;
table_to_update= (main_table->file->table_flags() & HA_NOT_MULTI_UPDATE) ?
(TABLE *) 0 : main_table;
/* Create a temporary table for all tables after except main table */
table_to_update= 0;
/* Create a temporary table for keys to all tables, except main table */
for (table_ref= update_tables; table_ref; table_ref=table_ref->next)
{
TABLE *table=table_ref->table;
if (table != table_to_update)
{
uint cnt= table_ref->shared;
ORDER group;
List<Item> temp_fields= *fields_for_table[cnt];
TMP_TABLE_PARAM *tmp_param= tmp_table_param+cnt;
/*
Create a temporary table to store all fields that are changed for this
table. The first field in the temporary table is a pointer to the
original row so that we can find and update it
*/
/* ok to be on stack as this is not referenced outside of this func */
Field_string offset(table->file->ref_length, 0, "offset",
table, 1);
if (temp_fields.push_front(new Item_field(((Field *) &offset))))
DBUG_RETURN(1);
uint cnt= table_ref->shared;
List<Item> temp_fields= *fields_for_table[cnt];
ORDER group;
/* Make a unique key over the first field to avoid duplicated updates */
bzero((char*) &group, sizeof(group));
group.asc= 1;
group.item= (Item**) temp_fields.head_ref();
tmp_param->quick_group=1;
tmp_param->field_count=temp_fields.elements;
tmp_param->group_parts=1;
tmp_param->group_length= table->file->ref_length;
if (!(tmp_tables[cnt]=create_tmp_table(thd,
tmp_param,
temp_fields,
(ORDER*) &group, 0, 0, 0,
TMP_TABLE_ALL_COLUMNS)))
DBUG_RETURN(1);
tmp_tables[cnt]->file->extra(HA_EXTRA_WRITE_CACHE);
if (table == main_table) // First table in join
{
if (safe_update_on_fly(join->join_tab, &temp_fields))
{
table_to_update= main_table; // Update table on the fly
continue;
}
}
TMP_TABLE_PARAM *tmp_param= tmp_table_param+cnt;
/*
Create a temporary table to store all fields that are changed for this
table. The first field in the temporary table is a pointer to the
original row so that we can find and update it
*/
/* ok to be on stack as this is not referenced outside of this func */
Field_string offset(table->file->ref_length, 0, "offset",
table, 1);
if (temp_fields.push_front(new Item_field(((Field *) &offset))))
DBUG_RETURN(1);
/* Make a unique key over the first field to avoid duplicated updates */
bzero((char*) &group, sizeof(group));
group.asc= 1;
group.item= (Item**) temp_fields.head_ref();
tmp_param->quick_group=1;
tmp_param->field_count=temp_fields.elements;
tmp_param->group_parts=1;
tmp_param->group_length= table->file->ref_length;
if (!(tmp_tables[cnt]=create_tmp_table(thd,
tmp_param,
temp_fields,
(ORDER*) &group, 0, 0, 0,
TMP_TABLE_ALL_COLUMNS)))
DBUG_RETURN(1);
tmp_tables[cnt]->file->extra(HA_EXTRA_WRITE_CACHE);
}
DBUG_RETURN(0);
}
/*
Check if table is safe to update on fly
SYNOPSIS
safe_update_on_fly
join_tab How table is used in join
fields Fields that are updated
NOTES
We can update the first table in the join on the fly if we know that
a row in this table will never be read twice. This is true under
the following conditions:
- We are doing a table scan and the data is in a separate file (MyISAM) or
if we don't update a clustered key.
- We are doing a range scan and we don't update the scan key or
the primary key for a clustered table handler.
WARNING
This code is somewhat dependent on how make_join_readinfo() works.
RETURN
0 Not safe to update
1 Safe to update
*/
static bool safe_update_on_fly(JOIN_TAB *join_tab, List<Item> *fields)
{
TABLE *table= join_tab->table;
switch (join_tab->type) {
case JT_SYSTEM:
case JT_CONST:
case JT_EQ_REF:
return 1; // At most one matching row
case JT_REF:
return !check_if_key_used(table, join_tab->ref.key, *fields);
case JT_ALL:
/* If range search on index */
if (join_tab->quick)
return !check_if_key_used(table, join_tab->quick->index,
*fields);
/* If scanning in clustered key */
if ((table->file->table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
table->primary_key < MAX_KEY)
return !check_if_key_used(table, table->primary_key, *fields);
return 1;
default:
break; // Avoid compiler warning
}
return 0;
}
multi_update::~multi_update()
{
......
......@@ -256,6 +256,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b,int *yystacksize);
%token MAX_UPDATES_PER_HOUR
%token MEDIUM_SYM
%token MERGE_SYM
%token MEMORY_SYM
%token MIN_ROWS
%token MYISAM_SYM
%token NATIONAL_SYM
......@@ -871,6 +872,7 @@ table_types:
| MYISAM_SYM { $$= DB_TYPE_MYISAM; }
| MERGE_SYM { $$= DB_TYPE_MRG_MYISAM; }
| HEAP_SYM { $$= DB_TYPE_HEAP; }
| MEMORY_SYM { $$= DB_TYPE_HEAP; }
| BERKELEY_DB_SYM { $$= DB_TYPE_BERKELEY_DB; }
| INNOBASE_SYM { $$= DB_TYPE_INNODB; };
......@@ -3316,6 +3318,7 @@ keyword:
| MAX_UPDATES_PER_HOUR {}
| MEDIUM_SYM {}
| MERGE_SYM {}
| MEMORY_SYM {}
| MINUTE_SYM {}
| MIN_ROWS {}
| MODIFY_SYM {}
......