Commit 938f5f69 authored by unknown

After-merge fixes for the patch solving bug#18437 "Wrong values inserted with a
before update trigger on NDB table".

Two main changes:
- In 5.1 we use the TABLE::read_set/write_set bitmaps instead of
  Field::query_id for marking the fields used by a statement.
- When marking the columns used by a statement we now take into account only
  the columns actually used by the table's triggers, instead of marking all
  columns as used if the table has triggers (see the simplified sketch below).
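
To make the bitmap-based marking concrete, here is a small self-contained C++
sketch of the idea. It is only a model: the std::bitset members and the
mark_trigger_columns()/mark_all_columns() helpers are hypothetical stand-ins for
the real MY_BITMAP-based TABLE::read_set/write_set and
Table_triggers_list::mark_fields_used(), not the actual server API.

```cpp
#include <bitset>
#include <cstdio>
#include <vector>

constexpr std::size_t MAX_FIELDS = 16;

// One field reference made by a trigger body: which column it touches and
// whether the trigger can assign to it (NEW.col used on the left of SET).
struct TriggerFieldUse {
  std::size_t field_idx;
  bool settable;
};

struct Table {
  std::bitset<MAX_FIELDS> read_set;   // columns the handler must retrieve
  std::bitset<MAX_FIELDS> write_set;  // columns the handler must store
  std::vector<TriggerFieldUse> insert_trigger_fields;

  // Old 5.0-style behaviour: any trigger on the table forced every column
  // to be treated as used.
  void mark_all_columns() {
    read_set.set();
    write_set.set();
  }

  // Behaviour modelled by this patch: mark only the columns that the
  // INSERT triggers actually reference.
  void mark_trigger_columns() {
    for (const TriggerFieldUse &f : insert_trigger_fields) {
      read_set.set(f.field_idx);
      if (f.settable)
        write_set.set(f.field_idx);
    }
  }
};

int main() {
  Table t;
  // Pretend the BEFORE INSERT trigger reads column 1 and assigns column 2.
  t.insert_trigger_fields = {{1, false}, {2, true}};
  t.mark_trigger_columns();
  std::printf("read_set=%s write_set=%s\n",
              t.read_set.to_string().c_str(),
              t.write_set.to_string().c_str());
  return 0;
}
```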


mysql-test/r/federated.result:
  Changed the test to make it work with RBR.
  RBR changes the way we execute the "DELETE FROM t1" statement: we don't
  use the handler::delete_all_rows() method if RBR is enabled (see bug#19066).
  As a result the federated engine produces different sequences of statements
  for the remote server in the non-RBR and RBR cases, and this changes the
  order of the rows inserted by the following INSERT statements.
mysql-test/t/federated.test:
  Changed the test to make it work with RBR.
  RBR changes the way we execute the "DELETE FROM t1" statement: we don't
  use the handler::delete_all_rows() method if RBR is enabled (see bug#19066).
  As a result the federated engine produces different sequences of statements
  for the remote server in the non-RBR and RBR cases, and this changes the
  order of the rows inserted by the following INSERT statements.
sql/ha_partition.cc:
  Added handling of HA_EXTRA_WRITE_CAN_REPLACE/HA_EXTRA_WRITE_CANNOT_REPLACE
  to ha_partition::extra() (see the toy sketch after this note).
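
The argument in the new ha_partition::extra() comment (shown in the hunk
further below) can be illustrated with a toy, hypothetical model: when the
unique key and the partitioning column differ, the replacing row may belong to
a different partition than the row it conflicts with, so letting the engine
replace the row in place inside one partition would be wrong.

```cpp
#include <cstdio>

constexpr int NUM_PARTS = 4;

struct Row {
  int a;  // unique key column
  int b;  // partitioning column, not part of the unique key
};

// Toy partitioning function on column b (stand-in for a hash/key function).
int partition_for(const Row &r) { return r.b % NUM_PARTS; }

int main() {
  Row old_row{1, 2};  // already stored; lives in partition 2
  Row new_row{1, 7};  // conflicts on a == 1, but maps to partition 3
  std::printf("old partition=%d, new partition=%d\n",
              partition_for(old_row), partition_for(new_row));
  // Because the partitions differ, a handler-level "replace on conflict"
  // inside partition 2 would leave the new row in the wrong partition;
  // the SQL layer must delete the old row and insert the new one instead.
  return 0;
}
```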
sql/item.cc:
  Adjusted comment after merge. In 5.1 we use TABLE::read_set/write_set
  bitmaps instead of Field::query_id for marking columns used.
sql/log_event.cc:
  Write_rows_log_event::do_before_row_operations():
    Now we explicitly inform the handler that we want to replace rows, so it
    can promote the operation done by write_row() to a replace.
sql/mysql_priv.h:
  Removed declaration of mark_fields_used_by_triggers_for_insert_stmt() which
  is no longer used (we have TABLE::mark_columns_needed_for_insert() instead).
sql/sql_insert.cc:
  Adjusted code after merge. Got rid of mark_fields_used_by_triggers_for_insert_stmt()
  since we now use TABLE::mark_columns_needed_for_insert() for the same purpose.
  Aligned the places where we call this method with the places where we call
  mark_fields_used_by_triggers_for_insert_stmt() in 5.0.
  Finally, we no longer need to call handler::extra(HA_EXTRA_RETRIEVE_ALL_COLS)
  for a REPLACE statement, since in 5.1 write_record() marks all columns as
  used before doing the actual row replacement (see the sketch after this note).
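
As a minimal illustration of why that extra hint became redundant, here is a
hypothetical sketch (plain C++, not the real write_record()/handler code) of
the duplicate-key branch of a REPLACE-style insert: it marks every column as
used itself, right before the old row is removed and the new one written.

```cpp
#include <bitset>
#include <cstdio>

constexpr std::size_t MAX_FIELDS = 8;

struct Table {
  std::bitset<MAX_FIELDS> read_set;
  std::bitset<MAX_FIELDS> write_set;

  void use_all_columns() {
    read_set.set();
    write_set.set();
  }
};

// Hypothetical stand-in for the duplicate-key branch of write_record():
// in 5.1 all columns are marked as used right here, so callers no longer
// have to ask the handler up front to retrieve every column.
void replace_conflicting_row(Table &table) {
  table.use_all_columns();
  // ... read the conflicting row, fire ON DELETE triggers, delete it,
  //     then write the new row ...
}

int main() {
  Table t;
  replace_conflicting_row(t);
  std::printf("all columns marked: %s\n",
              (t.read_set.all() && t.write_set.all()) ? "yes" : "no");
  return 0;
}
```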
sql/sql_load.cc:
  Adjusted code after merge. In 5.1 we use TABLE::mark_columns_needed_for_insert() instead of
  the mark_fields_used_by_triggers_for_insert_stmt() routine. We also no longer
  need to call handler::extra(HA_EXTRA_RETRIEVE_ALL_COLS) if we execute LOAD
  DATA REPLACE since in 5.1 write_record() will mark all columns as used before
  doing actual row replacement.
sql/sql_trigger.cc:
  Table_triggers_list::mark_fields_used():
    In 5.1 we use the TABLE::read_set/write_set bitmaps instead of
    Field::query_id for marking used fields.
sql/sql_trigger.h:
  The TABLE::mark_columns_needed_for_* methods no longer need to be friends of
  the Table_triggers_list class, since instead of directly accessing its private
  members they can use the public Table_triggers_list::mark_fields_used() method.
  Also, Table_triggers_list::mark_fields_used() no longer needs a THD argument.
sql/table.cc:
  TABLE::mark_columns_needed_for_*():
    Now we mark only the columns which are actually used by the table's triggers
    instead of marking all columns as used if the table has triggers.
parent d6d5501b
......@@ -1611,20 +1611,20 @@ create trigger federated.t1_bi before insert on federated.t1 for each row set ne
create table federated.t2 (a int, b int);
insert into federated.t2 values (13, 17), (19, 23);
insert into federated.t1 (a, b) values (1, 2), (3, 5), (7, 11);
select * from federated.t1;
select * from federated.t1 order by a;
a b c
1 2 2
3 5 15
7 11 77
delete from federated.t1;
insert into federated.t1 (a, b) select * from federated.t2;
select * from federated.t1;
select * from federated.t1 order by a;
a b c
13 17 221
19 23 437
delete from federated.t1;
load data infile '../std_data_ln/loaddata5.dat' into table federated.t1 fields terminated by '' enclosed by '' ignore 1 lines (a, b);
select * from federated.t1;
select * from federated.t1 order by a;
a b c
3 4 12
5 6 30
......
......@@ -1391,15 +1391,15 @@ insert into federated.t2 values (13, 17), (19, 23);
# Each of three statements should correctly set values for all three fields
# insert
insert into federated.t1 (a, b) values (1, 2), (3, 5), (7, 11);
select * from federated.t1;
select * from federated.t1 order by a;
delete from federated.t1;
# insert ... select
insert into federated.t1 (a, b) select * from federated.t2;
select * from federated.t1;
select * from federated.t1 order by a;
delete from federated.t1;
# load
load data infile '../std_data_ln/loaddata5.dat' into table federated.t1 fields terminated by '' enclosed by '' ignore 1 lines (a, b);
select * from federated.t1;
select * from federated.t1 order by a;
drop tables federated.t1, federated.t2;
connection slave;
......
......@@ -4665,6 +4665,27 @@ int ha_partition::extra(enum ha_extra_function operation)
*/
break;
}
case HA_EXTRA_WRITE_CAN_REPLACE:
case HA_EXTRA_WRITE_CANNOT_REPLACE:
{
/*
Informs the handler that write_row() can replace rows which conflict
with the row being inserted (on the PK or a unique key) without
reporting an error to the SQL layer.
This optimization is not safe for a partitioned table in the general
case, since we may have to put the new version of the row into a
partition different from the one in which the old version resides
(for example, when we partition by a non-PK column or by a column
that is not part of the unique key that was violated).
And since NDB, which is at the moment the only engine supporting this
optimization, handles partitioning on its own, we simply disable the
optimization here. (For NDB the optimization is safe, since it supports
only KEY partitioning and won't use it for tables which have additional
unique constraints.)
*/
break;
}
default:
{
/* Temporary crash to discover what is wrong */
......
......@@ -5437,11 +5437,11 @@ void Item_trigger_field::setup_field(THD *thd, TABLE *table,
GRANT_INFO *table_grant_info)
{
/*
There is no sense in marking fields used by trigger with current value
of THD::query_id since it is completely unrelated to the THD::query_id
value for statements which will invoke trigger. So instead we use
Table_triggers_list::mark_fields_used() method which is called during
execution of these statements.
It is too early to mark fields used here, because before execution
of the statement that will invoke the trigger, other statements may
use the same TABLE object, so any such mark-up would be wiped out.
So instead we do it in the Table_triggers_list::mark_fields_used()
method, which is called during execution of these statements.
*/
enum_mark_columns save_mark_used_columns= thd->mark_used_columns;
thd->mark_used_columns= MARK_COLUMNS_NONE;
......
......@@ -6112,6 +6112,7 @@ int Write_rows_log_event::do_before_row_operations(TABLE *table)
thd->lex->sql_command= SQLCOM_REPLACE;
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); // Needed for ndbcluster
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE); // Needed for ndbcluster
table->file->extra(HA_EXTRA_IGNORE_NO_KEY); // Needed for ndbcluster
/*
TODO: the cluster team (Tomas?) says that it's better if the engine knows
......
......@@ -908,8 +908,6 @@ bool mysql_insert(THD *thd,TABLE_LIST *table,List<Item> &fields,
bool ignore);
int check_that_all_fields_are_given_values(THD *thd, TABLE *entry,
TABLE_LIST *table_list);
void mark_fields_used_by_triggers_for_insert_stmt(THD *thd, TABLE *table,
enum_duplicates duplic);
bool mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds);
bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
SQL_LIST *order, ha_rows rows, ulonglong options,
......
......@@ -181,9 +181,6 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
}
}
}
if (table->found_next_number_field)
table->mark_auto_increment_column();
table->mark_columns_needed_for_insert();
// For the values we need select_priv
#ifndef NO_EMBEDDED_ACCESS_CHECKS
table->grant.want_privilege= (SELECT_ACL & ~table->grant.privilege);
......@@ -255,33 +252,6 @@ static int check_update_fields(THD *thd, TABLE_LIST *insert_table_list,
}
/*
Mark fields used by triggers for INSERT-like statement.
SYNOPSIS
mark_fields_used_by_triggers_for_insert_stmt()
thd The current thread
table Table to which insert will happen
duplic Type of duplicate handling for insert which will happen
NOTE
For REPLACE there is no sense in marking particular fields
used by ON DELETE trigger as to execute it properly we have
to retrieve and store values for all table columns anyway.
*/
void mark_fields_used_by_triggers_for_insert_stmt(THD *thd, TABLE *table,
enum_duplicates duplic)
{
if (table->triggers)
{
table->triggers->mark_fields_used(thd, TRG_EVENT_INSERT);
if (duplic == DUP_UPDATE)
table->triggers->mark_fields_used(thd, TRG_EVENT_UPDATE);
}
}
bool mysql_insert(THD *thd,TABLE_LIST *table_list,
List<Item> &fields,
List<List_item> &values_list,
......@@ -442,17 +412,9 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
thd->proc_info="update";
if (duplic != DUP_ERROR || ignore)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
if (duplic == DUP_REPLACE)
{
if (!table->triggers || !table->triggers->has_delete_triggers())
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
/*
REPLACE should change values of all columns so we should mark
all columns as columns to be set. As nice side effect we will
retrieve columns which values are needed for ON DELETE triggers.
*/
table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
}
if (duplic == DUP_REPLACE &&
(!table->triggers || !table->triggers->has_delete_triggers()))
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
/*
let's *try* to start bulk inserts. It won't necessary
start them as values_list.elements should be greater than
......@@ -481,7 +443,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
error= 1;
}
mark_fields_used_by_triggers_for_insert_stmt(thd, table, duplic);
table->mark_columns_needed_for_insert();
if (table_list->prepare_where(thd, 0, TRUE) ||
table_list->prepare_check_option(thd))
......@@ -2346,12 +2308,9 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
thd->cuted_fields=0;
if (info.ignore || info.handle_duplicates != DUP_ERROR)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
if (info.handle_duplicates == DUP_REPLACE)
{
if (!table->triggers || !table->triggers->has_delete_triggers())
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
}
if (info.handle_duplicates == DUP_REPLACE &&
(!table->triggers || !table->triggers->has_delete_triggers()))
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
thd->no_trans_update= 0;
thd->abort_on_warning= (!info.ignore &&
(thd->variables.sql_mode &
......@@ -2363,8 +2322,8 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
table_list->prepare_check_option(thd));
if (!res)
mark_fields_used_by_triggers_for_insert_stmt(thd, table,
info.handle_duplicates);
table->mark_columns_needed_for_insert();
DBUG_RETURN(res);
}
......@@ -2840,12 +2799,9 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
thd->cuted_fields=0;
if (info.ignore || info.handle_duplicates != DUP_ERROR)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
if (info.handle_duplicates == DUP_REPLACE)
{
if (!table->triggers || !table->triggers->has_delete_triggers())
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
}
if (info.handle_duplicates == DUP_REPLACE &&
(!table->triggers || !table->triggers->has_delete_triggers()))
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
if (!thd->prelocked_mode)
table->file->ha_start_bulk_insert((ha_rows) 0);
thd->no_trans_update= 0;
......@@ -2853,8 +2809,10 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
(thd->variables.sql_mode &
(MODE_STRICT_TRANS_TABLES |
MODE_STRICT_ALL_TABLES)));
DBUG_RETURN(check_that_all_fields_are_given_values(thd, table,
table_list));
if (check_that_all_fields_are_given_values(thd, table, table_list))
DBUG_RETURN(1);
table->mark_columns_needed_for_insert();
DBUG_RETURN(0);
}
......
......@@ -187,9 +187,6 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
table= table_list->table;
transactional_table= table->file->has_transactions();
if (table->found_next_number_field)
table->mark_auto_increment_column();
if (!fields_vars.elements)
{
Field **field;
......@@ -232,7 +229,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
DBUG_RETURN(TRUE);
}
mark_fields_used_by_triggers_for_insert_stmt(thd, table, handle_duplicates);
table->mark_columns_needed_for_insert();
uint tot_length=0;
bool use_blobs= 0, use_vars= 0;
......@@ -364,13 +361,10 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
if (ignore ||
handle_duplicates == DUP_REPLACE)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
if (handle_duplicates == DUP_REPLACE)
{
if (!table->triggers ||
!table->triggers->has_delete_triggers())
if (handle_duplicates == DUP_REPLACE &&
(!table->triggers ||
!table->triggers->has_delete_triggers()))
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
}
if (!thd->prelocked_mode)
table->file->ha_start_bulk_insert((ha_rows) 0);
table->copy_blobs=1;
......
......@@ -1543,12 +1543,12 @@ bool Table_triggers_list::process_triggers(THD *thd, trg_event_type event,
DESCRIPTION
This method marks fields of subject table which are read/set in its
triggers as such (by setting Field::query_id equal to THD::query_id)
triggers as such (by properly updating TABLE::read_set/write_set)
and thus informs handler that values for these fields should be
retrieved/stored during execution of statement.
*/
void Table_triggers_list::mark_fields_used(THD *thd, trg_event_type event)
void Table_triggers_list::mark_fields_used(trg_event_type event)
{
int action_time;
Item_trigger_field *trg_field;
......@@ -1560,9 +1560,14 @@ void Table_triggers_list::mark_fields_used(THD *thd, trg_event_type event)
{
/* We cannot mark fields which does not present in table. */
if (trg_field->field_idx != (uint)-1)
table->field[trg_field->field_idx]->query_id = thd->query_id;
{
bitmap_set_bit(table->read_set, trg_field->field_idx);
if (trg_field->get_settable_routine_parameter())
bitmap_set_bit(table->write_set, trg_field->field_idx);
}
}
}
table->file->column_bitmaps_signal();
}
......
......@@ -125,7 +125,7 @@ class Table_triggers_list: public Sql_alloc
void set_table(TABLE *new_table);
void mark_fields_used(THD *thd, trg_event_type event);
void mark_fields_used(trg_event_type event);
friend class Item_trigger_field;
friend int sp_cache_routines_and_add_tables_for_triggers(THD *thd, LEX *lex,
......@@ -140,10 +140,6 @@ class Table_triggers_list: public Sql_alloc
const char *db_name,
LEX_STRING *old_table_name,
LEX_STRING *new_table_name);
friend void st_table::mark_columns_needed_for_insert(void);
friend void st_table::mark_columns_needed_for_update(void);
friend void st_table::mark_columns_needed_for_delete(void);
};
extern const LEX_STRING trg_action_time_type_names[];
......
......@@ -3925,16 +3925,7 @@ void st_table::mark_auto_increment_column()
void st_table::mark_columns_needed_for_delete()
{
if (triggers)
{
if (triggers->bodies[TRG_EVENT_DELETE][TRG_ACTION_BEFORE] ||
triggers->bodies[TRG_EVENT_DELETE][TRG_ACTION_AFTER])
{
/* TODO: optimize to only add columns used by trigger */
use_all_columns();
return;
}
}
triggers->mark_fields_used(TRG_EVENT_DELETE);
if (file->ha_table_flags() & HA_REQUIRES_KEY_COLUMNS_FOR_DELETE)
{
Field **reg_field;
......@@ -3985,15 +3976,7 @@ void st_table::mark_columns_needed_for_update()
{
DBUG_ENTER("mark_columns_needed_for_update");
if (triggers)
{
if (triggers->bodies[TRG_EVENT_UPDATE][TRG_ACTION_BEFORE] ||
triggers->bodies[TRG_EVENT_UPDATE][TRG_ACTION_AFTER])
{
/* TODO: optimize to only add columns used by trigger */
use_all_columns();
DBUG_VOID_RETURN;
}
}
triggers->mark_fields_used(TRG_EVENT_UPDATE);
if (file->ha_table_flags() & HA_REQUIRES_KEY_COLUMNS_FOR_DELETE)
{
/* Mark all used key columns for read */
......@@ -4036,13 +4019,14 @@ void st_table::mark_columns_needed_for_insert()
{
if (triggers)
{
if (triggers->bodies[TRG_EVENT_INSERT][TRG_ACTION_BEFORE] ||
triggers->bodies[TRG_EVENT_INSERT][TRG_ACTION_AFTER])
{
/* TODO: optimize to only add columns used by trigger */
use_all_columns();
return;
}
/*
We don't need to mark columns which are used by ON DELETE and
ON UPDATE triggers, which may be invoked in case of REPLACE or
INSERT ... ON DUPLICATE KEY UPDATE, since before doing actual
row replacement or update write_record() will mark all table
fields as used.
*/
triggers->mark_fields_used(TRG_EVENT_INSERT);
}
if (found_next_number_field)
mark_auto_increment_column();
......