Commit 790852c0 authored by Georgi Kodinov

Bug #55580 : segfault in read_view_sees_trx_id

The server was not checking for errors generated during
the execution of Item::val_xxx() methods when copying
data to the group, order, or distinct temp table's row.
Fixed by extending copy_funcs() to return an error
code and by checking for that error code at the places
where copy_funcs() is called.
Test case added.
parent 8b25c0e4
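The diffs below implement a simple error-propagation pattern: copy_funcs() now reports whether any Item::val_xxx() call raised an error on the THD, and every caller aborts the current row when it does. The following is a minimal standalone sketch of that pattern, using simplified stand-ins for the server's Item and THD types (it is not the actual server code):

// Minimal standalone sketch of the error-propagation pattern introduced by
// this commit. Item, THD and copy_funcs() below are simplified stand-ins,
// not the real server definitions.
#include <cstdio>

struct THD
{
  bool error_raised;
  bool is_error() const { return error_raised; }
};

struct Item
{
  THD *thd;
  bool fails;                          // pretend val_xxx() raises an error
  void save_in_result_field(bool)
  {
    if (fails)
      thd->error_raised= true;         // errors land in the THD, not in a return code
  }
};

// Before the fix copy_funcs() returned void and silently ignored any error
// raised while evaluating the items; now the THD error state is inspected
// after each item and the failure is propagated to the caller.
static bool copy_funcs(Item **func_ptr, const THD *thd)
{
  for (Item *func; (func= *func_ptr); func_ptr++)
  {
    func->save_in_result_field(true);
    if (thd->is_error())
      return true;
  }
  return false;
}

int main()
{
  THD thd= { false };
  Item ok=  { &thd, false };
  Item bad= { &thd, true };
  Item *funcs[]= { &ok, &bad, nullptr };

  // Caller-side pattern used in end_write()/end_update()/end_write_group():
  // abort the current row as soon as copy_funcs() signals an error.
  if (copy_funcs(funcs, &thd))
    std::printf("error detected while copying functions, row aborted\n");
  return 0;
}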
@@ -2499,4 +2499,26 @@ ORDER BY f1 DESC LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range f2,f4 f4 1 NULL 11 Using where
DROP TABLE t1;
#
# Bug#55580: segfault in read_view_sees_trx_id
#
CREATE TABLE t1 (a INT) ENGINE=Innodb;
CREATE TABLE t2 (a INT) ENGINE=Innodb;
INSERT INTO t1 VALUES (1),(2);
INSERT INTO t2 VALUES (1),(2);
START TRANSACTION;
SELECT * FROM t2 LOCK IN SHARE MODE;
a
1
2
START TRANSACTION;
SELECT * FROM t1 LOCK IN SHARE MODE;
a
1
2
SELECT * FROM t1 FOR UPDATE;
# should not crash
SELECT * FROM t1 GROUP BY (SELECT a FROM t2 LIMIT 1 FOR UPDATE) + t1.a;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
DROP TABLE t1,t2;
End of 5.1 tests
@@ -733,4 +733,39 @@ ORDER BY f1 DESC LIMIT 5;
DROP TABLE t1;
--echo #
--echo # Bug#55580: segfault in read_view_sees_trx_id
--echo #
CREATE TABLE t1 (a INT) ENGINE=Innodb;
CREATE TABLE t2 (a INT) ENGINE=Innodb;
INSERT INTO t1 VALUES (1),(2);
INSERT INTO t2 VALUES (1),(2);
connect (con1,localhost,root,,test);
connect (con2,localhost,root,,test);
connection con1;
START TRANSACTION;
SELECT * FROM t2 LOCK IN SHARE MODE;
connection con2;
START TRANSACTION;
SELECT * FROM t1 LOCK IN SHARE MODE;
connection con1;
--send SELECT * FROM t1 FOR UPDATE
connection con2;
--echo # should not crash
--error ER_LOCK_DEADLOCK
SELECT * FROM t1 GROUP BY (SELECT a FROM t2 LIMIT 1 FOR UPDATE) + t1.a;
connection default;
disconnect con1;
disconnect con2;
DROP TABLE t1,t2;
--echo End of 5.1 tests
@@ -2556,7 +2556,8 @@ bool Item_sum_count_distinct::add()
  if (always_null)
    return 0;
  copy_fields(tmp_table_param);
-  copy_funcs(tmp_table_param->items_to_copy);
+  if (copy_funcs(tmp_table_param->items_to_copy, table->in_use))
+    return TRUE;
  for (Field **field=table->field ; *field ; field++)
    if ((*field)->is_real_null(0))
@@ -3128,7 +3129,8 @@ bool Item_func_group_concat::add()
  if (always_null)
    return 0;
  copy_fields(tmp_table_param);
-  copy_funcs(tmp_table_param->items_to_copy);
+  if (copy_funcs(tmp_table_param->items_to_copy, table->in_use))
+    return TRUE;
  for (uint i= 0; i < arg_count_field; i++)
  {
...
@@ -12487,7 +12487,9 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
  if (!end_of_records)
  {
    copy_fields(&join->tmp_table_param);
-    copy_funcs(join->tmp_table_param.items_to_copy);
+    if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd))
+      DBUG_RETURN(NESTED_LOOP_ERROR);           /* purecov: inspected */
#ifdef TO_BE_DELETED
    if (!table->uniques)                        // If not unique handling
    {
@@ -12593,7 +12595,8 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
      memcpy(table->record[0]+key_part->offset, group->buff, 1);
  }
  init_tmptable_sum_functions(join->sum_funcs);
-  copy_funcs(join->tmp_table_param.items_to_copy);
+  if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd))
+    DBUG_RETURN(NESTED_LOOP_ERROR);             /* purecov: inspected */
  if ((error=table->file->ha_write_row(table->record[0])))
  {
    if (create_myisam_from_heap(join->thd, table, &join->tmp_table_param,
@@ -12628,7 +12631,8 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
  init_tmptable_sum_functions(join->sum_funcs);
  copy_fields(&join->tmp_table_param);          // Groups are copied twice.
-  copy_funcs(join->tmp_table_param.items_to_copy);
+  if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd))
+    DBUG_RETURN(NESTED_LOOP_ERROR);             /* purecov: inspected */
  if (!(error=table->file->ha_write_row(table->record[0])))
    join->send_records++;                       // New group
@@ -12715,7 +12719,8 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
    if (idx < (int) join->send_group_parts)
    {
      copy_fields(&join->tmp_table_param);
-      copy_funcs(join->tmp_table_param.items_to_copy);
+      if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd))
+        DBUG_RETURN(NESTED_LOOP_ERROR);
      if (init_sum_functions(join->sum_funcs, join->sum_funcs_end[idx+1]))
        DBUG_RETURN(NESTED_LOOP_ERROR);
      if (join->procedure)
@@ -15775,14 +15780,39 @@ update_sum_func(Item_sum **func_ptr)
  return 0;
}

-/** Copy result of functions to record in tmp_table. */
-
-void
-copy_funcs(Item **func_ptr)
+/**
+  Copy result of functions to record in tmp_table.
+
+  Uses the thread pointer to check for errors in
+  some of the val_xxx() methods called by the
+  save_in_result_field() function.
+  TODO: make the Item::val_xxx() return error code
+
+  @param func_ptr  array of the function Items to copy to the tmp table
+  @param thd       pointer to the current thread for error checking
+
+  @retval
+    FALSE if OK
+  @retval
+    TRUE  on error
+*/
+
+bool
+copy_funcs(Item **func_ptr, const THD *thd)
{
  Item *func;
  for (; (func = *func_ptr) ; func_ptr++)
+  {
    func->save_in_result_field(1);
+    /*
+      Need to check the THD error state because Item::val_xxx() don't
+      return error code, but can generate errors
+      TODO: change it for a real status check when Item::val_xxx()
+      are extended to return status code.
+    */
+    if (thd->is_error())
+      return TRUE;
+  }
+  return FALSE;
}
...
@@ -601,7 +601,7 @@ bool setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
                       List<Item> &new_list1, List<Item> &new_list2,
                       uint elements, List<Item> &fields);
void copy_fields(TMP_TABLE_PARAM *param);
-void copy_funcs(Item **func_ptr);
+bool copy_funcs(Item **func_ptr, const THD *thd);
bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
                             int error, bool ignore_last_dupp_error);
uint find_shortest_key(TABLE *table, const key_map *usable_keys);
...