Commit 47e02ca7 authored by Kentoku's avatar Kentoku

MDEV-18994 Fix the bug encountered when the size of (v1,v2,v3...) value list...

MDEV-18994 Fix the bug encountered when the size of the (v1,v2,v3...) value list in "select * from tb where id in (v1, v2, v3...)" is greater than 1000
Some storage engines need to opt out of the IN-predicate-to-subquery optimization.
parent 0cc7c608
......@@ -327,7 +327,12 @@ enum enum_alter_inplace_result {
/* Support native hash index */
#define HA_CAN_HASH_KEYS (1ULL << 58)
/**
  Storage engine does not want to transform IN predicates into IN subqueries
*/
#define HA_IGNORE_TRANSFORM_IN_INTO_SUBQ (1ULL << 59)
/*
  HA_LAST_TABLE_FLAG must always name the highest table-flag bit; it moves
  from HA_CAN_HASH_KEYS to the newly added flag.  (The scraped diff showed
  both the old and the new definition; only the new one may remain, or the
  macro is redefined.)
*/
#define HA_LAST_TABLE_FLAG HA_IGNORE_TRANSFORM_IN_INTO_SUBQ
/* bits in index_flags(index_number) for what you can do with index */
#define HA_READ_NEXT 1 /* TODO really use this flag */
......
......@@ -6435,18 +6435,7 @@ class Item_cache: public Item,
bool value_cached;
table_map used_table_map;
public:
Item_cache(THD *thd):
Item(thd),
Type_handler_hybrid_field_type(&type_handler_string),
example(0), cached_field(0),
value_cached(0),
used_table_map(0)
{
maybe_null= 1;
null_value= 1;
}
protected:
Item_cache(THD *thd, const Type_handler *handler):
Item(thd),
Type_handler_hybrid_field_type(handler),
......@@ -6876,7 +6865,7 @@ class Item_cache_row: public Item_cache
bool save_array;
public:
/*
  Construct a row cache.  The commit removed the one-argument
  Item_cache(THD*) constructor, so delegate explicitly to
  Item_cache(THD*, const Type_handler*) with &type_handler_row.
  (The scraped diff showed both the old and the new initializer line;
  keeping both would not compile.)
*/
Item_cache_row(THD *thd):
  Item_cache(thd, &type_handler_row), values(0), item_count(2),
  save_array(0) {}
/*
......
......@@ -8308,9 +8308,9 @@ int setup_conds(THD *thd, TABLE_LIST *tables, List<TABLE_LIST> &leaves,
*/
if ((*conds)->type() == Item::FIELD_ITEM && !derived)
wrap_ident(thd, conds);
(*conds)->mark_as_condition_AND_part(NO_JOIN_NEST);
if ((*conds)->fix_fields_if_needed_for_bool(thd, conds))
goto err_no_arena;
(*conds)->mark_as_condition_AND_part(NO_JOIN_NEST);
}
/*
......
......@@ -800,15 +800,48 @@ Item *Item_func_in::in_predicate_to_in_subs_transformer(THD *thd,
/**
  Decide whether this IN predicate should be transformed into an IN subquery.

  The transformation is suppressed when the storage engine of the field(s)
  on the left-hand side sets HA_IGNORE_TRANSFORM_IN_INTO_SUBQ (e.g. Spider,
  which wants to push the IN list down to the remote server unchanged).
  Otherwise the predicate is transformed when the number of values on the
  right-hand side reaches thd->variables.in_subquery_conversion_threshold.

  @param thd  current thread, read for in_subquery_conversion_threshold
  @return true if the predicate is to be transformed, false otherwise

  Note: the original scraped diff contained both the removed bare
  `return` statements and the added DBUG_RETURN lines; only DBUG_RETURN
  may be used here, since a bare `return` after DBUG_ENTER corrupts the
  DBUG call-stack tracing.
*/
bool Item_func_in::to_be_transformed_into_in_subq(THD *thd)
{
  DBUG_ENTER("Item_func_in::to_be_transformed_into_in_subq");
  uint values_count= arg_count-1;
  if (args[0]->type() == Item::FIELD_ITEM)
  {
    /* Single-column IN: honour the engine flag of the column's table. */
    Field *field = ((Item_field *) args[0])->field;
    if (field && (field->table->file->ha_table_flags() &
                  HA_IGNORE_TRANSFORM_IN_INTO_SUBQ))
      DBUG_RETURN(false);
  }
  else if (args[0]->type() == Item::ROW_ITEM)
  {
    /*
      Multi-column IN: (e1, e2, ...) IN (...).  Suppress the transformation
      only when the whole row was scanned (col_index == cols) and at least
      one element is a field whose engine sets the flag, all other elements
      being such fields or constants; any other element stops the scan.
    */
    bool ignore= FALSE;
    Item_row *item_row= (Item_row *) args[0];
    uint cols= item_row->cols(), col_index;
    for (col_index= 0; col_index < cols; ++col_index)
    {
      Item *item= item_row->element_index(col_index);
      if (item->type() == Item::FIELD_ITEM)
      {
        Field *field = ((Item_field *) item)->field;
        if (field && (field->table->file->ha_table_flags() &
                      HA_IGNORE_TRANSFORM_IN_INTO_SUBQ))
        {
          ignore= TRUE;
          continue;
        }
        break;
      }
      if (!item->const_item())
        break;
    }
    if (col_index == cols && ignore)
      DBUG_RETURN(false);
  }
  /* Each row on the right-hand side contributes cols() values. */
  if (args[1]->type() == Item::ROW_ITEM)
    values_count*= ((Item_row *)(args[1]))->cols();
  if (values_count < thd->variables.in_subquery_conversion_threshold)
    DBUG_RETURN(false);
  DBUG_RETURN(true);
}
......
......@@ -9419,6 +9419,9 @@ ulonglong ha_spider::table_flags() const
#endif
#ifdef HA_CAN_FORCE_BULK_DELETE
(share && share->force_bulk_delete ? HA_CAN_FORCE_BULK_DELETE : 0) |
#endif
#ifdef HA_IGNORE_TRANSFORM_IN_INTO_SUBQ
HA_IGNORE_TRANSFORM_IN_INTO_SUBQ |
#endif
(share ? share->additional_table_flags : 0)
;
......
# Restore the connection/table setup variables from their backups.
--let $MASTER_1_COMMENT_2_1= $MASTER_1_COMMENT_2_1_BACKUP
--let $CHILD2_1_DROP_TABLES= $CHILD2_1_DROP_TABLES_BACKUP
--let $CHILD2_1_CREATE_TABLES= $CHILD2_1_CREATE_TABLES_BACKUP
--let $CHILD2_1_SELECT_TABLES= $CHILD2_1_SELECT_TABLES_BACKUP
# Tear down the test environment with all logging suppressed.
--disable_warnings
--disable_query_log
--disable_result_log
--source ../t/test_deinit.inc
--enable_result_log
--enable_query_log
--enable_warnings
# NOTE(review): re-initialization right after deinit suggests this span is
# the tail of one test file joined to the head of another in the scraped
# page -- TODO confirm the file boundary.
--disable_warnings
--disable_query_log
--disable_result_log
--source ../t/test_init.inc
--enable_result_log
--enable_query_log
--enable_warnings
# Back up current settings, then point MASTER_1's Spider table at the
# remote table "tbl_a" via server "s_2_1".
--let $MASTER_1_COMMENT_2_1_BACKUP= $MASTER_1_COMMENT_2_1
let $MASTER_1_COMMENT_2_1=
COMMENT='table "tbl_a", srv "s_2_1"';
--let $CHILD2_1_DROP_TABLES_BACKUP= $CHILD2_1_DROP_TABLES
let $CHILD2_1_DROP_TABLES=
DROP TABLE IF EXISTS tbl_a;
--let $CHILD2_1_CREATE_TABLES_BACKUP= $CHILD2_1_CREATE_TABLES
# Remote table with a two-column primary key (c1, c2) -- exercises both the
# single-column and row-IN code paths.
let $CHILD2_1_CREATE_TABLES=
CREATE TABLE tbl_a (
c1 int NOT NULL,
c2 int NOT NULL,
PRIMARY KEY (c1, c2)
) $CHILD2_1_ENGINE $CHILD2_1_CHARSET;
--let $CHILD2_1_SELECT_TABLES_BACKUP= $CHILD2_1_SELECT_TABLES
let $CHILD2_1_SELECT_TABLES=
SELECT COUNT(*) FROM tbl_a;
# Query the general log to inspect which SELECT statements were sent to the
# remote (child) server.
let $CHILD2_1_SELECT_ARGUMENT1=
SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %';
# Doubling inserts: 1 seed row doubled 11 times = 2048 rows, so IN lists
# larger than the ~1000-value threshold from the commit message can be
# exercised.
let $CHILD2_1_DATA_PREPARE=
INSERT INTO tbl_a (c1,c2) VALUES (1, 1) $STR_SEMICOLON
INSERT INTO tbl_a (c1,c2) SELECT c1 + 1, c2 + 1 FROM tbl_a $STR_SEMICOLON
INSERT INTO tbl_a (c1,c2) SELECT c1 + 2, c2 + 2 FROM tbl_a $STR_SEMICOLON
INSERT INTO tbl_a (c1,c2) SELECT c1 + 4, c2 + 4 FROM tbl_a $STR_SEMICOLON
INSERT INTO tbl_a (c1,c2) SELECT c1 + 8, c2 + 8 FROM tbl_a $STR_SEMICOLON
INSERT INTO tbl_a (c1,c2) SELECT c1 + 16, c2 + 16 FROM tbl_a $STR_SEMICOLON
INSERT INTO tbl_a (c1,c2) SELECT c1 + 32, c2 + 32 FROM tbl_a $STR_SEMICOLON
INSERT INTO tbl_a (c1,c2) SELECT c1 + 64, c2 + 64 FROM tbl_a $STR_SEMICOLON
INSERT INTO tbl_a (c1,c2) SELECT c1 + 128, c2 + 128 FROM tbl_a $STR_SEMICOLON
INSERT INTO tbl_a (c1,c2) SELECT c1 + 256, c2 + 256 FROM tbl_a $STR_SEMICOLON
INSERT INTO tbl_a (c1,c2) SELECT c1 + 512, c2 + 512 FROM tbl_a $STR_SEMICOLON
INSERT INTO tbl_a (c1,c2) SELECT c1 + 1024, c2 + 1024 FROM tbl_a $STR_SEMICOLON
FLUSH TABLES;
!include include/default_mysqld.cnf
!include ../my_1_1.cnf
!include ../my_2_1.cnf
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment