Commit 2d18c5be authored by Evgeny Potemkin

Bug#50539: Wrong result when loose index scan is used for an aggregate
           function with distinct.
Loose index scan uses a suitable index to find MIN/MAX values directly and
thus avoids grouping. For each row it finds, it updates the non-aggregated
fields with values from the row that holds the found MIN/MAX value.
Without loose index scan, non-aggregated fields are copied by the
end_send_group function. With loose index scan there is no need for
end_send_group, so end_send is used instead. Non-aggregated fields still
have to be copied, and this was wrongly implemented in
QUICK_GROUP_MIN_MAX_SELECT::get_next.
WL#3220 added a case where loose index scan can be used together with
end_send_group to optimize the calculation of aggregate functions with
distinct. In that case the row found by QUICK_GROUP_MIN_MAX_SELECT::get_next
may already belong to the next group, so copying its fields produces a wrong
result.

The update of non-aggregated fields is moved from
QUICK_GROUP_MIN_MAX_SELECT::get_next to the end_send function.
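
To make the failure scenario concrete, here is a minimal illustration; it
mirrors the test case added by this patch, and the EXPLAIN row is what
confirms that loose index scan ("Using index for group-by (scanning)") is
chosen for the query:

  # A composite PRIMARY KEY lets the optimizer resolve COUNT(DISTINCT f2)
  # per group with a loose index scan instead of a full grouping pass.
  CREATE TABLE t1 (
    f1 INT NOT NULL,
    f2 CHAR(1) NOT NULL,
    PRIMARY KEY (f1, f2)
  );
  INSERT INTO t1 VALUES (1,'A'), (1,'B'), (1,'C'), (2,'A'),
                        (3,'A'), (3,'B'), (3,'C'), (3,'D');

  # Expected result: (1,3), (2,1), (3,4). Before the fix, the copy done in
  # QUICK_GROUP_MIN_MAX_SELECT::get_next could pick up the key of the next
  # group, so the non-aggregated column f1 could be reported with the next
  # group's value.
  SELECT f1, COUNT(DISTINCT f2) FROM t1 GROUP BY f1;
  EXPLAIN SELECT f1, COUNT(DISTINCT f2) FROM t1 GROUP BY f1;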


mysql-test/r/group_min_max.result:
  Added a test case for bug#50539.
mysql-test/t/group_min_max.test:
  Added a test case for bug#50539.
sql/opt_range.cc:
  Bug#50539: Wrong result when loose index scan is used for an aggregate
  function with distinct.
  The update of non-aggregated fields is moved from
  QUICK_GROUP_MIN_MAX_SELECT::get_next to the end_send function.
sql/sql_select.cc:
  Bug#50539: Wrong result when loose index scan is used for an aggregate
  function with distinct.
  The update of non-aggregated fields is moved from
  QUICK_GROUP_MIN_MAX_SELECT::get_next to the end_send function.
parent d9338ad3
--- a/mysql-test/r/group_min_max.result
+++ b/mysql-test/r/group_min_max.result
@@ -2686,7 +2686,7 @@ a c COUNT(DISTINCT c, a, b)
 1 1 1
 1 1 1
 1 1 1
-2 1 1
+1 1 1
 2 1 1
 2 1 1
 2 1 1
@@ -2714,7 +2714,7 @@ id select_type table type possible_keys key key_len ref rows Extra
 1 SIMPLE t2 range NULL a 10 NULL 9 Using index for group-by
 SELECT a, COUNT(DISTINCT b), SUM(DISTINCT b) FROM t2 GROUP BY a;
 a COUNT(DISTINCT b) SUM(DISTINCT b)
-2 8 36
+1 8 36
 2 8 36
 EXPLAIN SELECT COUNT(DISTINCT b), SUM(DISTINCT b) FROM t2 GROUP BY a;
 id select_type table type possible_keys key key_len ref rows Extra
@@ -2761,7 +2761,7 @@ SELECT 42 * (a + c + COUNT(DISTINCT c, a, b)) FROM t2 GROUP BY a, b, c;
 126
 126
 126
-168
+126
 168
 168
 168
@@ -2779,3 +2779,24 @@ SELECT (SUM(DISTINCT a) + MAX(b)) FROM t2 GROUP BY a;
 10
 DROP TABLE t1,t2;
 # end of WL#3220 tests
+#
+# Bug#50539: Wrong result when loose index scan is used for an aggregate
+# function with distinct
+#
+CREATE TABLE t1 (
+f1 int(11) NOT NULL DEFAULT '0',
+f2 char(1) NOT NULL DEFAULT '',
+PRIMARY KEY (f1,f2)
+) ;
+insert into t1 values(1,'A'),(1 , 'B'), (1, 'C'), (2, 'A'),
+(3, 'A'), (3, 'B'), (3, 'C'), (3, 'D');
+SELECT f1, COUNT(DISTINCT f2) FROM t1 GROUP BY f1;
+f1 COUNT(DISTINCT f2)
+1 3
+2 1
+3 4
+explain SELECT f1, COUNT(DISTINCT f2) FROM t1 GROUP BY f1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range NULL PRIMARY 5 NULL 9 Using index for group-by (scanning)
+drop table t1;
+# End of test#50539.
--- a/mysql-test/t/group_min_max.test
+++ b/mysql-test/t/group_min_max.test
@@ -1166,3 +1166,22 @@ SELECT (SUM(DISTINCT a) + MAX(b)) FROM t2 GROUP BY a;
 DROP TABLE t1,t2;
 --echo # end of WL#3220 tests
+--echo #
+--echo # Bug#50539: Wrong result when loose index scan is used for an aggregate
+--echo # function with distinct
+--echo #
+CREATE TABLE t1 (
+f1 int(11) NOT NULL DEFAULT '0',
+f2 char(1) NOT NULL DEFAULT '',
+PRIMARY KEY (f1,f2)
+) ;
+insert into t1 values(1,'A'),(1 , 'B'), (1, 'C'), (2, 'A'),
+(3, 'A'), (3, 'B'), (3, 'C'), (3, 'D');
+SELECT f1, COUNT(DISTINCT f2) FROM t1 GROUP BY f1;
+explain SELECT f1, COUNT(DISTINCT f2) FROM t1 GROUP BY f1;
+drop table t1;
+--echo # End of test#50539.
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -10959,17 +10959,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::get_next()
   } while ((result == HA_ERR_KEY_NOT_FOUND || result == HA_ERR_END_OF_FILE) &&
            is_last_prefix != 0);
 
-  if (result == 0)
-  {
-    /*
-      Partially mimic the behavior of end_select_send. Copy the
-      field data from Item_field::field into Item_field::result_field
-      of each non-aggregated field (the group fields, and optionally
-      other fields in non-ANSI SQL mode).
-    */
-    copy_fields(&join->tmp_table_param);
-  }
-  else if (result == HA_ERR_KEY_NOT_FOUND)
+  if (result == HA_ERR_KEY_NOT_FOUND)
     result= HA_ERR_END_OF_FILE;
 
   DBUG_RETURN(result);
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -12255,6 +12255,12 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
   if (!end_of_records)
   {
     int error;
+    if (join->tables &&
+        join->join_tab->is_using_loose_index_scan())
+    {
+      /* Copy non-aggregated fields when loose index scan is used. */
+      copy_fields(&join->tmp_table_param);
+    }
     if (join->having && join->having->val_int() == 0)
       DBUG_RETURN(NESTED_LOOP_OK);               // Didn't match having
     error=0;