nexedi / MariaDB · Commit e87440b7
Merge branch '10.4' into 10.5
Authored May 03, 2023 by Oleksandr Byelkin
Parents: 69932b6e ed3e6f66

Showing 7 changed files with 1161 additions and 45 deletions (+1161 / -45)
mysql-test/main/derived_split_innodb.result   +517 -0
mysql-test/main/derived_split_innodb.test     +213 -0
mysql-test/main/opt_trace.result              +114 -0
mysql-test/main/opt_trace.test                +70 -0
sql/opt_split.cc                              +185 -37
sql/sql_select.cc                             +34 -5
sql/sql_select.h                              +28 -3
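For orientation before the diffs: the change this merge brings in (MDEV-26301) is about how often the "LATERAL DERIVED" (split) optimization refills a GROUP BY derived table that is joined through an equality on its grouping column. A minimal sketch of that query shape, assuming the t1 and t10 tables that the new tests below create (illustrative only, not part of the commit):

explain
select *
from t1 left join (select grp_id, count(*) as cnt
                   from t10 group by grp_id) dt
     on dt.grp_id = t1.b;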
mysql-test/main/derived_split_innodb.result
...
@@ -285,3 +285,520 @@ id select_type table type possible_keys key key_len ref rows Extra
2 DERIVED t4 ALL NULL NULL NULL NULL 40 Using filesort
drop table t3, t4;
# End of 10.3 tests
#
# MDEV-26301: Split optimization refills temporary table too many times
#
create table t1(a int, b int);
insert into t1 select seq,seq from seq_1_to_5;
create table t2(a int, b int, key(a));
insert into t2
select A.seq,B.seq from seq_1_to_25 A, seq_1_to_2 B;
create table t3(a int, b int, key(a));
insert into t3
select A.seq,B.seq from seq_1_to_5 A, seq_1_to_3 B;
analyze table t1,t2,t3 persistent for all;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status Table is already up to date
test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status Table is already up to date
explain
select * from
(t1 left join t2 on t2.a=t1.b) left join t3 on t3.a=t1.b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 5
1 SIMPLE t2 ref a a 5 test.t1.b 2 Using where
1 SIMPLE t3 ref a a 5 test.t1.b 3 Using where
create table t10 (
grp_id int,
col1 int,
key(grp_id)
);
insert into t10
select
A.seq,
B.seq
from
seq_1_to_100 A,
seq_1_to_100 B;
create table t11 (
col1 int,
col2 int
);
insert into t11
select A.seq, A.seq from seq_1_to_10 A;
analyze table t10,t11 persistent for all;
Table Op Msg_type Msg_text
test.t10 analyze status Engine-independent statistics collected
test.t10 analyze status Table is already up to date
test.t11 analyze status Engine-independent statistics collected
test.t11 analyze status OK
explain select * from
(
(t1 left join t2 on t2.a=t1.b)
left join t3 on t3.a=t1.b
) left join (select grp_id, count(*)
from t10 left join t11 on t11.col1=t10.col1
group by grp_id) T on T.grp_id=t1.b;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 5
1 PRIMARY t2 ref a a 5 test.t1.b 2 Using where
1 PRIMARY t3 ref a a 5 test.t1.b 3 Using where
1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t1.b 100
2 LATERAL DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
# The important part in the below output is:
# "lateral": 1,
# "query_block": {
# "select_id": 2,
# "r_loops": 5, <-- must be 5, not 30.
analyze format=json select * from
(
(t1 left join t2 on t2.a=t1.b)
left join t3 on t3.a=t1.b
) left join (select grp_id, count(*)
from t10 left join t11 on t11.col1=t10.col1
group by grp_id) T on T.grp_id=t1.b;
ANALYZE
{
"query_block": {
"select_id": 1,
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"const_condition": "1",
"table": {
"table_name": "t1",
"access_type": "ALL",
"r_loops": 1,
"rows": 5,
"r_rows": 5,
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
"r_filtered": 100
},
"table": {
"table_name": "t2",
"access_type": "ref",
"possible_keys": ["a"],
"key": "a",
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t1.b"],
"r_loops": 5,
"rows": 2,
"r_rows": 2,
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
"r_filtered": 100,
"attached_condition": "trigcond(trigcond(t1.b is not null))"
},
"table": {
"table_name": "t3",
"access_type": "ref",
"possible_keys": ["a"],
"key": "a",
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t1.b"],
"r_loops": 10,
"rows": 3,
"r_rows": 3,
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
"r_filtered": 100,
"attached_condition": "trigcond(trigcond(t1.b is not null))"
},
"table": {
"table_name": "<derived2>",
"access_type": "ref",
"possible_keys": ["key0"],
"key": "key0",
"key_length": "5",
"used_key_parts": ["grp_id"],
"ref": ["test.t1.b"],
"r_loops": 30,
"rows": 10,
"r_rows": 1,
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
"r_filtered": 100,
"attached_condition": "trigcond(trigcond(t1.b is not null))",
"materialized": {
"lateral": 1,
"query_block": {
"select_id": 2,
"r_loops": 5,
"r_total_time_ms": "REPLACED",
"outer_ref_condition": "t1.b is not null",
"table": {
"table_name": "t10",
"access_type": "ref",
"possible_keys": ["grp_id"],
"key": "grp_id",
"key_length": "5",
"used_key_parts": ["grp_id"],
"ref": ["test.t1.b"],
"r_loops": 5,
"rows": 100,
"r_rows": 100,
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
"r_filtered": 100
},
"block-nl-join": {
"table": {
"table_name": "t11",
"access_type": "ALL",
"r_loops": 5,
"rows": 10,
"r_rows": 10,
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
"r_filtered": 100
},
"buffer_type": "flat",
"buffer_size": "1Kb",
"join_type": "BNL",
"attached_condition": "trigcond(t11.col1 = t10.col1)",
"r_filtered": 10
}
}
}
}
}
}
create table t21 (pk int primary key);
insert into t21 values (1),(2),(3);
create table t22 (pk int primary key);
insert into t22 values (1),(2),(3);
explain
select * from
t21, t22,
(
(t1 left join t2 on t2.a=t1.b)
left join t3 on t3.a=t1.b
) left join (select grp_id, count(*)
from t10 left join t11 on t11.col1=t10.col1
group by grp_id) T on T.grp_id=t1.b
where
t21.pk=1 and t22.pk=2;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t21 const PRIMARY PRIMARY 4 const 1 Using index
1 PRIMARY t22 const PRIMARY PRIMARY 4 const 1 Using index
1 PRIMARY t1 ALL NULL NULL NULL NULL 5
1 PRIMARY t2 ref a a 5 test.t1.b 2 Using where
1 PRIMARY t3 ref a a 5 test.t1.b 3 Using where
1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t1.b 100
2 LATERAL DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
explain
select * from
t21,
(
(t1 left join t2 on t2.a=t1.b)
left join t3 on t3.a=t1.b
) left join (select grp_id, count(*)
from
t22 join t10 left join t11 on t11.col1=t10.col1
where
t22.pk=1
group by grp_id) T on T.grp_id=t1.b
where
t21.pk=1;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t21 const PRIMARY PRIMARY 4 const 1 Using index
1 PRIMARY t1 ALL NULL NULL NULL NULL 5
1 PRIMARY t2 ref a a 5 test.t1.b 2 Using where
1 PRIMARY t3 ref a a 5 test.t1.b 3 Using where
1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
2 LATERAL DERIVED t22 const PRIMARY PRIMARY 4 const 1 Using index
2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t1.b 100
2 LATERAL DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
create table t5 (
pk int primary key
);
insert into t5 select seq from seq_1_to_1000;
explain
select * from
t21,
(
(((t1 join t5 on t5.pk=t1.b)) left join t2 on t2.a=t1.b)
left join t3 on t3.a=t1.b
) left join (select grp_id, count(*)
from
t22 join t10 left join t11 on t11.col1=t10.col1
where
t22.pk=1
group by grp_id) T on T.grp_id=t1.b
where
t21.pk=1;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t21 const PRIMARY PRIMARY 4 const 1 Using index
1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where
1 PRIMARY t5 eq_ref PRIMARY PRIMARY 4 test.t1.b 1 Using index
1 PRIMARY t2 ref a a 5 test.t1.b 2
1 PRIMARY t3 ref a a 5 test.t1.b 3
1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
2 LATERAL DERIVED t22 const PRIMARY PRIMARY 4 const 1 Using index
2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t5.pk 100 Using index condition
2 LATERAL DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
drop table t1,t2,t3,t5, t10, t11, t21, t22;
create table t1(a int, b int);
insert into t1 select seq,seq from seq_1_to_5;
create table t2(a int, b int, key(a));
insert into t2
select A.seq,B.seq from seq_1_to_25 A, seq_1_to_2 B;
create table t3(a int, b int, key(a));
insert into t3
select A.seq,B.seq from seq_1_to_5 A, seq_1_to_3 B;
analyze table t1,t2,t3 persistent for all;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status Table is already up to date
test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status Table is already up to date
create table t10 (
grp_id int,
col1 int,
key(grp_id)
);
insert into t10
select
A.seq,
B.seq
from
seq_1_to_100 A,
seq_1_to_100 B;
create table t11 (
col1 int,
col2 int
);
insert into t11
select A.seq, A.seq from seq_1_to_10 A;
analyze table t10,t11 persistent for all;
Table Op Msg_type Msg_text
test.t10 analyze status Engine-independent statistics collected
test.t10 analyze status Table is already up to date
test.t11 analyze status Engine-independent statistics collected
test.t11 analyze status OK
explain select *
from
(
(t1 left join t2 on t2.a=t1.b)
left join
t3
on t3.a=t1.b
)
left join
(
select grp_id, count(*)
from t10 left join t11 on t11.col1=t10.col1
group by grp_id
)dt
on dt.grp_id=t1.b;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 5
1 PRIMARY t2 ref a a 5 test.t1.b 2 Using where
1 PRIMARY t3 ref a a 5 test.t1.b 3 Using where
1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t1.b 100
2 LATERAL DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
select *
from
(
(t1 left join t2 on t2.a=t1.b)
left join
t3
on t3.a=t1.b
)
left join
(
select grp_id, count(*)
from t10 left join t11 on t11.col1=t10.col1
group by grp_id
)dt
on dt.grp_id=t1.b;
a b a b a b grp_id count(*)
1 1 1 1 1 1 1 100
1 1 1 1 1 2 1 100
1 1 1 1 1 3 1 100
1 1 1 2 1 1 1 100
1 1 1 2 1 2 1 100
1 1 1 2 1 3 1 100
2 2 2 1 2 1 2 100
2 2 2 1 2 2 2 100
2 2 2 1 2 3 2 100
2 2 2 2 2 1 2 100
2 2 2 2 2 2 2 100
2 2 2 2 2 3 2 100
3 3 3 1 3 1 3 100
3 3 3 1 3 2 3 100
3 3 3 1 3 3 3 100
3 3 3 2 3 1 3 100
3 3 3 2 3 2 3 100
3 3 3 2 3 3 3 100
4 4 4 1 4 1 4 100
4 4 4 1 4 2 4 100
4 4 4 1 4 3 4 100
4 4 4 2 4 1 4 100
4 4 4 2 4 2 4 100
4 4 4 2 4 3 4 100
5 5 5 1 5 1 5 100
5 5 5 1 5 2 5 100
5 5 5 1 5 3 5 100
5 5 5 2 5 1 5 100
5 5 5 2 5 2 5 100
5 5 5 2 5 3 5 100
set join_cache_level=4;
explain select *
from
(
(t1 left join t2 on t2.a=t1.b)
left join
t3
on t3.a=t1.b
)
left join
(
select grp_id, count(*)
from t10 left join t11 on t11.col1=t10.col1
group by grp_id
)dt
on dt.grp_id=t1.b;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 5
1 PRIMARY t2 ref a a 5 test.t1.b 2 Using where
1 PRIMARY t3 ref a a 5 test.t1.b 3 Using where
1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t1.b 100
2 LATERAL DERIVED t11 hash_ALL NULL #hash#$hj 5 test.t10.col1 10 Using where; Using join buffer (flat, BNLH join)
select *
from
(
(t1 left join t2 on t2.a=t1.b)
left join
t3
on t3.a=t1.b
)
left join
(
select grp_id, count(*)
from t10 left join t11 on t11.col1=t10.col1
group by grp_id
)dt
on dt.grp_id=t1.b;
a b a b a b grp_id count(*)
1 1 1 1 1 1 1 100
1 1 1 1 1 2 1 100
1 1 1 1 1 3 1 100
1 1 1 2 1 1 1 100
1 1 1 2 1 2 1 100
1 1 1 2 1 3 1 100
2 2 2 1 2 1 2 100
2 2 2 1 2 2 2 100
2 2 2 1 2 3 2 100
2 2 2 2 2 1 2 100
2 2 2 2 2 2 2 100
2 2 2 2 2 3 2 100
3 3 3 1 3 1 3 100
3 3 3 1 3 2 3 100
3 3 3 1 3 3 3 100
3 3 3 2 3 1 3 100
3 3 3 2 3 2 3 100
3 3 3 2 3 3 3 100
4 4 4 1 4 1 4 100
4 4 4 1 4 2 4 100
4 4 4 1 4 3 4 100
4 4 4 2 4 1 4 100
4 4 4 2 4 2 4 100
4 4 4 2 4 3 4 100
5 5 5 1 5 1 5 100
5 5 5 1 5 2 5 100
5 5 5 1 5 3 5 100
5 5 5 2 5 1 5 100
5 5 5 2 5 2 5 100
5 5 5 2 5 3 5 100
set join_cache_level=default;
drop index a on t2;
drop index a on t3;
explain select *
from
(
(t1 left join t2 on t2.a=t1.b)
left join
t3
on t3.a=t1.b
)
left join
(
select grp_id, count(*)
from t10 left join t11 on t11.col1=t10.col1
group by grp_id
)dt
on dt.grp_id=t1.b;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 5
1 PRIMARY t2 ALL NULL NULL NULL NULL 50 Using where; Using join buffer (flat, BNL join)
1 PRIMARY t3 ALL NULL NULL NULL NULL 15 Using where; Using join buffer (incremental, BNL join)
1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 1000 Using where
2 DERIVED t10 ALL grp_id NULL NULL NULL 10000 Using temporary; Using filesort
2 DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
select *
from
(
(t1 left join t2 on t2.a=t1.b)
left join
t3
on t3.a=t1.b
)
left join
(
select grp_id, count(*)
from t10 left join t11 on t11.col1=t10.col1
group by grp_id
)dt
on dt.grp_id=t1.b;
a b a b a b grp_id count(*)
1 1 1 1 1 1 1 100
1 1 1 2 1 1 1 100
1 1 1 1 1 2 1 100
1 1 1 2 1 2 1 100
1 1 1 1 1 3 1 100
1 1 1 2 1 3 1 100
2 2 2 1 2 1 2 100
2 2 2 2 2 1 2 100
2 2 2 1 2 2 2 100
2 2 2 2 2 2 2 100
2 2 2 1 2 3 2 100
2 2 2 2 2 3 2 100
3 3 3 1 3 1 3 100
3 3 3 2 3 1 3 100
3 3 3 1 3 2 3 100
3 3 3 2 3 2 3 100
3 3 3 1 3 3 3 100
3 3 3 2 3 3 3 100
4 4 4 1 4 1 4 100
4 4 4 2 4 1 4 100
4 4 4 1 4 2 4 100
4 4 4 2 4 2 4 100
4 4 4 1 4 3 4 100
4 4 4 2 4 3 4 100
5 5 5 1 5 1 5 100
5 5 5 2 5 1 5 100
5 5 5 1 5 2 5 100
5 5 5 2 5 2 5 100
5 5 5 1 5 3 5 100
5 5 5 2 5 3 5 100
drop table t1,t2,t3;
drop table t10, t11;
# End of 10.4 tests
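A note on the r_loops figures in the ANALYZE FORMAT=JSON output above (not part of the test output): t1 has 5 rows, each matching 2 rows of t2 and 3 rows of t3, so 30 row combinations reach the derived table T, but they carry only 5 distinct values of t1.b. Before this change the lateral derived table was refilled once per row combination (r_loops=30); with the fix it is refilled once per distinct splitting value (r_loops=5). The arithmetic can be checked directly against the same tables:

select count(*) as row_combinations, count(distinct t1.b) as refills_needed
from (t1 left join t2 on t2.a=t1.b) left join t3 on t3.a=t1.b;
# expected: 30 row combinations, 5 distinct values of t1.b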
mysql-test/main/derived_split_innodb.test
...
@@ -229,3 +229,216 @@ where t3.b > 15;
drop table t3, t4;
--echo # End of 10.3 tests

--source include/have_sequence.inc

--echo #
--echo # MDEV-26301: Split optimization refills temporary table too many times
--echo #

# 5 values
create table t1(a int, b int);
insert into t1 select seq,seq from seq_1_to_5;

# 5 value groups of size 2 each
create table t2(a int, b int, key(a));
insert into t2
select A.seq,B.seq from seq_1_to_25 A, seq_1_to_2 B;

# 5 value groups of size 3 each
create table t3(a int, b int, key(a));
insert into t3
select A.seq,B.seq from seq_1_to_5 A, seq_1_to_3 B;

analyze table t1,t2,t3 persistent for all;

explain
select * from
  (t1 left join t2 on t2.a=t1.b) left join t3 on t3.a=t1.b;

# Now, create tables for Groups.
create table t10 (
  grp_id int,
  col1 int,
  key(grp_id)
);

# 100 groups of 100 values each
insert into t10
select
  A.seq,
  B.seq
from
  seq_1_to_100 A,
  seq_1_to_100 B;

# and X10 multiplier
create table t11 (
  col1 int,
  col2 int
);
insert into t11
select A.seq, A.seq from seq_1_to_10 A;

analyze table t10,t11 persistent for all;

let $q1=
select * from
  (
    (t1 left join t2 on t2.a=t1.b)
    left join t3 on t3.a=t1.b
  ) left join (select grp_id, count(*)
               from t10 left join t11 on t11.col1=t10.col1
               group by grp_id) T on T.grp_id=t1.b;

eval explain $q1;

--echo # The important part in the below output is:
--echo # "lateral": 1,
--echo # "query_block": {
--echo # "select_id": 2,
--echo # "r_loops": 5, <-- must be 5, not 30.
--source include/analyze-format.inc
eval analyze format=json $q1;

create table t21 (pk int primary key);
insert into t21 values (1),(2),(3);

create table t22 (pk int primary key);
insert into t22 values (1),(2),(3);

# Same as above but throw in a couple of const tables.
explain
select * from
  t21, t22,
  (
    (t1 left join t2 on t2.a=t1.b)
    left join t3 on t3.a=t1.b
  ) left join (select grp_id, count(*)
               from t10 left join t11 on t11.col1=t10.col1
               group by grp_id) T on T.grp_id=t1.b
where
  t21.pk=1 and t22.pk=2;

explain
select * from
  t21,
  (
    (t1 left join t2 on t2.a=t1.b)
    left join t3 on t3.a=t1.b
  ) left join (select grp_id, count(*)
               from
                 t22 join t10 left join t11 on t11.col1=t10.col1
               where
                 t22.pk=1
               group by grp_id) T on T.grp_id=t1.b
where
  t21.pk=1;

# And also add a non-const table
create table t5 (
  pk int primary key
);
insert into t5 select seq from seq_1_to_1000;

explain
select * from
  t21,
  (
    (((t1 join t5 on t5.pk=t1.b)) left join t2 on t2.a=t1.b)
    left join t3 on t3.a=t1.b
  ) left join (select grp_id, count(*)
               from
                 t22 join t10 left join t11 on t11.col1=t10.col1
               where
                 t22.pk=1
               group by grp_id) T on T.grp_id=t1.b
where
  t21.pk=1;

drop table t1,t2,t3,t5, t10, t11, t21, t22;

# 5 values
create table t1(a int, b int);
insert into t1 select seq,seq from seq_1_to_5;

# 5 value groups of size 2 each
create table t2(a int, b int, key(a));
insert into t2
select A.seq,B.seq from seq_1_to_25 A, seq_1_to_2 B;

# 5 value groups of size 3 each
create table t3(a int, b int, key(a));
insert into t3
select A.seq,B.seq from seq_1_to_5 A, seq_1_to_3 B;

analyze table t1,t2,t3 persistent for all;

create table t10 (
  grp_id int,
  col1 int,
  key(grp_id)
);

# 100 groups of 100 values each
insert into t10
select
  A.seq,
  B.seq
from
  seq_1_to_100 A,
  seq_1_to_100 B;

# and X10 multiplier
create table t11 (
  col1 int,
  col2 int
);
insert into t11
select A.seq, A.seq from seq_1_to_10 A;

analyze table t10,t11 persistent for all;

let $q=
select *
from
  (
    (t1 left join t2 on t2.a=t1.b)
    left join
    t3
    on t3.a=t1.b
  )
  left join
  (
    select grp_id, count(*)
    from t10 left join t11 on t11.col1=t10.col1
    group by grp_id
  )dt
  on dt.grp_id=t1.b;

eval explain $q;
eval $q;

set join_cache_level=4;
eval explain $q;
eval $q;
set join_cache_level=default;

drop index a on t2;
drop index a on t3;

eval explain $q;
eval $q;

drop table t1,t2,t3;
drop table t10, t11;

--echo # End of 10.4 tests
mysql-test/main/opt_trace.result
...
@@ -449,6 +449,11 @@ select * from v2 {
}
]
},
{
"check_split_materialized": {
"not_applicable": "no candidate field can be accessed through ref"
}
},
{
"best_join_order": ["t1"]
},
...
@@ -772,6 +777,11 @@ explain select * from v1 {
}
]
},
{
"check_split_materialized": {
"not_applicable": "group list has no candidates"
}
},
{
"best_join_order": ["t1"]
},
...
@@ -8825,6 +8835,110 @@ SET optimizer_trace=DEFAULT;
DROP VIEW v;
DROP TABLE t;
#
# MDEV-26301: Split optimization improvements: Optimizer Trace coverage
#
create table t1(a int, b int);
insert into t1 select seq,seq from seq_1_to_5;
create table t2(a int, b int, key(a));
insert into t2
select A.seq,B.seq from seq_1_to_25 A, seq_1_to_2 B;
create table t3(a int, b int, key(a));
insert into t3
select A.seq,B.seq from seq_1_to_5 A, seq_1_to_3 B;
analyze table t1,t2,t3 persistent for all;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status Table is already up to date
test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status Table is already up to date
create table t10 (
grp_id int,
col1 int,
key(grp_id)
);
insert into t10
select
A.seq,
B.seq
from
seq_1_to_100 A,
seq_1_to_100 B;
create table t11 (
col1 int,
col2 int
);
insert into t11
select A.seq, A.seq from seq_1_to_10 A;
analyze table t10,t11 persistent for all;
Table Op Msg_type Msg_text
test.t10 analyze status Engine-independent statistics collected
test.t10 analyze status Table is already up to date
test.t11 analyze status Engine-independent statistics collected
test.t11 analyze status OK
set optimizer_trace=1;
explain
select * from
(
(t1 left join t2 on t2.a=t1.b)
left join t3 on t3.a=t1.b
) left join (select grp_id, count(*)
from t10 left join t11 on t11.col1=t10.col1
group by grp_id) T on T.grp_id=t1.b;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 5
1 PRIMARY t2 ref a a 5 test.t1.b 2 Using where
1 PRIMARY t3 ref a a 5 test.t1.b 3 Using where
1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t1.b 100
2 LATERAL DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
select json_detailed(json_extract(trace, '$**.check_split_materialized')) as JS
from information_schema.optimizer_trace;
JS
[
{
"split_candidates":
["t10.grp_id"]
}
]
select
json_detailed(
json_remove(
json_extract(trace, '$**.choose_best_splitting')
, '$[0].split_plan_search[0]'
)
) as JS
from information_schema.optimizer_trace;
JS
[
{
"considered_keys":
[
{
"table_name": "t10",
"index": "grp_id",
"rec_per_key": 100,
"param_tables": 1
}
],
"refills": 5,
"spl_pd_boundary": 2,
"split_plan_search":
[],
"lead_table": "t10",
"index": "grp_id",
"parts": 1,
"split_sel": 0.001,
"cost": 2535.968504,
"records": 100,
"refills": 5,
"chosen": true
}
]
drop table t1,t2,t3,t10,t11;
set optimizer_trace=DEFAULT;
#
# End of 10.4 tests
#
set optimizer_trace='enabled=on';
...
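The choose_best_splitting trace object shown above is new in this merge. Reading it: "refills" is the estimated number of times the lateral derived table has to be re-populated, "cost" is the cost of one refill with the chosen split plan, and per the new code in sql/opt_split.cc further below the plan is reported "chosen": true only when refills * cost stays below the cost of the unsplit materialization (unsplit_cost, which is not printed in the trace). The node can be pulled out of the trace exactly as the test does; an illustrative session sketch:

set optimizer_trace='enabled=on';
-- run the EXPLAIN of interest here
select json_detailed(json_extract(trace, '$**.choose_best_splitting')) as JS
from information_schema.optimizer_trace;
set optimizer_trace=DEFAULT;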
mysql-test/main/opt_trace.test
...
@@ -696,6 +696,76 @@ SET optimizer_trace=DEFAULT;
DROP VIEW v;
DROP TABLE t;

--echo #
--echo # MDEV-26301: Split optimization improvements: Optimizer Trace coverage
--echo #

# 5 values
create table t1(a int, b int);
insert into t1 select seq,seq from seq_1_to_5;

# 5 value groups of size 2 each
create table t2(a int, b int, key(a));
insert into t2
select A.seq,B.seq from seq_1_to_25 A, seq_1_to_2 B;

# 5 value groups of size 3 each
create table t3(a int, b int, key(a));
insert into t3
select A.seq,B.seq from seq_1_to_5 A, seq_1_to_3 B;

analyze table t1,t2,t3 persistent for all;

create table t10 (
  grp_id int,
  col1 int,
  key(grp_id)
);

# 100 groups of 100 values each
insert into t10
select
  A.seq,
  B.seq
from
  seq_1_to_100 A,
  seq_1_to_100 B;

# and X10 multiplier
create table t11 (
  col1 int,
  col2 int
);
insert into t11
select A.seq, A.seq from seq_1_to_10 A;

analyze table t10,t11 persistent for all;

set optimizer_trace=1;
explain
select * from
  (
    (t1 left join t2 on t2.a=t1.b)
    left join t3 on t3.a=t1.b
  ) left join (select grp_id, count(*)
               from t10 left join t11 on t11.col1=t10.col1
               group by grp_id) T on T.grp_id=t1.b;

select json_detailed(json_extract(trace, '$**.check_split_materialized')) as JS
from information_schema.optimizer_trace;

select
  json_detailed(
    json_remove(
      json_extract(trace, '$**.choose_best_splitting')
      , '$[0].split_plan_search[0]'
    )
  ) as JS
from information_schema.optimizer_trace;

drop table t1,t2,t3,t10,t11;
set optimizer_trace=DEFAULT;

--echo #
--echo # End of 10.4 tests
--echo #
...
sql/opt_split.cc
...
@@ -65,7 +65,7 @@
   If we have only one equi-join condition then we either push it as
   for Q1R or we don't. In a general case we may have much more options.
   Consider the query (Q3)
-    SELECT
+    SELECT *
       FROM t1,t2 (SELECT t3.a, t3.b, MIN(t3.c) as min
                   FROM t3 GROUP BY a,b) t
       WHERE t.a = t1.a AND t.b = t2.b
...
@@ -102,6 +102,47 @@
   If we just drop the index on t3(a,b) the chances that the splitting
   will be used becomes much lower but they still exists providing that
   the fanout of the partial join of t1 and t2 is small enough.
+
+  The lateral derived table LT formed as a result of SM optimization applied
+  to a materialized derived table DT must be joined after all parameters
+  of splitting has been evaluated, i.e. after all expressions used in the
+  equalities pushed into DT that make the employed splitting effective
+  could be evaluated. With the chosen join order all the parameters can be
+  evaluated after the last table LPT that contains any columns referenced in
+  the parameters has been joined and the table APT following LPT in the chosen
+  join order is accessed.
+  Usually the formed lateral derived table LT is accessed right after the table
+  LPT. As in such cases table LT must be refilled for each combination of
+  splitting parameters this table must be populated before each access to LT
+  and the estimate of the expected number of refills that could be suggested in
+  such cases is the number of rows in the partial join ending with table LPT.
+  However in other cases the chosen join order may contain tables between LPT
+  and LT.
+  Consider the query (Q4)
+    SELECT *
+      FROM t1 JOIN t2 ON t1.b = t2.b
+           LEFT JOIN (SELECT t3.a, t3.b, MIN(t3.c) as min
+                      FROM t3 GROUP BY a,b) t
+                ON t.a = t1.a AND t.c > 0
+      [WHERE P(t1,t2)];
+  Let's assume that the join order t1,t2,t was chosen for this query and
+  SP optimization was applied to t with splitting over t3.a using the index
+  on column t3.a. Here the table t1 serves as LPT, t2 as APT while t with
+  pushed condition t.a = t1.a serves as LT. Note that here LT is accessed
+  after t2, not right after t1. Here the number of refills of the lateral
+  derived is not more that the number of key values of t1.a that might be
+  less than the cardinality of the partial join (t1,t2). That's why it makes
+  sense to signal that t3 has to be refilled just before t2 is accessed.
+  However if the cardinality of the partial join (t1,t2) happens to be less
+  than the cardinality of the partial join (t1) due to additional selective
+  condition P(t1,t2) then the flag informing about necessity of a new refill
+  can be set either when accessing t2 or right after it has been joined.
+  The current code sets such flag right after generating a record of the
+  partial join with minimal cardinality for all those partial joins that
+  end between APT and LT. It allows sometimes to push extra conditions
+  into the lateral derived without any increase of the number of refills.
+  However this flag can be set only after the last join table between
+  APT and LT using join buffer has been joined.
 */

 /*
...
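To make the Q4 discussion in the comment above concrete, here it is written out against throwaway tables (illustrative only, not part of the commit; the MIN column is named min_c here since the derived table exposes no plain "c"). t1 is the last table supplying a splitting parameter (LPT), t2 is the table accessed right after it (APT), and the grouped derived table t is the lateral derived table (LT); the point of the new logic is that t needs at most one refill per distinct t1.a value rather than one per (t1,t2) row combination.

create table t1(a int, b int);
create table t2(b int);
create table t3(a int, b int, c int, key(a));
select *
from t1 join t2 on t1.b = t2.b
     left join (select t3.a, t3.b, min(t3.c) as min_c
                from t3 group by a,b) t
          on t.a = t1.a and t.min_c > 0;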
@@ -187,6 +228,7 @@
 #include "mariadb.h"
 #include "sql_select.h"
+#include "opt_trace.h"

 /* Info on a splitting field */
 struct SplM_field_info
...
@@ -248,6 +290,7 @@ class SplM_opt_info : public Sql_alloc
   double unsplit_card;
   /* Lastly evaluated execution plan for 'join' with pushed equalities */
   SplM_plan_info *last_plan;
+  double last_refills;

   SplM_plan_info *find_plan(TABLE *table, uint key, uint parts);
 };
...
@@ -345,6 +388,9 @@ bool JOIN::check_for_splittable_materialized()
   if (!partition_list)
     return false;

+  Json_writer_object trace_wrapper(thd);
+  Json_writer_object trace_split(thd, "check_split_materialized");
+
   ORDER *ord;
   Dynamic_array<SplM_field_ext_info> candidates(PSI_INSTRUMENT_MEM);
...
@@ -390,7 +436,10 @@ bool JOIN::check_for_splittable_materialized()
     }
   }
   if (candidates.elements() == 0)  // no candidates satisfying (8.1) && (8.2)
+  {
+    trace_split.add("not_applicable", "group list has no candidates");
     return false;
+  }

   /*
     For each table from this join find the keys that can be used for ref access
...
@@ -449,7 +498,11 @@ bool JOIN::check_for_splittable_materialized()
   }
   if (!spl_field_cnt)  // No candidate field can be accessed by ref => !(9)
+  {
+    trace_split.add("not_applicable",
+                    "no candidate field can be accessed through ref");
     return false;
+  }

   /*
     Create a structure of the type SplM_opt_info and fill it with
...
@@ -467,16 +520,22 @@ bool JOIN::check_for_splittable_materialized()
   spl_opt_info->tables_usable_for_splitting= 0;
   spl_opt_info->spl_field_cnt= spl_field_cnt;
   spl_opt_info->spl_fields= spl_field;
-  for (cand= cand_start; cand < cand_end; cand++)
-  {
-    if (!cand->is_usable_for_ref_access)
-      continue;
-    spl_field->producing_item= cand->producing_item;
-    spl_field->underlying_field= cand->underlying_field;
-    spl_field->mat_field= cand->mat_field;
-    spl_opt_info->tables_usable_for_splitting|=
-      cand->underlying_field->table->map;
-    spl_field++;
-  }
+  {
+    Json_writer_array trace_range(thd, "split_candidates");
+    for (cand= cand_start; cand < cand_end; cand++)
+    {
+      if (!cand->is_usable_for_ref_access)
+        continue;
+      trace_range.add(cand->producing_item);
+      spl_field->producing_item= cand->producing_item;
+      spl_field->underlying_field= cand->underlying_field;
+      spl_field->mat_field= cand->mat_field;
+      spl_opt_info->tables_usable_for_splitting|=
+        cand->underlying_field->table->map;
+      spl_field++;
+    }
+  }

   /* Attach this info to the table T */
...
@@ -731,7 +790,7 @@ void JOIN::add_keyuses_for_splitting()
   bzero((char *) &keyuse_ext_end, sizeof(keyuse_ext_end));
   if (ext_keyuses_for_splitting->push(keyuse_ext_end))
     goto err;

+  // psergey-todo: trace anything here?
   spl_opt_info->unsplit_card= join_record_count;

   rec_len= table->s->rec_buff_length;
...
@@ -829,13 +888,13 @@ SplM_plan_info *SplM_opt_info::find_plan(TABLE *table, uint key, uint parts)
 static
 void reset_validity_vars_for_keyuses(KEYUSE_EXT *key_keyuse_ext_start,
                                      TABLE *table, uint key,
-                                     table_map remaining_tables,
+                                     table_map excluded_tables,
                                      bool validity_val)
 {
   KEYUSE_EXT *keyuse_ext= key_keyuse_ext_start;
   do
   {
-    if (!(keyuse_ext->needed_in_prefix & remaining_tables))
+    if (!(keyuse_ext->needed_in_prefix & excluded_tables))
     {
       /*
         The enabling/disabling flags are set just in KEYUSE_EXT structures.
...
@@ -855,8 +914,11 @@ void reset_validity_vars_for_keyuses(KEYUSE_EXT *key_keyuse_ext_start,
   Choose the best splitting to extend the evaluated partial join

   @param
-    record_count      estimated cardinality of the extended partial join
+    idx               index for joined table T in current partial join P
     remaining_tables  tables not joined yet
+    spl_pd_boundary   OUT bitmap of the table from P extended by T that
+                      starts the sub-sequence of tables S from which
+                      no conditions are allowed to be pushed into T.

   @details
     This function is called during the search for the best execution
...
@@ -871,17 +933,19 @@ void reset_validity_vars_for_keyuses(KEYUSE_EXT *key_keyuse_ext_start,
     splitting the function set it as the true plan of materialization
     of the table T.
     The function caches the found plans for materialization of table T
-    together if the info what key was used for splitting. Next time when
+    together with the info what key was used for splitting. Next time when
     the optimizer prefers to use the same key the plan is taken from
     the cache of plans

   @retval
     Pointer to the info on the found plan that employs the pushed equalities
     if the plan has been chosen, NULL - otherwise.
+    If the function returns NULL the value of spl_param_tables is set to 0.
 */

-SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
-                                                 table_map remaining_tables)
+SplM_plan_info * JOIN_TAB::choose_best_splitting(uint idx,
+                                                 table_map remaining_tables,
+                                                 table_map *spl_pd_boundary)
 {
   SplM_opt_info *spl_opt_info= table->spl_opt_info;
   DBUG_ASSERT(spl_opt_info != NULL);
...
@@ -896,7 +960,9 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
   SplM_plan_info *spl_plan= 0;
   uint best_key= 0;
   uint best_key_parts= 0;
+  table_map best_param_tables;
+  Json_writer_object trace_obj(thd, "choose_best_splitting");
+  Json_writer_array trace_arr(thd, "considered_keys");
   /*
     Check whether there are keys that can be used to join T employing splitting
     and if so, select the best out of such keys
...
@@ -914,6 +980,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
       uint key= keyuse_ext->key;
       KEYUSE_EXT *key_keyuse_ext_start= keyuse_ext;
       key_part_map found_parts= 0;
+      table_map needed_in_prefix= 0;
       do
       {
         if (keyuse_ext->needed_in_prefix & remaining_tables)
...
@@ -939,6 +1006,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
           KEY *key_info= table->key_info + key;
           double rec_per_key=
             key_info->actual_rec_per_key(keyuse_ext->keypart);
+          needed_in_prefix|= keyuse_ext->needed_in_prefix;
           if (rec_per_key < best_rec_per_key)
           {
             best_table= keyuse_ext->table;
...
@@ -946,6 +1014,14 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
             best_key_parts= keyuse_ext->keypart + 1;
             best_rec_per_key= rec_per_key;
             best_key_keyuse_ext_start= key_keyuse_ext_start;
+            best_param_tables= needed_in_prefix;
+            // trace table, key_name, parts, needed_tables.
+            Json_writer_object cur_index(thd);
+            cur_index.
+              add("table_name", best_table->alias.ptr()).
+              add("index", best_table->key_info[best_key].name).
+              add("rec_per_key", best_rec_per_key).
+              add("param_tables", best_param_tables);
           }
           keyuse_ext++;
         }
...
@@ -953,9 +1029,37 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
       }
       while (keyuse_ext->table == table);
     }
   }
+  trace_arr.end();
   spl_opt_info->last_plan= 0;
+  double refills= DBL_MAX;
+  table_map excluded_tables= remaining_tables | this->join->sjm_lookup_tables;
   if (best_table)
   {
+    *spl_pd_boundary= this->table->map;
+    if (!best_param_tables)
+      refills= 1;
+    else
+    {
+      table_map last_found= this->table->map;
+      for (POSITION *pos= &this->join->positions[idx - 1]; ; pos--)
+      {
+        if (pos->table->table->map & excluded_tables)
+          continue;
+        if (pos->partial_join_cardinality < refills)
+        {
+          *spl_pd_boundary= last_found;
+          refills= pos->partial_join_cardinality;
+        }
+        last_found= pos->table->table->map;
+        if ((last_found & best_param_tables) || pos->use_join_buffer)
+          break;
+      }
+    }
+    trace_obj.add("refills", refills).add("spl_pd_boundary", *spl_pd_boundary);
     /*
       The key for splitting was chosen, look for the plan for this key
       in the cache
...
@@ -967,11 +1071,13 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
         The plan for the chosen key has not been found in the cache.
         Build a new plan and save info on it in the cache
       */
+      Json_writer_array wrapper(thd, "split_plan_search");
       table_map all_table_map= (((table_map) 1) << join->table_count) - 1;
       reset_validity_vars_for_keyuses(best_key_keyuse_ext_start, best_table,
-                                      best_key, remaining_tables, true);
+                                      best_key, excluded_tables, true);
       choose_plan(join, all_table_map & ~join->const_table_map);
+      wrapper.end();
       /*
         Check that the chosen plan is really a splitting plan.
         If not or if there is not enough memory to save the plan in the cache
...
@@ -988,7 +1094,8 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
           spl_opt_info->plan_cache.push_back(spl_plan))
       {
         reset_validity_vars_for_keyuses(best_key_keyuse_ext_start, best_table,
-                                        best_key, remaining_tables, false);
+                                        best_key, excluded_tables, false);
+        trace_obj.add("split_plan_discarded", "constructed unapplicable query plan");
         return 0;
       }
...
@@ -1012,18 +1119,34 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
              (char *) join->best_positions,
              sizeof(POSITION) * join->table_count);
       reset_validity_vars_for_keyuses(best_key_keyuse_ext_start, best_table,
-                                      best_key, remaining_tables, false);
+                                      best_key, excluded_tables, false);
     }
+    else
+      trace_obj.add("cached_plan_found", 1);
     if (spl_plan)
     {
+      trace_obj.
+        add("lead_table", spl_plan->table->alias.ptr()).
+        add("index", spl_plan->table->key_info[spl_plan->key].name).
+        add("parts", spl_plan->parts).
+        add("split_sel", spl_plan->split_sel).
+        add("cost", spl_plan->cost).
+        add("records", (ha_rows) (records * spl_plan->split_sel)).
+        add("refills", refills);
-      if (record_count * spl_plan->cost < spl_opt_info->unsplit_cost - 0.01)
+      if (refills * spl_plan->cost < spl_opt_info->unsplit_cost - 0.01)
       {
        /*
          The best plan that employs splitting is cheaper than
          the plan without splitting
        */
        spl_opt_info->last_plan= spl_plan;
+       spl_opt_info->last_refills= refills;
+       trace_obj.add("chosen", true);
      }
+     else
+       trace_obj.add("chosen", false);
    }
  }
...
@@ -1032,11 +1155,14 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
     spl_plan= spl_opt_info->last_plan;
   if (spl_plan)
   {
-    startup_cost= record_count * spl_plan->cost;
+    startup_cost= spl_opt_info->last_refills * spl_plan->cost;
     records= (ha_rows) (records * spl_plan->split_sel);
   }
   else
+  {
     startup_cost= spl_opt_info->unsplit_cost;
+    *spl_pd_boundary= 0;
+  }
   return spl_plan;
 }
...
@@ -1046,13 +1172,13 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
   Inject equalities for splitting used by the materialization join

   @param
-    excluded_tables  used to filter out the equalities that can not
-                     be pushed.
+    excluded_tables  used to filter out the equalities that are not
+                     to be pushed.

   @details
     This function injects equalities pushed into a derived table T for which
     the split optimization has been chosen by the optimizer. The function
-    is called by JOIN::inject_splitting_cond_for_all_tables_with_split_op().
+    is called by JOIN::inject_splitting_cond_for_all_tables_with_split_opt().
     All equalities usable for splitting T whose right parts do not depend on
     any of the 'excluded_tables' can be pushed into the where clause of the
     derived table T.
...
@@ -1140,7 +1266,7 @@ bool is_eq_cond_injected_for_split_opt(Item_func_eq *eq_item)
   @param
     spl_plan          info on the splitting plan chosen for the splittable table T
-    remaining_tables  the table T is joined just before these tables
+    excluded_tables   tables that cannot be used in equalities pushed into T
     is_const_table    the table T is a constant table

   @details
...
@@ -1155,7 +1281,7 @@ bool is_eq_cond_injected_for_split_opt(Item_func_eq *eq_item)
 */

 bool JOIN_TAB::fix_splitting(SplM_plan_info *spl_plan,
-                             table_map remaining_tables,
+                             table_map excluded_tables,
                              bool is_const_table)
 {
   SplM_opt_info *spl_opt_info= table->spl_opt_info;
...
@@ -1163,6 +1289,7 @@ bool JOIN_TAB::fix_splitting(SplM_plan_info *spl_plan,
   JOIN *md_join= spl_opt_info->join;
   if (spl_plan && !is_const_table)
   {
+    is_split_derived= true;
     memcpy((char *) md_join->best_positions,
            (char *) spl_plan->best_positions,
            sizeof(POSITION) * md_join->table_count);
@@ -1173,7 +1300,7 @@ bool JOIN_TAB::fix_splitting(SplM_plan_info *spl_plan,
...
@@ -1173,7 +1300,7 @@ bool JOIN_TAB::fix_splitting(SplM_plan_info *spl_plan,
reset_validity_vars_for_keyuses
(
spl_plan
->
keyuse_ext_start
,
reset_validity_vars_for_keyuses
(
spl_plan
->
keyuse_ext_start
,
spl_plan
->
table
,
spl_plan
->
table
,
spl_plan
->
key
,
spl_plan
->
key
,
remaining
_tables
,
excluded
_tables
,
true
);
true
);
}
}
else
if
(
md_join
->
save_qep
)
else
if
(
md_join
->
save_qep
)
...
@@ -1209,8 +1336,21 @@ bool JOIN::fix_all_splittings_in_plan()
     if (tab->table->is_splittable())
     {
       SplM_plan_info *spl_plan= cur_pos->spl_plan;
+      table_map excluded_tables= (all_tables & ~prev_tables) |
+                                 sjm_lookup_tables;
+      if (spl_plan)
+      {
+        POSITION *pos= cur_pos;
+        table_map spl_pd_boundary= pos->spl_pd_boundary;
+        do
+        {
+          excluded_tables|= pos->table->table->map;
+        }
+        while (!((pos--)->table->table->map & spl_pd_boundary));
+      }
       if (tab->fix_splitting(spl_plan,
-                             all_tables & ~prev_tables,
+                             excluded_tables,
                              tablenr < const_tables))
         return true;
     }
...
@@ -1249,13 +1389,21 @@ bool JOIN::inject_splitting_cond_for_all_tables_with_split_opt()
       continue;
     SplM_opt_info *spl_opt_info= tab->table->spl_opt_info;
     JOIN *join= spl_opt_info->join;
-    /*
-      Currently the equalities referencing columns of SJM tables with
-      look-up access cannot be pushed into materialized derived.
-    */
-    if (join->inject_best_splitting_cond((all_tables & ~prev_tables) |
-                                         sjm_lookup_tables))
-      return true;
+    table_map excluded_tables= (all_tables & ~prev_tables) |
+                               sjm_lookup_tables;
+    table_map spl_pd_boundary= cur_pos->spl_pd_boundary;
+    for (POSITION *pos= cur_pos; ; pos--)
+    {
+      excluded_tables|= pos->table->table->map;
+      pos->table->no_forced_join_cache= true;
+      if (pos->table->table->map & spl_pd_boundary)
+      {
+        pos->table->split_derived_to_update|= tab->table->map;
+        break;
+      }
+    }
+    if (join->inject_best_splitting_cond(excluded_tables))
+      return true;
   }
   return false;
 }
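A practical consequence of the new no_forced_join_cache and split_derived_to_update fields seen in the hunk above: the refill of the split derived table is now coordinated with join buffering. check_join_cache_usage() (below, in sql/sql_select.cc) skips tables flagged no_forced_join_cache, and sub_select() resets preread_init_done for the derived tables recorded in split_derived_to_update so they are refilled at the right point. The new derived_split_innodb test exercises the join-buffered case; a trimmed version of it (illustrative, reuses that test's t1, t2, t10 and t11 tables):

set join_cache_level=4;
explain
select *
from (t1 left join t2 on t2.a=t1.b)
     left join (select grp_id, count(*)
                from t10 left join t11 on t11.col1=t10.col1
                group by grp_id) dt on dt.grp_id=t1.b;
set join_cache_level=default;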
sql/sql_select.cc
...
@@ -7344,6 +7344,7 @@ void set_position(JOIN *join,uint idx,JOIN_TAB *table,KEYUSE *key)
   join->positions[idx].records_read=1.0; /* This is a const table */
   join->positions[idx].cond_selectivity= 1.0;
   join->positions[idx].ref_depend_map= 0;
+  join->positions[idx].partial_join_cardinality= 1;

   // join->positions[idx].loosescan_key= MAX_KEY; /* Not a LooseScan */
   join->positions[idx].sj_strategy= SJ_OPT_NONE;
...
@@ -7361,6 +7362,7 @@ void set_position(JOIN *join,uint idx,JOIN_TAB *table,KEYUSE *key)
   }
   join->best_ref[idx]=table;
   join->positions[idx].spl_plan= 0;
+  join->positions[idx].spl_pd_boundary= 0;
 }
...
@@ -7533,6 +7535,7 @@ best_access_path(JOIN *join,
   MY_BITMAP *eq_join_set= &s->table->eq_join_set;
   KEYUSE *hj_start_key= 0;
   SplM_plan_info *spl_plan= 0;
+  table_map spl_pd_boundary= 0;
   Range_rowid_filter_cost_info *filter= 0;
   const char* cause= NULL;
   enum join_type best_type= JT_UNKNOWN, type= JT_UNKNOWN;
...
@@ -7543,15 +7546,17 @@ best_access_path(JOIN *join,
   DBUG_ENTER("best_access_path");

   Json_writer_object trace_wrapper(thd, "best_access_path");
-  Json_writer_array trace_paths(thd, "considered_access_paths");

   bitmap_clear_all(eq_join_set);

   loose_scan_opt.init(join, s, remaining_tables);

   if (s->table->is_splittable())
-    spl_plan= s->choose_best_splitting(record_count, remaining_tables);
+    spl_plan= s->choose_best_splitting(idx,
+                                       remaining_tables,
+                                       &spl_pd_boundary);
+
+  Json_writer_array trace_paths(thd, "considered_access_paths");

   if (s->keyuse)
   { /* Use key if possible */
     KEYUSE *keyuse;
...
@@ -8347,8 +8352,9 @@ best_access_path(JOIN *join,
       best_filter= filter;
       /* range/index_merge/ALL/index access method are "independent", so: */
       best_ref_depends_map= 0;
-      best_uses_jbuf= MY_TEST(!disable_jbuf && !((s->table->map &
-                                                  join->outer_join)));
+      best_uses_jbuf= MY_TEST(!disable_jbuf &&
+                              (join->allowed_outer_join_with_cache ||
+                               !(s->table->map & join->outer_join)));
       spl_plan= 0;
       best_type= type;
     }
...
@@ -8370,6 +8376,7 @@ best_access_path(JOIN *join,
   pos->loosescan_picker.loosescan_key= MAX_KEY;
   pos->use_join_buffer= best_uses_jbuf;
   pos->spl_plan= spl_plan;
+  pos->spl_pd_boundary= !spl_plan ? 0 : spl_pd_boundary;
   pos->range_rowid_filter_info= best_filter;

   loose_scan_opt.save_to_position(s, loose_scan_pos);
...
@@ -8899,6 +8906,9 @@ optimize_straight_join(JOIN *join, table_map join_tables)
     pushdown_cond_selectivity= table_cond_selectivity(join, idx, s,
                                                       join_tables);
     position->cond_selectivity= pushdown_cond_selectivity;
+    double partial_join_cardinality= record_count *
+                                     pushdown_cond_selectivity;
+    join->positions[idx].partial_join_cardinality= partial_join_cardinality;
     ++idx;
   }
...
@@ -9939,6 +9949,8 @@ best_extension_by_limited_search(JOIN *join,
       double partial_join_cardinality= current_record_count *
                                        pushdown_cond_selectivity;
+      join->positions[idx].partial_join_cardinality= partial_join_cardinality;
+
       if ( (search_depth > 1) && (remaining_tables & ~real_table_bit) & allowed_tables )
       { /* Recursively expand the current partial plan */
         swap_variables(JOIN_TAB*, join->best_ref[idx], *pos);
...
@@ -12975,6 +12987,9 @@ uint check_join_cache_usage(JOIN_TAB *tab,
   join->return_tab= 0;

+  if (tab->no_forced_join_cache)
+    return 0;
+
   /*
     Don't use join cache if @@join_cache_level==0 or this table is the first
     one join suborder (either at top level or inside a bush)
...
@@ -13936,7 +13951,8 @@ bool JOIN_TAB::preread_init()
     DBUG_RETURN(TRUE);

   if (!(derived->get_unit()->uncacheable & UNCACHEABLE_DEPENDENT) ||
-      derived->is_nonrecursive_derived_with_rec_ref())
+      derived->is_nonrecursive_derived_with_rec_ref() ||
+      is_split_derived)
     preread_init_done= TRUE;
   if (select && select->quick)
     select->quick->replace_handler(table->file);
...
@@ -17404,6 +17420,9 @@ void optimize_wo_join_buffering(JOIN *join, uint first_tab, uint last_tab,
                                              reopt_remaining_tables &
                                              ~real_table_bit);
     }
+    double partial_join_cardinality= rec_count *
+                                     pushdown_cond_selectivity;
+    join->positions[i].partial_join_cardinality= partial_join_cardinality;
     (*outer_rec_count) *= pushdown_cond_selectivity;
     if (!rs->emb_sj_nest)
       *outer_rec_count= COST_MULT(*outer_rec_count, pos.records_read);
...
@@ -21048,6 +21067,16 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
 {
   DBUG_ENTER("sub_select");

+  if (join_tab->split_derived_to_update && !end_of_records)
+  {
+    table_map tab_map= join_tab->split_derived_to_update;
+    for (uint i= 0; tab_map; i++, tab_map>>= 1)
+    {
+      if (tab_map & 1)
+        join->map2table[i]->preread_init_done= false;
+    }
+  }
+
   if (join_tab->last_inner)
   {
     JOIN_TAB *last_inner_tab= join_tab->last_inner;
...
sql/sql_select.h
...
@@ -394,6 +394,8 @@ typedef struct st_join_table {
   */
   bool idx_cond_fact_out;
   bool use_join_cache;
+  /* TRUE <=> it is prohibited to join this table using join buffer */
+  bool no_forced_join_cache;
   uint used_join_cache_level;
   ulong join_buffer_size_limit;
   JOIN_CACHE *cache;
...
@@ -520,6 +522,16 @@ typedef struct st_join_table {
   bool preread_init_done;

+  /* true <=> split optimization has been applied to this materialized table */
+  bool is_split_derived;
+
+  /*
+    Bitmap of split materialized derived tables that can be filled just before
+    this join table is to be joined. All parameters of the split derived tables
+    belong to tables preceding this join table.
+  */
+  table_map split_derived_to_update;
+
   /*
     Cost info to the range filter used when joining this join table
     (Defined when the best join order has been already chosen)
...
@@ -682,9 +694,10 @@ typedef struct st_join_table {
   void partial_cleanup();
   void add_keyuses_for_splitting();
-  SplM_plan_info *choose_best_splitting(double record_count,
-                                        table_map remaining_tables);
-  bool fix_splitting(SplM_plan_info *spl_plan, table_map remaining_tables,
+  SplM_plan_info *choose_best_splitting(uint idx,
+                                        table_map remaining_tables,
+                                        table_map *spl_pd_boundary);
+  bool fix_splitting(SplM_plan_info *spl_plan, table_map excluded_tables,
                      bool is_const_table);
 } JOIN_TAB;
...
@@ -949,9 +962,21 @@ class POSITION
   */
   KEYUSE *key;

+  /* Cardinality of current partial join ending with this position */
+  double partial_join_cardinality;
+
   /* Info on splitting plan used at this position */
   SplM_plan_info *spl_plan;

+  /*
+    If spl_plan is NULL the value of spl_pd_boundary is 0. Otherwise
+    spl_pd_boundary contains the bitmap of the table from the current
+    partial join ending at this position that starts the sub-sequence of
+    tables S from which no conditions are allowed to be used in the plan
+    spl_plan for the split table joined at this position.
+  */
+  table_map spl_pd_boundary;
+
   /* Cost info for the range filter used at this position */
   Range_rowid_filter_cost_info *range_rowid_filter_info;
...