nexedi / MariaDB

Commit 5d7607f3 authored Feb 01, 2017 by Sergei Golubchik
cleanup: style fixes, sql_join_cache.cc
parent 8246b0ac
Showing 4 changed files with 289 additions and 294 deletions
mysql-test/r/join_cache.result   +2    -2
mysql-test/t/join_cache.test     +2    -2
sql/sql_join_cache.cc            +279  -285
sql/sql_select.cc                +6    -5
mysql-test/r/join_cache.result
...
...
@@ -5609,8 +5609,8 @@ set join_cache_level = default;
 set optimizer_switch=@tmp_optimizer_switch;
 DROP TABLE t1,t2,t3;
 #
-# Bug #925985: LEFT JOIN with optimize_join_buffer_size=off +
-#              join_buffer_size > join_buffer_space_limit
+# lp:925985 LEFT JOIN with optimize_join_buffer_size=off +
+#           join_buffer_size > join_buffer_space_limit
 #
 CREATE TABLE t1 (a int);
 INSERT INTO t1 VALUES (5), (3);
...
...
mysql-test/t/join_cache.test
...
...
@@ -3587,8 +3587,8 @@ set optimizer_switch=@tmp_optimizer_switch;
 DROP TABLE t1,t2,t3;
 --echo #
---echo # Bug #925985: LEFT JOIN with optimize_join_buffer_size=off +
---echo #              join_buffer_size > join_buffer_space_limit
+--echo # lp:925985 LEFT JOIN with optimize_join_buffer_size=off +
+--echo #           join_buffer_size > join_buffer_space_limit
 --echo #
 CREATE TABLE t1 (a int);
...
...
sql/sql_join_cache.cc
...
...
@@ -1376,7 +1376,8 @@ uint JOIN_CACHE::write_record_data(uchar * link, bool *is_full)
     if (copy->referenced_field_no)
       copy->offset= cp-curr_rec_pos;

-    if (copy->type == CACHE_BLOB)
+    switch (copy->type) {
+    case CACHE_BLOB:
     {
       Field_blob *blob_field= (Field_blob *) copy->field;
       if (last_record)
...
...
@@ -1397,69 +1398,66 @@ uint JOIN_CACHE::write_record_data(uchar * link, bool *is_full)
 	memcpy(cp+copy->length, copy->str, copy->blob_length);
 	cp+= copy->length+copy->blob_length;
       }
-    }
-    else
-    {
-      switch (copy->type) {
+      break;
+    }
     case CACHE_VARSTR1:
       /* Copy the significant part of the short varstring field */
       len= (uint) copy->str[0] + 1;
       DBUG_ASSERT(cp + len <= buff + buff_size);
       memcpy(cp, copy->str, len);
       cp+= len;
       break;
     case CACHE_VARSTR2:
       /* Copy the significant part of the long varstring field */
       len= uint2korr(copy->str) + 2;
       DBUG_ASSERT(cp + len <= buff + buff_size);
       memcpy(cp, copy->str, len);
       cp+= len;
       break;
     case CACHE_STRIPPED:
     {
       /*
         Put down the field value stripping all trailing spaces off.
         After this insert the length of the written sequence of bytes.
       */
       uchar *str, *end;
       for (str= copy->str, end= str+copy->length;
            end > str && end[-1] == ' ';
            end--) ;
       len= (uint) (end-str);
       DBUG_ASSERT(cp + len + 2 <= buff + buff_size);
       int2store(cp, len);
       memcpy(cp+2, str, len);
       cp+= len+2;
       break;
     }
     case CACHE_ROWID:
       if (!copy->length)
       {
         /*
           This may happen only for ROWID fields of materialized
           derived tables and views.
         */
         TABLE *table= (TABLE *) copy->str;
         copy->str= table->file->ref;
         copy->length= table->file->ref_length;
         if (!copy->str)
         {
           /*
             If table is an empty inner table of an outer join and it is
             a materialized derived table then table->file->ref == NULL.
           */
           cp+= copy->length;
           break;
         }
       }
       /* fall through */
     default:
       /* Copy the entire image of the field from the record buffer */
       DBUG_ASSERT(cp + copy->length <= buff + buff_size);
       if (copy->str)
         memcpy(cp, copy->str, copy->length);
       cp+= copy->length;
     }
-    }
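The change above folds the formerly special-cased CACHE_BLOB branch into the same switch that dispatches the other field types, so the blob path simply ends in a break instead of living in its own if/else arm. A minimal standalone sketch of that refactoring pattern, with a hypothetical enum and trivial handlers rather than MariaDB's types:

  #include <cstdio>

  enum FieldType { T_BLOB, T_VARSTR, T_OTHER };

  // Before: the blob case sits outside the switch.
  static void dispatch_old(FieldType t)
  {
    if (t == T_BLOB)
      std::puts("blob");
    else
    {
      switch (t) {
      case T_VARSTR: std::puts("varstr"); break;
      default:       std::puts("other");
      }
    }
  }

  // After: one switch handles every type; the blob case ends with a break.
  static void dispatch_new(FieldType t)
  {
    switch (t) {
    case T_BLOB:   std::puts("blob");   break;
    case T_VARSTR: std::puts("varstr"); break;
    default:       std::puts("other");
    }
  }

  int main()
  {
    dispatch_old(T_BLOB);
    dispatch_new(T_BLOB);  // same behaviour, flatter control flow
    return 0;
  }

Both forms behave identically; the second keeps a single dispatch point, which is what the cleanup above achieves for write_record_data.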
...
...
@@ -1717,7 +1715,7 @@ uint JOIN_CACHE::aux_buffer_incr(ulong recno)
   The function reads all flag and data fields of a record from the join
   buffer into the corresponding record buffers.
   The fields are read starting from the position 'pos' which is
-  supposed to point to the beginning og the first record field.
+  supposed to point to the beginning of the first record field.
   The function increments the value of 'pos' by the length of the
   read data.
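The corrected comment describes the cursor convention used by the read_* helpers in this file: a reader consumes bytes at 'pos', copies them into the record buffer, and advances 'pos' by exactly the number of bytes it read. A minimal sketch of that convention with hypothetical names (not the JOIN_CACHE implementation):

  #include <cstring>
  #include <cstdio>

  // Copy 'len' bytes from the buffer cursor into 'dst' and advance the cursor.
  // Returns the number of bytes consumed, mirroring the pos += len convention.
  static size_t read_field(const unsigned char *&pos, void *dst, size_t len)
  {
    std::memcpy(dst, pos, len);
    pos += len;
    return len;
  }

  int main()
  {
    unsigned char buf[6] = {1, 2, 3, 4, 5, 6};
    const unsigned char *pos = buf;

    unsigned char a[2], b[4];
    read_field(pos, a, sizeof a);   // consumes bytes 0..1
    read_field(pos, b, sizeof b);   // consumes bytes 2..5
    std::printf("consumed %zu bytes\n", (size_t)(pos - buf));  // prints 6
    return 0;
  }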
...
...
@@ -1808,60 +1806,58 @@ uint JOIN_CACHE::read_flag_fields()
 uint JOIN_CACHE::read_record_field(CACHE_FIELD *copy, bool blob_in_rec_buff)
 {
   uint len;
   /* Do not copy the field if its value is null */
   if (copy->field && copy->field->maybe_null() && copy->field->is_null())
     return 0;
-  if (copy->type == CACHE_BLOB)
+  switch (copy->type) {
+  case CACHE_BLOB:
   {
     Field_blob *blob_field= (Field_blob *) copy->field;
     /*
       Copy the length and the pointer to data but not the blob data
       itself to the record buffer
     */
     if (blob_in_rec_buff)
     {
       blob_field->set_image(pos, copy->length + sizeof(char*),
                             blob_field->charset());
       len= copy->length + sizeof(char*);
     }
     else
     {
       blob_field->set_ptr(pos, pos+copy->length);
       len= copy->length + blob_field->get_length();
     }
   }
-  else
-  {
-    switch (copy->type) {
+  break;
   case CACHE_VARSTR1:
     /* Copy the significant part of the short varstring field */
     len= (uint) pos[0] + 1;
     memcpy(copy->str, pos, len);
     break;
   case CACHE_VARSTR2:
     /* Copy the significant part of the long varstring field */
     len= uint2korr(pos) + 2;
     memcpy(copy->str, pos, len);
     break;
   case CACHE_STRIPPED:
     /* Pad the value by spaces that has been stripped off */
     len= uint2korr(pos);
     memcpy(copy->str, pos+2, len);
     memset(copy->str+len, ' ', copy->length-len);
     len+= 2;
     break;
   case CACHE_ROWID:
     if (!copy->str)
     {
       len= copy->length;
       break;
     }
     /* fall through */
   default:
     /* Copy the entire image of the field from the record buffer */
     len= copy->length;
     memcpy(copy->str, pos, len);
   }
-  }
   pos+= len;
   return len;
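read_record_field above undoes the encodings produced by write_record_data: varstring values carry their own 1- or 2-byte length prefix, and CACHE_STRIPPED values are stored as a 2-byte length followed by the bytes left after trailing spaces were stripped, so reading them back means re-padding with spaces up to the column length. A standalone sketch of the stripped-value round trip, using hand-written 2-byte little-endian helpers in place of int2store/uint2korr (illustrative only, not the server's macros):

  #include <cstring>
  #include <cstdio>

  // Stand-ins for int2store / uint2korr: 2-byte little-endian length.
  static void store2(unsigned char *p, unsigned v)
  { p[0] = (unsigned char)(v & 0xff); p[1] = (unsigned char)(v >> 8); }
  static unsigned read2(const unsigned char *p)
  { return p[0] | (p[1] << 8); }

  // Write: strip trailing spaces, then store <len><bytes>. Returns bytes written.
  static size_t pack_stripped(unsigned char *out, const char *val, size_t field_len)
  {
    size_t len = field_len;
    while (len > 0 && val[len - 1] == ' ')
      len--;
    store2(out, (unsigned)len);
    std::memcpy(out + 2, val, len);
    return len + 2;
  }

  // Read: copy the stored bytes and pad with spaces back to the field length.
  // Returns bytes consumed from the buffer.
  static size_t unpack_stripped(const unsigned char *in, char *val, size_t field_len)
  {
    size_t len = read2(in);
    std::memcpy(val, in + 2, len);
    std::memset(val + len, ' ', field_len - len);
    return len + 2;
  }

  int main()
  {
    const char field[8] = {'a', 'b', 'c', ' ', ' ', ' ', ' ', ' '};
    unsigned char buf[10];
    char restored[8];

    size_t written  = pack_stripped(buf, field, sizeof field);      // 2 + 3 bytes
    size_t consumed = unpack_stripped(buf, restored, sizeof restored);
    std::printf("written=%zu consumed=%zu equal=%d\n", written, consumed,
                std::memcmp(field, restored, sizeof field) == 0);   // 5 5 1
    return 0;
  }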
...
...
@@ -2718,7 +2714,7 @@ int JOIN_CACHE_HASHED::init(bool for_explain)
     data_fields_offset+= copy->length;
   }

-  DBUG_RETURN(rc);
+  DBUG_RETURN(0);
 }
...
...
@@ -3927,9 +3923,9 @@ static
 void bka_range_seq_key_info(void *init_params, uint *length,
                             key_part_map *map)
 {
   TABLE_REF *ref= &(((JOIN_CACHE*)init_params)->join_tab->ref);
   *length= ref->key_length;
   *map= (key_part_map(1) << ref->key_parts) - 1;
 }
...
...
@@ -3957,10 +3953,10 @@ RETURN VALUE
 static
 range_seq_t bka_range_seq_init(void *init_param, uint n_ranges, uint flags)
 {
   DBUG_ENTER("bka_range_seq_init");
   JOIN_CACHE_BKA *cache= (JOIN_CACHE_BKA *) init_param;
   cache->reset(0);
   DBUG_RETURN((range_seq_t) init_param);
 }
...
...
@@ -3988,21 +3984,21 @@ RETURN VALUE
 static
 bool bka_range_seq_next(range_seq_t rseq, KEY_MULTI_RANGE *range)
 {
   DBUG_ENTER("bka_range_seq_next");
   JOIN_CACHE_BKA *cache= (JOIN_CACHE_BKA *) rseq;
   TABLE_REF *ref= &cache->join_tab->ref;
   key_range *start_key= &range->start_key;
   if ((start_key->length= cache->get_next_key((uchar **) &start_key->key)))
   {
     start_key->keypart_map= (1 << ref->key_parts) - 1;
     start_key->flag= HA_READ_KEY_EXACT;
     range->end_key= *start_key;
     range->end_key.flag= HA_READ_AFTER_KEY;
     range->ptr= (char *) cache->get_curr_rec();
     range->range_flag= EQ_RANGE;
     DBUG_RETURN(0);
   }
   DBUG_RETURN(1);
 }
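bka_range_seq_init and bka_range_seq_next above implement the producer side of a callback table (RANGE_SEQ_IF) that the multi-range-read code drives: init is called once to obtain an opaque handle, then next is called repeatedly until it reports that the sequence is exhausted. A minimal sketch of that calling pattern with hypothetical types (not the storage-engine API):

  #include <cstdio>

  // Hypothetical stand-ins for the opaque sequence handle and a range.
  struct Range { int key; };
  struct SeqIf {
    void *(*init)(void *param);
    bool  (*next)(void *seq, Range *r);   // true => no more ranges
  };

  // A trivial producer: yields keys 0..count-1.
  struct Counter { int cur, count; };

  static void *counter_init(void *param)
  {
    Counter *c = static_cast<Counter *>(param);
    c->cur = 0;
    return c;
  }

  static bool counter_next(void *seq, Range *r)
  {
    Counter *c = static_cast<Counter *>(seq);
    if (c->cur >= c->count)
      return true;          // exhausted
    r->key = c->cur++;
    return false;
  }

  int main()
  {
    Counter param = {0, 3};
    SeqIf funcs = { counter_init, counter_next };

    // Consumer side: what the range-sequence driver conceptually does.
    void *seq = funcs.init(&param);
    Range r;
    while (!funcs.next(seq, &r))
      std::printf("range for key %d\n", r.key);
    return 0;
  }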
...
...
@@ -4034,11 +4030,11 @@ RETURN VALUE
 static
 bool bka_range_seq_skip_record(range_seq_t rseq, range_id_t range_info,
                                uchar *rowid)
 {
   DBUG_ENTER("bka_range_seq_skip_record");
   JOIN_CACHE_BKA *cache= (JOIN_CACHE_BKA *) rseq;
   bool res= cache->get_match_flag_by_pos((uchar *) range_info) ==
             JOIN_CACHE::MATCH_FOUND;
   DBUG_RETURN(res);
 }
...
...
@@ -4065,14 +4061,14 @@ RETURN VALUE
 static
 bool bka_skip_index_tuple(range_seq_t rseq, range_id_t range_info)
 {
   DBUG_ENTER("bka_skip_index_tuple");
   JOIN_CACHE_BKA *cache= (JOIN_CACHE_BKA *) rseq;
   THD *thd= cache->thd();
   bool res;
   status_var_increment(thd->status_var.ha_icp_attempts);
   if (!(res= cache->skip_index_tuple(range_info)))
     status_var_increment(thd->status_var.ha_icp_match);
   DBUG_RETURN(res);
 }
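bka_skip_index_tuple above wraps the cache's pushed index condition with two counters: every call counts as an attempt, and only calls where the condition keeps the tuple count as a match. A tiny sketch of that bookkeeping pattern with hypothetical counters and a stand-in predicate (not the server's status variables):

  #include <cstdio>

  struct IcpStats { unsigned long attempts = 0, matches = 0; };

  // Returns true when the tuple must be skipped, mirroring the wrapper:
  // count every attempt, count a match only when the tuple is kept.
  static bool skip_tuple(IcpStats &stats, bool (*condition_rejects)(int), int tuple)
  {
    stats.attempts++;
    bool skip = condition_rejects(tuple);
    if (!skip)
      stats.matches++;
    return skip;
  }

  static bool reject_odd(int t) { return (t % 2) != 0; }

  int main()
  {
    IcpStats stats;
    for (int t = 0; t < 10; t++)
      skip_tuple(stats, reject_odd, t);
    std::printf("attempts=%lu matches=%lu\n", stats.attempts, stats.matches); // 10 5
    return 0;
  }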
...
...
@@ -4101,10 +4097,10 @@ RETURN VALUE
 bool JOIN_CACHE_BKA::prepare_look_for_matches(bool skip_last)
 {
   if (!records)
     return TRUE;
   rem_records= 1;
   return FALSE;
 }
...
...
@@ -4133,11 +4129,11 @@ RETURN VALUE
 uchar *JOIN_CACHE_BKA::get_next_candidate_for_match()
 {
   if (!rem_records)
     return 0;
   rem_records--;
   return curr_association;
 }
/*
...
...
@@ -4161,8 +4157,8 @@ RETURN VALUE
 bool JOIN_CACHE_BKA::skip_next_candidate_for_match(uchar *rec_ptr)
 {
   return join_tab->check_only_first_match() &&
         (get_match_flag_by_pos(rec_ptr) == MATCH_FOUND);
 }
...
...
@@ -4189,8 +4185,8 @@ RETURN VALUE
 void JOIN_CACHE_BKA::read_next_candidate_for_match(uchar *rec_ptr)
 {
   get_record_by_pos(rec_ptr);
 }
/*
...
...
@@ -4216,30 +4212,29 @@ RETURN VALUE
 int JOIN_CACHE_BKA::init(bool for_explain)
 {
   int res;
   bool check_only_first_match= join_tab->check_only_first_match();

   RANGE_SEQ_IF rs_funcs= { bka_range_seq_key_info,
                            bka_range_seq_init,
                            bka_range_seq_next,
                            check_only_first_match ?
                              bka_range_seq_skip_record : 0,
                            bka_skip_index_tuple };

   DBUG_ENTER("JOIN_CACHE_BKA::init");

   JOIN_TAB_SCAN_MRR *jsm;
   if (!(join_tab_scan= jsm= new JOIN_TAB_SCAN_MRR(join, join_tab,
                                                   mrr_mode, rs_funcs)))
     DBUG_RETURN(1);

   if ((res= JOIN_CACHE::init(for_explain)))
     DBUG_RETURN(res);

   if (use_emb_key)
     jsm->mrr_mode|= HA_MRR_MATERIALIZED_KEYS;

   DBUG_RETURN(0);
 }
...
...
@@ -4278,95 +4273,95 @@ RETURN VALUE
 uint JOIN_CACHE_BKA::get_next_key(uchar ** key)
 {
   uint len;
   uint32 rec_len;
   uchar *init_pos;
   JOIN_CACHE *cache;

 start:

   /* Any record in a BKA cache is prepended with its length */
   DBUG_ASSERT(with_length);

   if ((pos+size_of_rec_len) > last_rec_pos || !records)
     return 0;

   /* Read the length of the record */
   rec_len= get_rec_length(pos);
   pos+= size_of_rec_len;
   init_pos= pos;

   /* Read a reference to the previous cache if any */
   if (prev_cache)
     pos+= prev_cache->get_size_of_rec_offset();

   curr_rec_pos= pos;

   /* Read all flag fields of the record */
   read_flag_fields();

   if (with_match_flag &&
       (Match_flag) curr_rec_pos[0] == MATCH_IMPOSSIBLE)
   {
     pos= init_pos+rec_len;
     goto start;
   }

   if (use_emb_key)
   {
     /* An embedded key is taken directly from the join buffer */
     *key= pos;
     len= emb_key_length;
   }
   else
   {
     /* Read key arguments from previous caches if there are any such fields */
     if (external_key_arg_fields)
     {
       uchar *rec_ptr= curr_rec_pos;
       uint key_arg_count= external_key_arg_fields;
       CACHE_FIELD **copy_ptr= blob_ptr-key_arg_count;
       for (cache= prev_cache; key_arg_count; cache= cache->prev_cache)
       {
         uint len= 0;
         DBUG_ASSERT(cache);
         rec_ptr= cache->get_rec_ref(rec_ptr);
         while (!cache->referenced_fields)
         {
           cache= cache->prev_cache;
           DBUG_ASSERT(cache);
           rec_ptr= cache->get_rec_ref(rec_ptr);
         }
         while (key_arg_count &&
                cache->read_referenced_field(*copy_ptr, rec_ptr, &len))
         {
           copy_ptr++;
           --key_arg_count;
         }
       }
     }

     /*
       Read the other key arguments from the current record. The fields for
       these arguments are always first in the sequence of the record's fields.
     */
     CACHE_FIELD *copy= field_descr+flag_fields;
     CACHE_FIELD *copy_end= copy+local_key_arg_fields;
     bool blob_in_rec_buff= blob_data_is_in_rec_buff(curr_rec_pos);
     for ( ; copy < copy_end; copy++)
       read_record_field(copy, blob_in_rec_buff);

     /* Build the key over the fields read into the record buffers */
     TABLE_REF *ref= &join_tab->ref;
     cp_buffer_from_ref(join->thd, join_tab->table, ref);
     *key= ref->key_buff;
     len= ref->key_length;
   }

   pos= init_pos+rec_len;

   return len;
 }
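get_next_key above walks the join buffer as a sequence of length-prefixed records: read the stored length, remember where the record starts, and if the record's match flag says it can never produce a match, jump to init_pos + rec_len and continue with the next record. A standalone sketch of that framing and skip logic with a hypothetical record layout (not the JOIN_CACHE format):

  #include <cstdio>
  #include <cstring>
  #include <vector>
  #include <cstdint>

  // Hypothetical layout: [u16 length][u8 skip_flag][payload...]
  static void put_record(std::vector<uint8_t> &buf, bool skip, const char *payload)
  {
    uint16_t len = (uint16_t)(1 + std::strlen(payload));  // flag + payload
    buf.push_back((uint8_t)(len & 0xff));
    buf.push_back((uint8_t)(len >> 8));
    buf.push_back(skip ? 1 : 0);
    buf.insert(buf.end(), payload, payload + std::strlen(payload));
  }

  // Return the next non-skipped payload, or NULL when the buffer is exhausted.
  static const char *next_record(const uint8_t *&pos, const uint8_t *end)
  {
    while (pos + 2 <= end)
    {
      uint16_t rec_len = (uint16_t)(pos[0] | (pos[1] << 8));
      const uint8_t *init_pos = pos + 2;     // record body starts here
      pos = init_pos + rec_len;              // where the next record begins
      if (init_pos[0] == 0)                  // flag says this record is usable
        return (const char *)(init_pos + 1);
    }
    return nullptr;
  }

  int main()
  {
    std::vector<uint8_t> buf;
    put_record(buf, false, "r1");
    put_record(buf, true,  "r2");   // flagged: will be skipped
    put_record(buf, false, "r3");

    const uint8_t *pos = buf.data(), *end = buf.data() + buf.size();
    while (const char *p = next_record(pos, end))
      std::printf("%.2s\n", p);     // prints r1, r3
    return 0;
  }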
/*
...
...
@@ -4407,9 +4402,9 @@ RETURN VALUE
 bool JOIN_CACHE_BKA::skip_index_tuple(range_id_t range_info)
 {
   DBUG_ENTER("JOIN_CACHE_BKA::skip_index_tuple");
   get_record_by_pos((uchar *) range_info);
   DBUG_RETURN(!join_tab->cache_idx_cond->val_int());
 }
...
...
@@ -4439,10 +4434,10 @@ RETURN VALUE
 static
 range_seq_t bkah_range_seq_init(void *init_param, uint n_ranges, uint flags)
 {
   DBUG_ENTER("bkah_range_seq_init");
   JOIN_CACHE_BKAH *cache= (JOIN_CACHE_BKAH *) init_param;
   cache->reset(0);
   DBUG_RETURN((range_seq_t) init_param);
 }
...
...
@@ -4467,24 +4462,24 @@ RETURN VALUE
     TRUE     no more ranges
 */

 static
 bool bkah_range_seq_next(range_seq_t rseq, KEY_MULTI_RANGE *range)
 {
   DBUG_ENTER("bkah_range_seq_next");
   JOIN_CACHE_BKAH *cache= (JOIN_CACHE_BKAH *) rseq;
   TABLE_REF *ref= &cache->join_tab->ref;
   key_range *start_key= &range->start_key;
   if ((start_key->length= cache->get_next_key((uchar **) &start_key->key)))
   {
     start_key->keypart_map= (1 << ref->key_parts) - 1;
     start_key->flag= HA_READ_KEY_EXACT;
     range->end_key= *start_key;
     range->end_key.flag= HA_READ_AFTER_KEY;
     range->ptr= (char *) cache->get_curr_key_chain();
     range->range_flag= EQ_RANGE;
     DBUG_RETURN(0);
   }
   DBUG_RETURN(1);
 }
...
...
@@ -4512,14 +4507,13 @@ RETURN VALUE
     0        the record is to be left in the stream
 */

 static
 bool bkah_range_seq_skip_record(range_seq_t rseq, range_id_t range_info,
                                 uchar *rowid)
 {
   DBUG_ENTER("bkah_range_seq_skip_record");
   JOIN_CACHE_BKAH *cache= (JOIN_CACHE_BKAH *) rseq;
   bool res= cache->check_all_match_flags_for_key((uchar *) range_info);
   DBUG_RETURN(res);
 }
...
...
@@ -4546,14 +4540,14 @@ RETURN VALUE
 static
 bool bkah_skip_index_tuple(range_seq_t rseq, range_id_t range_info)
 {
   DBUG_ENTER("bka_unique_skip_index_tuple");
   JOIN_CACHE_BKAH *cache= (JOIN_CACHE_BKAH *) rseq;
   THD *thd= cache->thd();
   bool res;
   status_var_increment(thd->status_var.ha_icp_attempts);
   if (!(res= cache->skip_index_tuple(range_info)))
     status_var_increment(thd->status_var.ha_icp_match);
   DBUG_RETURN(res);
 }
...
...
@@ -4581,8 +4575,8 @@ RETURN VALUE
 bool JOIN_CACHE_BKAH::prepare_look_for_matches(bool skip_last)
 {
   last_matching_rec_ref_ptr= next_matching_rec_ref_ptr= 0;
   if (no_association &&
       !(curr_matching_chain= get_matching_chain_by_join_key())) //psergey: added '!'
     return 1;
   last_matching_rec_ref_ptr= get_next_rec_ref(curr_matching_chain);
...
...
sql/sql_select.cc
...
...
@@ -10752,6 +10752,7 @@ uint check_join_cache_usage(JOIN_TAB *tab,
   uint bufsz= 4096;
   JOIN_CACHE *prev_cache=0;
   JOIN *join= tab->join;
+  MEM_ROOT *root= join->thd->mem_root;
   uint cache_level= tab->used_join_cache_level;
   bool force_unlinked_cache=
          !(join->allowed_join_cache_types & JOIN_CACHE_INCREMENTAL_BIT);
...
...
@@ -10871,7 +10872,7 @@ uint check_join_cache_usage(JOIN_TAB *tab,
   case JT_ALL:
     if (cache_level == 1)
       prev_cache= 0;
-    if ((tab->cache= new JOIN_CACHE_BNL(join, tab, prev_cache)) &&
+    if ((tab->cache= new (root) JOIN_CACHE_BNL(join, tab, prev_cache)) &&
         !tab->cache->init(options & SELECT_DESCRIBE))
     {
       tab->icp_other_tables_ok= FALSE;
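The sql_select.cc hunks here and below replace plain new with new (root), so the join cache objects are carved out of the JOIN's MEM_ROOT and disappear together with it rather than being individually heap-allocated. A minimal sketch of that placement-new-on-an-arena idea, with a toy arena standing in for MEM_ROOT (illustrative only, not the server's allocator):

  #include <cstddef>
  #include <cstdio>
  #include <new>

  // Toy bump allocator standing in for a MEM_ROOT: objects are not freed
  // one by one; the whole arena is released at once.
  struct Arena
  {
    alignas(std::max_align_t) char buf[4096];
    std::size_t used = 0;
    void *alloc(std::size_t n)
    {
      n = (n + alignof(std::max_align_t) - 1) & ~(alignof(std::max_align_t) - 1);
      void *p = buf + used;
      used += n;
      return p;
    }
  };

  // Placement operator new, analogous to 'new (root) JOIN_CACHE_BNL(...)'.
  void *operator new(std::size_t n, Arena *a) { return a->alloc(n); }
  void operator delete(void *, Arena *) {}   // used only if a constructor throws

  struct JoinCacheLike
  {
    int level;
    explicit JoinCacheLike(int l) : level(l) {}
  };

  int main()
  {
    Arena arena;
    Arena *root = &arena;
    JoinCacheLike *cache = new (root) JoinCacheLike(4);  // no heap allocation
    std::printf("level=%d arena bytes used=%zu\n", cache->level, arena.used);
    cache->~JoinCacheLike();  // destroy explicitly; the memory goes with the arena
    return 0;
  }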
...
...
@@ -10906,7 +10907,7 @@ uint check_join_cache_usage(JOIN_TAB *tab,
       goto no_join_cache;
     if (cache_level == 3)
       prev_cache= 0;
-    if ((tab->cache= new JOIN_CACHE_BNLH(join, tab, prev_cache)) &&
+    if ((tab->cache= new (root) JOIN_CACHE_BNLH(join, tab, prev_cache)) &&
        !tab->cache->init(options & SELECT_DESCRIBE))
    {
      tab->icp_other_tables_ok= FALSE;
...
...
@@ -10927,7 +10928,7 @@ uint check_join_cache_usage(JOIN_TAB *tab,
    {
      if (cache_level == 5)
        prev_cache= 0;
-     if ((tab->cache= new JOIN_CACHE_BKA(join, tab, flags, prev_cache)) &&
+     if ((tab->cache= new (root) JOIN_CACHE_BKA(join, tab, flags, prev_cache)) &&
         !tab->cache->init(options & SELECT_DESCRIBE))
       return (6 - MY_TEST(!prev_cache));
     goto no_join_cache;
...
...
@@ -10936,10 +10937,10 @@ uint check_join_cache_usage(JOIN_TAB *tab,
    {
      if (cache_level == 7)
        prev_cache= 0;
-     if ((tab->cache= new JOIN_CACHE_BKAH(join, tab, flags, prev_cache)) &&
+     if ((tab->cache= new (root) JOIN_CACHE_BKAH(join, tab, flags, prev_cache)) &&
         !tab->cache->init(options & SELECT_DESCRIBE))
     {
       tab->idx_cond_fact_out= FALSE;
       return (8 - MY_TEST(!prev_cache));
     }
     goto no_join_cache;
...
...