Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
M
MariaDB
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Analytics
Analytics
CI / CD
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
nexedi
MariaDB
Commits
58ccbb7c
Commit
58ccbb7c
authored
Dec 23, 2009
by
Satya B
Browse files
Options
Browse Files
Download
Plain Diff
merge to mysql-5.1-bugteam
parents
83d5ca95
a49b18a7
Changes
8
Hide whitespace changes
Inline
Side-by-side
Showing
8 changed files
with
180 additions
and
53 deletions
+180
-53
mysql-test/collections/default.experimental
mysql-test/collections/default.experimental
+0
-1
mysql-test/r/count_distinct.result
mysql-test/r/count_distinct.result
+20
-0
mysql-test/r/user_var.result
mysql-test/r/user_var.result
+15
-0
mysql-test/t/count_distinct.test
mysql-test/t/count_distinct.test
+19
-0
mysql-test/t/user_var.test
mysql-test/t/user_var.test
+20
-0
sql/ha_partition.cc
sql/ha_partition.cc
+93
-51
sql/ha_partition.h
sql/ha_partition.h
+12
-0
sql/item_func.cc
sql/item_func.cc
+1
-1
No files found.
mysql-test/collections/default.experimental
View file @
58ccbb7c
...
@@ -20,7 +20,6 @@ ndb.* # joro : NDB tests marked as experiment
...
@@ -20,7 +20,6 @@ ndb.* # joro : NDB tests marked as experiment
rpl.rpl_get_master_version_and_clock* # Bug #49191 2009-12-01 Daogang rpl_get_master_version_and_clock failed on PB2: COM_REGISTER_SLAVE failed
rpl.rpl_get_master_version_and_clock* # Bug #49191 2009-12-01 Daogang rpl_get_master_version_and_clock failed on PB2: COM_REGISTER_SLAVE failed
rpl.rpl_innodb_bug28430* @solaris # Bug#46029
rpl.rpl_innodb_bug28430* @solaris # Bug#46029
rpl.rpl_trigger* # Bug#47810 2009-10-04 joro rpl.rpl_trigger.test fails with valgrind errors with the innodb plugin
rpl_ndb.* # joro : NDB tests marked as experimental as agreed with bochklin
rpl_ndb.* # joro : NDB tests marked as experimental as agreed with bochklin
rpl_ndb.rpl_ndb_log # Bug#38998
rpl_ndb.rpl_ndb_log # Bug#38998
...
...
mysql-test/r/count_distinct.result
View file @
58ccbb7c
...
@@ -40,6 +40,26 @@ select t2.isbn,city,t1.libname,count(distinct t1.libname) as a from t3 left join
...
@@ -40,6 +40,26 @@ select t2.isbn,city,t1.libname,count(distinct t1.libname) as a from t3 left join
isbn city libname a
isbn city libname a
007 Berkeley Berkeley Public1 2
007 Berkeley Berkeley Public1 2
000 New York New York Public Libra 2
000 New York New York Public Libra 2
select t2.isbn,city,@bar:=t1.libname,count(distinct t1.libname) as a
from t3 left join t1 on t3.libname=t1.libname left join t2
on t3.isbn=t2.isbn group by city having count(distinct
t1.libname) > 1;
isbn city @bar:=t1.libname a
007 Berkeley Berkeley Public1 2
000 New York New York Public Libra 2
SELECT @bar;
@bar
Berkeley Public2
select t2.isbn,city,concat(@bar:=t1.libname),count(distinct t1.libname) as a
from t3 left join t1 on t3.libname=t1.libname left join t2
on t3.isbn=t2.isbn group by city having count(distinct
t1.libname) > 1;
isbn city concat(@bar:=t1.libname) a
007 Berkeley Berkeley Public1 2
000 New York New York Public Libra 2
SELECT @bar;
@bar
Berkeley Public2
drop table t1, t2, t3;
drop table t1, t2, t3;
create table t1 (f1 int);
create table t1 (f1 int);
insert into t1 values (1);
insert into t1 values (1);
...
...
mysql-test/r/user_var.result
View file @
58ccbb7c
...
@@ -409,6 +409,21 @@ SELECT a, b FROM t1 WHERE a=2 AND b=3 GROUP BY a, b;
...
@@ -409,6 +409,21 @@ SELECT a, b FROM t1 WHERE a=2 AND b=3 GROUP BY a, b;
a b
a b
2 3
2 3
DROP TABLE t1;
DROP TABLE t1;
CREATE TABLE t1 (f1 int(11) default NULL, f2 int(11) default NULL);
CREATE TABLE t2 (f1 int(11) default NULL, f2 int(11) default NULL, foo int(11));
CREATE TABLE t3 (f1 int(11) default NULL, f2 int(11) default NULL);
INSERT INTO t1 VALUES(10, 10);
INSERT INTO t1 VALUES(10, 10);
INSERT INTO t2 VALUES(10, 10, 10);
INSERT INTO t2 VALUES(10, 10, 10);
INSERT INTO t3 VALUES(10, 10);
INSERT INTO t3 VALUES(10, 10);
SELECT MIN(t2.f1),
@bar:= (SELECT MIN(t3.f2) FROM t3 WHERE t3.f2 > foo)
FROM t1,t2 WHERE t1.f1 = t2.f1 ORDER BY t2.f1;
MIN(t2.f1) @bar:= (SELECT MIN(t3.f2) FROM t3 WHERE t3.f2 > foo)
10 NULL
DROP TABLE t1, t2, t3;
End of 5.0 tests
End of 5.0 tests
CREATE TABLE t1 (i INT);
CREATE TABLE t1 (i INT);
CREATE TRIGGER t_after_insert AFTER INSERT ON t1 FOR EACH ROW SET @bug42188 = 10;
CREATE TRIGGER t_after_insert AFTER INSERT ON t1 FOR EACH ROW SET @bug42188 = 10;
...
...
mysql-test/t/count_distinct.test
View file @
58ccbb7c
...
@@ -35,6 +35,25 @@ insert into t1 values ('NYC Lib','New York');
...
@@ -35,6 +35,25 @@ insert into t1 values ('NYC Lib','New York');
select
t2
.
isbn
,
city
,
t1
.
libname
,
count
(
t1
.
libname
)
as
a
from
t3
left
join
t1
on
t3
.
libname
=
t1
.
libname
left
join
t2
on
t3
.
isbn
=
t2
.
isbn
group
by
city
,
t1
.
libname
;
select
t2
.
isbn
,
city
,
t1
.
libname
,
count
(
t1
.
libname
)
as
a
from
t3
left
join
t1
on
t3
.
libname
=
t1
.
libname
left
join
t2
on
t3
.
isbn
=
t2
.
isbn
group
by
city
,
t1
.
libname
;
select
t2
.
isbn
,
city
,
t1
.
libname
,
count
(
distinct
t1
.
libname
)
as
a
from
t3
left
join
t1
on
t3
.
libname
=
t1
.
libname
left
join
t2
on
t3
.
isbn
=
t2
.
isbn
group
by
city
having
count
(
distinct
t1
.
libname
)
>
1
;
select
t2
.
isbn
,
city
,
t1
.
libname
,
count
(
distinct
t1
.
libname
)
as
a
from
t3
left
join
t1
on
t3
.
libname
=
t1
.
libname
left
join
t2
on
t3
.
isbn
=
t2
.
isbn
group
by
city
having
count
(
distinct
t1
.
libname
)
>
1
;
select
t2
.
isbn
,
city
,
t1
.
libname
,
count
(
distinct
t1
.
libname
)
as
a
from
t3
left
join
t1
on
t3
.
libname
=
t1
.
libname
left
join
t2
on
t3
.
isbn
=
t2
.
isbn
group
by
city
having
count
(
distinct
concat
(
t1
.
libname
,
'a'
))
>
1
;
select
t2
.
isbn
,
city
,
t1
.
libname
,
count
(
distinct
t1
.
libname
)
as
a
from
t3
left
join
t1
on
t3
.
libname
=
t1
.
libname
left
join
t2
on
t3
.
isbn
=
t2
.
isbn
group
by
city
having
count
(
distinct
concat
(
t1
.
libname
,
'a'
))
>
1
;
select
t2
.
isbn
,
city
,
@
bar
:=
t1
.
libname
,
count
(
distinct
t1
.
libname
)
as
a
from
t3
left
join
t1
on
t3
.
libname
=
t1
.
libname
left
join
t2
on
t3
.
isbn
=
t2
.
isbn
group
by
city
having
count
(
distinct
t1
.
libname
)
>
1
;
#
# Wrong result, see bug#49872
#
SELECT
@
bar
;
select
t2
.
isbn
,
city
,
concat
(
@
bar
:=
t1
.
libname
),
count
(
distinct
t1
.
libname
)
as
a
from
t3
left
join
t1
on
t3
.
libname
=
t1
.
libname
left
join
t2
on
t3
.
isbn
=
t2
.
isbn
group
by
city
having
count
(
distinct
t1
.
libname
)
>
1
;
#
# Wrong result, see bug#49872
#
SELECT
@
bar
;
drop
table
t1
,
t2
,
t3
;
drop
table
t1
,
t2
,
t3
;
#
#
...
...
mysql-test/t/user_var.test
View file @
58ccbb7c
...
@@ -295,6 +295,26 @@ SELECT @a, @b;
...
@@ -295,6 +295,26 @@ SELECT @a, @b;
SELECT
a
,
b
FROM
t1
WHERE
a
=
2
AND
b
=
3
GROUP
BY
a
,
b
;
SELECT
a
,
b
FROM
t1
WHERE
a
=
2
AND
b
=
3
GROUP
BY
a
,
b
;
DROP
TABLE
t1
;
DROP
TABLE
t1
;
#
# Bug#47371: reference by same column name
#
CREATE
TABLE
t1
(
f1
int
(
11
)
default
NULL
,
f2
int
(
11
)
default
NULL
);
CREATE
TABLE
t2
(
f1
int
(
11
)
default
NULL
,
f2
int
(
11
)
default
NULL
,
foo
int
(
11
));
CREATE
TABLE
t3
(
f1
int
(
11
)
default
NULL
,
f2
int
(
11
)
default
NULL
);
INSERT
INTO
t1
VALUES
(
10
,
10
);
INSERT
INTO
t1
VALUES
(
10
,
10
);
INSERT
INTO
t2
VALUES
(
10
,
10
,
10
);
INSERT
INTO
t2
VALUES
(
10
,
10
,
10
);
INSERT
INTO
t3
VALUES
(
10
,
10
);
INSERT
INTO
t3
VALUES
(
10
,
10
);
SELECT
MIN
(
t2
.
f1
),
@
bar
:=
(
SELECT
MIN
(
t3
.
f2
)
FROM
t3
WHERE
t3
.
f2
>
foo
)
FROM
t1
,
t2
WHERE
t1
.
f1
=
t2
.
f1
ORDER
BY
t2
.
f1
;
DROP
TABLE
t1
,
t2
,
t3
;
--
echo
End
of
5.0
tests
--
echo
End
of
5.0
tests
#
#
...
...
sql/ha_partition.cc
View file @
58ccbb7c
...
@@ -5747,6 +5747,23 @@ const key_map *ha_partition::keys_to_use_for_scanning()
...
@@ -5747,6 +5747,23 @@ const key_map *ha_partition::keys_to_use_for_scanning()
DBUG_RETURN
(
m_file
[
0
]
->
keys_to_use_for_scanning
());
DBUG_RETURN
(
m_file
[
0
]
->
keys_to_use_for_scanning
());
}
}
#define MAX_PARTS_FOR_OPTIMIZER_CALLS 10
/*
  Prepare start variables for estimating optimizer costs.

  @param[out] first          First used partition (first set bit of the
                             used_partitions bitmap).
  @param[out] num_used_parts Number of partitions remaining after pruning
                             (bits set in used_partitions).
  @param[out] check_min_num  Minimum number of partitions to actually call
                             before extrapolating a cost estimate, capped
                             at MAX_PARTS_FOR_OPTIMIZER_CALLS so optimizer
                             cost calls stay cheap on tables with many
                             partitions.
*/
void ha_partition::partitions_optimizer_call_preparations(uint *first,
                                                          uint *num_used_parts,
                                                          uint *check_min_num)
{
  *first= bitmap_get_first_set(&(m_part_info->used_partitions));
  *num_used_parts= bitmap_bits_set(&(m_part_info->used_partitions));
  /* Sample at most MAX_PARTS_FOR_OPTIMIZER_CALLS of the used partitions. */
  *check_min_num= min(MAX_PARTS_FOR_OPTIMIZER_CALLS, *num_used_parts);
}
/*
/*
Return time for a scan of the table
Return time for a scan of the table
...
@@ -5760,43 +5777,67 @@ const key_map *ha_partition::keys_to_use_for_scanning()
...
@@ -5760,43 +5777,67 @@ const key_map *ha_partition::keys_to_use_for_scanning()
double
ha_partition
::
scan_time
()
double
ha_partition
::
scan_time
()
{
{
double
scan_time
=
0
;
double
scan_time
=
0
.0
;
handler
**
file
;
uint
first
,
part_id
,
num_used_parts
,
check_min_num
,
partitions_called
=
0
;
DBUG_ENTER
(
"ha_partition::scan_time"
);
DBUG_ENTER
(
"ha_partition::scan_time"
);
for
(
file
=
m_file
;
*
file
;
file
++
)
partitions_optimizer_call_preparations
(
&
first
,
&
num_used_parts
,
&
check_min_num
);
if
(
bitmap_is_set
(
&
(
m_part_info
->
used_partitions
),
(
file
-
m_file
)))
for
(
part_id
=
first
;
partitions_called
<
num_used_parts
;
part_id
++
)
scan_time
+=
(
*
file
)
->
scan_time
();
{
if
(
!
bitmap_is_set
(
&
(
m_part_info
->
used_partitions
),
part_id
))
continue
;
scan_time
+=
m_file
[
part_id
]
->
scan_time
();
partitions_called
++
;
if
(
partitions_called
>=
check_min_num
&&
scan_time
!=
0.0
)
{
DBUG_RETURN
(
scan_time
*
(
double
)
num_used_parts
/
(
double
)
partitions_called
);
}
}
DBUG_RETURN
(
scan_time
);
DBUG_RETURN
(
scan_time
);
}
}
/*
/*
Get time to read
Estimate rows for records_in_range or estimate_rows_upper_bound.
SYNOPSIS
@param is_records_in_range call records_in_range instead of
read_time()
estimate_rows_upper_bound.
index Index number used
@param inx (only for records_in_range) index to use.
ranges Number of ranges
@param min_key (only for records_in_range) start of range.
rows Number of rows
@param max_key (only for records_in_range) end of range.
RETURN VALUE
time for read
DESCRIPTION
@return Number of rows or HA_POS_ERROR.
This will be optimised later to include whether or not the index can
be used with partitioning. To achieve we need to add another parameter
that specifies how many of the index fields that are bound in the ranges.
Possibly added as a new call to handlers.
*/
*/
ha_rows
ha_partition
::
estimate_rows
(
bool
is_records_in_range
,
uint
inx
,
double
ha_partition
::
read_time
(
uint
index
,
uint
ranges
,
ha_rows
rows
)
key_range
*
min_key
,
key_range
*
max_key
)
{
{
DBUG_ENTER
(
"ha_partition::read_time"
);
ha_rows
rows
,
estimated_rows
=
0
;
uint
first
,
part_id
,
num_used_parts
,
check_min_num
,
partitions_called
=
0
;
DBUG_ENTER
(
"ha_partition::records_in_range"
);
DBUG_RETURN
(
m_file
[
0
]
->
read_time
(
index
,
ranges
,
rows
));
partitions_optimizer_call_preparations
(
&
first
,
&
num_used_parts
,
&
check_min_num
);
for
(
part_id
=
first
;
partitions_called
<
num_used_parts
;
part_id
++
)
{
if
(
!
bitmap_is_set
(
&
(
m_part_info
->
used_partitions
),
part_id
))
continue
;
if
(
is_records_in_range
)
rows
=
m_file
[
part_id
]
->
records_in_range
(
inx
,
min_key
,
max_key
);
else
rows
=
m_file
[
part_id
]
->
estimate_rows_upper_bound
();
if
(
rows
==
HA_POS_ERROR
)
DBUG_RETURN
(
HA_POS_ERROR
);
estimated_rows
+=
rows
;
partitions_called
++
;
if
(
partitions_called
>=
check_min_num
&&
estimated_rows
)
{
DBUG_RETURN
(
estimated_rows
*
num_used_parts
/
partitions_called
);
}
}
DBUG_RETURN
(
estimated_rows
);
}
}
/*
/*
Find number of records in a range
Find number of records in a range
...
@@ -5824,22 +5865,9 @@ double ha_partition::read_time(uint index, uint ranges, ha_rows rows)
...
@@ -5824,22 +5865,9 @@ double ha_partition::read_time(uint index, uint ranges, ha_rows rows)
ha_rows
ha_partition
::
records_in_range
(
uint
inx
,
key_range
*
min_key
,
ha_rows
ha_partition
::
records_in_range
(
uint
inx
,
key_range
*
min_key
,
key_range
*
max_key
)
key_range
*
max_key
)
{
{
handler
**
file
;
ha_rows
in_range
=
0
;
DBUG_ENTER
(
"ha_partition::records_in_range"
);
DBUG_ENTER
(
"ha_partition::records_in_range"
);
file
=
m_file
;
DBUG_RETURN
(
estimate_rows
(
TRUE
,
inx
,
min_key
,
max_key
));
do
{
if
(
bitmap_is_set
(
&
(
m_part_info
->
used_partitions
),
(
file
-
m_file
)))
{
ha_rows
tmp_in_range
=
(
*
file
)
->
records_in_range
(
inx
,
min_key
,
max_key
);
if
(
tmp_in_range
==
HA_POS_ERROR
)
DBUG_RETURN
(
tmp_in_range
);
in_range
+=
tmp_in_range
;
}
}
while
(
*
(
++
file
));
DBUG_RETURN
(
in_range
);
}
}
...
@@ -5855,22 +5883,36 @@ ha_rows ha_partition::records_in_range(uint inx, key_range *min_key,
...
@@ -5855,22 +5883,36 @@ ha_rows ha_partition::records_in_range(uint inx, key_range *min_key,
ha_rows
ha_partition
::
estimate_rows_upper_bound
()
ha_rows
ha_partition
::
estimate_rows_upper_bound
()
{
{
ha_rows
rows
,
tot_rows
=
0
;
handler
**
file
;
DBUG_ENTER
(
"ha_partition::estimate_rows_upper_bound"
);
DBUG_ENTER
(
"ha_partition::estimate_rows_upper_bound"
);
file
=
m_file
;
DBUG_RETURN
(
estimate_rows
(
FALSE
,
0
,
NULL
,
NULL
));
do
}
{
if
(
bitmap_is_set
(
&
(
m_part_info
->
used_partitions
),
(
file
-
m_file
)))
{
/*
rows
=
(
*
file
)
->
estimate_rows_upper_bound
();
Get time to read
if
(
rows
==
HA_POS_ERROR
)
DBUG_RETURN
(
HA_POS_ERROR
);
SYNOPSIS
tot_rows
+=
rows
;
read_time()
}
index Index number used
}
while
(
*
(
++
file
));
ranges Number of ranges
DBUG_RETURN
(
tot_rows
);
rows Number of rows
RETURN VALUE
time for read
DESCRIPTION
This will be optimised later to include whether or not the index can
be used with partitioning. To achieve we need to add another parameter
that specifies how many of the index fields that are bound in the ranges.
Possibly added as a new call to handlers.
*/
/*
  Get time to read.

  @param index   Index number used.
  @param ranges  Number of ranges.
  @param rows    Number of rows.

  @return Time for the read, as estimated by the first underlying handler.

  NOTE(review): this delegates to m_file[0] only, i.e. it assumes the cost
  characteristics of the first partition are representative of all
  partitions — confirm this is acceptable for mixed storage engines.
*/
double ha_partition::read_time(uint index, uint ranges, ha_rows rows)
{
  DBUG_ENTER("ha_partition::read_time");
  /* Use the first partition's handler as a proxy for the whole table. */
  DBUG_RETURN(m_file[0]->read_time(index, ranges, rows));
}
}
...
...
sql/ha_partition.h
View file @
58ccbb7c
...
@@ -547,6 +547,18 @@ class ha_partition :public handler
...
@@ -547,6 +547,18 @@ class ha_partition :public handler
-------------------------------------------------------------------------
-------------------------------------------------------------------------
*/
*/
private:
  /*
    Helper function to get the minimum number of partitions to use for
    the optimizer hints/cost calls.

    NOTE: parameter names reordered to match the definition in
    ha_partition.cc, which takes (first, num_used_parts, check_min_num).
    The previous declaration listed the names in a different order; since
    all three parameters are uint*, it still compiled, but the mismatched
    names misled readers about which out-parameter is which.

    @param[out] first          First used partition.
    @param[out] num_used_parts Number of partitions after pruning.
    @param[out] check_min_num  Minimum number of partitions to call.
  */
  void partitions_optimizer_call_preparations(uint *first,
                                              uint *num_used_parts,
                                              uint *check_min_num);
  /*
    Estimate rows for records_in_range() or estimate_rows_upper_bound()
    by sampling a bounded number of used partitions and extrapolating.

    @param is_records_in_range TRUE: call records_in_range() on the
                               underlying handlers; FALSE: call
                               estimate_rows_upper_bound().
    @param inx     (records_in_range only) index to use.
    @param min_key (records_in_range only) start of range.
    @param max_key (records_in_range only) end of range.

    @return Estimated number of rows, or HA_POS_ERROR.
  */
  ha_rows estimate_rows(bool is_records_in_range, uint inx,
                        key_range *min_key, key_range *max_key);
public:
/*
/*
keys_to_use_for_scanning can probably be implemented as the
keys_to_use_for_scanning can probably be implemented as the
intersection of all underlying handlers if mixed handlers are used.
intersection of all underlying handlers if mixed handlers are used.
...
...
sql/item_func.cc
View file @
58ccbb7c
...
@@ -606,7 +606,7 @@ void Item_func::signal_divide_by_null()
...
@@ -606,7 +606,7 @@ void Item_func::signal_divide_by_null()
Item
*
Item_func
::
get_tmp_table_item
(
THD
*
thd
)
Item
*
Item_func
::
get_tmp_table_item
(
THD
*
thd
)
{
{
if
(
!
with_sum_func
&&
!
const_item
()
&&
functype
()
!=
SUSERVAR_FUNC
)
if
(
!
with_sum_func
&&
!
const_item
())
return
new
Item_field
(
result_field
);
return
new
Item_field
(
result_field
);
return
copy_or_same
(
thd
);
return
copy_or_same
(
thd
);
}
}
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment