nexedi / MariaDB · Commits · a0b4a868

Commit a0b4a868, authored Aug 28, 2021 by Sergei Petrunia

    Code cleanup part #2.

parent 1496a52d

Changes: 2 changed files, with 27 additions and 45 deletions (+27 / -45)
    sql/sql_statistics.cc   +18  -41
    sql/sql_statistics.h     +9   -4
sql/sql_statistics.cc

@@ -1292,8 +1292,7 @@ void Histogram_json::init_for_collection(MEM_ROOT *mem_root,
                                          Histogram_type htype_arg,
                                          ulonglong size_arg)
 {
   type= htype_arg;
-  //values_ = (uchar*)alloc_root(mem_root, size_arg);
   DBUG_ASSERT(htype_arg == JSON_HB);
   size= (uint8) size_arg;
 }
@@ -1302,6 +1301,9 @@ void Histogram_json::init_for_collection(MEM_ROOT *mem_root,
   @brief
     Parse the histogram from its on-disk representation

+  @return
+     false  OK
+     True   Error
 */

 bool Histogram_json::parse(MEM_ROOT *mem_root, Field *field,
@@ -1309,8 +1311,8 @@ bool Histogram_json::parse(MEM_ROOT *mem_root, Field *field,
                            uint size_arg)
 {
   DBUG_ENTER("Histogram_json::parse");
   DBUG_ASSERT(type_arg == JSON_HB);
   size= (uint8) size_arg;
   type= type_arg;
   const char *json= (char *)ptr;
   int vt;
   std::vector<std::string> hist_buckets_text;
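The parse() hunk above declares int vt and a std::vector<std::string> for the bucket endpoints, and the same file defines json_get_array_items() (shown further down) to split a JSON array into strings. Purely as an illustration, assuming the on-disk text is a flat JSON array of endpoint values (the header file below still marks the format description as TODO), the call would look roughly like this; the sample array, the error-handling convention, and the surrounding includes are assumptions, not taken from the commit:

  // Illustration only: hypothetical on-disk text and assumed error convention.
  // Relies on the headers already included by sql_statistics.cc.
  const char *json= "[\"aaa\", \"bbb\", \"ccc\"]";   // made-up example input
  int vt;
  std::vector<std::string> hist_buckets_text;
  if (json_get_array_items(json, json + strlen(json), &vt, hist_buckets_text))
    return true;                       // treat a parse failure as an error
  // hist_buckets_text now holds one string per bucket endpoint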
@@ -1595,6 +1597,7 @@ void Histogram_json::serialize(Field *field)
   field->store((char*)json_text, strlen((char*)json_text),
                &my_charset_bin);
 }

 int Histogram_json::find_bucket(Field *field, const uchar *endpoint)
 {
   int low= 0;
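find_bucket() opens with int low= 0, which reads like the start of a binary search over the ordered bucket endpoints; the rest of the body is not shown in this hunk. Purely as a sketch of that idea, not the actual implementation, a lower-bound style search over sorted bounds could look like the following, with std::string standing in for the Field-based comparison the real code would use:

  // Sketch only: generic binary search for the bucket containing `endpoint`.
  // The real find_bucket() compares through Field/key images, not std::string.
  // Assumes <vector> and <string> are available, as added in sql_statistics.h.
  static int find_bucket_sketch(const std::vector<std::string> &bounds,
                                const std::string &endpoint)
  {
    int low= 0, high= (int)bounds.size() - 1;
    while (low <= high)
    {
      int mid= low + (high - low) / 2;
      if (bounds[mid] <= endpoint)
        low= mid + 1;          // endpoint lies at or after this bound
      else
        high= mid - 1;
    }
    return low - 1;            // index of the last bound <= endpoint, or -1
  }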
@@ -2061,15 +2064,22 @@ class Histogram_builder_json : public Histogram_builder
   }
 };

 Histogram_base *create_histogram(Histogram_type hist_type)
 {
-  // assumes the caller already checked for invalid histograms
-  if (hist_type == JSON_HB)
-    return new Histogram_json;
-  else
-    return new Histogram_binary;
+  switch (hist_type) {
+  case SINGLE_PREC_HB:
+  case DOUBLE_PREC_HB:
+    return new Histogram_binary();
+  case JSON_HB:
+    return new Histogram_json();
+  default:
+    DBUG_ASSERT(0);
+  }
+  return NULL;
 }

 bool json_get_array_items(const char *json, const char *json_end,
                           int *value_type,
                           std::vector<std::string> &container)
 {
   json_engine_t je;
   int vl;
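With the if/else replaced by the switch above and the duplicate get_histogram_by_type() removed further down, create_histogram() is left as the single factory for both histogram kinds. A minimal caller sketch, assuming init_for_collection() is part of the Histogram_base interface as the Histogram_json override earlier in this file suggests, and with hist_type, hist_size and mem_root supplied by the caller:

  // Sketch, not from the commit: pick the concrete histogram class, then
  // prepare it for collection. create_histogram() only returns NULL on the
  // DBUG_ASSERT(0) path, so hist_type is assumed to be valid here.
  Histogram_base *hist= create_histogram(hist_type);
  hist->init_for_collection(mem_root, hist_type, hist_size);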
@@ -2255,16 +2265,6 @@ class Count_distinct_field: public Sql_alloc
     return distincts_single_occurence;
   }

-  /*
-    @brief
-    Get the size of the histogram in bytes built for table_field
-  */
-  /*
-  uint get_hist_size()
-  {
-    return table_field->collected_stats->histogram.get_size();
-  }*/
-
   /*
     @brief
     Get the pointer to the histogram built for table_field
@@ -2916,27 +2916,6 @@ bool Column_statistics_collected::add()
   return err;
 }

-/*
-  Create an empty Histogram object from histogram_type.
-
-  Note: it is not yet clear whether collection-time histogram should be the same
-  as lookup-time histogram. At the moment, they are.
-*/
-Histogram_base *get_histogram_by_type(MEM_ROOT *mem_root, Histogram_type hist_type)
-{
-  switch (hist_type) {
-  case SINGLE_PREC_HB:
-  case DOUBLE_PREC_HB:
-    return new Histogram_binary();
-  case JSON_HB:
-    return new Histogram_json();
-  default:
-    DBUG_ASSERT(0);
-  }
-  return NULL;
-};
-
 /**
   @brief
   Get the results of aggregation when collecting the statistics on a column
@@ -3488,7 +3467,6 @@ int read_statistics_for_table(THD *thd, TABLE *table, TABLE_LIST *stat_tables)

   /* Read statistics from the statistical table column_stats */
   stat_table= stat_tables[COLUMN_STAT].table;
-  //ulong total_hist_size= 0;
   bool have_histograms= false;
   Column_stat column_stat(stat_table, table);
   for (field_ptr= table_share->field; *field_ptr; field_ptr++)
@@ -3496,7 +3474,6 @@ int read_statistics_for_table(THD *thd, TABLE *table, TABLE_LIST *stat_tables)
     table_field= *field_ptr;
     column_stat.set_key_fields(table_field);
     column_stat.get_stat_values();
-    //total_hist_size+= table_field->read_stats->histogram.get_size();
     if (table_field->read_stats->histogram_type_on_disk != INVALID_HISTOGRAM)
       have_histograms= true;
   }
sql/sql_statistics.h

@@ -17,6 +17,8 @@
 #define SQL_STATISTICS_H

+#include <vector>
+#include <string>

 /*
   For COMPLEMENTARY_FOR_QUERIES and PREFERABLY_FOR_QUERIES they are
   similar to the COMPLEMENTARY and PREFERABLY respectively except that
@@ -279,7 +281,7 @@ class Histogram_binary : public Histogram_base
   uint get_size() override { return (uint) size; }
-  bool is_available() override { return get_size() > 0 && (values != NULL); }
+  bool is_available() override { return (values != NULL); }

   /*
     This function checks that histograms should be usable only when
@@ -335,13 +337,17 @@ class Histogram_binary : public Histogram_base
 /*
   An equi-height histogram which stores real values for bucket bounds.

   Handles @@histogram_type=JSON_HB
+
+  On-disk format is JSON:
+  (TODO description)
 */

 class Histogram_json : public Histogram_base
 {
 private:
   Histogram_type type;
-  uint8 size; /* Number of elements in the histogram*/
+  uint8 size; /* Number of elements in the histogram */
+  /* Collection-time only: collected histogram in the JSON form. */
   uchar *json_text;
@@ -414,7 +420,6 @@ class Table_statistics

   /* Array of records per key for index prefixes */
   ulonglong *idx_avg_frequency;
-  //uchar *histograms;  /* Sequence of histograms */
 };