Commit 36ab6cc8 authored by Sergei Petrunia

MDEV-34125: ANALYZE FORMAT=JSON: r_engine_stats.pages_read_time_ms has wrong scale

- Change the comments in class ha_handler_stats to say the members
  are in ticks, not microseconds.
- In sql_explain.cc, adjust the scale to print milliseconds.
parent 2d5cba22
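In other words: pages_read_time holds raw timer ticks, but the old JSON code divided by 1000 as if the value were microseconds. A minimal sketch of the corrected conversion, assuming timer_tracker_frequency() returns the tick rate per second (ticks_to_ms is an illustrative helper, not part of the patch):

/* Illustrative only: convert a raw tick count to milliseconds.
   ticks / 1000.0 would be correct only if ticks were microseconds. */
static inline double ticks_to_ms(unsigned long long ticks,
                                 double ticks_per_second)
{
  return static_cast<double>(ticks) * 1000.0 / ticks_per_second;
}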
--innodb_buffer_pool_dump_at_shutdown=off --innodb_buffer_pool_load_at_startup=off --innodb-stats-persistent=1 --innodb-stats-auto-recalc=off
#
# MDEV-34125: ANALYZE FORMAT=JSON: r_engine_stats.pages_read_time_ms has wrong scale
#
create table t1 (
a varchar(255),
b varchar(255),
c varchar(255),
d varchar(255),
primary key(a,b,c,d)
) engine=innodb;
insert into t1 select
repeat(uuid(), 7),
repeat(uuid(), 7),
repeat(uuid(), 7),
repeat(uuid(), 7)
from seq_1_to_16384;
# restart
set log_slow_verbosity='engine';
set long_query_time=0.0;
set @js='$analyze_output';
select @js;
@js
{
  "query_block": {
    "select_id": 1,
    "r_loops": 1,
    "r_total_time_ms": "REPLACED",
    "table": {
      "table_name": "t1",
      "access_type": "index",
      "key": "PRIMARY",
      "key_length": "1028",
      "used_key_parts": ["a", "b", "c", "d"],
      "r_loops": 1,
      "rows": 1,
      "r_rows": 16384,
      "r_table_time_ms": "REPLACED",
      "r_other_time_ms": "REPLACED",
      "r_engine_stats": {
        "pages_accessed": "REPLACED",
        "pages_read_count": "REPLACED",
        "pages_read_time_ms": "REPLACED"
      },
      "filtered": 100,
      "r_filtered": 100,
      "using_index": true
    }
  }
}
set @pages_read_time_ms=
(select json_value(@js,'$.query_block.table.r_engine_stats.pages_read_time_ms'));
OK: pages_read_time is the same in slow log and ANALYZE
set long_query_time=default;
drop table t1;
#
# r_engine_stats tests that require slow query log.
#
--source include/analyze-format.inc
--source include/have_sequence.inc
--source include/have_innodb.inc
--echo #
--echo # MDEV-34125: ANALYZE FORMAT=JSON: r_engine_stats.pages_read_time_ms has wrong scale
--echo #
# Each row is 1K.
create table t1 (
a varchar(255),
b varchar(255),
c varchar(255),
d varchar(255),
primary key(a,b,c,d)
) engine=innodb;
# The data size is 16K rows * 1K = 16M.
# 16M / (page_size=16K) = 1K pages.
insert into t1 select
repeat(uuid(), 7),
repeat(uuid(), 7),
repeat(uuid(), 7),
repeat(uuid(), 7)
from seq_1_to_16384;
source include/restart_mysqld.inc;
set log_slow_verbosity='engine';
set long_query_time=0.0;
let $analyze_output= `analyze format=json
select * from t1 force index (PRIMARY) order by a desc, b desc, c desc, d desc`;
evalp set @js='$analyze_output';
# Print it out for user-friendliness
--replace_regex /("(r_[a-z_]*_time_ms|pages[^"]*)": )[^, \n]*/\1"REPLACED"/
select @js;
set @pages_read_time_ms=
(select json_value(@js,'$.query_block.table.r_engine_stats.pages_read_time_ms'));
let ANALYZE_PAGES=`select @pages_read_time_ms`;
let SLOW_LOG_FILE= `select @@slow_query_log_file`;
perl;
my $slow_log_file= $ENV{'SLOW_LOG_FILE'} or die "SLOW_LOG_FILE not set";
my $analyze_pages=$ENV{'ANALYZE_PAGES'};
open(FILE, $slow_log_file) or die "Failed to open $slow_log_file";
# We don't run any queries that touch a storage engine after the query of
# interest, so it is enough to take the last occurrence of
# "Pages_read_time: NNNN" in the file.
while(<FILE>) {
$slow_log_pages=$1 if (/Pages_read_time: ([0-9.]+)/);
}
close(FILE);
if ( $slow_log_pages > $analyze_pages * 0.95 &&
     $slow_log_pages < $analyze_pages * 1.05) {
  print "\n\n OK: pages_read_time is the same in slow log and ANALYZE\n\n";
} else {
  print "\n\n FAIL: $slow_log_pages is not within 5% of $analyze_pages\n";
}
EOF
set long_query_time=default;
drop table t1;
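The perl block above accepts the two measurements as equal when they agree within ±5%, absorbing scheduling and timer noise between the ANALYZE output and the slow-log line. The same check restated in C++ (approx_equal is a hypothetical name, shown only to make the tolerance explicit):

/* Relative-tolerance check mirroring the perl comparison above:
   value must fall strictly inside (reference*0.95, reference*1.05). */
static bool approx_equal(double reference, double value,
                         double rel_tol= 0.05)
{
  return value > reference * (1.0 - rel_tol) &&
         value < reference * (1.0 + rel_tol);
}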
@@ -26,9 +26,14 @@ class ha_handler_stats
   ulonglong pages_accessed;     /* Pages accessed from page cache */
   ulonglong pages_updated;      /* Pages changed in page cache */
   ulonglong pages_read_count;   /* Pages read from disk */
-  ulonglong pages_read_time;    /* Time reading pages, in microsec. */
+  /* Time spent reading pages, in timer_tracker_frequency() units */
+  ulonglong pages_read_time;
   ulonglong undo_records_read;
-  ulonglong engine_time;        /* Time spent in engine in microsec */
+  /* Time spent in engine, in timer_tracker_frequency() units */
+  ulonglong engine_time;
   uint active;                  /* <> 0 if status has to be updated */
 #define first_stat pages_accessed
 #define last_stat engine_time
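With the comments corrected, the convention is that engines accumulate raw tick deltas in these members and leave unit conversion to the reporting layer. A sketch of engine-side accounting under that assumption (account_page_read is a hypothetical helper; the tick source must be the one whose rate timer_tracker_frequency() reports):

#include "my_rdtsc.h"   /* my_timer_cycles() */

/* Hypothetical helper: store the raw tick delta, no unit conversion here. */
static void account_page_read(ha_handler_stats *stats,
                              unsigned long long start_ticks)
{
  stats->pages_read_count++;
  stats->pages_read_time+= my_timer_cycles() - start_ticks;
}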
@@ -207,6 +207,9 @@ int Explain_query::send_explain(THD *thd, bool extended)
 int Explain_query::print_explain(select_result_sink *output,
                                  uint8 explain_flags, bool is_analyze)
 {
+  /* A sanity check for ANALYZE: */
+  DBUG_ASSERT(timer_tracker_frequency() != 0.0);
   if (upd_del_plan)
   {
     upd_del_plan->print_explain(this, output, explain_flags, is_analyze);
@@ -1723,7 +1726,7 @@ static void trace_engine_stats(handler *file, Json_writer *writer)
     writer->add_member("pages_read_count").add_ull(hs->pages_read_count);
     if (hs->pages_read_time)
       writer->add_member("pages_read_time_ms").
-        add_double(hs->pages_read_time / 1000.0);
+        add_double(hs->pages_read_time * 1000. / timer_tracker_frequency());
     if (hs->undo_records_read)
       writer->add_member("old_rows_read").add_ull(hs->undo_records_read);
     writer->end_object();
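To make the scale error concrete: assuming an illustrative tick rate of 1 GHz, 5,000,000 ticks is 5 ms, yet the old expression reported it as 5000 ms, a factor of 1000 at that rate. A standalone demonstration:

#include <cstdio>

int main()
{
  const double freq= 1e9;                      /* assumed ticks per second */
  const unsigned long long ticks= 5000000ULL;  /* 5 ms worth of ticks */

  std::printf("old scale: %.3f ms\n", ticks / 1000.0);        /* 5000.000, wrong */
  std::printf("new scale: %.3f ms\n", ticks * 1000.0 / freq); /* 5.000, correct */
  return 0;
}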