Commit b4ea1830 authored by Sergei Petrunia's avatar Sergei Petrunia

Apply this patch from Percona Server:

commit cd7201514fee78aaf7d3eb2b28d2573c76f53b84
Author: Laurynas Biveinis <laurynas.biveinis@gmail.com>
Date:   Tue Nov 14 06:34:19 2017 +0200

    Fix bug 1704195 / 87065 / TDB-83 (Stop ANALYZE TABLE from flushing table definition cache)

    Make ANALYZE TABLE stop flushing affected tables from the table
    definition cache, which has the effect of not blocking any subsequent
    new queries involving the table if there's a parallel long-running
    query:

    - new table flag HA_ONLINE_ANALYZE, return it for InnoDB and TokuDB
      tables;
    - in mysql_admin_table, if we are performing ANALYZE TABLE, and the
      table flag is set, do not remove the table from the table
      definition cache, do not invalidate query cache;
    - in partitioning handler, refresh the query optimizer statistics
      after ANALYZE if the underlying handler supports HA_ONLINE_ANALYZE;
    - new testcases main.percona_nonflushing_analyze_debug,
    parts.percona_nonflushing_analyze_debug and a supporting debug sync
      point.

    For TokuDB, this change exposes bug TDB-83 (Index cardinality stats
    updated for handler::info(HA_STATUS_CONST), not often enough for
    tokudb_cardinality_scale_percent). TokuDB may return different
    rec_per_key values depending on dynamic variable
    tokudb_cardinality_scale_percent value. The server does not have a way
    of knowing that changing this variable invalidates the previous
    rec_per_key values in any opened table shares, and so does not call
    info(HA_STATUS_CONST) again. Fix by updating rec_per_key for both
    HA_STATUS_CONST and HA_STATUS_VARIABLE. This also forces a re-record
    of tokudb.bugs.db756_card_part_hash_1_pick, with the new output
    seeming to be more correct.
parent 82490a97
#
# Test ANALYZE TABLE that does not flush table definition cache
# Arguments:
# $percona_nonflushing_analyze_table - table to test
#
# Scenario: con1 starts an index scan on the table and parks mid-scan on a
# debug sync point; the default connection then runs ANALYZE TABLE followed
# by a SELECT on the same table. With the HA_ONLINE_ANALYZE fix in place,
# ANALYZE does not evict the table from the table definition cache, so the
# second SELECT proceeds without blocking behind the paused scan.
#
# Record the session count so we can wait for con1 to be fully gone at the end.
--source include/count_sessions.inc
--connect con1,localhost,root
# Pause inside handler::ha_index_next: announce the scan is in progress,
# then hold until the default connection signals finish_scan.
SET DEBUG_SYNC="handler_ha_index_next_end SIGNAL idx_scan_in_progress WAIT_FOR finish_scan";
# send_eval: issue the query asynchronously so this connection stays parked
# on the sync point while we switch back to the default connection.
send_eval SELECT * FROM $percona_nonflushing_analyze_table;
--connection default
# Make sure con1 is actually paused mid-scan before running ANALYZE.
SET DEBUG_SYNC="now WAIT_FOR idx_scan_in_progress";
eval ANALYZE TABLE $percona_nonflushing_analyze_table;
# With the bug fixed this should not block
eval SELECT * FROM $percona_nonflushing_analyze_table;
# Release con1 so its suspended index scan can complete.
SET DEBUG_SYNC="now SIGNAL finish_scan";
--connection con1
# Collect the result of the send_eval SELECT issued above.
reap;
--disconnect con1
--connection default
SET DEBUG_SYNC='reset';
# Wait until con1's disconnect is fully processed before the caller proceeds.
--source include/wait_until_count_sessions.inc
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1), (2), (3);
connect con1,localhost,root;
SET DEBUG_SYNC="handler_ha_index_next_end SIGNAL idx_scan_in_progress WAIT_FOR finish_scan";
SELECT * FROM t1;
connection default;
SET DEBUG_SYNC="now WAIT_FOR idx_scan_in_progress";
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 analyze status OK
SELECT * FROM t1;
a
1
2
3
SET DEBUG_SYNC="now SIGNAL finish_scan";
connection con1;
a
1
2
3
disconnect con1;
connection default;
SET DEBUG_SYNC='reset';
DROP TABLE t1;
# Non-partitioned InnoDB case: verify ANALYZE TABLE does not flush the
# table from the table definition cache (bug 1704195 / 87065).
# Requires a debug build (DEBUG_SYNC) and InnoDB.
--source include/have_debug_sync.inc
--source include/have_innodb.inc
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1), (2), (3);
# Run the shared non-flushing-ANALYZE scenario against t1.
--let $percona_nonflushing_analyze_table= t1
--source include/percona_nonflushing_analyze_debug.inc
DROP TABLE t1;
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB
PARTITION BY RANGE (a) (
PARTITION p0 VALUES LESS THAN (3),
PARTITION p1 VALUES LESS THAN (10));
INSERT INTO t1 VALUES (1), (2), (3), (4);
connect con1,localhost,root;
SET DEBUG_SYNC="handler_ha_index_next_end SIGNAL idx_scan_in_progress WAIT_FOR finish_scan";
SELECT * FROM t1;
connection default;
SET DEBUG_SYNC="now WAIT_FOR idx_scan_in_progress";
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 analyze status OK
SELECT * FROM t1;
a
1
2
3
4
SET DEBUG_SYNC="now SIGNAL finish_scan";
connection con1;
a
1
2
3
4
disconnect con1;
connection default;
SET DEBUG_SYNC='reset';
DROP TABLE t1;
CREATE TABLE t2 (a INT PRIMARY KEY) ENGINE=InnoDB
PARTITION BY RANGE (a)
SUBPARTITION BY HASH (A)
SUBPARTITIONS 2 (
PARTITION p0 VALUES LESS THAN (3),
PARTITION p1 VALUES LESS THAN (10));
INSERT INTO t2 VALUES (1), (2), (3), (4);
connect con1,localhost,root;
SET DEBUG_SYNC="handler_ha_index_next_end SIGNAL idx_scan_in_progress WAIT_FOR finish_scan";
SELECT * FROM t2;
connection default;
SET DEBUG_SYNC="now WAIT_FOR idx_scan_in_progress";
ANALYZE TABLE t2;
Table Op Msg_type Msg_text
test.t2 analyze status OK
SELECT * FROM t2;
a
1
2
3
4
SET DEBUG_SYNC="now SIGNAL finish_scan";
connection con1;
a
1
2
3
4
disconnect con1;
connection default;
SET DEBUG_SYNC='reset';
DROP TABLE t2;
# Partitioned-table cases: verify ANALYZE TABLE does not flush the table
# definition cache when the partitioning handler wraps an engine with
# HA_ONLINE_ANALYZE (bug 1704195 / 87065).
# Requires a debug build (DEBUG_SYNC), InnoDB, and partitioning support.
--source include/have_debug_sync.inc
--source include/have_innodb.inc
--source include/have_partition.inc
# Case 1: RANGE-partitioned table.
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB
PARTITION BY RANGE (a) (
PARTITION p0 VALUES LESS THAN (3),
PARTITION p1 VALUES LESS THAN (10));
INSERT INTO t1 VALUES (1), (2), (3), (4);
--let $percona_nonflushing_analyze_table= t1
--source include/percona_nonflushing_analyze_debug.inc
DROP TABLE t1;
# Case 2: RANGE partitions with HASH subpartitions, to exercise the
# subpartitioned path of ha_partition::analyze as well.
CREATE TABLE t2 (a INT PRIMARY KEY) ENGINE=InnoDB
PARTITION BY RANGE (a)
SUBPARTITION BY HASH (A)
SUBPARTITIONS 2 (
PARTITION p0 VALUES LESS THAN (3),
PARTITION p1 VALUES LESS THAN (10));
INSERT INTO t2 VALUES (1), (2), (3), (4);
--let $percona_nonflushing_analyze_table= t2
--source include/percona_nonflushing_analyze_debug.inc
DROP TABLE t2;
...@@ -1162,7 +1162,17 @@ int ha_partition::analyze(THD *thd, HA_CHECK_OPT *check_opt) ...@@ -1162,7 +1162,17 @@ int ha_partition::analyze(THD *thd, HA_CHECK_OPT *check_opt)
{ {
DBUG_ENTER("ha_partition::analyze"); DBUG_ENTER("ha_partition::analyze");
DBUG_RETURN(handle_opt_partitions(thd, check_opt, ANALYZE_PARTS)); int result= handle_opt_partitions(thd, check_opt, ANALYZE_PARTS);
if ((result == 0) && m_file[0]
&& (m_file[0]->ha_table_flags() & HA_ONLINE_ANALYZE))
{
/* If this is ANALYZE TABLE that will not force table definition cache
eviction, update statistics for the partition handler. */
this->info(HA_STATUS_CONST | HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
}
DBUG_RETURN(result);
} }
......
...@@ -2880,6 +2880,9 @@ int handler::ha_index_next(uchar * buf) ...@@ -2880,6 +2880,9 @@ int handler::ha_index_next(uchar * buf)
table->update_virtual_fields(this, VCOL_UPDATE_FOR_READ); table->update_virtual_fields(this, VCOL_UPDATE_FOR_READ);
} }
table->status=result ? STATUS_NOT_FOUND: 0; table->status=result ? STATUS_NOT_FOUND: 0;
DEBUG_SYNC(ha_thd(), "handler_ha_index_next_end");
DBUG_RETURN(result); DBUG_RETURN(result);
} }
......
...@@ -304,6 +304,12 @@ enum enum_alter_inplace_result { ...@@ -304,6 +304,12 @@ enum enum_alter_inplace_result {
/* Engine wants primary keys for everything except sequences */ /* Engine wants primary keys for everything except sequences */
#define HA_WANTS_PRIMARY_KEY (1ULL << 55) #define HA_WANTS_PRIMARY_KEY (1ULL << 55)
/*
There is no need to evict the table from the table definition cache having
run ANALYZE TABLE on it
*/
#define HA_ONLINE_ANALYZE (1ULL << 56)
/* bits in index_flags(index_number) for what you can do with index */ /* bits in index_flags(index_number) for what you can do with index */
#define HA_READ_NEXT 1 /* TODO really use this flag */ #define HA_READ_NEXT 1 /* TODO really use this flag */
#define HA_READ_PREV 2 /* supports ::index_prev */ #define HA_READ_PREV 2 /* supports ::index_prev */
......
...@@ -1143,6 +1143,9 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, ...@@ -1143,6 +1143,9 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
} }
if (table->table && !table->view) if (table->table && !table->view)
{ {
const bool skip_flush=
(operator_func == &handler::ha_analyze)
&& (table->table->file->ha_table_flags() & HA_ONLINE_ANALYZE);
if (table->table->s->tmp_table) if (table->table->s->tmp_table)
{ {
/* /*
...@@ -1152,7 +1155,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, ...@@ -1152,7 +1155,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
if (open_for_modify && !open_error) if (open_for_modify && !open_error)
table->table->file->info(HA_STATUS_CONST); table->table->file->info(HA_STATUS_CONST);
} }
else if (open_for_modify || fatal_error) else if ((!skip_flush && open_for_modify) || fatal_error)
{ {
tdc_remove_table(thd, TDC_RT_REMOVE_UNUSED, tdc_remove_table(thd, TDC_RT_REMOVE_UNUSED,
table->db.str, table->table_name.str, FALSE); table->db.str, table->table_name.str, FALSE);
......
...@@ -2912,6 +2912,7 @@ ha_innobase::ha_innobase( ...@@ -2912,6 +2912,7 @@ ha_innobase::ha_innobase(
| HA_CAN_FULLTEXT_HINTS | HA_CAN_FULLTEXT_HINTS
*/ */
| HA_CAN_EXPORT | HA_CAN_EXPORT
| HA_ONLINE_ANALYZE
| HA_CAN_RTREEKEYS | HA_CAN_RTREEKEYS
| HA_CAN_TABLES_WITHOUT_ROLLBACK | HA_CAN_TABLES_WITHOUT_ROLLBACK
| HA_CONCURRENT_OPTIMIZE | HA_CONCURRENT_OPTIMIZE
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment