Commit 0bfa3dff authored by Marko Mäkelä's avatar Marko Mäkelä

MDEV-12698 innodb.innodb_stats_del_mark test failure

In my merge of the MySQL fix for Oracle Bug#23333990 / WL#9513
I overlooked some subsequent revisions to the test, and I also
failed to notice that the test is actually always failing.

Oracle introduced the parameter innodb_stats_include_delete_marked
but failed to consistently take it into account in FOREIGN KEY
constraints that involve CASCADE or SET NULL.

When innodb_stats_include_delete_marked=ON, obviously the purge of
delete-marked records should update the statistics as well.

One more omission was that statistics were never updated on ROLLBACK.
We are fixing that as well, properly taking into account the
parameter innodb_stats_include_delete_marked.

dict_stats_analyze_index_level(): Simplify an expression.
(Using the ternary operator with a constant operand is unnecessary
obfuscation.)

page_scan_method_t: Revert the change done by Oracle. Instead,
examine srv_stats_include_delete_marked directly where it is needed.

dict_stats_update_if_needed(): Renamed from
row_update_statistics_if_needed().

row_update_for_mysql_using_upd_graph(): Assert that the table statistics
are initialized, as guaranteed by ha_innobase::open(). Update the
statistics in a consistent way, both for FOREIGN KEY triggers and
for the main table. If FOREIGN KEY constraints exist, do not dereference
a freed pointer, but cache the proper value of node->is_delete so that
it matches prebuilt->table.

row_purge_record_func(): Update statistics if
innodb_stats_include_delete_marked=ON.

row_undo_ins(): Update statistics (on ROLLBACK of a fresh INSERT).
This is independent of the parameter; the record is not delete-marked.

row_undo_mod(): Update statistics on the ROLLBACK of updating key columns,
or (if innodb_stats_include_delete_marked=OFF) updating delete-marks.

innodb.innodb_stats_persistent: Renamed and extended from
innodb.innodb_stats_del_mark. Reduced the unnecessarily large dataset
from 262,144 to 32 rows. Test both values of the configuration
parameter innodb_stats_include_delete_marked.
Test that purge is updating the statistics.

innodb_fts.innodb_fts_multiple_index: Adjust the result. The test
is performing a ROLLBACK of an INSERT, which now affects the statistics.

include/wait_all_purged.inc: Moved from innodb.innodb_truncate_debug
to its own file.
parent 5e9d6511
# Wait for everything to be purged.
# The user should have set innodb_purge_rseg_truncate_frequency=1.
#
# Polls SHOW ENGINE INNODB STATUS up to 300 times, sleeping 0.1s between
# attempts (about 30 seconds total), until the history list length reaches 0.
let $wait_counter= 300;
while ($wait_counter)
{
# Reduce the multi-line status output to the history list length;
# on success $remaining becomes 'InnoDB 0'.
--replace_regex /.*History list length ([0-9]+).*/\1/
let $remaining= `SHOW ENGINE INNODB STATUS`;
if ($remaining == 'InnoDB 0')
{
# Purge has caught up with the history; leave the loop.
let $wait_counter= 0;
}
if ($wait_counter)
{
real_sleep 0.1;
dec $wait_counter;
}
}
# Report the outcome; the expected result is "InnoDB 0 transactions not purged".
echo $remaining transactions not purged;
#
# Bug 23333990 PERSISTENT INDEX STATISTICS UPDATE BEFORE
# TRANSACTION IS COMMITTED
#
"Test 1:- Uncommited delete test"
CREATE TABLE t1 (id INT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY,
val INT UNSIGNED NOT NULL,
INDEX (val)) ENGINE=INNODB
STATS_PERSISTENT=1,STATS_AUTO_RECALC=1;
INSERT INTO t1 (val) VALUES (CEIL(RAND()*20));
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
SELECT COUNT(*) FROM t1;
COUNT(*)
262144
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 analyze status OK
connect con1, localhost, root,,;
START TRANSACTION;
DELETE FROM t1;
SELECT COUNT(*) FROM t1;
connection default;
Test correctly estimates the number of rows as > 20000
even when in other uncommmited transaction
all rows have been deleted.
connection con1;
COUNT(*)
0
commit;
connection default;
Test 2:- Insert and rollback test
CREATE TABLE t2 (id INT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY,
val INT UNSIGNED NOT NULL,
INDEX (val)) ENGINE=INNODB
STATS_PERSISTENT=1,STATS_AUTO_RECALC=1;
connection con1;
START TRANSACTION;
INSERT INTO t2 (val) VALUES (CEIL(RAND()*20));
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
SELECT COUNT(*) FROM t2;
connection default;
select count(*) from t2;
count(*)
0
Test correctly estimates the number of rows as > 20000
even when in other uncommited transaction
many rows are inserted.
connection con1;
COUNT(*)
262144
Rollback the insert
rollback;
disconnect con1;
connection default;
Test correctly estimates the number of rows as 1
after rollback.
DROP TABLE t1,t2;
SET @saved_include_delete_marked = @@GLOBAL.innodb_stats_include_delete_marked;
SET GLOBAL innodb_stats_include_delete_marked = ON;
SET @saved_traditional = @@GLOBAL.innodb_stats_traditional;
SET GLOBAL innodb_stats_traditional=false;
SET @saved_modified_counter = @@GLOBAL.innodb_stats_modified_counter;
SET GLOBAL innodb_stats_modified_counter=1;
CREATE TABLE t0 (id SERIAL, val INT UNSIGNED NOT NULL, KEY(val))
ENGINE=INNODB STATS_PERSISTENT=1,STATS_AUTO_RECALC=1;
CREATE TABLE t1 LIKE t0;
CREATE TABLE t2 LIKE t0;
INSERT INTO t0 (val) VALUES (4);
INSERT INTO t0 (val) SELECT 4 FROM t0;
INSERT INTO t0 (val) SELECT 4 FROM t0;
INSERT INTO t0 (val) SELECT 4 FROM t0;
INSERT INTO t0 (val) SELECT 4 FROM t0;
INSERT INTO t1 SELECT * FROM t0;
SELECT COUNT(*) FROM t1;
COUNT(*)
16
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 analyze status OK
connect con1, localhost, root,,;
START TRANSACTION;
DELETE FROM t1;
SELECT COUNT(*) FROM t1;
connection default;
# With innodb_stats_include_delete_marked=ON,
# DELETE must not affect statistics before COMMIT.
EXPLAIN SELECT * FROM t1 WHERE val=4;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref val val 4 const 16 Using index
connection con1;
COUNT(*)
0
ROLLBACK;
SELECT COUNT(*) FROM t1;
COUNT(*)
16
EXPLAIN SELECT * FROM t1 WHERE val=4;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref val val 4 const 16 Using index
BEGIN;
DELETE FROM t1;
COMMIT;
SELECT COUNT(*) FROM t1;
COUNT(*)
0
connection default;
BEGIN;
INSERT INTO t2 SELECT * FROM t0;
# The INSERT will show up before COMMIT.
EXPLAIN SELECT * FROM t2 WHERE val=4;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ref val val 4 const 16 Using index
SELECT COUNT(*) FROM t2;
COUNT(*)
16
# The ROLLBACK of the INSERT must affect the statistics.
ROLLBACK;
SELECT COUNT(*) FROM t2;
COUNT(*)
0
connection con1;
EXPLAIN SELECT * FROM t2 WHERE val=4;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ref val val 4 const 1 Using index
SET @saved_frequency = @@GLOBAL.innodb_purge_rseg_truncate_frequency;
SET GLOBAL innodb_purge_rseg_truncate_frequency = 1;
InnoDB 0 transactions not purged
SET GLOBAL innodb_purge_rseg_truncate_frequency = @saved_frequency;
# After COMMIT and purge, the DELETE must show up.
EXPLAIN SELECT * FROM t1 WHERE val=4;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref val val 4 const 1 Using index
SET GLOBAL innodb_stats_include_delete_marked = OFF;
BEGIN;
INSERT INTO t1 SELECT * FROM t0;
EXPLAIN SELECT * FROM t1 WHERE val=4;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref val val 4 const 16 Using index
ROLLBACK;
EXPLAIN SELECT * FROM t1 WHERE val=4;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref val val 4 const 1 Using index
BEGIN;
INSERT INTO t1 SELECT * FROM t0;
COMMIT;
EXPLAIN SELECT * FROM t1 WHERE val=4;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref val val 4 const 16 Using index
BEGIN;
DELETE FROM t1;
SELECT COUNT(*) FROM t1;
COUNT(*)
0
# With innodb_stats_include_delete_marked=OFF,
# DELETE must affect statistics even before COMMIT.
# However, if there was a WHERE condition,
# ha_innobase::records_in_range() would count the delete-marked records.
EXPLAIN SELECT * FROM t1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL val 4 NULL 1 Using index
ROLLBACK;
EXPLAIN SELECT * FROM t1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL val 4 NULL 16 Using index
SELECT COUNT(*) FROM t1;
COUNT(*)
16
disconnect con1;
connection default;
DROP TABLE t0,t1,t2;
SET GLOBAL innodb_stats_include_delete_marked = @saved_include_delete_marked;
SET GLOBAL innodb_stats_traditional = @saved_traditional;
SET GLOBAL innodb_stats_modified_counter = @saved_modified_counter;
--source include/have_innodb.inc
--source include/big_test.inc
--echo #
--echo # Bug 23333990 PERSISTENT INDEX STATISTICS UPDATE BEFORE
--echo # TRANSACTION IS COMMITTED
--echo #
--echo "Test 1:- Uncommited delete test"
CREATE TABLE t1 (id INT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY,
val INT UNSIGNED NOT NULL,
INDEX (val)) ENGINE=INNODB
STATS_PERSISTENT=1,STATS_AUTO_RECALC=1;
# Grow t1 by repeated doubling: 1 seed row, then 18 doublings = 262144 rows.
INSERT INTO t1 (val) VALUES (CEIL(RAND()*20));
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
INSERT INTO t1 (val) SELECT CEIL(RAND()*20) FROM t1;
SELECT COUNT(*) FROM t1;
# Establish a baseline for the persistent statistics.
ANALYZE TABLE t1;
# In a second connection, delete every row but leave the change uncommitted.
connect(con1, localhost, root,,);
START TRANSACTION;
DELETE FROM t1;
send SELECT COUNT(*) FROM t1;
connection default;
# The uncommitted DELETE must not have reduced the persistent statistics:
# the optimizer should still estimate far more than 20000 rows.
let $row_count= query_get_value(EXPLAIN SELECT * FROM t1 WHERE val=4, rows,1);
if ($row_count > 20000)
{
--echo Test correctly estimates the number of rows as > 20000
--echo even when in other uncommmited transaction
--echo all rows have been deleted.
}
connection con1;
# Collect the result of the earlier "send SELECT COUNT(*)".
reap;
commit;
connection default;
--echo Test 2:- Insert and rollback test
CREATE TABLE t2 (id INT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY,
val INT UNSIGNED NOT NULL,
INDEX (val)) ENGINE=INNODB
STATS_PERSISTENT=1,STATS_AUTO_RECALC=1;
# Populate t2 the same way, but inside an open (uncommitted) transaction.
connection con1;
START TRANSACTION;
INSERT INTO t2 (val) VALUES (CEIL(RAND()*20));
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
INSERT INTO t2 (val) SELECT CEIL(RAND()*20) FROM t2;
send SELECT COUNT(*) FROM t2;
connection default;
select count(*) from t2;
# The uncommitted INSERTs should already be reflected in the row estimate.
let $row_count= query_get_value(EXPLAIN SELECT * FROM t2 WHERE val=4, rows,1);
if ($row_count > 20000)
{
--echo Test correctly estimates the number of rows as > 20000
--echo even when in other uncommited transaction
--echo many rows are inserted.
}
connection con1;
reap;
--echo Rollback the insert
rollback;
disconnect con1;
connection default;
# After ROLLBACK the estimate must drop back to (at most) 1 row.
let $row_count= query_get_value(EXPLAIN SELECT * FROM t2 WHERE val=4, rows,1);
if ($row_count <= 1)
{
--echo Test correctly estimates the number of rows as $row_count
--echo after rollback.
}
DROP TABLE t1,t2;
--source include/have_innodb.inc
--source include/big_test.inc
# Save and override the statistics-related settings so that every single
# modification (innodb_stats_modified_counter=1) can trigger a recalculation.
SET @saved_include_delete_marked = @@GLOBAL.innodb_stats_include_delete_marked;
SET GLOBAL innodb_stats_include_delete_marked = ON;
SET @saved_traditional = @@GLOBAL.innodb_stats_traditional;
SET GLOBAL innodb_stats_traditional=false;
SET @saved_modified_counter = @@GLOBAL.innodb_stats_modified_counter;
SET GLOBAL innodb_stats_modified_counter=1;
CREATE TABLE t0 (id SERIAL, val INT UNSIGNED NOT NULL, KEY(val))
ENGINE=INNODB STATS_PERSISTENT=1,STATS_AUTO_RECALC=1;
CREATE TABLE t1 LIKE t0;
CREATE TABLE t2 LIKE t0;
# t0 is the 16-row source data set (1 seed row, 4 doublings), all val=4.
INSERT INTO t0 (val) VALUES (4);
INSERT INTO t0 (val) SELECT 4 FROM t0;
INSERT INTO t0 (val) SELECT 4 FROM t0;
INSERT INTO t0 (val) SELECT 4 FROM t0;
INSERT INTO t0 (val) SELECT 4 FROM t0;
INSERT INTO t1 SELECT * FROM t0;
SELECT COUNT(*) FROM t1;
# Establish a baseline for the persistent statistics of t1.
ANALYZE TABLE t1;
# In a second connection, delete all rows without committing.
connect(con1, localhost, root,,);
START TRANSACTION;
DELETE FROM t1;
send SELECT COUNT(*) FROM t1;
connection default;
--echo # With innodb_stats_include_delete_marked=ON,
--echo # DELETE must not affect statistics before COMMIT.
EXPLAIN SELECT * FROM t1 WHERE val=4;
connection con1;
# Collect the result of the earlier "send SELECT COUNT(*)".
reap;
ROLLBACK;
SELECT COUNT(*) FROM t1;
EXPLAIN SELECT * FROM t1 WHERE val=4;
# Now actually commit a DELETE; its effect on statistics is checked later,
# after purge has removed the delete-marked records.
BEGIN;
DELETE FROM t1;
COMMIT;
SELECT COUNT(*) FROM t1;
connection default;
BEGIN;
INSERT INTO t2 SELECT * FROM t0;
--echo # The INSERT will show up before COMMIT.
EXPLAIN SELECT * FROM t2 WHERE val=4;
SELECT COUNT(*) FROM t2;
--echo # The ROLLBACK of the INSERT must affect the statistics.
ROLLBACK;
SELECT COUNT(*) FROM t2;
connection con1;
EXPLAIN SELECT * FROM t2 WHERE val=4;
# Make purge run eagerly and wait until all history is purged,
# so that the committed DELETE above becomes visible to statistics.
SET @saved_frequency = @@GLOBAL.innodb_purge_rseg_truncate_frequency;
SET GLOBAL innodb_purge_rseg_truncate_frequency = 1;
--source include/wait_all_purged.inc
SET GLOBAL innodb_purge_rseg_truncate_frequency = @saved_frequency;
--echo # After COMMIT and purge, the DELETE must show up.
EXPLAIN SELECT * FROM t1 WHERE val=4;
# Repeat the INSERT/ROLLBACK and DELETE scenarios with the
# other setting of the parameter.
SET GLOBAL innodb_stats_include_delete_marked = OFF;
BEGIN;
INSERT INTO t1 SELECT * FROM t0;
EXPLAIN SELECT * FROM t1 WHERE val=4;
ROLLBACK;
EXPLAIN SELECT * FROM t1 WHERE val=4;
BEGIN;
INSERT INTO t1 SELECT * FROM t0;
COMMIT;
EXPLAIN SELECT * FROM t1 WHERE val=4;
BEGIN;
DELETE FROM t1;
SELECT COUNT(*) FROM t1;
--echo # With innodb_stats_include_delete_marked=OFF,
--echo # DELETE must affect statistics even before COMMIT.
--echo # However, if there was a WHERE condition,
--echo # ha_innobase::records_in_range() would count the delete-marked records.
EXPLAIN SELECT * FROM t1;
ROLLBACK;
EXPLAIN SELECT * FROM t1;
SELECT COUNT(*) FROM t1;
disconnect con1;
connection default;
DROP TABLE t0,t1,t2;
# Restore the saved settings.
SET GLOBAL innodb_stats_include_delete_marked = @saved_include_delete_marked;
SET GLOBAL innodb_stats_traditional = @saved_traditional;
SET GLOBAL innodb_stats_modified_counter = @saved_modified_counter;
......@@ -33,25 +33,8 @@ COMMIT;
disconnect con2;
connection default;
--source include/wait_all_purged.inc
# Wait for everything to be purged.
let $wait_counter= 300;
while ($wait_counter)
{
--replace_regex /.*History list length ([0-9]+).*/\1/
let $remaining= `SHOW ENGINE INNODB STATUS`;
if ($remaining == 'InnoDB 0')
{
let $wait_counter= 0;
}
if ($wait_counter)
{
real_sleep 0.1;
dec $wait_counter;
}
}
echo $remaining transactions not purged;
SET GLOBAL innodb_purge_rseg_truncate_frequency = @saved_frequency;
SET DEBUG_SYNC = 'now SIGNAL finish_scan';
......
......@@ -46,9 +46,9 @@ id a b
1 MySQL Tutorial DBMS stands for DataBase ...
select *, MATCH(a) AGAINST("Optimizing MySQL" IN BOOLEAN MODE) as x from t1;
id a b x
1 MySQL Tutorial DBMS stands for DataBase ... 0.0906190574169159
2 How To Use MySQL Well After you went through a ... 0.0906190574169159
3 Optimizing MySQL In this tutorial we will show ... 0.6961383819580078
1 MySQL Tutorial DBMS stands for DataBase ... 0.000000001885928302414186
2 How To Use MySQL Well After you went through a ... 0.000000001885928302414186
3 Optimizing MySQL In this tutorial we will show ... 0.22764469683170319
select *, MATCH(b) AGAINST("collections support" IN BOOLEAN MODE) as x from t1;
id a b x
1 MySQL Tutorial DBMS stands for DataBase ... 0
......@@ -90,9 +90,9 @@ id a b
1 MySQL Tutorial DBMS stands for DataBase ...
select *, MATCH(a) AGAINST("Optimizing MySQL" IN BOOLEAN MODE) as x from t1;
id a b x
1 MySQL Tutorial DBMS stands for DataBase ... 0.0906190574169159
2 How To Use MySQL Well After you went through a ... 0.0906190574169159
3 Optimizing MySQL In this tutorial we will show ... 0.6961383819580078
1 MySQL Tutorial DBMS stands for DataBase ... 0.000000001885928302414186
2 How To Use MySQL Well After you went through a ... 0.000000001885928302414186
3 Optimizing MySQL In this tutorial we will show ... 0.22764469683170319
select *, MATCH(b) AGAINST("collections support" IN BOOLEAN MODE) as x from t1;
id a b x
1 MySQL Tutorial DBMS stands for DataBase ... 0
......
......@@ -1159,10 +1159,11 @@ dict_stats_analyze_index_level(
leaf-level delete marks because delete marks on
non-leaf level do not make sense. */
if (level == 0 && (srv_stats_include_delete_marked ? 0:
rec_get_deleted_flag(
if (level == 0
&& !srv_stats_include_delete_marked
&& rec_get_deleted_flag(
rec,
page_is_comp(btr_pcur_get_page(&pcur))))) {
page_is_comp(btr_pcur_get_page(&pcur)))) {
if (rec_is_last_on_page
&& !prev_rec_is_copied
......@@ -1336,16 +1337,11 @@ dict_stats_analyze_index_level(
/* aux enum for controlling the behavior of dict_stats_scan_page() @{ */
enum page_scan_method_t {
COUNT_ALL_NON_BORING_AND_SKIP_DEL_MARKED,/* scan all records on
the given page and count the number of
distinct ones, also ignore delete marked
records */
QUIT_ON_FIRST_NON_BORING,/* quit when the first record that differs
from its right neighbor is found */
COUNT_ALL_NON_BORING_INCLUDE_DEL_MARKED/* scan all records on
the given page and count the number of
distinct ones, include delete marked
records */
/** scan the records on the given page, counting the number
of distinct ones; @see srv_stats_include_delete_marked */
COUNT_ALL_NON_BORING,
/** quit on the first record that differs from its right neighbor */
QUIT_ON_FIRST_NON_BORING
};
/* @} */
......@@ -1392,13 +1388,10 @@ dict_stats_scan_page(
Because offsets1,offsets2 should be big enough,
this memory heap should never be used. */
mem_heap_t* heap = NULL;
const rec_t* (*get_next)(const rec_t*);
if (scan_method == COUNT_ALL_NON_BORING_AND_SKIP_DEL_MARKED) {
get_next = page_rec_get_next_non_del_marked;
} else {
get_next = page_rec_get_next_const;
}
const rec_t* (*get_next)(const rec_t*)
= srv_stats_include_delete_marked
? page_rec_get_next_const
: page_rec_get_next_non_del_marked;
const bool should_count_external_pages = n_external_pages != NULL;
......@@ -1618,9 +1611,7 @@ dict_stats_analyze_index_below_cur(
offsets_rec = dict_stats_scan_page(
&rec, offsets1, offsets2, index, page, n_prefix,
srv_stats_include_delete_marked ?
COUNT_ALL_NON_BORING_INCLUDE_DEL_MARKED:
COUNT_ALL_NON_BORING_AND_SKIP_DEL_MARKED, n_diff,
COUNT_ALL_NON_BORING, n_diff,
n_external_pages);
#if 0
......
......@@ -120,6 +120,7 @@ background stats gathering thread. Only the table id is added to the
list, so the table can be closed after being enqueued and it will be
opened when needed. If the table does not exist later (has been DROPped),
then it will be removed from the pool and skipped. */
static
void
dict_stats_recalc_pool_add(
/*=======================*/
......@@ -147,6 +148,44 @@ dict_stats_recalc_pool_add(
os_event_set(dict_stats_event);
}
/** Update the table modification counter and if necessary,
schedule new estimates for table and index statistics to be calculated.
@param[in,out]	table	persistent or temporary table */
void
dict_stats_update_if_needed(dict_table_t* table)
{
/* The caller (e.g. ha_innobase::open()) must have initialized the
statistics before invoking this. */
ut_ad(table->stat_initialized);
/* dict_stats_update() may acquire dict_sys->mutex; the caller must
not already hold it. */
ut_ad(!mutex_own(&dict_sys->mutex));
/* Note: the counter is read before the post-increment takes effect. */
ulonglong counter = table->stat_modified_counter++;
ulonglong n_rows = dict_table_get_n_rows(table);
if (dict_stats_is_persistent_enabled(table)) {
/* Persistent statistics: enqueue the table for the background
statistics thread once 10% of the rows have been modified. */
if (counter > n_rows / 10 /* 10% */
&& dict_stats_auto_recalc_is_enabled(table)) {
dict_stats_recalc_pool_add(table);
table->stat_modified_counter = 0;
}
return;
}
/* Calculate new statistics if 1 / 16 of table has been modified
since the last time a statistics batch was run.
We calculate statistics at most every 16th round, since we may have
a counter table which is very small and updated very often. */
ulonglong threshold = 16 + n_rows / 16; /* 6.25% */
if (srv_stats_modified_counter) {
/* A nonzero innodb_stats_modified_counter caps the threshold. */
threshold = std::min(srv_stats_modified_counter, threshold);
}
if (counter > threshold) {
/* this will reset table->stat_modified_counter to 0 */
dict_stats_update(table, DICT_STATS_RECALC_TRANSIENT);
}
}
/*****************************************************************//**
Get a table from the auto recalc pool. The returned table id is removed
from the pool.
......
......@@ -110,6 +110,13 @@ dict_stats_deinit(
dict_table_t* table) /*!< in/out: table */
MY_ATTRIBUTE((nonnull));
/** Update the table modification counter and if necessary,
schedule new estimates for table and index statistics to be calculated.
@param[in,out] table persistent or temporary table */
void
dict_stats_update_if_needed(dict_table_t* table)
MY_ATTRIBUTE((nonnull));
/*********************************************************************//**
Calculates new estimates for table and index statistics. The statistics
are used in query optimization.
......
......@@ -46,17 +46,6 @@ extern mysql_pfs_key_t dict_stats_recalc_pool_mutex_key;
extern my_bool innodb_dict_stats_disabled_debug;
#endif /* UNIV_DEBUG */
/*****************************************************************//**
Add a table to the recalc pool, which is processed by the
background stats gathering thread. Only the table id is added to the
list, so the table can be closed after being enqueued and it will be
opened when needed. If the table does not exist later (has been DROPped),
then it will be removed from the pool and skipped. */
void
dict_stats_recalc_pool_add(
/*=======================*/
const dict_table_t* table); /*!< in: table to add */
/*****************************************************************//**
Delete a given table from the auto recalc pool.
dict_stats_recalc_pool_del() */
......
......@@ -204,6 +204,7 @@ row_update_prebuilt_trx(
row_prebuilt_t* prebuilt, /*!< in/out: prebuilt struct
in MySQL handle */
trx_t* trx); /*!< in: transaction handle */
/*********************************************************************//**
Sets an AUTO_INC type lock on the table mentioned in prebuilt. The
AUTO_INC lock gives exclusive access to the auto-inc counter of the
......
......@@ -1196,58 +1196,6 @@ row_get_prebuilt_insert_row(
return(prebuilt->ins_node->row);
}
/*********************************************************************//**
Updates the table modification counter and calculates new estimates
for table and index statistics if necessary.
Superseded by dict_stats_update_if_needed(), which asserts instead of
silently returning when the statistics are not initialized. */
UNIV_INLINE
void
row_update_statistics_if_needed(
/*============================*/
dict_table_t* table) /*!< in: table */
{
ib_uint64_t counter;
ib_uint64_t n_rows;
/* Silently do nothing if the statistics have not been initialized;
a debug injection point records that this path was taken. */
if (!table->stat_initialized) {
DBUG_EXECUTE_IF(
"test_upd_stats_if_needed_not_inited",
fprintf(stderr, "test_upd_stats_if_needed_not_inited"
" was executed\n");
);
return;
}
/* Note: the counter is read before the post-increment takes effect. */
counter = table->stat_modified_counter++;
n_rows = dict_table_get_n_rows(table);
if (dict_stats_is_persistent_enabled(table)) {
/* Persistent statistics: enqueue the table for the background
statistics thread once 10% of the rows have been modified. */
if (counter > n_rows / 10 /* 10% */
&& dict_stats_auto_recalc_is_enabled(table)) {
dict_stats_recalc_pool_add(table);
table->stat_modified_counter = 0;
}
return;
}
/* Calculate new statistics if 1 / 16 of table has been modified
since the last time a statistics batch was run.
We calculate statistics at most every 16th round, since we may have
a counter table which is very small and updated very often. */
ib_uint64_t threshold= 16 + n_rows / 16; /* 6.25% */
if (srv_stats_modified_counter) {
/* A nonzero innodb_stats_modified_counter caps the threshold. */
threshold= ut_min((ib_uint64_t)srv_stats_modified_counter, threshold);
}
if (counter > threshold) {
/* dict_stats_update() acquires dict_sys->mutex; the caller
must not already hold it. */
ut_ad(!mutex_own(&dict_sys->mutex));
/* this will reset table->stat_modified_counter to 0 */
dict_stats_update(table, DICT_STATS_RECALC_TRANSIENT);
}
}
/*********************************************************************//**
Sets an AUTO_INC type lock on the table mentioned in prebuilt. The
AUTO_INC lock gives exclusive access to the auto-inc counter of the
......@@ -1649,7 +1597,7 @@ row_insert_for_mysql(
ut_memcpy(prebuilt->row_id, node->row_id_buf, DATA_ROW_ID_LEN);
}
row_update_statistics_if_needed(table);
dict_stats_update_if_needed(table);
trx->op_info = "";
if (blob_heap != NULL) {
......@@ -1895,6 +1843,7 @@ row_update_for_mysql_using_upd_graph(
ut_ad(trx);
ut_a(prebuilt->magic_n == ROW_PREBUILT_ALLOCATED);
ut_a(prebuilt->magic_n2 == ROW_PREBUILT_ALLOCATED);
ut_ad(table->stat_initialized);
UT_NOT_USED(mysql_rec);
if (!table->is_readable()) {
......@@ -1929,6 +1878,8 @@ row_update_for_mysql_using_upd_graph(
}
node = prebuilt->upd_node;
const bool is_delete = node->is_delete;
ut_ad(node->table == table);
if (node->cascade_heap) {
mem_heap_empty(node->cascade_heap);
......@@ -2099,8 +2050,11 @@ row_update_for_mysql_using_upd_graph(
thr->fk_cascade_depth = 0;
/* Update the statistics only after completing all cascaded
operations */
/* Update the statistics of each involved table
only after completing all operations, including
FOREIGN KEY...ON...CASCADE|SET NULL. */
bool update_statistics;
for (upd_cascade_t::iterator i = processed_cascades->begin();
i != processed_cascades->end();
++i) {
......@@ -2114,16 +2068,25 @@ row_update_for_mysql_using_upd_graph(
than protecting the following code with a latch. */
dict_table_n_rows_dec(node->table);
update_statistics = !srv_stats_include_delete_marked;
srv_stats.n_rows_deleted.inc(size_t(trx->id));
} else {
update_statistics
= !(node->cmpl_info & UPD_NODE_NO_ORD_CHANGE);
srv_stats.n_rows_updated.inc(size_t(trx->id));
}
row_update_statistics_if_needed(node->table);
if (update_statistics) {
dict_stats_update_if_needed(node->table);
} else {
/* Always update the table modification counter. */
node->table->stat_modified_counter++;
}
que_graph_free_recursive(node);
}
if (node->is_delete) {
if (is_delete) {
/* Not protected by dict_table_stats_lock() for performance
reasons, we would rather get garbage in stat_n_rows (which is
just an estimate anyway) than protecting the following code
......@@ -2135,25 +2098,24 @@ row_update_for_mysql_using_upd_graph(
} else {
srv_stats.n_rows_deleted.inc(size_t(trx->id));
}
update_statistics = !srv_stats_include_delete_marked;
} else {
if (table->is_system_db) {
srv_stats.n_system_rows_updated.inc(size_t(trx->id));
} else {
srv_stats.n_rows_updated.inc(size_t(trx->id));
}
update_statistics
= !(node->cmpl_info & UPD_NODE_NO_ORD_CHANGE);
}
/* We update table statistics only if it is a DELETE or UPDATE
that changes indexed columns, UPDATEs that change only non-indexed
columns would not affect statistics. */
if (node->is_delete || !(node->cmpl_info & UPD_NODE_NO_ORD_CHANGE)) {
row_update_statistics_if_needed(prebuilt->table);
if (update_statistics) {
dict_stats_update_if_needed(prebuilt->table);
} else {
/* Update the table modification counter even when
non-indexed columns change if statistics is initialized. */
if (prebuilt->table->stat_initialized) {
prebuilt->table->stat_modified_counter++;
}
/* Always update the table modification counter. */
prebuilt->table->stat_modified_counter++;
}
trx->op_info = "";
......
......@@ -27,6 +27,7 @@ Created 3/14/1997 Heikki Tuuri
#include "row0purge.h"
#include "fsp0fsp.h"
#include "mach0data.h"
#include "dict0stats.h"
#include "trx0rseg.h"
#include "trx0trx.h"
#include "trx0roll.h"
......@@ -952,10 +953,13 @@ row_purge_record_func(
switch (node->rec_type) {
case TRX_UNDO_DEL_MARK_REC:
purged = row_purge_del_mark(node);
if (!purged) {
break;
if (purged) {
if (node->table->stat_initialized
&& srv_stats_include_delete_marked) {
dict_stats_update_if_needed(node->table);
}
MONITOR_INC(MONITOR_N_DEL_ROW_PURGE);
}
MONITOR_INC(MONITOR_N_DEL_ROW_PURGE);
break;
default:
if (!updated_extern) {
......
......@@ -25,6 +25,7 @@ Created 2/25/1997 Heikki Tuuri
#include "row0uins.h"
#include "dict0dict.h"
#include "dict0stats.h"
#include "dict0boot.h"
#include "dict0crea.h"
#include "trx0undo.h"
......@@ -508,6 +509,23 @@ row_undo_ins(
mutex_exit(&dict_sys->mutex);
}
if (err == DB_SUCCESS && node->table->stat_initialized) {
/* Not protected by dict_table_stats_lock() for
performance reasons, we would rather get garbage
in stat_n_rows (which is just an estimate anyway)
than protecting the following code with a latch. */
dict_table_n_rows_dec(node->table);
/* Do not attempt to update statistics when
executing ROLLBACK in the InnoDB SQL
interpreter, because in that case we would
already be holding dict_sys->mutex, which
would be acquired when updating statistics. */
if (!dict_locked) {
dict_stats_update_if_needed(node->table);
}
}
}
dict_table_close(node->table, dict_locked, FALSE);
......
/*****************************************************************************
Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
......@@ -27,6 +28,7 @@ Created 2/27/1997 Heikki Tuuri
#include "row0umod.h"
#include "dict0dict.h"
#include "dict0stats.h"
#include "dict0boot.h"
#include "trx0undo.h"
#include "trx0roll.h"
......@@ -1251,8 +1253,38 @@ row_undo_mod(
}
if (err == DB_SUCCESS) {
err = row_undo_mod_clust(node, thr);
bool update_statistics
= !(node->cmpl_info & UPD_NODE_NO_ORD_CHANGE);
if (err == DB_SUCCESS && node->table->stat_initialized) {
switch (node->rec_type) {
case TRX_UNDO_UPD_EXIST_REC:
break;
case TRX_UNDO_DEL_MARK_REC:
dict_table_n_rows_inc(node->table);
update_statistics = update_statistics
|| !srv_stats_include_delete_marked;
break;
case TRX_UNDO_UPD_DEL_REC:
dict_table_n_rows_dec(node->table);
update_statistics = update_statistics
|| !srv_stats_include_delete_marked;
break;
}
/* Do not attempt to update statistics when
executing ROLLBACK in the InnoDB SQL
interpreter, because in that case we would
already be holding dict_sys->mutex, which
would be acquired when updating statistics. */
if (update_statistics && !dict_locked) {
dict_stats_update_if_needed(node->table);
} else {
node->table->stat_modified_counter++;
}
}
}
dict_table_close(node->table, dict_locked, FALSE);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment