Commit 41cde4fe authored by Sujatha's avatar Sujatha

MDEV-18514: Assertion `!writer.checksum_len || writer.remains == 0' failed

Analysis:
========
'max_binlog_cache_size' is configured and a huge transaction is executed. When
the transaction specific events size exceeds 'max_binlog_cache_size' the event
cannot be written to the binary log cache and cache write error is raised.
Upon cache write error the statement is rolled back and the transaction cache
should be truncated to a previous statement specific position.  The truncate
operation should reset the cache to earlier valid positions and flush the new
changes. Even though the flush is successful, the cache write error flag is
still set. The truncate code interprets this stale error flag as a cache flush
failure and returns abruptly without modifying the write cache parameters.
Hence the cache is in an invalid state. When a COMMIT statement is executed in this
session it tries to flush the contents of transaction cache to binary log.
Since the cache contains partial events, the cache write operation triggers
the 'writer.remains' assertion.

Fix:
===
Binlog truncate function resets the cache to a specified size. As a first step
of truncation, clear the cache write error flag that was raised during earlier
execution. With this change, new errors that surface during cache truncation
can be clearly identified.
parent 8317f77c
include/master-slave.inc
[connection master]
connection master;
SET GLOBAL max_binlog_cache_size = 65536;
CREATE TABLE t1(a INT PRIMARY KEY, data VARCHAR(30000)) ENGINE=INNODB;
ERROR HY000: Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again
SET GLOBAL max_binlog_cache_size= ORIGINAL_VALUE;
DROP TABLE t1;
include/rpl_end.inc
# ==== Purpose ====
#
# Test verifies that when flushing an event to binary log fails the transaction
# is successfully rolled back and following COMMIT command doesn't report any
# assert.
#
# ==== Implementation ====
#
# Steps:
# 0 - SET max_binlog_cache_size=64K
# 1 - Create an Innodb table and insert required amount of data. Execute an
# UPDATE operation which generates a big update event whose size exceeds
# max_binlog_cache_size.
# 2 - Wait for error 1197. Execute COMMIT command.
# 3 - COMMIT should be successful.
#
# ==== References ====
#
# MDEV-18514: Assertion `!writer.checksum_len || writer.remains == 0' failed
#
--source include/have_innodb.inc
--source include/have_binlog_format_row.inc
--source include/master-slave.inc
--connection master
# Remember the server's current value so it can be restored during cleanup.
let $old_max_binlog_cache_size= query_get_value(SHOW VARIABLES LIKE "max_binlog_cache_size", Value, 1);
# Step 0: shrink the binlog cache to 64K so a large UPDATE overflows it.
SET GLOBAL max_binlog_cache_size = 65536;
CREATE TABLE t1(a INT PRIMARY KEY, data VARCHAR(30000)) ENGINE=INNODB;
# Build two 6000-character string literals (quoted) used to generate
# row events large enough to exceed max_binlog_cache_size in aggregate.
let $data = `select concat('"', repeat('a',6000), '"')`;
let $data1 = `select concat('"', repeat('b',6000), '"')`;
--disable_query_log
# Step 1: insert enough rows that a full-table UPDATE produces a
# transaction cache larger than 64K (5 rows x ~12K of data each).
eval INSERT INTO t1 (a, data) VALUES (1, CONCAT($data, $data));
eval INSERT INTO t1 (a, data) VALUES (2, CONCAT($data, $data));
eval INSERT INTO t1 (a, data) VALUES (3, CONCAT($data, $data));
eval INSERT INTO t1 (a, data) VALUES (4, CONCAT($data, $data));
eval INSERT INTO t1 (a, data) VALUES (5, CONCAT($data, $data));
START TRANSACTION;
# Step 2: the UPDATE must fail with ER_TRANS_CACHE_FULL (error 1197);
# the statement is rolled back and the cache truncated.
--error ER_TRANS_CACHE_FULL
eval UPDATE t1 SET data=$data1;
# Step 3: COMMIT must succeed without hitting the
# `!writer.checksum_len || writer.remains == 0' assertion (MDEV-18514).
COMMIT;
--enable_query_log
# Cleanup: restore the original cache size; mask the value in the result.
--replace_result $old_max_binlog_cache_size ORIGINAL_VALUE
--eval SET GLOBAL max_binlog_cache_size= $old_max_binlog_cache_size
DROP TABLE t1;
--source include/rpl_end.inc
......@@ -448,6 +448,7 @@ class binlog_cache_data
// Truncate the binlog cache back to a previously saved valid position,
// e.g. after a statement is rolled back because it overflowed
// max_binlog_cache_size (ER_TRANS_CACHE_FULL).
// NOTE: this is a diff fragment — some interior lines are elided at the
// hunk marker below; do not assume the body shown here is complete.
void truncate(my_off_t pos)
{
DBUG_PRINT("info", ("truncating to position %lu", (ulong) pos));
// MDEV-18514: clear any cache-write error recorded during the failed
// statement BEFORE truncating. Otherwise the stale error flag is
// misread as a failure of the truncation/flush itself, the function
// bails out early, and the cache is left in an invalid state that
// later fires the `writer.remains == 0' assertion on COMMIT.
cache_log.error=0;
if (pending())
{
delete pending();
......@@ -456,7 +457,7 @@ class binlog_cache_data
reinit_io_cache(&cache_log, WRITE_CACHE, pos, 0, 0);
// Re-apply the configured size limit after reinit — presumably
// reinit_io_cache resets end_of_file; TODO confirm against mysys.
cache_log.end_of_file= saved_max_binlog_cache_size;
}
// Copy operations declared (and presumably left undefined) to forbid
// copying of binlog_cache_data — NOTE(review): pre-C++11 "= delete" idiom.
binlog_cache_data& operator=(const binlog_cache_data& info);
binlog_cache_data(const binlog_cache_data& info);
};
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment