MDEV-28800 SIGABRT due to running out of memory for InnoDB locks

This regression was introduced in 10.6 by the following commit:
commit 898dcf93
(Cleanup the lock creation)

It removed an important optimization for lock bitmap pre-allocation.
We used to pre-allocate about 8 bytes of extra space along with every
lock object, to accommodate similar locks on newly created records on
the same page by the same transaction. When that space is exhausted, a
new lock object is created with the same 8-byte pre-allocation. With
this optimization removed we are left with only 1 byte of
pre-allocation. When a large number of records are inserted and locked
in a single page, we end up creating too many new locks, and the memory
they consume grows almost in O(n^2) order.
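
To make the effect concrete, here is a minimal toy model (not InnoDB
code: the 400-records-per-page figure is an assumption and the per-lock
header size is ignored) that counts the bitmap bytes allocated for one
page with and without an 8-byte margin:

#include <cstddef>
#include <cstdio>

// Toy model, not InnoDB code: count the bitmap bytes allocated while one
// transaction locks n_records on a single page, given the number of spare
// bitmap bytes ("margin") attached to every lock object it creates.
static std::size_t total_bitmap_bytes(std::size_t n_records,
                                      std::size_t margin_bytes)
{
    std::size_t total = 0;    // bitmap bytes allocated so far
    std::size_t covered = 0;  // highest record the current lock can describe

    for (std::size_t rec = 1; rec <= n_records; ++rec) {
        if (rec > covered) {
            // The current lock cannot describe this record: create a new
            // lock whose bitmap covers every record existing so far,
            // rounded up to whole bytes, plus the margin.
            std::size_t bitmap = (rec + 7) / 8 + margin_bytes;
            total += bitmap;
            covered = bitmap * 8;
        }
    }
    return total;
}

int main()
{
    const std::size_t n = 400;  // assumed number of records on one page
    std::printf("margin 0: %zu bitmap bytes\n", total_bitmap_bytes(n, 0));
    std::printf("margin 8: %zu bitmap bytes\n", total_bitmap_bytes(n, 8));
    return 0;
}

In this toy model, 400 records cost 50 lock objects and 1275 bitmap
bytes without the margin, versus 6 objects and 189 bytes with it: each
new lock's bitmap must cover every record created so far, which is
where the near-quadratic growth comes from, and the 8-byte margin lets
one lock absorb roughly 64 further records.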

Fix-1: Bring back LOCK_PAGE_BITMAP_MARGIN for pre-allocation.

Fix-2: Use the extra space (40 bytes) for bitmap in trx->lock.rec_pool.
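
Fix-2 works because each trx->lock.rec_pool slot is larger than a bare
lock object, so the unused tail behind the lock can hold the bitmap.
Below is a rough, self-contained sketch of that allocation decision;
the struct sizes, the pool size and the pick_lock() helper are made up
for illustration, and the real logic is in the lock_rec_create_low()
hunks further down.

#include <cstddef>
#include <cstdlib>

// Illustrative stand-ins for the real InnoDB types: the sizes are made up,
// but the point is that a rec_pool slot is deliberately larger than a bare
// lock object, leaving spare bytes behind it.
struct lock_t   { unsigned char header[24]; };
struct rec_slot { lock_t lock; unsigned char spare[40]; };

static rec_slot    rec_pool[8];     // per-transaction cache of lock objects
static std::size_t rec_cached = 0;  // pool slots already handed out

// Pick storage for a record lock that needs n_bytes of bitmap; n_bytes is
// updated to the bitmap size the chosen storage actually provides.
static lock_t *pick_lock(std::size_t &n_bytes)
{
    const std::size_t cached_bytes = sizeof(rec_slot) - sizeof(lock_t); // 40

    if (rec_cached >= sizeof rec_pool / sizeof *rec_pool
        || n_bytes > cached_bytes) {
        // Pool exhausted or bitmap too large: allocate from the heap and
        // add the margin so later inserts on the page can reuse this lock.
        n_bytes += 8;  // stands in for LOCK_PAGE_DEFAULT_BITMAP_SIZE
        return static_cast<lock_t *>(std::malloc(sizeof(lock_t) + n_bytes));
    }
    // Otherwise take a pool slot and let the bitmap use the whole spare tail.
    n_bytes = cached_bytes;
    return &rec_pool[rec_cached++].lock;
}

Heap-allocated locks get the margin added to their bitmap, while pool
locks simply declare the whole spare tail (the 40 bytes mentioned above)
as bitmap, so in both cases later inserts on the same page can usually
be recorded in an existing lock object.
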
parent 9a95f6b5
#
# MDEV-28800 SIGABRT due to running out of memory for InnoDB locks
#
CREATE TABLE t1 (col1 INT) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1),(2),(3),(4);
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
START TRANSACTION;
INSERT INTO t1 SELECT a.* FROM t1 a, t1 b, t1 c, t1 d;
SELECT CASE WHEN (POOL_SIZE - (FREE_BUFFERS + DATABASE_PAGES)) <= 10 THEN "PASSED"
ELSE (POOL_SIZE - (FREE_BUFFERS + DATABASE_PAGES)) END
FROM information_schema.innodb_buffer_pool_stats;
CASE WHEN (POOL_SIZE - (FREE_BUFFERS + DATABASE_PAGES)) <= 10 THEN "PASSED"
ELSE (POOL_SIZE - (FREE_BUFFERS + DATABASE_PAGES)) END
PASSED
COMMIT;
SELECT COUNT(*) FROM t1;
COUNT(*)
65552
DROP TABLE t1;
--source include/have_innodb.inc
--source include/have_innodb_16k.inc
--echo #
--echo # MDEV-28800 SIGABRT due to running out of memory for InnoDB locks
--echo #
CREATE TABLE t1 (col1 INT) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1),(2),(3),(4);
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
START TRANSACTION;
# Insert 64K records
INSERT INTO t1 SELECT a.* FROM t1 a, t1 b, t1 c, t1 d;
# The check needs to be adjusted if we start using more memory for locks.
# It needs 9 pages for the 16k page size, so we set the limit at 10.
SELECT CASE WHEN (POOL_SIZE - (FREE_BUFFERS + DATABASE_PAGES)) <= 10 THEN "PASSED"
ELSE (POOL_SIZE - (FREE_BUFFERS + DATABASE_PAGES)) END
FROM information_schema.innodb_buffer_pool_stats;
COMMIT;
SELECT COUNT(*) FROM t1;
DROP TABLE t1;
@@ -1242,6 +1242,13 @@ lock_rec_create_low(
                 type_mode = type_mode & ~(LOCK_GAP | LOCK_REC_NOT_GAP);
         }
 
+        /* Extra bitmap size in bytes over and above the current number of
+        records when a record lock is created. 8 x LOCK_PAGE_DEFAULT_BITMAP_SIZE
+        extra record locks of same type for newly inserted records can be added
+        without needing to create a new lock object. Useful when the number of
+        records in a page is growing. */
+        static constexpr size_t LOCK_PAGE_DEFAULT_BITMAP_SIZE = 8;
+
         if (UNIV_LIKELY(!(type_mode & (LOCK_PREDICATE | LOCK_PRDT_PAGE)))) {
                 n_bytes = (page_dir_get_n_heap(page) + 7) / 8;
         } else {
@@ -1270,13 +1277,19 @@ lock_rec_create_low(
         ut_ad(trx->mutex_is_owner());
         ut_ad(trx->state != TRX_STATE_NOT_STARTED);
 
+        auto cached_bytes = sizeof *trx->lock.rec_pool - sizeof *lock;
+
         if (trx->lock.rec_cached >= UT_ARR_SIZE(trx->lock.rec_pool)
-            || sizeof *lock + n_bytes > sizeof *trx->lock.rec_pool) {
+            || n_bytes > cached_bytes) {
+                n_bytes += LOCK_PAGE_DEFAULT_BITMAP_SIZE;
                 lock = static_cast<lock_t*>(
                         mem_heap_alloc(trx->lock.lock_heap,
                                        sizeof *lock + n_bytes));
         } else {
                 lock = &trx->lock.rec_pool[trx->lock.rec_cached++].lock;
+                /* Use all the extra bytes for lock bitmap. */
+                ut_ad(n_bytes <= cached_bytes);
+                n_bytes = cached_bytes;
         }
 
         lock->trx = trx;