Commit 7e07e38c authored by Marko Mäkelä

Merge 10.2 into 10.3

parents e976f461 0eb38243
@@ -739,4 +739,15 @@ t2 CREATE TABLE `t2` (
CREATE TABLE t2 (f1 INT NOT NULL)ENGINE=InnoDB;
ERROR 42S01: Table 't2' already exists
DROP TABLE t2, t1;
+#
+# MDEV-23685 SIGSEGV on ADD FOREIGN KEY after failed attempt
+# to create unique key on virtual column
+#
+CREATE TABLE t1 (pk INT PRIMARY KEY, a INT, b INT AS (a)) ENGINE=InnODB;
+INSERT INTO t1 (pk,a) VALUES (1,10),(2,10);
+ALTER TABLE t1 ADD UNIQUE INDEX ind9 (b), LOCK=SHARED;
+ERROR 23000: Duplicate entry '10' for key 'ind9'
+SET FOREIGN_KEY_CHECKS= 0;
+ALTER TABLE t1 ADD FOREIGN KEY (a) REFERENCES t1 (pk);
+DROP TABLE t1;
# End of 10.2 tests
@@ -720,6 +720,19 @@ SHOW CREATE TABLE t2;
CREATE TABLE t2 (f1 INT NOT NULL)ENGINE=InnoDB;
DROP TABLE t2, t1;
+--echo #
+--echo # MDEV-23685 SIGSEGV on ADD FOREIGN KEY after failed attempt
+--echo # to create unique key on virtual column
+--echo #
+CREATE TABLE t1 (pk INT PRIMARY KEY, a INT, b INT AS (a)) ENGINE=InnODB;
+INSERT INTO t1 (pk,a) VALUES (1,10),(2,10);
+--error ER_DUP_ENTRY
+ALTER TABLE t1 ADD UNIQUE INDEX ind9 (b), LOCK=SHARED;
+SET FOREIGN_KEY_CHECKS= 0;
+ALTER TABLE t1 ADD FOREIGN KEY (a) REFERENCES t1 (pk);
+DROP TABLE t1;
--echo # End of 10.2 tests
--source include/wait_until_count_sessions.inc
@@ -1082,8 +1082,7 @@ btr_create(
if (type & DICT_IBUF) {
/* Allocate first the ibuf header page */
buf_block_t* ibuf_hdr_block = fseg_create(
-space, 0,
-IBUF_HEADER + IBUF_TREE_SEG_HEADER, mtr);
+space, IBUF_HEADER + IBUF_TREE_SEG_HEADER, mtr);
if (ibuf_hdr_block == NULL) {
return(FIL_NULL);
@@ -1114,7 +1113,7 @@ btr_create(
flst_init(block->frame + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
mtr);
} else {
-block = fseg_create(space, 0,
+block = fseg_create(space,
PAGE_HEADER + PAGE_BTR_SEG_TOP, mtr);
if (block == NULL) {
@@ -1123,8 +1122,9 @@ btr_create(
buf_block_dbg_add_level(block, SYNC_TREE_NODE_NEW);
-if (!fseg_create(space, block->page.id.page_no(),
-PAGE_HEADER + PAGE_BTR_SEG_LEAF, mtr)) {
+if (!fseg_create(space,
+PAGE_HEADER + PAGE_BTR_SEG_LEAF, mtr,
+false, block)) {
/* Not enough space for new segment, free root
segment before return. */
btr_free_root(block, mtr,
......
@@ -5626,14 +5626,13 @@ buf_page_create(
buf_frame_t* frame;
buf_block_t* block;
buf_block_t* free_block = NULL;
-buf_pool_t* buf_pool = buf_pool_get(page_id);
+buf_pool_t* buf_pool= buf_pool_get(page_id);
rw_lock_t* hash_lock;
ut_ad(mtr->is_active());
ut_ad(page_id.space() != 0 || !page_size.is_compressed());
+loop:
free_block = buf_LRU_get_free_block(buf_pool);
buf_pool_mutex_enter(buf_pool);
hash_lock = buf_page_hash_lock_get(buf_pool, page_id);
@@ -5645,20 +5644,67 @@ buf_page_create(
&& buf_page_in_file(&block->page)
&& !buf_pool_watch_is_sentinel(buf_pool, &block->page)) {
ut_d(block->page.file_page_was_freed = FALSE);
+buf_page_state page_state = buf_block_get_state(block);
#ifdef BTR_CUR_HASH_ADAPT
-bool drop_hash_entry =
-(block->page.state == BUF_BLOCK_FILE_PAGE
-&& block->index);
-if (drop_hash_entry) {
-mutex_enter(&block->mutex);
-/* Avoid a hang if I/O is going on. Release
-the buffer pool mutex and page hash lock
-and wait for I/O to complete */
-while (buf_block_get_io_fix(block) != BUF_IO_NONE) {
-buf_block_fix(block);
-mutex_exit(&block->mutex);
+const dict_index_t *drop_hash_entry= NULL;
+#endif
+switch (page_state) {
+default:
+ut_ad(0);
+break;
+case BUF_BLOCK_ZIP_PAGE:
+case BUF_BLOCK_ZIP_DIRTY:
+buf_block_init_low(free_block);
+mutex_enter(&buf_pool->zip_mutex);
+buf_page_mutex_enter(free_block);
+if (buf_page_get_io_fix(&block->page) != BUF_IO_NONE) {
+mutex_exit(&buf_pool->zip_mutex);
+rw_lock_x_unlock(hash_lock);
+buf_LRU_block_free_non_file_page(free_block);
+buf_pool_mutex_exit(buf_pool);
+buf_page_mutex_exit(free_block);
+goto loop;
+}
+rw_lock_x_lock(&free_block->lock);
+buf_relocate(&block->page, &free_block->page);
+if (page_state == BUF_BLOCK_ZIP_DIRTY) {
+ut_ad(block->page.in_flush_list);
+ut_ad(block->page.oldest_modification > 0);
+buf_flush_relocate_on_flush_list(
+&block->page, &free_block->page);
+} else {
+ut_ad(block->page.oldest_modification == 0);
+ut_ad(!block->page.in_flush_list);
+#ifdef UNIV_DEBUG
+UT_LIST_REMOVE(
+buf_pool->zip_clean, &block->page);
+#endif
+}
+free_block->page.state = BUF_BLOCK_FILE_PAGE;
+mutex_exit(&buf_pool->zip_mutex);
+free_block->lock_hash_val = lock_rec_hash(
+page_id.space(), page_id.page_no());
+buf_unzip_LRU_add_block(free_block, false);
+buf_page_free_descriptor(&block->page);
+block = free_block;
+buf_block_fix(block);
+buf_page_mutex_exit(free_block);
+free_block = NULL;
+break;
+case BUF_BLOCK_FILE_PAGE:
+buf_block_fix(block);
+const int32_t num_fix_count =
+mtr->get_fix_count(block) + 1;
+buf_page_mutex_enter(block);
+while (buf_block_get_io_fix(block) != BUF_IO_NONE
+|| (num_fix_count
+!= block->page.buf_fix_count)) {
+buf_page_mutex_exit(block);
buf_pool_mutex_exit(buf_pool);
rw_lock_x_unlock(hash_lock);
@@ -5666,33 +5712,39 @@ buf_page_create(
buf_pool_mutex_enter(buf_pool);
rw_lock_x_lock(hash_lock);
-mutex_enter(&block->mutex);
-buf_block_unfix(block);
+buf_page_mutex_enter(block);
}
rw_lock_x_lock(&block->lock);
-mutex_exit(&block->mutex);
-}
+buf_page_mutex_exit(block);
+#ifdef BTR_CUR_HASH_ADAPT
+drop_hash_entry = block->index;
#endif
+break;
+}
/* Page can be found in buf_pool */
buf_pool_mutex_exit(buf_pool);
rw_lock_x_unlock(hash_lock);
-buf_block_free(free_block);
+if (free_block) {
+buf_block_free(free_block);
+}
#ifdef BTR_CUR_HASH_ADAPT
if (drop_hash_entry) {
btr_search_drop_page_hash_index(block);
+rw_lock_x_unlock(&block->lock);
}
#endif /* BTR_CUR_HASH_ADAPT */
-if (!recv_recovery_is_on()) {
-return buf_page_get_with_no_latch(page_id, page_size,
-mtr);
+#ifdef UNIV_DEBUG
+if (!fsp_is_system_temporary(page_id.space())) {
+rw_lock_s_lock_nowait(
+&block->debug_latch,
+__FILE__, __LINE__);
}
+#endif /* UNIV_DEBUG */
+mtr_memo_push(mtr, block, MTR_MEMO_PAGE_X_FIX);
-mutex_exit(&recv_sys->mutex);
-block = buf_page_get_with_no_latch(page_id, page_size, mtr);
-mutex_enter(&recv_sys->mutex);
return block;
}
@@ -5707,6 +5759,8 @@ buf_page_create(
buf_page_init(buf_pool, page_id, page_size, block);
+rw_lock_x_lock(&block->lock);
rw_lock_x_unlock(hash_lock);
/* The block must be put to the LRU list */
@@ -5724,7 +5778,6 @@ buf_page_create(
by IO-fixing and X-latching the block. */
buf_page_set_io_fix(&block->page, BUF_IO_READ);
-rw_lock_x_lock(&block->lock);
buf_page_mutex_exit(block);
/* buf_pool->mutex may be released and reacquired by
@@ -5746,12 +5799,11 @@ buf_page_create(
buf_unzip_LRU_add_block(block, FALSE);
buf_page_set_io_fix(&block->page, BUF_IO_NONE);
-rw_lock_x_unlock(&block->lock);
}
buf_pool_mutex_exit(buf_pool);
-mtr_memo_push(mtr, block, MTR_MEMO_BUF_FIX);
+mtr_memo_push(mtr, block, MTR_MEMO_PAGE_X_FIX);
buf_page_set_accessed(&block->page);
......
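Editorial note: taken together, the buf0buf.cc changes above mean buf_page_create() now returns a block that is already X-latched and registered in the mini-transaction as MTR_MEMO_PAGE_X_FIX, so callers no longer follow up with buf_page_get(..., RW_SX_LATCH, ...) (see the fsp0fsp.cc hunks further down, where those calls are removed). A minimal standalone sketch of this "create returns the page already latched" pattern; all names are invented for illustration and none of this is InnoDB code:

```cpp
#include <iostream>
#include <mutex>
#include <utility>

// Toy page type: the creator returns it together with the lock already
// held, mirroring how buf_page_create() now hands back an X-latched block.
struct Page {
  std::mutex latch;
  int payload = 0;
};

// Create/initialize a page and return it with the latch still held,
// so the caller never performs a separate "get + latch" step.
std::pair<Page*, std::unique_lock<std::mutex>> page_create(Page& storage) {
  std::unique_lock<std::mutex> x_latch(storage.latch);  // "X-latch" the page
  storage.payload = 0;                                  // initialize it
  return {&storage, std::move(x_latch)};
}

int main() {
  Page p;
  auto [page, latch] = page_create(p);  // already latched on return
  page->payload = 42;                   // safe while the latch is held
  std::cout << page->payload << '\n';
}  // latch released here, the analogue of the mtr commit
```

Returning the held latch from the creating function closes the window in which another thread could latch the freshly created page before its creator does.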
@@ -170,6 +170,7 @@ buf_dblwr_create()
{
buf_block_t* block2;
buf_block_t* new_block;
+buf_block_t* trx_sys_block;
byte* doublewrite;
byte* fseg_header;
ulint page_no;
@@ -205,9 +206,13 @@ buf_dblwr_create()
}
}
-block2 = fseg_create(fil_system.sys_space, TRX_SYS_PAGE_NO,
-TRX_SYS_DOUBLEWRITE
-+ TRX_SYS_DOUBLEWRITE_FSEG, &mtr);
+trx_sys_block = buf_page_get(
+page_id_t(TRX_SYS_SPACE, TRX_SYS_PAGE_NO),
+univ_page_size, RW_X_LATCH, &mtr);
+block2 = fseg_create(fil_system.sys_space,
+TRX_SYS_DOUBLEWRITE + TRX_SYS_DOUBLEWRITE_FSEG,
+&mtr, false, trx_sys_block);
if (block2 == NULL) {
too_small:
......
/*****************************************************************************
Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2016, 2019, MariaDB Corporation.
+Copyright (c) 2016, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -180,7 +180,7 @@ dict_hdr_create(
/* Create the dictionary header file block in a new, allocated file
segment in the system tablespace */
-block = fseg_create(fil_system.sys_space, 0,
+block = fseg_create(fil_system.sys_space,
DICT_HDR + DICT_HDR_FSEG_HEADER, mtr);
ut_a(DICT_HDR_PAGE_NO == block->page.id.page_no());
......
@@ -6514,7 +6514,11 @@ dict_foreign_qualify_index(
return(false);
}
-if (index->type & (DICT_SPATIAL | DICT_FTS)) {
+if (index->type & (DICT_SPATIAL | DICT_FTS | DICT_CORRUPT)) {
+return false;
+}
+if (index->online_status >= ONLINE_INDEX_ABORTED) {
return false;
}
......
@@ -117,7 +117,6 @@ to minimize file space fragmentation.
@param[in] direction if the new page is needed because of
an index page split, and records are inserted there in order, into which
direction they go alphabetically: FSP_DOWN, FSP_UP, FSP_NO_DIR
-@param[in] rw_latch RW_SX_LATCH, RW_X_LATCH
@param[in,out] mtr mini-transaction
@param[in,out] init_mtr mtr or another mini-transaction in
which the page should be initialized. If init_mtr != mtr, but the page is
@@ -136,7 +135,6 @@ fseg_alloc_free_page_low(
fseg_inode_t* seg_inode,
ulint hint,
byte direction,
-rw_lock_type_t rw_latch,
mtr_t* mtr,
mtr_t* init_mtr
#ifdef UNIV_DEBUG
@@ -210,7 +208,9 @@ xdes_set_bit(
ulint bit_index;
ulint descr_byte;
-ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_SX_FIX));
+ut_ad(mtr_memo_contains_page(
+mtr, descr,
+MTR_MEMO_PAGE_SX_FIX | MTR_MEMO_PAGE_X_FIX));
ut_ad((bit == XDES_FREE_BIT) || (bit == XDES_CLEAN_BIT));
ut_ad(offset < FSP_EXTENT_SIZE);
@@ -337,7 +337,9 @@ xdes_set_state(
ut_ad(descr && mtr);
ut_ad(state >= XDES_FREE);
ut_ad(state <= XDES_FSEG);
-ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_SX_FIX));
+ut_ad(mtr_memo_contains_page(
+mtr, descr,
+MTR_MEMO_PAGE_SX_FIX | MTR_MEMO_PAGE_X_FIX));
mlog_write_ulint(descr + XDES_STATE, state, MLOG_4BYTES, mtr);
}
@@ -374,7 +376,9 @@ xdes_init(
ulint i;
ut_ad(descr && mtr);
-ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_SX_FIX));
+ut_ad(mtr_memo_contains_page(
+mtr, descr,
+MTR_MEMO_PAGE_SX_FIX | MTR_MEMO_PAGE_X_FIX));
ut_ad((XDES_SIZE - XDES_BITMAP) % 4 == 0);
for (i = XDES_BITMAP; i < XDES_SIZE; i += 4) {
@@ -409,7 +413,8 @@ xdes_get_descriptor_with_space_hdr(
ulint descr_page_no;
page_t* descr_page;
ut_ad(mtr_memo_contains(mtr, space, MTR_MEMO_SPACE_X_LOCK));
-ut_ad(mtr_memo_contains_page(mtr, sp_header, MTR_MEMO_PAGE_SX_FIX));
+ut_ad(mtr_memo_contains_page(mtr, sp_header, MTR_MEMO_PAGE_SX_FIX)
+|| mtr_memo_contains_page(mtr, sp_header, MTR_MEMO_PAGE_X_FIX));
ut_ad(page_offset(sp_header) == FSP_HEADER_OFFSET);
/* Read free limit and space size */
limit = mach_read_from_4(sp_header + FSP_FREE_LIMIT);
@@ -659,7 +664,6 @@ void fsp_header_init(fil_space_t* space, ulint size, mtr_t* mtr)
mtr_x_lock_space(space, mtr);
buf_block_t* block = buf_page_create(page_id, page_size, mtr);
-buf_page_get(page_id, page_size, RW_SX_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_FSP_PAGE);
space->size_in_header = size;
@@ -1002,9 +1006,6 @@ fsp_fill_free_list(
block = buf_page_create(
page_id, page_size, mtr);
-buf_page_get(
-page_id, page_size, RW_SX_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_FSP_PAGE);
fsp_init_file_page(space, block, mtr);
@@ -1022,9 +1023,6 @@ fsp_fill_free_list(
block = buf_page_create(
page_id, page_size, mtr);
-buf_page_get(
-page_id, page_size, RW_SX_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_FSP_PAGE);
fsp_init_file_page(space, block, mtr);
@@ -1170,7 +1168,6 @@ x-latched only by mtr, and freed in mtr in that case.
@param[in,out] space tablespace
@param[in] offset page number of the allocated page
@param[in] page_size page size of the allocated page
-@param[in] rw_latch RW_SX_LATCH, RW_X_LATCH
@param[in,out] mtr mini-transaction of the allocation
@param[in,out] init_mtr mini-transaction for initializing the page
@return block, initialized if init_mtr==mtr
@@ -1181,7 +1178,6 @@ fsp_page_create(
fil_space_t* space,
page_no_t offset,
const page_size_t& page_size,
-rw_lock_type_t rw_latch,
mtr_t* mtr,
mtr_t* init_mtr)
{
@@ -1191,26 +1187,10 @@ fsp_page_create(
page_size, init_mtr);
ut_d(bool latched = mtr_memo_contains_flagged(mtr, block,
-MTR_MEMO_PAGE_X_FIX
-| MTR_MEMO_PAGE_SX_FIX));
-ut_ad(rw_latch == RW_X_LATCH || rw_latch == RW_SX_LATCH);
-/* Mimic buf_page_get(), but avoid the buf_pool->page_hash lookup. */
-if (rw_latch == RW_X_LATCH) {
-rw_lock_x_lock(&block->lock);
-} else {
-rw_lock_sx_lock(&block->lock);
-}
-buf_block_buf_fix_inc(block, __FILE__, __LINE__);
-mtr_memo_push(init_mtr, block, rw_latch == RW_X_LATCH
-? MTR_MEMO_PAGE_X_FIX : MTR_MEMO_PAGE_SX_FIX);
+MTR_MEMO_PAGE_X_FIX));
if (init_mtr == mtr
-|| (rw_latch == RW_X_LATCH
-? rw_lock_get_x_lock_count(&block->lock) == 1
-: rw_lock_get_sx_lock_count(&block->lock) == 1)) {
+|| rw_lock_get_x_lock_count(&block->lock) == 1) {
/* Initialize the page, unless it was already
SX-latched in mtr. (In this case, we would want to
@@ -1227,7 +1207,6 @@ The page is marked as used.
@param[in,out] space tablespace
@param[in] page_size page size
@param[in] hint hint of which page would be desirable
-@param[in] rw_latch RW_SX_LATCH, RW_X_LATCH
@param[in,out] mtr mini-transaction
@param[in,out] init_mtr mini-transaction in which the page should be
initialized (may be the same as mtr)
@@ -1241,7 +1220,6 @@ fsp_alloc_free_page(
fil_space_t* space,
const page_size_t& page_size,
ulint hint,
-rw_lock_type_t rw_latch,
mtr_t* mtr,
mtr_t* init_mtr)
{
@@ -1333,8 +1311,7 @@ fsp_alloc_free_page(
}
fsp_alloc_from_free_frag(header, descr, free, mtr);
-return(fsp_page_create(space, page_no, page_size, rw_latch,
-mtr, init_mtr));
+return(fsp_page_create(space, page_no, page_size, mtr, init_mtr));
}
/** Frees a single page of a space.
@@ -1571,8 +1548,7 @@ fsp_alloc_seg_inode_page(
const page_size_t page_size(space->flags);
-block = fsp_alloc_free_page(
-space, page_size, 0, RW_SX_LATCH, mtr, mtr);
+block = fsp_alloc_free_page(space, page_size, 0, mtr, mtr);
if (block == NULL) {
@@ -1580,7 +1556,7 @@ fsp_alloc_seg_inode_page(
}
buf_block_dbg_add_level(block, SYNC_FSP_PAGE);
-ut_ad(rw_lock_get_sx_lock_count(&block->lock) == 1);
+ut_ad(rw_lock_get_x_lock_count(&block->lock) == 1);
mlog_write_ulint(block->frame + FIL_PAGE_TYPE, FIL_PAGE_INODE,
MLOG_2BYTES, mtr);
@@ -1889,31 +1865,22 @@ fseg_get_n_frag_pages(
return(count);
}
-/**********************************************************************//**
-Creates a new segment.
-@return the block where the segment header is placed, x-latched, NULL
-if could not create segment because of lack of space */
+/** Create a new segment.
+@param space tablespace
+@param byte_offset byte offset of the created segment header
+@param mtr mini-transaction
+@param has_done_reservation whether fsp_reserve_free_extents() was invoked
+@param block block where segment header is placed,
+or NULL to allocate an additional page for that
+@return the block where the segment header is placed, x-latched
+@retval NULL if could not create segment because of lack of space */
buf_block_t*
-fseg_create(
-fil_space_t* space, /*!< in,out: tablespace */
-ulint page, /*!< in: page where the segment header is placed: if
-this is != 0, the page must belong to another segment,
-if this is 0, a new page will be allocated and it
-will belong to the created segment */
-ulint byte_offset, /*!< in: byte offset of the created segment header
-on the page */
-mtr_t* mtr,
-bool has_done_reservation) /*!< in: whether the caller
-has already done the reservation for the pages with
-fsp_reserve_free_extents (at least 2 extents: one for
-the inode and the other for the segment) then there is
-no need to do the check for this individual
-operation */
+fseg_create(fil_space_t *space, ulint byte_offset, mtr_t *mtr,
+bool has_done_reservation, buf_block_t *block)
{
fsp_header_t* space_header;
fseg_inode_t* inode;
ib_id_t seg_id;
-buf_block_t* block = 0; /* remove warning */
fseg_header_t* header = 0; /* remove warning */
ulint n_reserved;
ulint i;
@@ -1921,6 +1888,7 @@ fseg_create(
DBUG_ENTER("fseg_create");
ut_ad(mtr);
+ut_ad(byte_offset >= FIL_PAGE_DATA);
ut_ad(byte_offset + FSEG_HEADER_SIZE
<= srv_page_size - FIL_PAGE_DATA_END);
@@ -1928,14 +1896,11 @@ fseg_create(
const page_size_t page_size(space->flags);
ut_d(space->modify_check(*mtr));
-if (page != 0) {
-block = buf_page_get(page_id_t(space->id, page), page_size,
-RW_SX_LATCH, mtr);
+if (block) {
header = byte_offset + buf_block_get_frame(block);
-const ulint type = space->id == TRX_SYS_SPACE
-&& page == TRX_SYS_PAGE_NO
+const ulint type = block->page.id == page_id_t(TRX_SYS_SPACE,
+TRX_SYS_PAGE_NO)
? FIL_PAGE_TYPE_TRX_SYS
: FIL_PAGE_TYPE_SYS;
@@ -1976,9 +1941,9 @@ fseg_create(
fseg_set_nth_frag_page_no(inode, i, FIL_NULL, mtr);
}
-if (page == 0) {
+if (!block) {
block = fseg_alloc_free_page_low(space, page_size,
-inode, 0, FSP_UP, RW_SX_LATCH,
+inode, 0, FSP_UP,
mtr, mtr
#ifdef UNIV_DEBUG
, has_done_reservation
@@ -1996,7 +1961,7 @@ fseg_create(
goto funct_exit;
}
-ut_ad(rw_lock_get_sx_lock_count(&block->lock) == 1);
+ut_ad(rw_lock_get_x_lock_count(&block->lock) == 1);
header = byte_offset + buf_block_get_frame(block);
mlog_write_ulint(buf_block_get_frame(block) + FIL_PAGE_TYPE,
@@ -2215,7 +2180,6 @@ minimize file space fragmentation.
@param[in] direction if the new page is needed because of
an index page split, and records are inserted there in order, into which
direction they go alphabetically: FSP_DOWN, FSP_UP, FSP_NO_DIR
-@param[in] rw_latch RW_SX_LATCH, RW_X_LATCH
@param[in,out] mtr mini-transaction
@param[in,out] init_mtr mtr or another mini-transaction in
which the page should be initialized. If init_mtr != mtr, but the page is
@@ -2234,7 +2198,6 @@ fseg_alloc_free_page_low(
fseg_inode_t* seg_inode,
ulint hint,
byte direction,
-rw_lock_type_t rw_latch,
mtr_t* mtr,
mtr_t* init_mtr
#ifdef UNIV_DEBUG
@@ -2377,7 +2340,7 @@ fseg_alloc_free_page_low(
/* 6. We allocate an individual page from the space
===================================================*/
buf_block_t* block = fsp_alloc_free_page(
-space, page_size, hint, rw_latch, mtr, init_mtr);
+space, page_size, hint, mtr, init_mtr);
ut_ad(!has_done_reservation || block != NULL);
@@ -2458,8 +2421,7 @@ fseg_alloc_free_page_low(
fseg_mark_page_used(seg_inode, ret_page, ret_descr, mtr);
}
-return(fsp_page_create(space, ret_page, page_size, rw_latch,
-mtr, init_mtr));
+return(fsp_page_create(space, ret_page, page_size, mtr, init_mtr));
}
/**********************************************************************//**
@@ -2514,7 +2476,7 @@ fseg_alloc_free_page_general(
block = fseg_alloc_free_page_low(space, page_size,
inode, hint, direction,
-RW_X_LATCH, mtr, init_mtr
+mtr, init_mtr
#ifdef UNIV_DEBUG
, has_done_reservation
#endif /* UNIV_DEBUG */
@@ -3209,7 +3171,7 @@ fseg_print_low(
ulint page_no;
ib_id_t seg_id;
-ut_ad(mtr_memo_contains_page(mtr, inode, MTR_MEMO_PAGE_SX_FIX));
+ut_ad(mtr_memo_contains_page(mtr, inode, MTR_MEMO_PAGE_X_FIX));
space = page_get_space_id(page_align(inode));
page_no = page_get_page_no(page_align(inode));
......
@@ -1047,6 +1047,25 @@ struct dict_index_t{
/** @return whether the index includes virtual columns */
bool has_virtual() const { return type & DICT_VIRTUAL; }
+/** @return the position of DB_TRX_ID */
+uint16_t db_trx_id() const {
+DBUG_ASSERT(is_primary());
+DBUG_ASSERT(n_uniq);
+return n_uniq;
+}
+/** @return the position of DB_ROLL_PTR */
+uint16_t db_roll_ptr() const
+{
+return static_cast<uint16_t>(db_trx_id() + 1);
+}
+/** @return the offset of the metadata BLOB field,
+or the first user field after the PRIMARY KEY,DB_TRX_ID,DB_ROLL_PTR */
+uint16_t first_user_field() const
+{
+return static_cast<uint16_t>(db_trx_id() + 2);
+}
/** @return whether the index is corrupted */
inline bool is_corrupted() const;
......
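Editorial note: the new dict_index_t accessors encode the clustered-index record layout — DB_TRX_ID sits right after the n_uniq PRIMARY KEY fields, DB_ROLL_PTR follows it, and user (non-key) fields start after that. A standalone illustration of just that arithmetic, using a made-up toy_index struct rather than the real dict_index_t:

```cpp
#include <cassert>
#include <cstdint>
#include <iostream>

// Toy stand-in for dict_index_t: only the field-position arithmetic of the
// new accessors is modelled, nothing else from InnoDB.
struct toy_index {
  uint16_t n_uniq;  // number of fields in the PRIMARY KEY

  uint16_t db_trx_id() const { assert(n_uniq); return n_uniq; }
  uint16_t db_roll_ptr() const { return uint16_t(db_trx_id() + 1); }
  uint16_t first_user_field() const { return uint16_t(db_trx_id() + 2); }
};

int main() {
  // PRIMARY KEY(a, b): fields 0 and 1 are the key, field 2 is DB_TRX_ID,
  // field 3 is DB_ROLL_PTR, field 4 is the first non-key user column.
  toy_index pk{2};
  std::cout << pk.db_trx_id() << ' ' << pk.db_roll_ptr() << ' '
            << pk.first_user_field() << '\n';  // prints "2 3 4"
}
```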
@@ -383,26 +383,18 @@ fsp_header_init_fields(
void fsp_header_init(fil_space_t* space, ulint size, mtr_t* mtr)
MY_ATTRIBUTE((nonnull));
-/**********************************************************************//**
-Creates a new segment.
-@return the block where the segment header is placed, x-latched, NULL
-if could not create segment because of lack of space */
+/** Create a new segment.
+@param space tablespace
+@param byte_offset byte offset of the created segment header
+@param mtr mini-transaction
+@param has_done_reservation whether fsp_reserve_free_extents() was invoked
+@param block block where segment header is placed,
+or NULL to allocate an additional page for that
+@return the block where the segment header is placed, x-latched
+@retval NULL if could not create segment because of lack of space */
buf_block_t*
-fseg_create(
-fil_space_t* space, /*!< in,out: tablespace */
-ulint page, /*!< in: page where the segment header is placed: if
-this is != 0, the page must belong to another segment,
-if this is 0, a new page will be allocated and it
-will belong to the created segment */
-ulint byte_offset, /*!< in: byte offset of the created segment header
-on the page */
-mtr_t* mtr,
-bool has_done_reservation = false); /*!< in: whether the caller
-has already done the reservation for the pages with
-fsp_reserve_free_extents (at least 2 extents: one for
-the inode and the other for the segment) then there is
-no need to do the check for this individual
-operation */
+fseg_create(fil_space_t *space, ulint byte_offset, mtr_t *mtr,
+bool has_done_reservation= false, buf_block_t *block= NULL);
/**********************************************************************//**
Calculates the number of pages reserved by a segment, and how many pages are
......
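Editorial note: with the old page-number argument gone, a caller of fseg_create() now either passes a block it already holds latched (the segment header is placed on that page) or NULL, in which case a page is allocated for the header; has_done_reservation keeps its previous meaning, and both trailing parameters get defaults in the header. A toy sketch of the two call patterns only, with invented names, not the actual InnoDB API:

```cpp
#include <cstddef>
#include <iostream>
#include <memory>

// Invented toy model of the new calling convention; nothing here is InnoDB code.
struct Block { std::size_t page_no; };

// Either place the "segment header" on the caller-supplied block, or
// allocate a fresh page when no block is given (block == nullptr), the way
// fseg_create() now does.
std::shared_ptr<Block> create_segment(std::size_t byte_offset,
                                      bool has_done_reservation = false,
                                      std::shared_ptr<Block> block = nullptr) {
  (void)byte_offset;
  (void)has_done_reservation;
  if (!block) {
    block = std::make_shared<Block>(Block{42});  // pretend page allocation
  }
  return block;
}

int main() {
  auto header_page = std::make_shared<Block>(Block{5});  // an existing page
  auto a = create_segment(100, false, header_page);  // header on given page
  auto b = create_segment(100, true);                // allocate a new page
  std::cout << a->page_no << ' ' << b->page_no << '\n';  // prints "5 42"
}
```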
@@ -437,6 +437,10 @@ struct mtr_t {
static inline bool is_block_dirtied(const buf_block_t* block)
MY_ATTRIBUTE((warn_unused_result));
+/** Get the buffer fix count for the block added by this mtr.
+@param[in] block block to be checked
+@return number of buffer count added by this mtr */
+int32_t get_fix_count(const buf_block_t *block);
private:
/** Prepare to write the mini-transaction log to the redo log buffer.
@return number of bytes to write in finish_write() */
......
@@ -2323,7 +2323,6 @@ static buf_block_t* recv_recovery_create_page_low(const page_id_t page_id,
{
i.created= true;
buf_block_dbg_add_level(block, SYNC_NO_ORDER_CHECK);
-mtr.x_latch_at_savepoint(0, block);
recv_recover_page(block, mtr, recv_addr, i.lsn);
ut_ad(mtr.has_committed());
}
......
@@ -308,6 +308,24 @@ struct DebugCheck {
};
#endif
+/** Find buffer fix count of the given block acquired by the
+mini-transaction */
+struct FindBlock
+{
+int32_t num_fix;
+const buf_block_t *const block;
+FindBlock(const buf_block_t *block_buf): num_fix(0), block(block_buf) {}
+bool operator()(const mtr_memo_slot_t* slot)
+{
+if (slot->object == block)
+ut_d(if (slot->type != MTR_MEMO_MODIFY))
+num_fix++;
+return true;
+}
+};
/** Release a resource acquired by the mini-transaction. */
struct ReleaseBlocks {
/** Release specific object */
@@ -736,6 +754,14 @@ inline lsn_t mtr_t::finish_write(ulint len)
return start_lsn;
}
+int32_t mtr_t::get_fix_count(const buf_block_t *block)
+{
+Iterate<FindBlock> iteration((FindBlock(block)));
+if (m_memo.for_each_block(iteration))
+return iteration.functor.num_fix;
+return 0;
+}
#ifdef UNIV_DEBUG
/** Check if memo contains the given item.
@return true if contains */
......
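Editorial note: mtr_t::get_fix_count() iterates the mini-transaction memo and counts the slots that reference the given block; buf_page_create() above compares that count plus one against block->page.buf_fix_count to tell whether any other thread still holds the page fixed. A standalone sketch of the counting idea over a toy memo (loosely modelled on mtr_memo_slot_t; in the real FindBlock the MTR_MEMO_MODIFY exclusion is debug-only):

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Toy memo slot: an object pointer plus a type tag.
enum class SlotType { PAGE_FIX, MODIFY };
struct Slot { const void* object; SlotType type; };

// Count how many memo slots reference `block`: the FindBlock idea.
int32_t get_fix_count(const std::vector<Slot>& memo, const void* block) {
  int32_t n = 0;
  for (const Slot& s : memo) {
    if (s.object == block && s.type != SlotType::MODIFY) {
      ++n;
    }
  }
  return n;
}

int main() {
  int block_a, block_b;
  std::vector<Slot> memo = {{&block_a, SlotType::PAGE_FIX},
                            {&block_b, SlotType::PAGE_FIX},
                            {&block_a, SlotType::MODIFY},
                            {&block_a, SlotType::PAGE_FIX}};
  std::cout << get_fix_count(memo, &block_a) << '\n';  // prints "2"
}
```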
@@ -3216,20 +3216,46 @@ row_sel_build_prev_vers_for_mysql(
return(err);
}
-/** Helper class to cache clust_rec and old_ver */
+/** Helper class to cache clust_rec and old_vers */
class Row_sel_get_clust_rec_for_mysql
{
const rec_t *cached_clust_rec;
rec_t *cached_old_vers;
+lsn_t cached_lsn;
+page_id_t cached_page_id;
-public:
-Row_sel_get_clust_rec_for_mysql() :
-cached_clust_rec(NULL), cached_old_vers(NULL) {}
+#ifdef UNIV_DEBUG
+void check_eq(const dict_index_t *index, const rec_offs *offsets) const
+{
+rec_offs vers_offs[REC_OFFS_HEADER_SIZE + MAX_REF_PARTS];
+rec_offs_init(vers_offs);
+mem_heap_t *heap= NULL;
+ut_ad(rec_offs_validate(cached_clust_rec, index, offsets));
+ut_ad(index->first_user_field() <= rec_offs_n_fields(offsets));
+ut_ad(vers_offs == rec_get_offsets(cached_old_vers, index, vers_offs, true,
+index->db_trx_id(), &heap));
+ut_ad(!heap);
+for (unsigned n= index->db_trx_id(); n--; )
+{
+const dict_col_t *col= dict_index_get_nth_col(index, n);
+ulint len1, len2;
+const byte *b1= rec_get_nth_field(cached_clust_rec, offsets, n, &len1);
+const byte *b2= rec_get_nth_field(cached_old_vers, vers_offs, n, &len2);
+ut_ad(!cmp_data_data(col->mtype, col->prtype, b1, len1, b2, len2));
+}
+}
+#endif
-dberr_t operator()(row_prebuilt_t *prebuilt, dict_index_t *sec_index,
-const rec_t *rec, que_thr_t *thr, const rec_t **out_rec,
-rec_offs **offsets, mem_heap_t **offset_heap,
-dtuple_t **vrow, mtr_t *mtr);
+public:
+Row_sel_get_clust_rec_for_mysql() :
+cached_clust_rec(NULL), cached_old_vers(NULL), cached_lsn(0),
+cached_page_id(page_id_t(0,0)) {}
+dberr_t operator()(row_prebuilt_t *prebuilt, dict_index_t *sec_index,
+const rec_t *rec, que_thr_t *thr, const rec_t **out_rec,
+rec_offs **offsets, mem_heap_t **offset_heap,
+dtuple_t **vrow, mtr_t *mtr);
};
/*********************************************************************//**
@@ -3429,8 +3455,18 @@ Row_sel_get_clust_rec_for_mysql::operator()(
&& !lock_clust_rec_cons_read_sees(
clust_rec, clust_index, *offsets,
&trx->read_view)) {
+const buf_page_t& bpage = btr_pcur_get_block(
+prebuilt->clust_pcur)->page;
+lsn_t lsn = bpage.newest_modification;
+if (!lsn) {
+lsn = mach_read_from_8(
+page_align(clust_rec) + FIL_PAGE_LSN);
+}
-if (clust_rec != cached_clust_rec) {
+if (lsn != cached_lsn
+|| bpage.id != cached_page_id
+|| clust_rec != cached_clust_rec) {
/* The following call returns 'offsets' associated with
'old_vers' */
err = row_sel_build_prev_vers_for_mysql(
@@ -3442,6 +3478,8 @@ Row_sel_get_clust_rec_for_mysql::operator()(
goto err_exit;
}
+cached_lsn = lsn;
+cached_page_id = bpage.id;
cached_clust_rec = clust_rec;
cached_old_vers = old_vers;
} else {
@@ -3452,7 +3490,8 @@ Row_sel_get_clust_rec_for_mysql::operator()(
version of clust_rec and its old version
old_vers. Re-calculate the offsets for old_vers. */
-if (old_vers != NULL) {
+if (old_vers) {
+ut_d(check_eq(clust_index, *offsets));
*offsets = rec_get_offsets(
old_vers, clust_index, *offsets,
true, ULINT_UNDEFINED, offset_heap);
......
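Editorial note: the cached (clust_rec, old_vers) pair is now reused only when the clustered-index page provably has not changed — the cache key is the page's newest_modification LSN (or FIL_PAGE_LSN if the page is clean), the page id, and the record pointer. A standalone sketch of that validity check with toy types; build_prev_version stands in for row_sel_build_prev_vers_for_mysql():

```cpp
#include <cstdint>
#include <cstdio>
#include <iostream>

// Toy cache of a previously built old record version. The cached copy is
// reused only if nothing on the page can have changed: same page id, same
// record pointer and same modification LSN.
struct VersionCache {
  uint64_t cached_lsn = 0;
  uint64_t cached_page_id = 0;
  const void* cached_rec = nullptr;
  int cached_old_version = 0;  // stands in for old_vers

  int get(uint64_t lsn, uint64_t page_id, const void* rec,
          int (*build_prev_version)(const void*)) {
    if (lsn != cached_lsn || page_id != cached_page_id || rec != cached_rec) {
      cached_old_version = build_prev_version(rec);  // expensive rebuild
      cached_lsn = lsn;
      cached_page_id = page_id;
      cached_rec = rec;
    }
    return cached_old_version;  // cheap path on a cache hit
  }
};

int main() {
  VersionCache cache;
  int rec = 0;
  auto build = [](const void*) { std::puts("rebuilt"); return 7; };
  cache.get(100, 1, &rec, build);                       // prints "rebuilt"
  cache.get(100, 1, &rec, build);                       // cache hit, silent
  std::cout << cache.get(101, 1, &rec, build) << '\n';  // LSN changed: rebuilds, prints 7
}
```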
@@ -303,7 +303,7 @@ trx_rseg_header_create(
ut_ad(!sys_header == (space == fil_system.temp_space));
/* Allocate a new file segment for the rollback segment */
-block = fseg_create(space, 0, TRX_RSEG + TRX_RSEG_FSEG_HEADER, mtr);
+block = fseg_create(space, TRX_RSEG + TRX_RSEG_FSEG_HEADER, mtr);
if (block == NULL) {
/* No space left */
......
@@ -160,7 +160,7 @@ trx_sysf_create(
compile_time_assert(TRX_SYS_SPACE == 0);
/* Create the trx sys file block in a new allocated file segment */
-block = fseg_create(fil_system.sys_space, 0,
+block = fseg_create(fil_system.sys_space,
TRX_SYS + TRX_SYS_FSEG_HEADER,
mtr);
buf_block_dbg_add_level(block, SYNC_TRX_SYS_HEADER);
......
@@ -526,7 +526,7 @@ trx_undo_seg_create(fil_space_t* space, trx_rsegf_t* rseg_hdr, ulint* id,
}
/* Allocate a new file segment for the undo log */
-block = fseg_create(space, 0, TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER,
+block = fseg_create(space, TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER,
mtr, true);
space->release_free_extents(n_reserved);
......