Commit f0de610d authored by Marko Mäkelä's avatar Marko Mäkelä

Merge 10.11 into 11.2

parents abd98336 f9f92b48
connection node_2;
connection node_1;
connect node_1a,127.0.0.1,root,,test,$NODE_MYPORT_1;
connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
SET GLOBAL DEBUG_DBUG = 'd,sync.wsrep_rollback_mdl_release';
connection node_2;
SET SESSION wsrep_trx_fragment_size = 1;
START TRANSACTION;
INSERT INTO t1 VALUES (1);
connection node_1a;
SELECT COUNT(*) FROM t1;
COUNT(*)
0
SET SESSION wsrep_retry_autocommit = 0;
SET DEBUG_SYNC = 'ha_write_row_start SIGNAL may_toi WAIT_FOR bf_abort';
INSERT INTO t1 VALUES (2);
connection node_1;
SET DEBUG_SYNC = 'now WAIT_FOR may_toi';
SET DEBUG_SYNC = 'after_wsrep_thd_abort WAIT_FOR sync.wsrep_rollback_mdl_release_reached';
TRUNCATE TABLE t1;
connection node_1a;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
SET DEBUG_SYNC = 'now SIGNAL signal.wsrep_rollback_mdl_release';
connection node_2;
INSERT INTO t1 VALUES (3);
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
connection node_1;
SET GLOBAL DEBUG_DBUG = '';
SET DEBUG_SYNC = 'RESET';
DROP TABLE t1;
disconnect node_1a;
disconnect node_2;
disconnect node_1;
#
# MDEV-33133: MDL conflict handling code should skip transactions
# BF-aborted before.
#
# It's possible that MDL conflict handling code is called more
# than once for a transaction when:
# - it holds more than one conflicting MDL lock
# - reschedule_waiters() is executed,
# which results in repeated attempts to BF-abort already aborted
# transaction.
# In such situations, it might be that BF-aborting logic sees
# a partially rolled back transaction and erroneously decides
# on future actions for such a transaction.
#
# The specific situation tested and fixed is when a SR transaction
# applied in the node gets BF-aborted by a started TOI operation.
# It's then caught with the server transaction already rolled back,
# but with no MDL locks yet released. This caused wrong state
# detection for such a transaction during repeated MDL conflict
# handling code execution.
#
--source include/galera_cluster.inc
--source include/have_debug_sync.inc
--source include/have_debug.inc
# Extra connection to node_1: issues the local INSERT whose MDL wait
# triggers the repeated conflict-handling path under test.
--connect node_1a,127.0.0.1,root,,test,$NODE_MYPORT_1
--connection node_1
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
# Make the rollback of a BF-aborted transaction pause just before it
# releases its MDL locks (dbug sync point sync.wsrep_rollback_mdl_release).
SET GLOBAL DEBUG_DBUG = 'd,sync.wsrep_rollback_mdl_release';
--connection node_2
# Start a streaming-replication (SR) transaction replicating one fragment
# per statement, so node_1 applies a fragment and holds MDL on t1.
SET SESSION wsrep_trx_fragment_size = 1;
START TRANSACTION;
INSERT INTO t1 VALUES (1);
--connection node_1a
# Sync wait for SR transaction to replicate and apply fragment.
SELECT COUNT(*) FROM t1;
# No retries: the local INSERT must fail with a deadlock error once
# it gets BF-aborted, rather than being re-executed transparently.
SET SESSION wsrep_retry_autocommit = 0;
SET DEBUG_SYNC = 'ha_write_row_start SIGNAL may_toi WAIT_FOR bf_abort';
--send
INSERT INTO t1 VALUES (2);
--connection node_1
SET DEBUG_SYNC = 'now WAIT_FOR may_toi';
# BF-abort SR transaction and wait until it reaches the point
# prior to releasing MDL locks.
# Then abort local INSERT, which will go through reschedule_waiters()
# and see SR transaction holding MDL locks but already rolled back.
# In this case SR transaction should be skipped in MDL conflict
# handling code.
SET DEBUG_SYNC = 'after_wsrep_thd_abort WAIT_FOR sync.wsrep_rollback_mdl_release_reached';
--send
TRUNCATE TABLE t1;
--connection node_1a
# Local INSERT gets aborted.
--error ER_LOCK_DEADLOCK
--reap
# Let the aborted SR transaction continue and finally release MDL locks,
# which in turn allows TRUNCATE to complete.
SET DEBUG_SYNC = 'now SIGNAL signal.wsrep_rollback_mdl_release';
--connection node_2
# SR transaction has been BF-aborted.
--error ER_LOCK_DEADLOCK
INSERT INTO t1 VALUES (3);
--connection node_1
# TRUNCATE completes.
--reap
# Cleanup
SET GLOBAL DEBUG_DBUG = '';
SET DEBUG_SYNC = 'RESET';
DROP TABLE t1;
--disconnect node_1a
--source include/galera_end.inc
This source diff could not be displayed because it is too large. You can view the blob instead.
...@@ -392,6 +392,18 @@ int Wsrep_high_priority_service::rollback(const wsrep::ws_handle& ws_handle, ...@@ -392,6 +392,18 @@ int Wsrep_high_priority_service::rollback(const wsrep::ws_handle& ws_handle,
wsrep_thd_transaction_state_str(m_thd), wsrep_thd_transaction_state_str(m_thd),
m_thd->killed); m_thd->killed);
#ifdef ENABLED_DEBUG_SYNC
DBUG_EXECUTE_IF("sync.wsrep_rollback_mdl_release",
{
const char act[]=
"now "
"SIGNAL sync.wsrep_rollback_mdl_release_reached "
"WAIT_FOR signal.wsrep_rollback_mdl_release";
DBUG_ASSERT(!debug_sync_set_action(m_thd,
STRING_WITH_LEN(act)));
};);
#endif
m_thd->release_transactional_locks(); m_thd->release_transactional_locks();
free_root(m_thd->mem_root, MYF(MY_KEEP_PREALLOC)); free_root(m_thd->mem_root, MYF(MY_KEEP_PREALLOC));
......
...@@ -3209,7 +3209,12 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, ...@@ -3209,7 +3209,12 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx,
mysql_mutex_lock(&granted_thd->LOCK_thd_kill); mysql_mutex_lock(&granted_thd->LOCK_thd_kill);
mysql_mutex_lock(&granted_thd->LOCK_thd_data); mysql_mutex_lock(&granted_thd->LOCK_thd_data);
if (wsrep_thd_is_toi(granted_thd) || if (granted_thd->wsrep_aborter != 0)
{
DBUG_ASSERT(granted_thd->wsrep_aborter == request_thd->thread_id);
WSREP_DEBUG("BF thread waiting for a victim to release locks");
}
else if (wsrep_thd_is_toi(granted_thd) ||
wsrep_thd_is_applying(granted_thd)) wsrep_thd_is_applying(granted_thd))
{ {
if (wsrep_thd_is_aborting(granted_thd)) if (wsrep_thd_is_aborting(granted_thd))
...@@ -3300,6 +3305,7 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, ...@@ -3300,6 +3305,7 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx,
} }
mysql_mutex_unlock(&granted_thd->LOCK_thd_data); mysql_mutex_unlock(&granted_thd->LOCK_thd_data);
mysql_mutex_unlock(&granted_thd->LOCK_thd_kill); mysql_mutex_unlock(&granted_thd->LOCK_thd_kill);
DEBUG_SYNC(request_thd, "after_wsrep_thd_abort");
} }
else else
{ {
......
...@@ -1126,7 +1126,7 @@ void btr_drop_temporary_table(const dict_table_t &table) ...@@ -1126,7 +1126,7 @@ void btr_drop_temporary_table(const dict_table_t &table)
{ {
if (buf_block_t *block= buf_page_get_gen({SRV_TMP_SPACE_ID, index->page}, if (buf_block_t *block= buf_page_get_gen({SRV_TMP_SPACE_ID, index->page},
0, RW_X_LATCH, nullptr, BUF_GET, 0, RW_X_LATCH, nullptr, BUF_GET,
&mtr, nullptr, nullptr)) &mtr, nullptr))
{ {
btr_free_but_not_root(block, MTR_LOG_NO_REDO); btr_free_but_not_root(block, MTR_LOG_NO_REDO);
mtr.set_log_mode(MTR_LOG_NO_REDO); mtr.set_log_mode(MTR_LOG_NO_REDO);
......
This diff is collapsed.
...@@ -533,8 +533,7 @@ btr_pcur_move_to_next_page( ...@@ -533,8 +533,7 @@ btr_pcur_move_to_next_page(
const auto s = mtr->get_savepoint(); const auto s = mtr->get_savepoint();
mtr->rollback_to_savepoint(s - 2, s - 1); mtr->rollback_to_savepoint(s - 2, s - 1);
if (first_access) { if (first_access) {
buf_read_ahead_linear(next_block->page.id(), buf_read_ahead_linear(next_block->page.id());
next_block->zip_size());
} }
return DB_SUCCESS; return DB_SUCCESS;
} }
......
This diff is collapsed.
...@@ -2682,12 +2682,12 @@ ATTRIBUTE_COLD void buf_flush_page_cleaner_init() ...@@ -2682,12 +2682,12 @@ ATTRIBUTE_COLD void buf_flush_page_cleaner_init()
/** Flush the buffer pool on shutdown. */ /** Flush the buffer pool on shutdown. */
ATTRIBUTE_COLD void buf_flush_buffer_pool() ATTRIBUTE_COLD void buf_flush_buffer_pool()
{ {
ut_ad(!os_aio_pending_reads());
ut_ad(!buf_page_cleaner_is_active); ut_ad(!buf_page_cleaner_is_active);
ut_ad(!buf_flush_sync_lsn); ut_ad(!buf_flush_sync_lsn);
service_manager_extend_timeout(INNODB_EXTEND_TIMEOUT_INTERVAL, service_manager_extend_timeout(INNODB_EXTEND_TIMEOUT_INTERVAL,
"Waiting to flush the buffer pool"); "Waiting to flush the buffer pool");
os_aio_wait_until_no_pending_reads(false);
mysql_mutex_lock(&buf_pool.flush_list_mutex); mysql_mutex_lock(&buf_pool.flush_list_mutex);
......
...@@ -303,10 +303,9 @@ pages: to avoid deadlocks this function must be written such that it cannot ...@@ -303,10 +303,9 @@ pages: to avoid deadlocks this function must be written such that it cannot
end up waiting for these latches! end up waiting for these latches!
@param[in] page_id page id of a page which the current thread @param[in] page_id page id of a page which the current thread
wants to access wants to access
@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@return number of page read requests issued */ @return number of page read requests issued */
TRANSACTIONAL_TARGET TRANSACTIONAL_TARGET
ulint buf_read_ahead_random(const page_id_t page_id, ulint zip_size) ulint buf_read_ahead_random(const page_id_t page_id)
{ {
if (!srv_random_read_ahead || page_id.space() >= SRV_TMP_SPACE_ID) if (!srv_random_read_ahead || page_id.space() >= SRV_TMP_SPACE_ID)
/* Disable the read-ahead for temporary tablespace */ /* Disable the read-ahead for temporary tablespace */
...@@ -353,6 +352,7 @@ ulint buf_read_ahead_random(const page_id_t page_id, ulint zip_size) ...@@ -353,6 +352,7 @@ ulint buf_read_ahead_random(const page_id_t page_id, ulint zip_size)
/* Read all the suitable blocks within the area */ /* Read all the suitable blocks within the area */
buf_block_t *block= nullptr; buf_block_t *block= nullptr;
unsigned zip_size{space->zip_size()};
if (UNIV_LIKELY(!zip_size)) if (UNIV_LIKELY(!zip_size))
{ {
allocate_block: allocate_block:
...@@ -405,15 +405,14 @@ if it is not already there. Sets the io_fix and an exclusive lock ...@@ -405,15 +405,14 @@ if it is not already there. Sets the io_fix and an exclusive lock
on the buffer frame. The flag is cleared and the x-lock on the buffer frame. The flag is cleared and the x-lock
released by the i/o-handler thread. released by the i/o-handler thread.
@param page_id page id @param page_id page id
@param zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param chain buf_pool.page_hash cell for page_id @param chain buf_pool.page_hash cell for page_id
@retval DB_SUCCESS if the page was read and is not corrupted, @retval DB_SUCCESS if the page was read and is not corrupted
@retval DB_SUCCESS_LOCKED_REC if the page was not read @retval DB_SUCCESS_LOCKED_REC if the page was not read
@retval DB_PAGE_CORRUPTED if page based on checksum check is corrupted, @retval DB_PAGE_CORRUPTED if page based on checksum check is corrupted,
@retval DB_DECRYPTION_FAILED if page post encryption checksum matches but @retval DB_DECRYPTION_FAILED if page post encryption checksum matches but
after decryption normal page checksum does not match. after decryption normal page checksum does not match.
@retval DB_TABLESPACE_DELETED if tablespace .ibd file is missing */ @retval DB_TABLESPACE_DELETED if tablespace .ibd file is missing */
dberr_t buf_read_page(const page_id_t page_id, ulint zip_size, dberr_t buf_read_page(const page_id_t page_id,
buf_pool_t::hash_chain &chain) buf_pool_t::hash_chain &chain)
{ {
fil_space_t *space= fil_space_t::get(page_id.space()); fil_space_t *space= fil_space_t::get(page_id.space());
...@@ -427,6 +426,8 @@ dberr_t buf_read_page(const page_id_t page_id, ulint zip_size, ...@@ -427,6 +426,8 @@ dberr_t buf_read_page(const page_id_t page_id, ulint zip_size,
/* Our caller should already have ensured that the page does not /* Our caller should already have ensured that the page does not
exist in buf_pool.page_hash. */ exist in buf_pool.page_hash. */
buf_block_t *block= nullptr; buf_block_t *block= nullptr;
unsigned zip_size= space->zip_size();
if (UNIV_LIKELY(!zip_size)) if (UNIV_LIKELY(!zip_size))
{ {
allocate_block: allocate_block:
...@@ -511,10 +512,9 @@ NOTE 2: the calling thread may own latches on pages: to avoid deadlocks this ...@@ -511,10 +512,9 @@ NOTE 2: the calling thread may own latches on pages: to avoid deadlocks this
function must be written such that it cannot end up waiting for these function must be written such that it cannot end up waiting for these
latches! latches!
@param[in] page_id page id; see NOTE 3 above @param[in] page_id page id; see NOTE 3 above
@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@return number of page read requests issued */ @return number of page read requests issued */
TRANSACTIONAL_TARGET TRANSACTIONAL_TARGET
ulint buf_read_ahead_linear(const page_id_t page_id, ulint zip_size) ulint buf_read_ahead_linear(const page_id_t page_id)
{ {
/* check if readahead is disabled. /* check if readahead is disabled.
Disable the read ahead logic for temporary tablespace */ Disable the read ahead logic for temporary tablespace */
...@@ -553,6 +553,11 @@ ulint buf_read_ahead_linear(const page_id_t page_id, ulint zip_size) ...@@ -553,6 +553,11 @@ ulint buf_read_ahead_linear(const page_id_t page_id, ulint zip_size)
return 0; return 0;
} }
if (trx_sys_hdr_page(page_id))
/* If it is an ibuf bitmap page or trx sys hdr, we do no
read-ahead, as that could break the ibuf page access order */
goto fail;
/* How many out of order accessed pages can we ignore /* How many out of order accessed pages can we ignore
when working out the access pattern for linear readahead */ when working out the access pattern for linear readahead */
ulint count= std::min<ulint>(buf_pool_t::READ_AHEAD_PAGES - ulint count= std::min<ulint>(buf_pool_t::READ_AHEAD_PAGES -
...@@ -647,6 +652,7 @@ ulint buf_read_ahead_linear(const page_id_t page_id, ulint zip_size) ...@@ -647,6 +652,7 @@ ulint buf_read_ahead_linear(const page_id_t page_id, ulint zip_size)
/* If we got this far, read-ahead can be sensible: do it */ /* If we got this far, read-ahead can be sensible: do it */
buf_block_t *block= nullptr; buf_block_t *block= nullptr;
unsigned zip_size{space->zip_size()};
if (UNIV_LIKELY(!zip_size)) if (UNIV_LIKELY(!zip_size))
{ {
allocate_block: allocate_block:
......
...@@ -647,7 +647,7 @@ dberr_t rtr_search_to_nth_level(btr_cur_t *cur, que_thr_t *thr, ...@@ -647,7 +647,7 @@ dberr_t rtr_search_to_nth_level(btr_cur_t *cur, que_thr_t *thr,
search_loop: search_loop:
auto buf_mode= BUF_GET; auto buf_mode= BUF_GET;
ulint rw_latch= RW_NO_LATCH; rw_lock_type_t rw_latch= RW_NO_LATCH;
if (height) if (height)
{ {
...@@ -658,7 +658,7 @@ dberr_t rtr_search_to_nth_level(btr_cur_t *cur, que_thr_t *thr, ...@@ -658,7 +658,7 @@ dberr_t rtr_search_to_nth_level(btr_cur_t *cur, que_thr_t *thr,
rw_latch= upper_rw_latch; rw_latch= upper_rw_latch;
} }
else if (latch_mode <= BTR_MODIFY_LEAF) else if (latch_mode <= BTR_MODIFY_LEAF)
rw_latch= latch_mode; rw_latch= rw_lock_type_t(latch_mode);
dberr_t err; dberr_t err;
auto block_savepoint= mtr->get_savepoint(); auto block_savepoint= mtr->get_savepoint();
......
...@@ -191,33 +191,29 @@ be implemented at a higher level. In other words, all possible ...@@ -191,33 +191,29 @@ be implemented at a higher level. In other words, all possible
accesses to a given page through this function must be protected by accesses to a given page through this function must be protected by
the same set of mutexes or latches. the same set of mutexes or latches.
@param page_id page identifier @param page_id page identifier
@param zip_size ROW_FORMAT=COMPRESSED page size in bytes
@return pointer to the block, s-latched */ @return pointer to the block, s-latched */
buf_page_t *buf_page_get_zip(const page_id_t page_id, ulint zip_size); buf_page_t *buf_page_get_zip(const page_id_t page_id);
/** Get access to a database page. Buffered redo log may be applied. /** Get access to a database page. Buffered redo log may be applied.
@param[in] page_id page id @param[in] page_id page id
@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0 @param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] rw_latch RW_S_LATCH, RW_X_LATCH, RW_NO_LATCH @param[in] rw_latch latch mode
@param[in] guess guessed block or NULL @param[in] guess guessed block or NULL
@param[in] mode BUF_GET, BUF_GET_IF_IN_POOL, @param[in] mode BUF_GET, BUF_GET_IF_IN_POOL,
or BUF_PEEK_IF_IN_POOL or BUF_PEEK_IF_IN_POOL
@param[in,out] mtr mini-transaction @param[in,out] mtr mini-transaction
@param[out] err DB_SUCCESS or error code @param[out] err DB_SUCCESS or error code
@param[in,out] no_wait If not NULL on input, then we must not @return pointer to the block
wait for current page latch. On output, the value is set to true if we had to @retval nullptr if the block is corrupted or unavailable */
return because we could not wait on page latch.
@return pointer to the block or NULL */
buf_block_t* buf_block_t*
buf_page_get_gen( buf_page_get_gen(
const page_id_t page_id, const page_id_t page_id,
ulint zip_size, ulint zip_size,
ulint rw_latch, rw_lock_type_t rw_latch,
buf_block_t* guess, buf_block_t* guess,
ulint mode, ulint mode,
mtr_t* mtr, mtr_t* mtr,
dberr_t* err = nullptr, dberr_t* err = nullptr);
bool* no_wait = nullptr);
/** Initialize a page in the buffer pool. The page is usually not read /** Initialize a page in the buffer pool. The page is usually not read
from a file even if it cannot be found in the buffer buf_pool. This is one from a file even if it cannot be found in the buffer buf_pool. This is one
...@@ -357,8 +353,8 @@ void buf_page_print(const byte* read_buf, ulint zip_size = 0) ...@@ -357,8 +353,8 @@ void buf_page_print(const byte* read_buf, ulint zip_size = 0)
ATTRIBUTE_COLD __attribute__((nonnull)); ATTRIBUTE_COLD __attribute__((nonnull));
/********************************************************************//** /********************************************************************//**
Decompress a block. Decompress a block.
@return TRUE if successful */ @return true if successful */
ibool bool
buf_zip_decompress( buf_zip_decompress(
/*===============*/ /*===============*/
buf_block_t* block, /*!< in/out: block */ buf_block_t* block, /*!< in/out: block */
...@@ -627,30 +623,42 @@ class buf_page_t ...@@ -627,30 +623,42 @@ class buf_page_t
public: public:
const page_id_t &id() const { return id_; } const page_id_t &id() const { return id_; }
uint32_t state() const { return zip.fix; } uint32_t state() const { return zip.fix; }
uint32_t buf_fix_count() const static uint32_t buf_fix_count(uint32_t s)
{ { ut_ad(s >= FREED); return s < UNFIXED ? (s - FREED) : (~LRU_MASK & s); }
uint32_t f= state();
ut_ad(f >= FREED); uint32_t buf_fix_count() const { return buf_fix_count(state()); }
return f < UNFIXED ? (f - FREED) : (~LRU_MASK & f); /** Check if a file block is io-fixed.
} @param s state()
@return whether s corresponds to an io-fixed block */
static bool is_io_fixed(uint32_t s)
{ ut_ad(s >= FREED); return s >= READ_FIX; }
/** Check if a file block is read-fixed.
@param s state()
@return whether s corresponds to a read-fixed block */
static bool is_read_fixed(uint32_t s)
{ return is_io_fixed(s) && s < WRITE_FIX; }
/** Check if a file block is write-fixed.
@param s state()
@return whether s corresponds to a write-fixed block */
static bool is_write_fixed(uint32_t s)
{ ut_ad(s >= FREED); return s >= WRITE_FIX; }
/** @return whether this block is read or write fixed; /** @return whether this block is read or write fixed;
read_complete() or write_complete() will always release read_complete() or write_complete() will always release
the io-fix before releasing U-lock or X-lock */ the io-fix before releasing U-lock or X-lock */
bool is_io_fixed() const bool is_io_fixed() const { return is_io_fixed(state()); }
{ const auto s= state(); ut_ad(s >= FREED); return s >= READ_FIX; }
/** @return whether this block is write fixed; /** @return whether this block is write fixed;
write_complete() will always release the write-fix before releasing U-lock */ write_complete() will always release the write-fix before releasing U-lock */
bool is_write_fixed() const { return state() >= WRITE_FIX; } bool is_write_fixed() const { return is_write_fixed(state()); }
/** @return whether this block is read fixed; this should never hold /** @return whether this block is read fixed */
when a thread is holding the block lock in any mode */ bool is_read_fixed() const { return is_read_fixed(state()); }
bool is_read_fixed() const { return is_io_fixed() && !is_write_fixed(); }
/** @return if this belongs to buf_pool.unzip_LRU */ /** @return if this belongs to buf_pool.unzip_LRU */
bool belongs_to_unzip_LRU() const bool belongs_to_unzip_LRU() const
{ return UNIV_LIKELY_NULL(zip.data) && frame; } { return UNIV_LIKELY_NULL(zip.data) && frame; }
bool is_freed() const static bool is_freed(uint32_t s) { ut_ad(s >= FREED); return s < UNFIXED; }
{ const auto s= state(); ut_ad(s >= FREED); return s < UNFIXED; } bool is_freed() const { return is_freed(state()); }
bool is_reinit() const { return !(~state() & REINIT); } bool is_reinit() const { return !(~state() & REINIT); }
void set_reinit(uint32_t prev_state) void set_reinit(uint32_t prev_state)
...@@ -1358,11 +1366,43 @@ class buf_pool_t ...@@ -1358,11 +1366,43 @@ class buf_pool_t
} }
public: public:
/** page_fix() mode of operation */
enum page_fix_conflicts{
/** Fetch if in the buffer pool, also blocks marked as free */
FIX_ALSO_FREED= -1,
/** Fetch, waiting for page read completion */
FIX_WAIT_READ,
/** Fetch, but avoid any waits for */
FIX_NOWAIT
};
/** Look up and buffer-fix a page. /** Look up and buffer-fix a page.
Note: If the page is read-fixed (being read into the buffer pool),
we would have to wait for the page latch before determining if the page
is accessible (it could be corrupted and have been evicted again).
If the caller is holding other page latches so that waiting for this
page latch could lead to lock order inversion (latching order violation),
the mode c=FIX_WAIT_READ must not be used.
@param id page identifier @param id page identifier
@param err error code (will only be assigned when returning nullptr)
@param c how to handle conflicts
@return undo log page, buffer-fixed @return undo log page, buffer-fixed
@retval -1 if c=FIX_NOWAIT and buffer-fixing would require waiting
@retval nullptr if the undo page was corrupted or freed */ @retval nullptr if the undo page was corrupted or freed */
buf_block_t *page_fix(const page_id_t id); buf_block_t *page_fix(const page_id_t id, dberr_t *err,
page_fix_conflicts c);
buf_block_t *page_fix(const page_id_t id)
{ return page_fix(id, nullptr, FIX_WAIT_READ); }
/** Decompress a page and relocate the block descriptor
@param b buffer-fixed compressed-only ROW_FORMAT=COMPRESSED page
@param chain hash table chain for b->id().fold()
@return the decompressed block, x-latched and read-fixed
@retval nullptr if the decompression failed (b->unfix() will be invoked) */
ATTRIBUTE_COLD __attribute__((nonnull, warn_unused_result))
buf_block_t *unzip(buf_page_t *b, hash_chain &chain);
/** @return whether the buffer pool contains a page /** @return whether the buffer pool contains a page
@param page_id page identifier @param page_id page identifier
...@@ -1572,8 +1612,8 @@ class buf_pool_t ...@@ -1572,8 +1612,8 @@ class buf_pool_t
/** map of block->frame to buf_block_t blocks that belong /** map of block->frame to buf_block_t blocks that belong
to buf_buddy_alloc(); protected by buf_pool.mutex */ to buf_buddy_alloc(); protected by buf_pool.mutex */
hash_table_t zip_hash; hash_table_t zip_hash;
Atomic_counter<ulint> /** number of pending unzip() */
n_pend_unzip; /*!< number of pending decompressions */ Atomic_counter<ulint> n_pend_unzip;
time_t last_printout_time; time_t last_printout_time;
/*!< when buf_print_io was last time /*!< when buf_print_io was last time
......
...@@ -33,15 +33,14 @@ buffer buf_pool if it is not already there. Sets the io_fix flag and sets ...@@ -33,15 +33,14 @@ buffer buf_pool if it is not already there. Sets the io_fix flag and sets
an exclusive lock on the buffer frame. The flag is cleared and the x-lock an exclusive lock on the buffer frame. The flag is cleared and the x-lock
released by the i/o-handler thread. released by the i/o-handler thread.
@param page_id page id @param page_id page id
@param zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param chain buf_pool.page_hash cell for page_id @param chain buf_pool.page_hash cell for page_id
@retval DB_SUCCESS if the page was read and is not corrupted, @retval DB_SUCCESS if the page was read and is not corrupted
@retval DB_SUCCESS_LOCKED_REC if the page was not read @retval DB_SUCCESS_LOCKED_REC if the page was not read
@retval DB_PAGE_CORRUPTED if page based on checksum check is corrupted, @retval DB_PAGE_CORRUPTED if page based on checksum check is corrupted,
@retval DB_DECRYPTION_FAILED if page post encryption checksum matches but @retval DB_DECRYPTION_FAILED if page post encryption checksum matches but
after decryption normal page checksum does not match. after decryption normal page checksum does not match.
@retval DB_TABLESPACE_DELETED if tablespace .ibd file is missing */ @retval DB_TABLESPACE_DELETED if tablespace .ibd file is missing */
dberr_t buf_read_page(const page_id_t page_id, ulint zip_size, dberr_t buf_read_page(const page_id_t page_id,
buf_pool_t::hash_chain &chain); buf_pool_t::hash_chain &chain);
/** High-level function which reads a page asynchronously from a file to the /** High-level function which reads a page asynchronously from a file to the
...@@ -63,9 +62,8 @@ pages: to avoid deadlocks this function must be written such that it cannot ...@@ -63,9 +62,8 @@ pages: to avoid deadlocks this function must be written such that it cannot
end up waiting for these latches! end up waiting for these latches!
@param[in] page_id page id of a page which the current thread @param[in] page_id page id of a page which the current thread
wants to access wants to access
@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@return number of page read requests issued */ @return number of page read requests issued */
ulint buf_read_ahead_random(const page_id_t page_id, ulint zip_size); ulint buf_read_ahead_random(const page_id_t page_id);
/** Applies linear read-ahead if in the buf_pool the page is a border page of /** Applies linear read-ahead if in the buf_pool the page is a border page of
a linear read-ahead area and all the pages in the area have been accessed. a linear read-ahead area and all the pages in the area have been accessed.
...@@ -87,9 +85,8 @@ NOTE 2: the calling thread may own latches on pages: to avoid deadlocks this ...@@ -87,9 +85,8 @@ NOTE 2: the calling thread may own latches on pages: to avoid deadlocks this
function must be written such that it cannot end up waiting for these function must be written such that it cannot end up waiting for these
latches! latches!
@param[in] page_id page id; see NOTE 3 above @param[in] page_id page id; see NOTE 3 above
@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@return number of page read requests issued */ @return number of page read requests issued */
ulint buf_read_ahead_linear(const page_id_t page_id, ulint zip_size); ulint buf_read_ahead_linear(const page_id_t page_id);
/** Schedule a page for recovery. /** Schedule a page for recovery.
@param space tablespace @param space tablespace
......
...@@ -2178,36 +2178,41 @@ updated then its state must be set to BUF_PAGE_NOT_USED. ...@@ -2178,36 +2178,41 @@ updated then its state must be set to BUF_PAGE_NOT_USED.
@retval DB_SUCCESS or error code. */ @retval DB_SUCCESS or error code. */
dberr_t PageConverter::operator()(buf_block_t* block) UNIV_NOTHROW dberr_t PageConverter::operator()(buf_block_t* block) UNIV_NOTHROW
{ {
/* If we already had an old page with matching number /* If we already had an old page with matching number in the buffer
in the buffer pool, evict it now, because pool, evict it now, because we no longer evict the pages on
we no longer evict the pages on DISCARD TABLESPACE. */ DISCARD TABLESPACE. */
buf_page_get_gen(block->page.id(), get_zip_size(), RW_NO_LATCH, if (buf_block_t *b= buf_pool.page_fix(block->page.id(), nullptr,
nullptr, BUF_PEEK_IF_IN_POOL, buf_pool_t::FIX_ALSO_FREED))
nullptr, nullptr, nullptr); {
ut_ad(!b->page.oldest_modification());
mysql_mutex_lock(&buf_pool.mutex);
b->unfix();
if (!buf_LRU_free_page(&b->page, true))
ut_ad(0);
mysql_mutex_unlock(&buf_pool.mutex);
}
uint16_t page_type; uint16_t page_type;
if (dberr_t err = update_page(block, page_type)) { if (dberr_t err= update_page(block, page_type))
return err; return err;
}
const bool full_crc32 = fil_space_t::full_crc32(get_space_flags()); const bool full_crc32= fil_space_t::full_crc32(get_space_flags());
byte* frame = get_frame(block); byte *frame= get_frame(block);
memset_aligned<8>(frame + FIL_PAGE_LSN, 0, 8); memset_aligned<8>(frame + FIL_PAGE_LSN, 0, 8);
if (!block->page.zip.data) { if (!block->page.zip.data)
buf_flush_init_for_writing( buf_flush_init_for_writing(nullptr, block->page.frame, nullptr,
NULL, block->page.frame, NULL, full_crc32);
} else if (fil_page_type_is_index(page_type)) {
buf_flush_init_for_writing(
NULL, block->page.zip.data, &block->page.zip,
full_crc32); full_crc32);
} else { else if (fil_page_type_is_index(page_type))
buf_flush_init_for_writing(nullptr, block->page.zip.data, &block->page.zip,
full_crc32);
else
/* Calculate and update the checksum of non-index /* Calculate and update the checksum of non-index
pages for ROW_FORMAT=COMPRESSED tables. */ pages for ROW_FORMAT=COMPRESSED tables. */
buf_flush_update_zip_checksum( buf_flush_update_zip_checksum(block->page.zip.data, block->zip_size());
block->page.zip.data, block->zip_size());
}
return DB_SUCCESS; return DB_SUCCESS;
} }
......
...@@ -2157,38 +2157,6 @@ row_merge_read_clustered_index( ...@@ -2157,38 +2157,6 @@ row_merge_read_clustered_index(
mem_heap_empty(row_heap); mem_heap_empty(row_heap);
if (!mtr_started) { if (!mtr_started) {
goto scan_next;
}
if (clust_index->lock.is_waiting()) {
/* There are waiters on the clustered
index tree lock, likely the purge
thread. Store and restore the cursor
position, and yield so that scanning a
large table will not starve other
threads. */
/* Store the cursor position on the last user
record on the page. */
if (!btr_pcur_move_to_prev_on_page(&pcur)) {
goto corrupted_index;
}
/* Leaf pages must never be empty, unless
this is the only page in the index tree. */
if (!btr_pcur_is_on_user_rec(&pcur)
&& btr_pcur_get_block(&pcur)->page.id()
.page_no() != clust_index->page) {
goto corrupted_index;
}
btr_pcur_store_position(&pcur, &mtr);
mtr.commit();
mtr_started = false;
/* Give the waiters a chance to proceed. */
std::this_thread::yield();
scan_next:
ut_ad(!mtr_started);
ut_ad(!mtr.is_active()); ut_ad(!mtr.is_active());
mtr.start(); mtr.start();
mtr_started = true; mtr_started = true;
...@@ -2236,14 +2204,14 @@ row_merge_read_clustered_index( ...@@ -2236,14 +2204,14 @@ row_merge_read_clustered_index(
buf_page_make_young_if_needed(&block->page); buf_page_make_young_if_needed(&block->page);
const auto s = mtr.get_savepoint();
mtr.rollback_to_savepoint(s - 2, s - 1);
page_cur_set_before_first(block, cur); page_cur_set_before_first(block, cur);
if (!page_cur_move_to_next(cur) if (!page_cur_move_to_next(cur)
|| page_cur_is_after_last(cur)) { || page_cur_is_after_last(cur)) {
goto corrupted_rec; goto corrupted_rec;
} }
const auto s = mtr.get_savepoint();
mtr.rollback_to_savepoint(s - 2, s - 1);
} }
} else { } else {
mem_heap_empty(row_heap); mem_heap_empty(row_heap);
......
...@@ -185,7 +185,7 @@ trx_undo_get_prev_rec_from_prev_page(buf_block_t *&block, uint16_t rec, ...@@ -185,7 +185,7 @@ trx_undo_get_prev_rec_from_prev_page(buf_block_t *&block, uint16_t rec,
return nullptr; return nullptr;
if (!buf_page_make_young_if_needed(&block->page)) if (!buf_page_make_young_if_needed(&block->page))
buf_read_ahead_linear(block->page.id(), 0); buf_read_ahead_linear(block->page.id());
return trx_undo_page_get_last_rec(block, page_no, offset); return trx_undo_page_get_last_rec(block, page_no, offset);
} }
...@@ -242,7 +242,7 @@ trx_undo_get_prev_rec(buf_block_t *&block, uint16_t rec, uint32_t page_no, ...@@ -242,7 +242,7 @@ trx_undo_get_prev_rec(buf_block_t *&block, uint16_t rec, uint32_t page_no,
static trx_undo_rec_t* static trx_undo_rec_t*
trx_undo_get_next_rec_from_next_page(const buf_block_t *&block, trx_undo_get_next_rec_from_next_page(const buf_block_t *&block,
uint32_t page_no, uint16_t offset, uint32_t page_no, uint16_t offset,
ulint mode, mtr_t *mtr) rw_lock_type_t mode, mtr_t *mtr)
{ {
if (page_no == block->page.id().page_no() && if (page_no == block->page.id().page_no() &&
mach_read_from_2(block->page.frame + offset + TRX_UNDO_NEXT_LOG)) mach_read_from_2(block->page.frame + offset + TRX_UNDO_NEXT_LOG))
...@@ -272,7 +272,8 @@ trx_undo_get_next_rec_from_next_page(const buf_block_t *&block, ...@@ -272,7 +272,8 @@ trx_undo_get_next_rec_from_next_page(const buf_block_t *&block,
@retval nullptr if none */ @retval nullptr if none */
static trx_undo_rec_t* static trx_undo_rec_t*
trx_undo_get_first_rec(const fil_space_t &space, uint32_t page_no, trx_undo_get_first_rec(const fil_space_t &space, uint32_t page_no,
uint16_t offset, ulint mode, const buf_block_t*& block, uint16_t offset, rw_lock_type_t mode,
const buf_block_t *&block,
mtr_t *mtr, dberr_t *err) mtr_t *mtr, dberr_t *err)
{ {
buf_block_t *b= buf_page_get_gen(page_id_t{space.id, page_no}, 0, mode, buf_block_t *b= buf_page_get_gen(page_id_t{space.id, page_no}, 0, mode,
...@@ -282,7 +283,7 @@ trx_undo_get_first_rec(const fil_space_t &space, uint32_t page_no, ...@@ -282,7 +283,7 @@ trx_undo_get_first_rec(const fil_space_t &space, uint32_t page_no,
return nullptr; return nullptr;
if (!buf_page_make_young_if_needed(&b->page)) if (!buf_page_make_young_if_needed(&b->page))
buf_read_ahead_linear(b->page.id(), 0); buf_read_ahead_linear(b->page.id());
if (trx_undo_rec_t *rec= trx_undo_page_get_first_rec(b, page_no, offset)) if (trx_undo_rec_t *rec= trx_undo_page_get_first_rec(b, page_no, offset))
return rec; return rec;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment