Commit 02e72f7b authored by Marko Mäkelä

MDEV-26769 follow-up: Remove unnecessary page locking

btr_cur_optimistic_latch_leaves(): Use transactional_shared_lock_guard.

btr_cur_latch_leaves(): Avoid acquiring some page latches, because
any concurrent page splits or merges are already blocked by index->lock.

btr_cur_search_to_nth_level_func(): Remove the redundant variable
retrying_for_search_prev (it was always equal to !!prev_tree_blocks),
and avoid acquiring some page latches.
parent 14c5178f
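
The centerpiece is transactional_shared_lock_guard in btr_cur_optimistic_latch_leaves(). A minimal sketch of the idea behind such a guard, assuming Intel RTM intrinsics (_xbegin/_xend/_xabort, compiled with -mrtm); the class name and fallback policy are illustrative, not MariaDB's actual implementation:

#include <immintrin.h>

/* Sketch only: try to run the critical section as a hardware memory
transaction, falling back to a real shared latch on abort or when
RTM is unavailable. */
template<typename Lock>
class tx_shared_guard_sketch
{
  Lock& lock;
  bool  elided;
public:
  explicit tx_shared_guard_sketch(Lock& l) : lock(l), elided(false)
  {
    if (_xbegin() == _XBEGIN_STARTED)
    {
      if (l.is_write_locked())
        _xabort(1);      /* writer present: abort and fall back */
      elided = true;     /* reads now execute transactionally */
    }
    else
      l.lock_shared();   /* aborted or unsupported: take the real latch */
  }
  ~tx_shared_guard_sketch()
  {
    if (elided)
      _xend();           /* commit the hardware transaction */
    else
      lock.unlock_shared();
  }
};

On the elided path no store to the lock word happens at all, so concurrent readers never bounce the cache line; a writer that acquires the latch aborts the transaction (the is_write_locked() load put the lock word in the read set), and execution resumes at _xbegin(), taking the real shared latch.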
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -224,6 +224,11 @@ btr_cur_latch_leaves(
 	spatial = dict_index_is_spatial(cursor->index) && cursor->rtr_info;
 	ut_ad(block->page.in_file());
+	ut_ad(srv_read_only_mode
+	      || mtr->memo_contains_flagged(&cursor->index->lock,
+					    MTR_MEMO_S_LOCK
+					    | MTR_MEMO_X_LOCK
+					    | MTR_MEMO_SX_LOCK));
 
 	switch (latch_mode) {
 	case BTR_SEARCH_LEAF:
@@ -338,10 +343,10 @@ btr_cur_latch_leaves(
 	case BTR_SEARCH_PREV:
 	case BTR_MODIFY_PREV:
 		mode = latch_mode == BTR_SEARCH_PREV ? RW_S_LATCH : RW_X_LATCH;
-		/* latch also left sibling */
-		block->lock.s_lock();
+		/* Because we are holding index->lock, no page splits
+		or merges may run concurrently, and we may read
+		FIL_PAGE_PREV from a buffer-fixed, unlatched page. */
 		left_page_no = btr_page_get_prev(block->frame);
-		block->lock.s_unlock();
 
 		if (left_page_no != FIL_NULL) {
 			latch_leaves.savepoints[0] = mtr_set_savepoint(mtr);
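
The new comment depends on FIL_PAGE_PREV being a single 32-bit field in the page header, so the unlatched access is one plain load of a word that index->lock keeps stable. A self-contained sketch of what btr_page_get_prev() amounts to, assuming the standard InnoDB FIL header layout (the offset matches fil0types.h; read_prev_page_no is a hypothetical name):

#include <cstdint>

/* The previous-page link is stored big-endian at byte offset 8 of the
page header, as in fil0types.h. */
static const unsigned FIL_PAGE_PREV = 8;

/* Hypothetical stand-in for btr_page_get_prev(): a single 4-byte
big-endian load, like mach_read_from_4(). While index->lock is held,
no split or merge can rewrite this field, so the page only needs to be
buffer-fixed, not latched. */
static uint32_t read_prev_page_no(const unsigned char* frame)
{
  return uint32_t{frame[FIL_PAGE_PREV]} << 24
    | uint32_t{frame[FIL_PAGE_PREV + 1]} << 16
    | uint32_t{frame[FIL_PAGE_PREV + 2]} << 8
    | uint32_t{frame[FIL_PAGE_PREV + 3]};
}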
@@ -753,6 +758,7 @@ bool btr_cur_instant_root_init(dict_index_t* index, const page_t* page)
 @param[in,out]	cursor	cursor
 @param[in]	mtr	mini-transaction
 @return true if success */
+TRANSACTIONAL_TARGET
 bool
 btr_cur_optimistic_latch_leaves(
 	buf_block_t*	block,
@@ -774,14 +780,16 @@ btr_cur_optimistic_latch_leaves(
 			modify_clock, mtr));
 	case BTR_SEARCH_PREV:
 	case BTR_MODIFY_PREV:
-		block->lock.s_lock();
-		if (block->modify_clock != modify_clock) {
-			block->lock.s_unlock();
-			return false;
+		uint32_t curr_page_no, left_page_no;
+		{
+			transactional_shared_lock_guard<block_lock> g{
+				block->lock};
+			if (block->modify_clock != modify_clock) {
+				return false;
+			}
+			curr_page_no = block->page.id().page_no();
+			left_page_no = btr_page_get_prev(block->frame);
 		}
-		const uint32_t curr_page_no = block->page.id().page_no();
-		const uint32_t left_page_no = btr_page_get_prev(block->frame);
-		block->lock.s_unlock();
 
 		const rw_lock_type_t mode = *latch_mode == BTR_SEARCH_PREV
 			? RW_S_LATCH : RW_X_LATCH;
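
The narrow scope above is the point: the modify_clock check and both reads happen while the guard is held, and only plain local copies escape it. A generic sketch of this copy-out pattern against the standard library, with page_stub and snapshot_links as illustrative stand-ins for buf_block_t and the surrounding logic:

#include <cstdint>
#include <shared_mutex>

struct page_stub              /* illustrative stand-in for buf_block_t */
{
  std::shared_mutex latch;
  uint64_t modify_clock;
  uint32_t page_no;
  uint32_t prev_page_no;
};

/* Validate the version and copy both fields out while the shared latch
is held; once the inner scope closes, the latch is released and the
locals remain consistent snapshots. */
static bool snapshot_links(page_stub& b, uint64_t expected_clock,
                           uint32_t& curr, uint32_t& left)
{
  {
    std::shared_lock<std::shared_mutex> g{b.latch};
    if (b.modify_clock != expected_clock)
      return false;           /* the page was modified: caller must retry */
    curr = b.page_no;
    left = b.prev_page_no;
  }
  return true;
}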
@@ -1271,7 +1279,6 @@ btr_cur_search_to_nth_level_func(
 	ulint		n_releases = 0;
 	bool		detected_same_key_root = false;
 
-	bool		retrying_for_search_prev = false;
 	ulint		leftmost_from_level = 0;
 	buf_block_t**	prev_tree_blocks = NULL;
 	ulint*		prev_tree_savepoints = NULL;
@@ -1494,9 +1501,6 @@ btr_cur_search_to_nth_level_func(
 	default:
 		if (!srv_read_only_mode) {
 			if (s_latch_by_caller) {
-				ut_ad(mtr->memo_contains_flagged(
-					      &index->lock, MTR_MEMO_S_LOCK
-					      | MTR_MEMO_SX_LOCK));
 			} else if (!modify_external) {
 				/* BTR_SEARCH_TREE is intended to be used with
 				BTR_ALREADY_S_LATCHED */
@@ -1568,7 +1572,7 @@ btr_cur_search_to_nth_level_func(
 	if (height != 0) {
 		/* We are about to fetch the root or a non-leaf page. */
 		if ((latch_mode != BTR_MODIFY_TREE || height == level)
-		    && !retrying_for_search_prev) {
+		    && !prev_tree_blocks) {
 			/* If doesn't have SX or X latch of index,
 			each pages should be latched before reading. */
 			if (height == ULINT_UNDEFINED
@@ -1698,18 +1702,18 @@ btr_cur_search_to_nth_level_func(
 			goto retry_page_get;
 		}
 
-		if (retrying_for_search_prev && height != 0) {
+		if (height && prev_tree_blocks) {
 			/* also latch left sibling */
-			uint32_t	left_page_no;
 			buf_block_t*	get_block;
 
 			ut_ad(rw_latch == RW_NO_LATCH);
 
 			rw_latch = upper_rw_latch;
 
-			block->lock.s_lock();
-			left_page_no = btr_page_get_prev(buf_block_get_frame(block));
-			block->lock.s_unlock();
+			/* Because we are holding index->lock, no page splits
+			or merges may run concurrently, and we may read
+			FIL_PAGE_PREV from a buffer-fixed, unlatched page. */
+			uint32_t left_page_no = btr_page_get_prev(block->frame);
 
 			if (left_page_no != FIL_NULL) {
 				ut_ad(prev_n_blocks < leftmost_from_level);
@@ -1852,7 +1856,7 @@ btr_cur_search_to_nth_level_func(
 		}
 
 		/* release upper blocks */
-		if (retrying_for_search_prev) {
+		if (prev_tree_blocks) {
 			ut_ad(!autoinc);
 			for (;
 			     prev_n_releases < prev_n_blocks;
@@ -2238,7 +2242,7 @@ btr_cur_search_to_nth_level_func(
 		BTR_MODIFY_PREV latches prev_page of the leaf page. */
 		if ((latch_mode == BTR_SEARCH_PREV
 		     || latch_mode == BTR_MODIFY_PREV)
-		    && !retrying_for_search_prev) {
+		    && !prev_tree_blocks) {
 			/* block should be latched for consistent
 			btr_page_get_prev() */
 			ut_ad(mtr->memo_contains_flagged(
@@ -2258,8 +2262,6 @@ btr_cur_search_to_nth_level_func(
 		if (height == 0 && leftmost_from_level > 0) {
 			/* should retry to get also prev_page
 			from level==leftmost_from_level. */
-			retrying_for_search_prev = true;
-
 			prev_tree_blocks = static_cast<buf_block_t**>(
 				ut_malloc_nokey(sizeof(buf_block_t*)
 						* leftmost_from_level));
@@ -2481,10 +2483,8 @@ btr_cur_search_to_nth_level_func(
 		mem_heap_free(heap);
 	}
 
-	if (retrying_for_search_prev) {
-		ut_free(prev_tree_blocks);
-		ut_free(prev_tree_savepoints);
-	}
+	ut_free(prev_tree_blocks);
+	ut_free(prev_tree_savepoints);
 
 	if (mbr_adj) {
 		/* remember that we will need to adjust parent MBR */
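
Dropping the if around the two calls assumes that ut_free(), like free(), accepts a null pointer as a no-op (guaranteed for free() by the C standard; for ut_free() it is an assumption here). A minimal illustration:

#include <cstdlib>

int main()
{
  void* prev_tree_blocks_stub = nullptr;  /* never allocated on this path */
  free(prev_tree_blocks_stub);            /* well-defined no-op for null */
  return 0;
}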
--- a/storage/innobase/include/sux_lock.h
+++ b/storage/innobase/include/sux_lock.h
@@ -271,6 +271,11 @@ class sux_lock final
   /** @return whether any writer is waiting */
   bool is_waiting() const { return lock.is_waiting(); }
   bool is_write_locked() const { return lock.is_write_locked(); }
+
+  inline void lock_shared();
+  inline void unlock_shared();
 };
 
 typedef sux_lock<ssux_lock_impl<true>> block_lock;
@@ -351,6 +356,11 @@ template<typename ssux> inline void sux_lock<ssux>::s_lock()
   ut_d(s_lock_register());
 }
 
+template<typename ssux>
+inline void sux_lock<ssux>::lock_shared() { s_lock(); }
+template<typename ssux>
+inline void sux_lock<ssux>::unlock_shared() { s_unlock(); }
+
 template<typename ssux> inline void sux_lock<ssux>::u_lock()
 {
   os_thread_id_t id= os_thread_get_curr_id();
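
The names lock_shared() and unlock_shared() are the member names that generic shared-lock guards look up by convention, which is how block_lock becomes usable with transactional_shared_lock_guard. The same convention makes a type work with std::shared_lock, as in this illustrative sketch:

#include <shared_mutex>

/* A toy lock exposing the SharedLockable member names; std::shared_lock
finds lock_shared()/unlock_shared() by name, just as the transactional
guard can find them on sux_lock. Illustrative only. */
struct toy_lock
{
  std::shared_mutex m;
  void lock_shared() { m.lock_shared(); }
  bool try_lock_shared() { return m.try_lock_shared(); }
  void unlock_shared() { m.unlock_shared(); }
};

int main()
{
  toy_lock l;
  std::shared_lock<toy_lock> g{l};  /* calls l.lock_shared() */
  return 0;                         /* destructor calls l.unlock_shared() */
}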