Commit f074223a authored by Marko Mäkelä

MDEV-32068 Some calls to buf_read_ahead_linear() seem to be useless

The linear read-ahead (enabled by nonzero innodb_read_ahead_threshold)
works best if index leaf pages or undo log pages have been allocated
on adjacent page numbers. The read-ahead is assumed not to be helpful
in other types of page accesses, such as non-leaf index pages.

buf_page_get_low(): Do not invoke buf_page_t::set_accessed(),
buf_page_make_young_if_needed(), or buf_read_ahead_linear().
We will invoke them in those callers of buf_page_get_gen() or
buf_page_get() where it makes sense: the access is not
one-time-on-startup and the page is not going to be freed soon.

btr_copy_blob_prefix(), btr_pcur_move_to_next_page(),
trx_undo_get_prev_rec_from_prev_page(),
trx_undo_get_first_rec(), btr_cur_t::search_leaf(),
btr_cur_t::open_leaf(): Invoke buf_read_ahead_linear().
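
To illustrate the division of labour, here is a minimal sketch of the
caller-side pattern that this change introduces (not the committed code;
it assumes the usual InnoDB context, with page_id, zip_size, err and mtr
in scope):

    buf_block_t *block= buf_page_get_gen(page_id, zip_size, RW_S_LATCH,
                                         nullptr, BUF_GET, mtr, &err);
    if (block)
    {
      /* buf_page_make_young_if_needed() now also invokes set_accessed()
      and returns whether the page had been accessed before */
      if (!buf_page_make_young_if_needed(&block->page))
        /* first access: consider linear read-ahead around this page */
        buf_read_ahead_linear(page_id, zip_size, ibuf_inside(mtr));
    }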

We will not invoke linear read-ahead in functions that would
essentially allocate or free pages, because pages that are
freshly allocated are expected to be initialized by buf_page_create()
and not read from the data file. Likewise, freeing pages should
not involve accessing any sibling pages, except for freeing
singly-linked lists of BLOB pages.

We will not invoke read-ahead in btr_cur_t::pessimistic_search_leaf()
or in a pessimistic operation of btr_cur_t::open_leaf(), because
it is assumed that pessimistic operations should be preceded by
optimistic operations, which should already have invoked read-ahead.

buf_page_make_young_if_needed(): Invoke also buf_page_t::set_accessed()
and return the result.

btr_cur_nonleaf_make_young(): Like buf_page_make_young_if_needed(),
but do not invoke buf_page_t::set_accessed().
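
For contrast, a hedged sketch of how the two helpers are meant to be used
after this change (the actual definitions appear in the diff below; the
block names here are illustrative only):

    /* non-leaf page visited during a tree descent: keep it young in the
    LRU, but skip set_accessed(), so it never triggers linear read-ahead */
    btr_cur_nonleaf_make_young(&nonleaf_block->page);

    /* leaf or undo log page: LRU bookkeeping plus set_accessed(); a false
    return value means first access, the cue for buf_read_ahead_linear() */
    const bool not_first_access=
      buf_page_make_young_if_needed(&leaf_block->page);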

Reviewed by: Vladislav Lesin
Tested by: Matthias Leich
parent 768a7361
......@@ -216,10 +216,11 @@ ATTRIBUTE_COLD void btr_decryption_failed(const dict_index_t &index)
@param[in] merge whether change buffer merge should be attempted
@param[in,out] mtr mini-transaction
@param[out] err error code
@param[out] first set if this is a first-time access to the page
@return block */
buf_block_t *btr_block_get(const dict_index_t &index,
uint32_t page, rw_lock_type_t mode, bool merge,
mtr_t *mtr, dberr_t *err)
mtr_t *mtr, dberr_t *err, bool *first)
{
ut_ad(mode != RW_NO_LATCH);
dberr_t local_err;
......@@ -242,6 +243,8 @@ buf_block_t *btr_block_get(const dict_index_t &index,
*err= DB_PAGE_CORRUPTED;
block= nullptr;
}
else if (!buf_page_make_young_if_needed(&block->page) && first)
*first= true;
}
else if (*err == DB_DECRYPTION_FAILED)
btr_decryption_failed(index);
......@@ -302,6 +305,8 @@ btr_root_block_get(
*err= DB_CORRUPTION;
block= nullptr;
}
else
buf_page_make_young_if_needed(&block->page);
}
else if (*err == DB_DECRYPTION_FAILED)
btr_decryption_failed(*index);
......@@ -553,8 +558,11 @@ btr_page_alloc_for_ibuf(
root->page.frame)),
0, RW_X_LATCH, nullptr, BUF_GET, mtr, err);
if (new_block)
{
buf_page_make_young_if_needed(&new_block->page);
*err= flst_remove(root, PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST, new_block,
PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE, mtr);
}
ut_d(if (*err == DB_SUCCESS)
flst_validate(root, PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST, mtr));
return new_block;
......@@ -1352,6 +1360,7 @@ btr_write_autoinc(dict_index_t* index, ib_uint64_t autoinc, bool reset)
if (buf_block_t *root= buf_page_get(page_id_t(space->id, index->page),
space->zip_size(), RW_SX_LATCH, &mtr))
{
buf_page_make_young_if_needed(&root->page);
mtr.set_named_space(space);
page_set_autoinc(root, autoinc, &mtr, reset);
}
......
......@@ -1263,7 +1263,7 @@ dberr_t btr_cur_t::search_leaf(const dtuple_t *tuple, page_cur_mode_t mode,
page_cur.block= block;
ut_ad(block == mtr->at_savepoint(block_savepoint));
ut_ad(rw_latch != RW_NO_LATCH);
const bool not_first_access{buf_page_make_young_if_needed(&block->page)};
#ifdef UNIV_ZIP_DEBUG
if (const page_zip_des_t *page_zip= buf_block_get_page_zip(block))
ut_a(page_zip_validate(page_zip, block->page.frame, index()));
......@@ -1542,6 +1542,9 @@ dberr_t btr_cur_t::search_leaf(const dtuple_t *tuple, page_cur_mode_t mode,
case BTR_SEARCH_PREV: /* btr_pcur_move_to_prev() */
ut_ad(rw_latch == RW_S_LATCH || rw_latch == RW_X_LATCH);
if (!not_first_access)
buf_read_ahead_linear(page_id, zip_size, false);
if (page_has_prev(block->page.frame) &&
page_rec_is_first(page_cur.rec, block->page.frame))
{
......@@ -1581,6 +1584,8 @@ dberr_t btr_cur_t::search_leaf(const dtuple_t *tuple, page_cur_mode_t mode,
buf_mode= btr_op == BTR_DELETE_OP
? BUF_GET_IF_IN_POOL_OR_WATCH
: BUF_GET_IF_IN_POOL;
else if (!not_first_access)
buf_read_ahead_linear(page_id, zip_size, false);
break;
case BTR_MODIFY_TREE:
ut_ad(rw_latch == RW_X_LATCH);
......@@ -1614,6 +1619,14 @@ ATTRIBUTE_COLD void mtr_t::index_lock_upgrade()
slot.type= MTR_MEMO_X_LOCK;
}
/** Mark a non-leaf page "least recently used", but avoid invoking
buf_page_t::set_accessed(), because we do not want linear read-ahead */
static void btr_cur_nonleaf_make_young(buf_page_t *bpage)
{
if (UNIV_UNLIKELY(buf_page_peek_if_too_old(bpage)))
buf_page_make_young(bpage);
}
ATTRIBUTE_COLD
dberr_t btr_cur_t::pessimistic_search_leaf(const dtuple_t *tuple,
page_cur_mode_t mode, mtr_t *mtr)
......@@ -1716,6 +1729,8 @@ dberr_t btr_cur_t::pessimistic_search_leaf(const dtuple_t *tuple,
if (height != btr_page_get_level(block->page.frame))
goto corrupted;
btr_cur_nonleaf_make_young(&block->page);
#ifdef UNIV_ZIP_DEBUG
const page_zip_des_t *page_zip= buf_block_get_page_zip(block);
ut_a(!page_zip || page_zip_validate(page_zip, block->page.frame, index()));
......@@ -1802,6 +1817,8 @@ dberr_t btr_cur_search_to_nth_level(ulint level,
btr_decryption_failed(*index);
goto func_exit;
}
else
btr_cur_nonleaf_make_young(&block->page);
#ifdef UNIV_ZIP_DEBUG
if (const page_zip_des_t *page_zip= buf_block_get_page_zip(block))
......@@ -1937,18 +1954,15 @@ dberr_t btr_cur_t::open_leaf(bool first, dict_index_t *index,
ut_ad(n_blocks < BTR_MAX_LEVELS);
ut_ad(savepoint + n_blocks == mtr->get_savepoint());
bool first_access= false;
buf_block_t* block=
btr_block_get(*index, page,
height ? upper_rw_latch : root_leaf_rw_latch,
!height, mtr, &err);
!height, mtr, &err, &first_access);
ut_ad(!block == (err != DB_SUCCESS));
if (!block)
{
if (err == DB_DECRYPTION_FAILED)
btr_decryption_failed(*index);
break;
}
if (first)
page_cur_set_before_first(block, &page_cur);
......@@ -2032,10 +2046,16 @@ dberr_t btr_cur_t::open_leaf(bool first, dict_index_t *index,
offsets= rec_get_offsets(page_cur.rec, index, offsets, 0, ULINT_UNDEFINED,
&heap);
page= btr_node_ptr_get_child_page_no(page_cur.rec, offsets);
ut_ad(latch_mode != BTR_MODIFY_TREE || upper_rw_latch == RW_X_LATCH);
if (latch_mode != BTR_MODIFY_TREE);
if (latch_mode != BTR_MODIFY_TREE)
{
if (!height && first && first_access)
buf_read_ahead_linear(page_id_t(block->page.id().space(), page),
block->page.zip_size(), false);
}
else if (btr_cur_need_opposite_intention(block->page, index->is_clust(),
lock_intention,
node_ptr_max_size, compress_limit,
......@@ -2073,7 +2093,6 @@ dberr_t btr_cur_t::open_leaf(bool first, dict_index_t *index,
}
/* Go to the child node */
page= btr_node_ptr_get_child_page_no(page_cur.rec, offsets);
n_blocks++;
}
......@@ -3840,22 +3859,14 @@ btr_cur_pess_upd_restore_supremum(
const page_id_t block_id{block->page.id()};
const page_id_t prev_id(block_id.space(), prev_page_no);
dberr_t err;
buf_block_t* prev_block
= buf_page_get_gen(prev_id, 0, RW_NO_LATCH, nullptr,
BUF_PEEK_IF_IN_POOL, mtr, &err);
/* Since we already held an x-latch on prev_block, it must
be available and not be corrupted unless the buffer pool got
corrupted somehow. */
= mtr->get_already_latched(prev_id, MTR_MEMO_PAGE_X_FIX);
if (UNIV_UNLIKELY(!prev_block)) {
return err;
return DB_CORRUPTION;
}
ut_ad(!memcmp_aligned<4>(prev_block->page.frame + FIL_PAGE_NEXT,
block->page.frame + FIL_PAGE_OFFSET, 4));
/* We must already have an x-latch on prev_block! */
ut_ad(mtr->memo_contains_flagged(prev_block, MTR_MEMO_PAGE_X_FIX));
lock_rec_reset_and_inherit_gap_locks(*prev_block, block_id,
PAGE_HEAP_NO_SUPREMUM,
page_rec_get_heap_no(rec));
......@@ -6664,6 +6675,10 @@ btr_copy_blob_prefix(
mtr.commit();
return copied_len;
}
if (!buf_page_make_young_if_needed(&block->page)) {
buf_read_ahead_linear(id, 0, false);
}
page = buf_block_get_frame(block);
blob_header = page + offset;
......
......@@ -25,9 +25,10 @@ Created 2/23/1996 Heikki Tuuri
*******************************************************/
#include "btr0pcur.h"
#include "ut0byte.h"
#include "buf0rea.h"
#include "rem0cmp.h"
#include "trx0trx.h"
#include "ibuf0ibuf.h"
/**************************************************************//**
Resets a persistent cursor object, freeing ::old_rec_buf if it is
......@@ -261,13 +262,15 @@ static bool btr_pcur_optimistic_latch_leaves(buf_block_t *block,
buf_page_get_gen(page_id_t(id.space(), left_page_no), zip_size,
mode, nullptr, BUF_GET_POSSIBLY_FREED, mtr);
if (left_block &&
btr_page_get_next(left_block->page.frame) != id.page_no())
if (!left_block);
else if (btr_page_get_next(left_block->page.frame) != id.page_no())
{
release_left_block:
mtr->release_last_page();
return false;
}
else
buf_page_make_young_if_needed(&left_block->page);
}
if (buf_page_optimistic_get(mode, block, pcur->modify_clock, mtr))
......@@ -539,10 +542,11 @@ btr_pcur_move_to_next_page(
}
dberr_t err;
bool first_access = false;
buf_block_t* next_block = btr_block_get(
*cursor->index(), next_page_no,
rw_lock_type_t(cursor->latch_mode & (RW_X_LATCH | RW_S_LATCH)),
page_is_leaf(page), mtr, &err);
page_is_leaf(page), mtr, &err, &first_access);
if (UNIV_UNLIKELY(!next_block)) {
return err;
......@@ -561,6 +565,11 @@ btr_pcur_move_to_next_page(
const auto s = mtr->get_savepoint();
mtr->rollback_to_savepoint(s - 2, s - 1);
if (first_access) {
buf_read_ahead_linear(next_block->page.id(),
next_block->zip_size(),
ibuf_inside(mtr));
}
return DB_SUCCESS;
}
......
......@@ -1141,7 +1141,6 @@ btr_search_guess_on_hash(
}
block->page.fix();
block->page.set_accessed();
buf_page_make_young_if_needed(&block->page);
static_assert(ulint{MTR_MEMO_PAGE_S_FIX} == ulint{BTR_SEARCH_LEAF},
"");
......
......@@ -2268,7 +2268,6 @@ buf_page_t* buf_page_get_zip(const page_id_t page_id, ulint zip_size)
ut_ad(s < buf_page_t::READ_FIX || s >= buf_page_t::WRITE_FIX);
}
bpage->set_accessed();
buf_page_make_young_if_needed(bpage);
#ifdef UNIV_DEBUG
......@@ -2895,18 +2894,6 @@ buf_page_get_low(
ut_ad(page_id_t(page_get_space_id(block->page.frame),
page_get_page_no(block->page.frame))
== page_id);
if (mode == BUF_GET_POSSIBLY_FREED
|| mode == BUF_PEEK_IF_IN_POOL) {
return block;
}
const bool not_first_access{block->page.set_accessed()};
buf_page_make_young_if_needed(&block->page);
if (!not_first_access) {
buf_read_ahead_linear(page_id, block->zip_size(),
ibuf_inside(mtr));
}
}
return block;
......@@ -3079,7 +3066,6 @@ bool buf_page_optimistic_get(ulint rw_latch, buf_block_t *block,
block->page.fix();
ut_ad(!block->page.is_read_fixed());
block->page.set_accessed();
buf_page_make_young_if_needed(&block->page);
mtr->memo_push(block, mtr_memo_type_t(rw_latch));
}
......
......@@ -789,6 +789,14 @@ void buf_page_make_young(buf_page_t *bpage)
mysql_mutex_unlock(&buf_pool.mutex);
}
bool buf_page_make_young_if_needed(buf_page_t *bpage)
{
const bool not_first{bpage->set_accessed()};
if (UNIV_UNLIKELY(buf_page_peek_if_too_old(bpage)))
buf_page_make_young(bpage);
return not_first;
}
/** Try to free a block. If bpage is a descriptor of a compressed-only
ROW_FORMAT=COMPRESSED page, the buf_page_t object will be freed as well.
The caller must hold buf_pool.mutex.
......
......@@ -42,7 +42,10 @@ static constexpr page_id_t hdr_page_id{DICT_HDR_SPACE, DICT_HDR_PAGE_NO};
static buf_block_t *dict_hdr_get(mtr_t *mtr)
{
/* We assume that the DICT_HDR page is always readable and available. */
return buf_page_get_gen(hdr_page_id, 0, RW_X_LATCH, nullptr, BUF_GET, mtr);
buf_block_t *b=
buf_page_get_gen(hdr_page_id, 0, RW_X_LATCH, nullptr, BUF_GET, mtr);
buf_page_make_young_if_needed(&b->page);
return b;
}
/**********************************************************************//**
......
......@@ -304,6 +304,8 @@ rtr_pcur_getnext_from_path(
break;
}
buf_page_make_young_if_needed(&block->page);
page = buf_block_get_frame(block);
page_ssn = page_get_ssn_id(page);
......@@ -683,6 +685,8 @@ dberr_t rtr_search_to_nth_level(ulint level, const dtuple_t *tuple,
return err;
}
buf_page_make_young_if_needed(&block->page);
const page_t *page= buf_block_get_frame(block);
#ifdef UNIV_ZIP_DEBUG
if (rw_latch != RW_NO_LATCH) {
......@@ -1703,6 +1707,8 @@ rtr_cur_restore_position(
goto func_exit;
}
buf_page_make_young_if_needed(&page_cursor->block->page);
/* Get the page SSN */
page = buf_block_get_frame(page_cursor->block);
page_ssn = page_get_ssn_id(page);
......
......@@ -309,8 +309,13 @@ ibuf_header_page_get(
buf_block_t* block = buf_page_get(
page_id_t(IBUF_SPACE_ID, FSP_IBUF_HEADER_PAGE_NO),
0, RW_X_LATCH, mtr);
if (UNIV_UNLIKELY(!block)) {
return nullptr;
}
buf_page_make_young_if_needed(&block->page);
return block ? block->page.frame : nullptr;
return block->page.frame;
}
/** Acquire the change buffer root page.
......@@ -326,7 +331,12 @@ static buf_block_t *ibuf_tree_root_get(mtr_t *mtr, dberr_t *err= nullptr)
buf_block_t *block=
buf_page_get_gen(page_id_t{IBUF_SPACE_ID, FSP_IBUF_TREE_ROOT_PAGE_NO},
0, RW_SX_LATCH, nullptr, BUF_GET, mtr, err);
ut_ad(!block || ibuf.empty == page_is_empty(block->page.frame));
if (block)
{
ut_ad(ibuf.empty == page_is_empty(block->page.frame));
buf_page_make_young_if_needed(&block->page);
}
return block;
}
......
......@@ -89,10 +89,12 @@ ATTRIBUTE_COLD void btr_decryption_failed(const dict_index_t &index);
@param[in] merge whether change buffer merge should be attempted
@param[in,out] mtr mini-transaction
@param[out] err error code
@param[out] first set if this is a first-time access to the page
@return block */
buf_block_t *btr_block_get(const dict_index_t &index,
uint32_t page, rw_lock_type_t mode, bool merge,
mtr_t *mtr, dberr_t *err= nullptr);
mtr_t *mtr, dberr_t *err= nullptr,
bool *first= nullptr);
/**************************************************************//**
Gets the index id field of a page.
......
......@@ -262,8 +262,6 @@ buf_block_t*
buf_page_create_deferred(uint32_t space_id, ulint zip_size, mtr_t *mtr,
buf_block_t *free_block);
/** Move a block to the start of the LRU list. */
void buf_page_make_young(buf_page_t *bpage);
/** Mark the page status as FREED for the given tablespace and page number.
@param[in,out] space tablespace
@param[in] page page number
......@@ -285,15 +283,6 @@ there is danger of dropping from the buffer pool.
@return true if bpage should be made younger */
inline bool buf_page_peek_if_too_old(const buf_page_t *bpage);
/** Move a page to the start of the buffer pool LRU list if it is too old.
@param[in,out] bpage buffer pool page */
inline void buf_page_make_young_if_needed(buf_page_t *bpage)
{
if (UNIV_UNLIKELY(buf_page_peek_if_too_old(bpage))) {
buf_page_make_young(bpage);
}
}
/********************************************************************//**
Increments the modify clock of a frame by 1. The caller must (1) own the
buf_pool.mutex and block bufferfix count has to be zero, (2) or own an x-lock
......
......@@ -108,6 +108,16 @@ buf_LRU_add_block(
blocks in the LRU list, else put to the
start; if the LRU list is very short, added to
the start regardless of this parameter */
/** Move a block to the start of the buf_pool.LRU list.
@param bpage buffer pool page */
void buf_page_make_young(buf_page_t *bpage);
/** Flag a page accessed in buf_pool and move it to the start of buf_pool.LRU
if it is too old.
@param bpage buffer pool page
@return whether this is not the first access */
bool buf_page_make_young_if_needed(buf_page_t *bpage);
/******************************************************************//**
Adds a block to the LRU list of decompressed zip pages. */
void
......
......@@ -2051,6 +2051,8 @@ row_merge_read_clustered_index(
goto err_exit;
}
buf_page_make_young_if_needed(&block->page);
page_cur_set_before_first(block, cur);
if (!page_cur_move_to_next(cur)
|| page_cur_is_after_last(cur)) {
......
......@@ -822,7 +822,6 @@ row_purge_upd_exist_or_extern_func(
buf_page_get(page_id_t(rseg.space->id,
page_no),
0, RW_X_LATCH, &mtr)) {
block->page.set_accessed();
buf_page_make_young_if_needed(&block->page);
byte* data_field = block->page.frame
......
......@@ -1223,6 +1223,7 @@ sel_set_rtr_rec_lock(
if (!cur_block) {
goto func_end;
}
buf_page_make_young_if_needed(&cur_block->page);
} else {
mtr->start();
goto func_end;
......
......@@ -318,6 +318,8 @@ static buf_block_t* row_undo_rec_get(undo_node_t* node)
return nullptr;
}
buf_page_make_young_if_needed(&undo_page->page);
uint16_t offset = undo->top_offset;
buf_block_t* prev_page = undo_page;
......
......@@ -2062,9 +2062,10 @@ trx_undo_get_undo_rec_low(
mtr.start();
trx_undo_rec_t *undo_rec= nullptr;
if (const buf_block_t* undo_page=
if (buf_block_t* undo_page=
buf_page_get(page_id_t(rseg->space->id, page_no), 0, RW_S_LATCH, &mtr))
{
buf_page_make_young_if_needed(&undo_page->page);
undo_rec= undo_page->page.frame + offset;
const size_t end= mach_read_from_2(undo_rec);
if (UNIV_UNLIKELY(end <= offset ||
......
......@@ -295,8 +295,13 @@ buf_block_t *trx_rseg_t::get(mtr_t *mtr, dberr_t *err) const
if (err) *err= DB_TABLESPACE_NOT_FOUND;
return nullptr;
}
return buf_page_get_gen(page_id(), 0, RW_X_LATCH, nullptr,
BUF_GET, mtr, err);
buf_block_t *block= buf_page_get_gen(page_id(), 0, RW_X_LATCH, nullptr,
BUF_GET, mtr, err);
if (UNIV_LIKELY(block != nullptr))
buf_page_make_young_if_needed(&block->page);
return block;
}
/** Upgrade a rollback segment header page to MariaDB 10.3 format.
......
......@@ -582,6 +582,7 @@ static dberr_t trx_resurrect_table_locks(trx_t *trx, const trx_undo_t &undo)
undo.top_page_no), 0, RW_S_LATCH, nullptr,
BUF_GET, &mtr, &err))
{
buf_page_make_young_if_needed(&block->page);
buf_block_t *undo_block= block;
const trx_undo_rec_t *undo_rec= block->page.frame + undo.top_offset;
......
......@@ -25,8 +25,8 @@ Created 3/26/1996 Heikki Tuuri
*******************************************************/
#include "trx0undo.h"
#include "buf0rea.h"
#include "fsp0fsp.h"
#include "mach0data.h"
#include "mtr0log.h"
#include "srv0mon.h"
#include "srv0srv.h"
......@@ -178,8 +178,12 @@ trx_undo_get_prev_rec_from_prev_page(buf_block_t *&block, uint16_t rec,
block= buf_page_get(page_id_t(block->page.id().space(), prev_page_no),
0, shared ? RW_S_LATCH : RW_X_LATCH, mtr);
if (UNIV_UNLIKELY(!block))
return nullptr;
return block ? trx_undo_page_get_last_rec(block, page_no, offset) : nullptr;
if (!buf_page_make_young_if_needed(&block->page))
buf_read_ahead_linear(block->page.id(), 0, false);
return trx_undo_page_get_last_rec(block, page_no, offset);
}
/** Get the previous undo log record.
......@@ -268,12 +272,16 @@ trx_undo_get_first_rec(const fil_space_t &space, uint32_t page_no,
uint16_t offset, ulint mode, const buf_block_t*& block,
mtr_t *mtr, dberr_t *err)
{
block= buf_page_get_gen(page_id_t{space.id, page_no}, 0, mode,
nullptr, BUF_GET, mtr, err);
buf_block_t *b= buf_page_get_gen(page_id_t{space.id, page_no}, 0, mode,
nullptr, BUF_GET, mtr, err);
block= b;
if (!block)
return nullptr;
if (trx_undo_rec_t *rec= trx_undo_page_get_first_rec(block, page_no, offset))
if (!buf_page_make_young_if_needed(&b->page))
buf_read_ahead_linear(b->page.id(), 0, false);
if (trx_undo_rec_t *rec= trx_undo_page_get_first_rec(b, page_no, offset))
return rec;
return trx_undo_get_next_rec_from_next_page(block, page_no, offset, mode,
......@@ -663,6 +671,8 @@ buf_block_t *trx_undo_add_page(trx_undo_t *undo, mtr_t *mtr, dberr_t *err)
0, RW_X_LATCH, nullptr, BUF_GET, mtr, err);
if (!header_block)
goto func_exit;
buf_page_make_young_if_needed(&header_block->page);
*err= fsp_reserve_free_extents(&n_reserved, rseg->space, 1, FSP_UNDO, mtr);
if (UNIV_UNLIKELY(*err != DB_SUCCESS))
......@@ -732,6 +742,8 @@ trx_undo_free_page(
return FIL_NULL;
}
buf_page_make_young_if_needed(&header_block->page);
*err = flst_remove(header_block, TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST,
undo_block, TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_NODE,
mtr);
......@@ -1271,6 +1283,8 @@ trx_undo_reuse_cached(trx_t* trx, trx_rseg_t* rseg, trx_undo_t** pundo,
return NULL;
}
buf_page_make_young_if_needed(&block->page);
UT_LIST_REMOVE(rseg->undo_cached, undo);
*pundo = undo;
......@@ -1305,19 +1319,24 @@ trx_undo_assign(trx_t* trx, dberr_t* err, mtr_t* mtr)
ut_ad(mtr->get_log_mode() == MTR_LOG_ALL);
trx_undo_t* undo = trx->rsegs.m_redo.undo;
buf_block_t* block;
if (undo) {
return buf_page_get_gen(
block = buf_page_get_gen(
page_id_t(undo->rseg->space->id, undo->last_page_no),
0, RW_X_LATCH, undo->guess_block,
BUF_GET, mtr, err);
if (UNIV_LIKELY(block != nullptr)) {
buf_page_make_young_if_needed(&block->page);
}
return block;
}
*err = DB_SUCCESS;
trx_rseg_t* rseg = trx->rsegs.m_redo.rseg;
rseg->latch.wr_lock(SRW_LOCK_CALL);
buf_block_t* block = trx_undo_reuse_cached(
block = trx_undo_reuse_cached(
trx, rseg, &trx->rsegs.m_redo.undo, mtr, err);
if (!block) {
......@@ -1358,12 +1377,17 @@ trx_undo_assign_low(trx_t *trx, trx_rseg_t *rseg, trx_undo_t **undo,
: &trx->rsegs.m_redo.undo));
ut_ad(mtr->get_log_mode()
== (is_temp ? MTR_LOG_NO_REDO : MTR_LOG_ALL));
buf_block_t* block;
if (*undo) {
return buf_page_get_gen(
block = buf_page_get_gen(
page_id_t(rseg->space->id, (*undo)->last_page_no),
0, RW_X_LATCH, (*undo)->guess_block,
BUF_GET, mtr, err);
if (UNIV_LIKELY(block != nullptr)) {
buf_page_make_young_if_needed(&block->page);
}
return block;
}
DBUG_EXECUTE_IF(
......@@ -1373,7 +1397,6 @@ trx_undo_assign_low(trx_t *trx, trx_rseg_t *rseg, trx_undo_t **undo,
*err = DB_SUCCESS;
rseg->latch.wr_lock(SRW_LOCK_CALL);
buf_block_t* block;
if (is_temp) {
ut_ad(!UT_LIST_GET_LEN(rseg->undo_cached));
} else {
......