Commit fa70c146 authored by Marko Mäkelä

MDEV-23399 preparation: Remove buf_pool.zip_clean

The debug data structure may have been useful during the development of
ROW_FORMAT=COMPRESSED page frames. Let us simplify code by removing it.
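
For illustration only (this is not InnoDB code; pool_t, page_t and DBG are
hypothetical names), a minimal standalone C++ sketch of the pattern being
removed: a shadow list that exists only in debug builds and must be updated
by hand at every page state transition. The ut_d() and #ifdef UNIV_DEBUG
hunks deleted below are the real counterparts of the DBG() lines here.

    // Sketch of a debug-only shadow list, assuming hypothetical names.
    #include <cassert>
    #include <list>

    struct page_t { bool dirty= false; };

    #ifndef NDEBUG
    # define DBG(x) x   /* debug build: maintain the extra list */
    #else
    # define DBG(x)     /* release build: the list does not exist */
    #endif

    struct pool_t {
      std::list<page_t*> flush_list;      // dirty pages: the real bookkeeping
      DBG(std::list<page_t*> zip_clean;)  // clean pages: debug-only shadow

      void make_dirty(page_t *p) {
        DBG(zip_clean.remove(p);)         // extra debug-only step here...
        p->dirty= true;
        flush_list.push_back(p);
      }
      void make_clean(page_t *p) {
        p->dirty= false;
        flush_list.remove(p);
        DBG(zip_clean.push_back(p);)      // ...and its mirror image here
      }
    };

    int main() {
      pool_t pool;
      page_t page;
      pool.make_dirty(&page);   // every transition touches the shadow list
      pool.make_clean(&page);
      DBG(assert(!pool.zip_clean.empty());)
    }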
parent 308f8350
@@ -212,12 +212,12 @@ but not written to disk yet. The block with the oldest modification
 which has not yet been written to disk is at the end of the chain.
 The access to this list is protected by buf_pool.flush_list_mutex.

-The chain of unmodified compressed blocks (buf_pool.zip_clean)
-contains the control blocks (buf_page_t) of those compressed pages
+The control blocks for uncompressed pages are accessible via
+buf_block_t objects that are reachable via buf_pool.chunks[].
+The control blocks (buf_page_t) of those ROW_FORMAT=COMPRESSED pages
 that are not in buf_pool.flush_list and for which no uncompressed
-page has been allocated in the buffer pool. The control blocks for
-uncompressed pages are accessible via buf_block_t objects that are
-reachable via buf_pool.chunks[].
+page has been allocated in buf_pool are only accessible via
+buf_pool.LRU.

 The chains of free memory blocks (buf_pool.zip_free[]) are used by
 the buddy allocator (buf0buddy.cc) to keep track of currently unused
@@ -1554,8 +1554,6 @@ bool buf_pool_t::create()
   UT_LIST_INIT(flush_list, &buf_page_t::list);
   UT_LIST_INIT(unzip_LRU, &buf_block_t::unzip_LRU);
-  ut_d(UT_LIST_INIT(zip_clean, &buf_page_t::list));
-
   for (size_t i= 0; i < UT_ARR_SIZE(zip_free); ++i)
     UT_LIST_INIT(zip_free[i], &buf_buddy_free_t::list);

   ulint s= curr_size;
@@ -3364,10 +3362,7 @@ buf_page_get_low(
         /* Set after buf_relocate(). */
         block->page.set_buf_fix_count(1);

-        if (!block->page.oldest_modification()) {
-            ut_d(UT_LIST_REMOVE(buf_pool.zip_clean, &block->page));
-        } else {
-            /* Relocate buf_pool.flush_list. */
+        if (block->page.oldest_modification()) {
             buf_flush_relocate_on_flush_list(bpage, &block->page);
         }
@@ -3870,10 +3865,6 @@ buf_page_create(fil_space_t *space, uint32_t offset,
   if (block->page.oldest_modification() > 0)
     buf_flush_relocate_on_flush_list(&block->page, &free_block->page);
-#ifdef UNIV_DEBUG
-  else
-    UT_LIST_REMOVE(buf_pool.zip_clean, &block->page);
-#endif

   free_block->page.set_state(BUF_BLOCK_FILE_PAGE);
   buf_unzip_LRU_add_block(free_block, FALSE);
@@ -4474,8 +4465,8 @@ void buf_pool_t::validate()
     for (j = chunk->size; j--; block++) {
       switch (block->page.state()) {
       case BUF_BLOCK_ZIP_PAGE:
-        /* These should only occur on
-        zip_clean, zip_free[], or flush_list. */
+        /* This kind of block descriptors should
+        be allocated by malloc() only. */
         ut_error;
         break;
@@ -4499,34 +4490,6 @@ void buf_pool_t::validate()
     }
   }

-  /* Check clean compressed-only blocks. */
-  for (buf_page_t* b = UT_LIST_GET_FIRST(zip_clean); b;
-       b = UT_LIST_GET_NEXT(list, b)) {
-    ut_ad(b->state() == BUF_BLOCK_ZIP_PAGE);
-    ut_ad(!b->oldest_modification());
-    switch (b->io_fix()) {
-    case BUF_IO_NONE:
-    case BUF_IO_PIN:
-      /* All clean blocks should be I/O-unfixed. */
-      break;
-    case BUF_IO_READ:
-      /* In buf_LRU_free_page(), we temporarily set
-      b->io_fix = BUF_IO_READ for a newly allocated
-      control block in order to prevent
-      buf_page_get_gen() from decompressing the block. */
-      break;
-    default:
-      ut_error;
-      break;
-    }
-
-    const page_id_t id = b->id();
-    ut_ad(page_hash_get_low(id, id.fold()) == b);
-    n_lru++;
-    n_zip++;
-  }

   /* Check dirty blocks. */

   mutex_enter(&flush_list_mutex);
@@ -4565,7 +4528,7 @@ void buf_pool_t::validate()
       << " zip " << n_zip << ". Aborting...";
   }

-  ut_ad(UT_LIST_GET_LEN(LRU) == n_lru);
+  ut_ad(UT_LIST_GET_LEN(LRU) >= n_lru);

   if (curr_size == old_size
       && UT_LIST_GET_LEN(free) != n_free) {
@@ -4692,66 +4655,18 @@ void buf_pool_t::print()
 /** @return the number of latched pages in the buffer pool */
 ulint buf_get_latched_pages_number()
 {
-    buf_page_t* b;
-    ulint       i;
-    ulint       fixed_pages_number = 0;
+    ulint       fixed_pages_number= 0;

     mutex_enter(&buf_pool.mutex);

-    auto chunk = buf_pool.chunks;
-
-    for (i = buf_pool.n_chunks; i--; chunk++) {
-        buf_block_t* block= chunk->blocks;
-
-        for (auto j= chunk->size; j--; block++) {
-            if (block->page.state() == BUF_BLOCK_FILE_PAGE
-                && (block->page.buf_fix_count()
-                    || block->page.io_fix() != BUF_IO_NONE)) {
-                fixed_pages_number++;
-            }
-        }
-    }
-
-    /* Traverse the lists of clean and dirty compressed-only blocks. */
-    for (b = UT_LIST_GET_FIRST(buf_pool.zip_clean); b;
-         b = UT_LIST_GET_NEXT(list, b)) {
-        ut_a(b->state() == BUF_BLOCK_ZIP_PAGE);
-        ut_a(!b->oldest_modification());
-        ut_a(b->io_fix() != BUF_IO_WRITE);
-
-        if (b->buf_fix_count() || b->io_fix() != BUF_IO_NONE) {
-            fixed_pages_number++;
-        }
-    }
-
-    mutex_enter(&buf_pool.flush_list_mutex);
-    for (b = UT_LIST_GET_FIRST(buf_pool.flush_list); b;
-         b = UT_LIST_GET_NEXT(list, b)) {
-        ut_ad(b->oldest_modification());
-
-        switch (b->state()) {
-        case BUF_BLOCK_ZIP_PAGE:
-            if (b->buf_fix_count() || b->io_fix() != BUF_IO_NONE) {
-                fixed_pages_number++;
-            }
-            continue;
-        case BUF_BLOCK_FILE_PAGE:
-            /* uncompressed page */
-            continue;
-        case BUF_BLOCK_NOT_USED:
-        case BUF_BLOCK_MEMORY:
-        case BUF_BLOCK_REMOVE_HASH:
-            break;
-        }
-        ut_error;
-    }
+    for (buf_page_t *b= UT_LIST_GET_FIRST(buf_pool.LRU); b;
+         b= UT_LIST_GET_NEXT(LRU, b))
+        if (b->in_file() && (b->buf_fix_count() || b->io_fix() != BUF_IO_NONE))
+            fixed_pages_number++;

-    mutex_exit(&buf_pool.flush_list_mutex);
     mutex_exit(&buf_pool.mutex);

-    return(fixed_pages_number);
+    return fixed_pages_number;
 }

 #endif /* UNIV_DEBUG */
@@ -440,12 +440,6 @@ void buf_flush_remove(buf_page_t* bpage)
     because we assert on it in buf_flush_block_cmp(). */
     bpage->clear_oldest_modification();

-#ifdef UNIV_DEBUG
-    if (bpage->state() == BUF_BLOCK_ZIP_PAGE) {
-        buf_LRU_insert_zip_clean(bpage);
-    }
-#endif /* UNIV_DEBUG */
-
     buf_pool.stat.flush_list_bytes -= bpage->physical_size();

 #ifdef UNIV_DEBUG
@@ -455,40 +455,6 @@ void buf_LRU_flush_or_remove_pages(ulint id, bool flush, ulint first)
     }
 }

-#ifdef UNIV_DEBUG
-/********************************************************************//**
-Insert a compressed block into buf_pool.zip_clean in the LRU order. */
-void
-buf_LRU_insert_zip_clean(
-/*=====================*/
-    buf_page_t* bpage)  /*!< in: pointer to the block in question */
-{
-    ut_ad(mutex_own(&buf_pool.mutex));
-    ut_ad(bpage->state() == BUF_BLOCK_ZIP_PAGE);
-    ut_ad(!bpage->oldest_modification());
-
-    /* Find the first successor of bpage in the LRU list
-    that is in the zip_clean list. */
-    buf_page_t* b = bpage;
-
-    do {
-        b = UT_LIST_GET_NEXT(LRU, b);
-    } while (b && (b->state() != BUF_BLOCK_ZIP_PAGE
-                   || b->oldest_modification()));
-
-    /* Insert bpage before b, i.e., after the predecessor of b. */
-    if (b != NULL) {
-        b = UT_LIST_GET_PREV(list, b);
-    }
-
-    if (b != NULL) {
-        UT_LIST_INSERT_AFTER(buf_pool.zip_clean, b, bpage);
-    } else {
-        UT_LIST_ADD_FIRST(buf_pool.zip_clean, bpage);
-    }
-}
-#endif /* UNIV_DEBUG */
-
 /** Try to free an uncompressed page of a compressed block from the unzip
 LRU list. The compressed page is preserved, and it need not be clean.
 @param[in]  scan_all  true=scan the whole list;
@@ -1272,12 +1238,7 @@ bool buf_LRU_free_page(buf_page_t *bpage, bool zip)
       buf_LRU_add_block(b, b->old);
     }

-    if (!b->oldest_modification()) {
-#ifdef UNIV_DEBUG
-      buf_LRU_insert_zip_clean(b);
-#endif /* UNIV_DEBUG */
-    } else {
-      /* Relocate on buf_pool.flush_list. */
+    if (b->oldest_modification()) {
       buf_flush_relocate_on_flush_list(bpage, b);
     }
@@ -1480,9 +1441,6 @@ static bool buf_LRU_block_remove_hashed(buf_page_t *bpage, const page_id_t id,
     ut_a(bpage->zip.ssize);
     ut_ad(!bpage->oldest_modification());

-#ifdef UNIV_DEBUG
-    UT_LIST_REMOVE(buf_pool.zip_clean, bpage);
-#endif /* UNIV_DEBUG */
-
     hash_lock->write_unlock();
     buf_pool_mutex_exit_forbid();
@@ -237,9 +237,6 @@ static buf_page_t* buf_page_init_for_read(ulint mode, const page_id_t page_id,
     /* The block must be put to the LRU list, to the old blocks.
     The zip size is already set into the page zip */
     buf_LRU_add_block(bpage, true/* to old blocks */);
-#ifdef UNIV_DEBUG
-    buf_LRU_insert_zip_clean(bpage);
-#endif /* UNIV_DEBUG */
   }

   mutex_exit(&buf_pool.mutex);
@@ -818,15 +818,11 @@ class buf_page_t
   state() == BUF_BLOCK_NOT_USED: buf_pool.free or buf_pool.withdraw

-  state() == BUF_BLOCK_FILE_PAGE ||
-  (state() == BUF_BLOCK_ZIP_PAGE && !oldest_modification()):
+  in_file() && oldest_modification():
   buf_pool.flush_list (protected by buf_pool.flush_list_mutex)

-  state() == BUF_BLOCK_ZIP_PAGE && !oldest_modification(): buf_pool.zip_clean
-
-  The contents is undefined if
-  !oldest_modification() && state() == BUF_BLOCK_FILE_PAGE,
-  or if state() is not any of the above. */
+  The contents is undefined if in_file() && !oldest_modification(),
+  or if state() is BUF_BLOCK_MEMORY or BUF_BLOCK_REMOVE_HASH. */

   UT_LIST_NODE_T(buf_page_t) list;

 private:
@@ -2024,22 +2020,11 @@ class buf_pool_t
           unzip_LRU list */
   /* @} */
   /** @name Buddy allocator fields
   The buddy allocator is used for allocating compressed page
   frames and buf_page_t descriptors of blocks that exist
   in the buffer pool only in compressed form. */
   /* @{ */
-#ifdef UNIV_DEBUG
-  /** unmodified ROW_FORMAT=COMPRESSED pages;
-  protected by buf_pool.mutex */
-  UT_LIST_BASE_NODE_T(buf_page_t) zip_clean;
-#endif /* UNIV_DEBUG */
-  UT_LIST_BASE_NODE_T(buf_buddy_free_t) zip_free[BUF_BUDDY_SIZES_MAX];
-          /*!< buddy free lists */
+  /** free ROW_FORMAT=COMPRESSED page frames */
+  UT_LIST_BASE_NODE_T(buf_buddy_free_t) zip_free[BUF_BUDDY_SIZES_MAX];
 #if BUF_BUDDY_LOW > UNIV_ZIP_SIZE_MIN
 # error "BUF_BUDDY_LOW > UNIV_ZIP_SIZE_MIN"
 #endif
   /* @} */

   /** Sentinels to detect if pages are read into the buffer pool while
   a delete-buffering operation is pending. Protected by mutex. */
@@ -47,15 +47,6 @@ These are low-level functions
 @param[in]  first  first page to be flushed or evicted */
 void buf_LRU_flush_or_remove_pages(ulint id, bool flush, ulint first = 0);

-#ifdef UNIV_DEBUG
-/********************************************************************//**
-Insert a compressed block into buf_pool.zip_clean in the LRU order. */
-void
-buf_LRU_insert_zip_clean(
-/*=====================*/
-    buf_page_t* bpage); /*!< in: pointer to the block in question */
-#endif /* UNIV_DEBUG */
-
 /** Try to free a block. If bpage is a descriptor of a compressed-only
 ROW_FORMAT=COMPRESSED page, the buf_page_t object will be freed as well.
 The caller must hold buf_pool.mutex.