Commit ac694c10 authored by marko

branches/zip: Move the fields related to the least-recently-used algorithm
of the buffer pool from buf_block_t to buf_page_t.  Replace some buf_block_t*
parameters with buf_page_t*.  Add accessor functions.
parent cf7b6e91
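
The heart of the change is that buf_block_t embeds a buf_page_t (block->page): the LRU, io_fix, accessed, old and buf_fix_count bookkeeping moves into that embedded struct, callers switch to buf_page_* accessors, and the buf_block_* accessors become thin wrappers around them. The header hunks below declare these accessors but do not show every body, so the following is only a sketch of the wrapper pattern, assuming the obvious one-line implementations for the io_fix pair:

/* Sketch only: the io_fix accessors declared in buf0buf.h below,
with the one-line bodies one would expect.  enum buf_io_fix is
assumed to provide BUF_IO_NONE, BUF_IO_READ and BUF_IO_WRITE. */

UNIV_INLINE
enum buf_io_fix
buf_page_get_io_fix(
	const buf_page_t*	bpage)	/* in: pointer to the control block */
{
	return((enum buf_io_fix) bpage->io_fix);
}

UNIV_INLINE
enum buf_io_fix
buf_block_get_io_fix(
	const buf_block_t*	block)	/* in: pointer to the control block */
{
	return(buf_page_get_io_fix(&block->page));
}

UNIV_INLINE
void
buf_page_set_io_fix(
	buf_page_t*	bpage,	/* in/out: control block */
	enum buf_io_fix	io_fix)	/* in: io_fix state */
{
	bpage->io_fix = io_fix;
}

UNIV_INLINE
void
buf_block_set_io_fix(
	buf_block_t*	block,	/* in/out: control block */
	enum buf_io_fix	io_fix)	/* in: io_fix state */
{
	buf_page_set_io_fix(&block->page, io_fix);
}
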
......@@ -907,7 +907,7 @@ btr_search_guess_on_hash(
if (UNIV_LIKELY(!has_search_latch)
&& buf_block_peek_if_too_old(block)) {
buf_page_make_young(block);
buf_page_make_young(&block->page);
}
/* Increment the page get statistics though we did not really
......@@ -985,7 +985,7 @@ btr_search_drop_page_hash_index(
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
|| rw_lock_own(&(block->lock), RW_LOCK_EX)
|| (block->buf_fix_count == 0));
|| (block->page.buf_fix_count == 0));
#endif /* UNIV_SYNC_DEBUG */
n_fields = block->curr_n_fields;
......
......@@ -597,8 +597,9 @@ buf_block_init(
{
block->frame = frame;
block->buf_fix_count = 0;
block->io_fix = 0;
block->page.state = BUF_BLOCK_NOT_USED;
block->page.buf_fix_count = 0;
block->page.io_fix = BUF_IO_NONE;
block->modify_clock = 0;
......@@ -612,11 +613,10 @@ buf_block_init(
block->in_free_list = FALSE;
#ifdef UNIV_DEBUG
block->in_LRU_list = FALSE;
block->page.in_LRU_list = FALSE;
block->n_pointers = 0;
#endif /* UNIV_DEBUG */
page_zip_des_init(&block->page.zip);
block->page.state = BUF_BLOCK_NOT_USED;
mutex_create(&block->mutex, SYNC_BUF_BLOCK);
......@@ -728,7 +728,7 @@ buf_chunk_not_freed(
mutex_enter(&block->mutex);
if (buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE
&& !buf_flush_ready_for_replace(block)) {
&& !buf_flush_ready_for_replace(&block->page)) {
mutex_exit(&block->mutex);
return(block);
......@@ -791,7 +791,7 @@ buf_chunk_free(
ut_a(buf_block_get_state(block) == BUF_BLOCK_NOT_USED);
ut_a(!block->page.zip.data);
ut_ad(!block->in_LRU_list);
ut_ad(!block->page.in_LRU_list);
/* Remove the block from the free list. */
ut_a(block->in_free_list);
UT_LIST_REMOVE(free, buf_pool->free, block);
......@@ -977,11 +977,11 @@ buf_pool_shrink(
Therefore, we have to always retry,
even if !dirty && !nonfree. */
if (!buf_flush_ready_for_replace(block)) {
if (!buf_flush_ready_for_replace(&block->page)) {
buf_LRU_make_block_old(block);
buf_LRU_make_block_old(&block->page);
dirty++;
} else if (!buf_LRU_free_block(block)) {
} else if (!buf_LRU_free_block(&block->page)) {
nonfree++;
}
......@@ -1145,7 +1145,7 @@ UNIV_INLINE
void
buf_block_make_young(
/*=================*/
buf_block_t* block) /* in: block to make younger */
buf_page_t* bpage) /* in: block to make younger */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(!mutex_own(&(buf_pool->mutex)));
......@@ -1154,14 +1154,15 @@ buf_block_make_young(
/* Note that we read freed_page_clock's without holding any mutex:
this is allowed since the result is used only in heuristics */
if (buf_pool->freed_page_clock >= block->freed_page_clock
+ 1 + (buf_pool->curr_size / 4)) {
if (buf_pool->freed_page_clock
>= buf_page_get_freed_page_clock(bpage)
+ 1 + (buf_pool->curr_size / 4)) {
mutex_enter(&buf_pool->mutex);
/* There has been freeing activity in the LRU list:
best to move to the head of the LRU list */
buf_LRU_make_block_young(block);
buf_LRU_make_block_young(bpage);
mutex_exit(&buf_pool->mutex);
}
}
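
To make the heuristic concrete: buf_pool->freed_page_clock counts pages evicted from the buffer pool, and the per-page counter read by buf_page_get_freed_page_clock() records that value as of the last time the page was moved to the head of the LRU list. The condition above therefore fires only after roughly a quarter of the pool has been evicted since then. A worked example with an assumed pool size:

/* Illustration only; 65536 is an assumed value of buf_pool->curr_size.
The block is made young again only when

	buf_pool->freed_page_clock
		>= buf_page_get_freed_page_clock(bpage) + 1 + 65536 / 4

i.e. after at least 16385 evictions since the page was last moved to
the head of the LRU list; otherwise buf_block_make_young() returns
without even acquiring buf_pool->mutex. */
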
......@@ -1174,13 +1175,13 @@ the buffer pool. */
void
buf_page_make_young(
/*================*/
buf_block_t* block) /* in: buffer block of a file page */
buf_page_t* bpage) /* in: buffer block of a file page */
{
mutex_enter(&(buf_pool->mutex));
ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
ut_a(buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE);
buf_LRU_make_block_young(block);
buf_LRU_make_block_young(bpage);
mutex_exit(&(buf_pool->mutex));
}
......@@ -1426,19 +1427,14 @@ buf_page_get_gen(
ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
must_read = FALSE;
must_read = buf_block_get_io_fix(block) == BUF_IO_READ;
if (block->io_fix == BUF_IO_READ) {
must_read = TRUE;
if (mode == BUF_GET_IF_IN_POOL) {
/* The page is only being read to buffer */
mutex_exit(&buf_pool->mutex);
mutex_exit(&block->mutex);
if (must_read && mode == BUF_GET_IF_IN_POOL) {
/* The page is only being read to buffer */
mutex_exit(&buf_pool->mutex);
mutex_exit(&block->mutex);
return(NULL);
}
return(NULL);
}
buf_block_buf_fix_inc(block, file, line);
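
Because the removed and added lines are interleaved in the hunk above, here is the rewritten check assembled from the new lines only:

must_read = buf_block_get_io_fix(block) == BUF_IO_READ;

if (must_read && mode == BUF_GET_IF_IN_POOL) {
	/* The page is only being read to buffer */

	mutex_exit(&buf_pool->mutex);
	mutex_exit(&block->mutex);

	return(NULL);
}
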
......@@ -1446,13 +1442,13 @@ buf_page_get_gen(
/* Check if this is the first access to the page */
accessed = block->accessed;
accessed = buf_page_is_accessed(&block->page);
block->accessed = TRUE;
buf_page_set_accessed(&block->page, TRUE);
mutex_exit(&block->mutex);
buf_block_make_young(block);
buf_block_make_young(&block->page);
#ifdef UNIV_DEBUG_FILE_ACCESSES
ut_a(!block->page.file_page_was_freed);
......@@ -1460,7 +1456,7 @@ buf_page_get_gen(
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
ut_a(++buf_dbg_counter % 5771 || buf_validate());
ut_a(block->buf_fix_count > 0);
ut_a(block->page.buf_fix_count > 0);
ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
......@@ -1479,7 +1475,7 @@ buf_page_get_gen(
if (!success) {
mutex_enter(&block->mutex);
block->buf_fix_count--;
block->page.buf_fix_count--;
mutex_exit(&block->mutex);
#ifdef UNIV_SYNC_DEBUG
......@@ -1495,18 +1491,15 @@ buf_page_get_gen(
completes */
for (;;) {
mutex_enter(&block->mutex);
enum buf_io_fix io_fix;
if (block->io_fix == BUF_IO_READ) {
mutex_enter(&block->mutex);
io_fix = buf_block_get_io_fix(block);
mutex_exit(&block->mutex);
mutex_exit(&block->mutex);
if (io_fix == BUF_IO_READ) {
os_thread_sleep(WAIT_FOR_READ);
} else {
mutex_exit(&block->mutex);
break;
}
}
}
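
Likewise, the wait-for-read loop, assembled from the new lines of this hunk:

for (;;) {
	enum buf_io_fix	io_fix;

	mutex_enter(&block->mutex);

	io_fix = buf_block_get_io_fix(block);

	mutex_exit(&block->mutex);

	if (io_fix == BUF_IO_READ) {

		os_thread_sleep(WAIT_FOR_READ);
	} else {
		break;
	}
}
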
......@@ -1573,12 +1566,12 @@ buf_page_optimistic_get_func(
}
buf_block_buf_fix_inc(block, file, line);
accessed = block->accessed;
block->accessed = TRUE;
accessed = buf_page_is_accessed(&block->page);
buf_page_set_accessed(&block->page, TRUE);
mutex_exit(&block->mutex);
buf_block_make_young(block);
buf_block_make_young(&block->page);
/* Check if this is the first access to the page */
......@@ -1600,7 +1593,7 @@ buf_page_optimistic_get_func(
if (UNIV_UNLIKELY(!success)) {
mutex_enter(&block->mutex);
block->buf_fix_count--;
block->page.buf_fix_count--;
mutex_exit(&block->mutex);
......@@ -1622,7 +1615,7 @@ buf_page_optimistic_get_func(
mutex_enter(&block->mutex);
block->buf_fix_count--;
block->page.buf_fix_count--;
mutex_exit(&block->mutex);
......@@ -1636,7 +1629,7 @@ buf_page_optimistic_get_func(
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
ut_a(++buf_dbg_counter % 5771 || buf_validate());
ut_a(block->buf_fix_count > 0);
ut_a(block->page.buf_fix_count > 0);
ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
......@@ -1705,7 +1698,7 @@ buf_page_get_known_nowait(
mutex_exit(&block->mutex);
if (mode == BUF_MAKE_YOUNG) {
buf_block_make_young(block);
buf_block_make_young(&block->page);
}
ut_ad(!ibuf_inside() || (mode == BUF_KEEP_OLD));
......@@ -1723,7 +1716,7 @@ buf_page_get_known_nowait(
if (!success) {
mutex_enter(&block->mutex);
block->buf_fix_count--;
block->page.buf_fix_count--;
mutex_exit(&block->mutex);
......@@ -1738,7 +1731,7 @@ buf_page_get_known_nowait(
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
ut_a(++buf_dbg_counter % 5771 || buf_validate());
ut_a(block->buf_fix_count > 0);
ut_a(block->page.buf_fix_count > 0);
ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
#ifdef UNIV_DEBUG_FILE_ACCESSES
......@@ -1779,9 +1772,10 @@ buf_page_init_for_backup_restore(
block->newest_modification = 0;
block->oldest_modification = 0;
block->accessed = FALSE;
block->buf_fix_count = 0;
block->io_fix = 0;
block->page.state = BUF_BLOCK_FILE_PAGE;
block->page.accessed = FALSE;
block->page.buf_fix_count = 0;
block->page.io_fix = BUF_IO_NONE;
block->n_hash_helps = 0;
block->is_hashed = FALSE;
......@@ -1789,7 +1783,6 @@ buf_page_init_for_backup_restore(
block->n_bytes = 0;
block->left_side = TRUE;
page_zip_des_init(&block->page);
block->page.zip.state = BUF_BLOCK_FILE_PAGE;
/* We assume that block->page.data has been allocated
with zip_size == UNIV_PAGE_SIZE. */
......@@ -1847,14 +1840,14 @@ buf_page_init(
HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
buf_page_address_fold(space, offset), &block->page);
block->freed_page_clock = 0;
block->page.freed_page_clock = 0;
block->page.newest_modification = 0;
block->page.oldest_modification = 0;
block->accessed = FALSE;
block->buf_fix_count = 0;
block->io_fix = 0;
buf_page_set_accessed(&block->page, FALSE);
block->page.buf_fix_count = 0;
buf_block_set_io_fix(block, BUF_IO_NONE);
block->n_hash_helps = 0;
block->is_hashed = FALSE;
......@@ -1955,9 +1948,9 @@ buf_page_init_for_read(
/* The block must be put to the LRU list, to the old blocks */
buf_LRU_add_block(block, TRUE); /* TRUE == to old blocks */
buf_LRU_add_block(&block->page, TRUE/* to old blocks */);
block->io_fix = BUF_IO_READ;
buf_page_set_io_fix(&block->page, BUF_IO_READ);
buf_pool->n_pend_reads++;
......@@ -2044,7 +2037,7 @@ buf_page_create(
buf_page_init(space, offset, block);
/* The block must be put to the LRU list */
buf_LRU_add_block(block, FALSE);
buf_LRU_add_block(&block->page, FALSE);
buf_block_buf_fix_inc(block, __FILE__, __LINE__);
buf_pool->n_pages_created++;
......@@ -2053,7 +2046,7 @@ buf_page_create(
mtr_memo_push(mtr, block, MTR_MEMO_BUF_FIX);
block->accessed = TRUE;
buf_page_set_accessed(&block->page, TRUE);
mutex_exit(&block->mutex);
......@@ -2098,19 +2091,19 @@ buf_page_io_complete(
/*=================*/
buf_block_t* block) /* in: pointer to the block in question */
{
ulint io_type;
enum buf_io_fix io_type;
ut_ad(block);
ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
/* We do not need protect block->io_fix here by block->mutex to read
/* We do not need protect io_fix here by mutex to read
it because this is the only function where we can change the value
from BUF_IO_READ or BUF_IO_WRITE to some other value, and our code
ensures that this is the only thread that handles the i/o for this
block. */
io_type = block->io_fix;
io_type = buf_block_get_io_fix(block);
if (io_type == BUF_IO_READ) {
ulint read_page_no;
......@@ -2265,7 +2258,7 @@ buf_page_io_complete(
removes the newest lock debug record, without checking the thread
id. */
block->io_fix = 0;
buf_page_set_io_fix(&block->page, BUF_IO_NONE);
if (io_type == BUF_IO_READ) {
/* NOTE that the call to ibuf may have moved the ownership of
......@@ -2289,7 +2282,7 @@ buf_page_io_complete(
/* Write means a flush operation: call the completion
routine in the flush system */
buf_flush_write_complete(block);
buf_flush_write_complete(&block->page);
rw_lock_s_unlock_gen(&(block->lock), BUF_IO_WRITE);
......@@ -2388,14 +2381,18 @@ buf_validate(void)
n_page++;
#ifdef UNIV_IBUF_DEBUG
ut_a((block->io_fix == BUF_IO_READ)
ut_a(buf_page_get_io_fix(&block->page)
== BUF_IO_READ
|| !ibuf_count_get(buf_block_get_space(
block),
buf_block_get_page_no(
block)));
#endif
if (block->io_fix == BUF_IO_WRITE) {
switch (buf_page_get_io_fix(&block->page)) {
case BUF_IO_NONE:
break;
case BUF_IO_WRITE:
switch (buf_page_get_flush_type(
&block->page)) {
case BUF_FLUSH_LRU:
......@@ -2414,10 +2411,13 @@ buf_validate(void)
ut_error;
}
} else if (block->io_fix == BUF_IO_READ) {
break;
case BUF_IO_READ:
ut_a(rw_lock_is_locked(&block->lock,
RW_LOCK_EX));
break;
}
n_lru++;
......@@ -2611,7 +2611,9 @@ buf_get_latched_pages_number(void)
mutex_enter(&block->mutex);
if (block->buf_fix_count != 0 || block->io_fix != 0) {
if (block->page.buf_fix_count != 0
|| buf_page_get_io_fix(&block->page)
!= BUF_IO_NONE) {
fixed_pages_number++;
}
......
......@@ -113,27 +113,31 @@ ibool
buf_flush_ready_for_replace(
/*========================*/
/* out: TRUE if can replace immediately */
buf_block_t* block) /* in: buffer control block, must be in state
BUF_BLOCK_FILE_PAGE and in the LRU list */
buf_page_t* bpage) /* in: buffer control block, must be in state
BUF_BLOCK_FILE_PAGE or BUF_BLOCK_ZIP_PAGE
and in the LRU list */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
ut_ad(mutex_own(&block->mutex));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
#endif /* UNIV_SYNC_DEBUG */
if (UNIV_UNLIKELY(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE)) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Error: buffer block state %lu"
" in the LRU list!\n",
(ulong) buf_block_get_state(block));
ut_print_buf(stderr, block, sizeof(buf_block_t));
return(FALSE);
ut_ad(bpage->in_LRU_list);
if (UNIV_LIKELY(buf_page_in_file(bpage))) {
return(bpage->oldest_modification == 0
&& buf_page_get_io_fix(bpage) == BUF_IO_NONE
&& bpage->buf_fix_count == 0);
}
return(block->page.oldest_modification == 0
&& block->buf_fix_count == 0
&& block->io_fix == 0);
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Error: buffer block state %lu"
" in the LRU list!\n",
(ulong) buf_page_get_state(bpage));
ut_print_buf(stderr, bpage, sizeof(buf_page_t));
return(FALSE);
}
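
Note the caller-side protocol implied by the assertions: buf_pool->mutex must already be held, and the page mutex is obtained through buf_page_get_mutex() instead of a hard-coded &block->mutex. A condensed sketch of how the LRU scans later in this commit (buf_flush_LRU_recommendation(), buf_LRU_search_and_free_block()) use the predicate; the distance and free-margin bookkeeping of the real loops is omitted:

/* Sketch, condensed from buf_flush_LRU_recommendation() below. */
ulint		n_replaceable = 0;
buf_page_t*	bpage;

mutex_enter(&(buf_pool->mutex));

bpage = UT_LIST_GET_LAST(buf_pool->LRU);

while (bpage != NULL) {
	mutex_t*	block_mutex = buf_page_get_mutex(bpage);

	mutex_enter(block_mutex);

	if (buf_flush_ready_for_replace(bpage)) {
		/* not modified, no pending I/O, not bufferfixed */
		n_replaceable++;
	}

	mutex_exit(block_mutex);

	bpage = UT_LIST_GET_PREV(LRU, bpage);
}

mutex_exit(&(buf_pool->mutex));
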
/************************************************************************
......@@ -143,22 +147,24 @@ ibool
buf_flush_ready_for_flush(
/*======================*/
/* out: TRUE if can flush immediately */
buf_block_t* block, /* in: buffer control block, must be in state
buf_page_t* bpage, /* in: buffer control block, must be in state
BUF_BLOCK_FILE_PAGE */
enum buf_flush flush_type)/* in: BUF_FLUSH_LRU or BUF_FLUSH_LIST */
{
ut_a(buf_page_in_file(bpage));
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
ut_ad(mutex_own(&(block->mutex)));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
#endif /* UNIV_SYNC_DEBUG */
ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
if (block->page.oldest_modification != 0 && block->io_fix == 0) {
if (bpage->oldest_modification != 0
&& buf_page_get_io_fix(bpage) == BUF_IO_NONE) {
if (flush_type != BUF_FLUSH_LRU) {
return(TRUE);
} else if (block->buf_fix_count == 0) {
} else if (bpage->buf_fix_count == 0) {
/* If we are flushing the LRU list, to avoid deadlocks
we require the block not to be bufferfixed, and hence
......@@ -177,30 +183,30 @@ Updates the flush system data structures when a write is completed. */
void
buf_flush_write_complete(
/*=====================*/
buf_block_t* block) /* in: pointer to the block in question */
buf_page_t* bpage) /* in: pointer to the block in question */
{
enum buf_flush flush_type;
ut_ad(block);
ut_ad(bpage);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
ut_a(buf_page_in_file(bpage));
block->page.oldest_modification = 0;
bpage->oldest_modification = 0;
UT_LIST_REMOVE(flush_list, buf_pool->flush_list, &(block->page));
UT_LIST_REMOVE(flush_list, buf_pool->flush_list, bpage);
ut_d(UT_LIST_VALIDATE(flush_list, buf_page_t, buf_pool->flush_list));
flush_type = buf_page_get_flush_type(&block->page);
flush_type = buf_page_get_flush_type(bpage);
buf_pool->n_flush[flush_type]--;
if (flush_type == BUF_FLUSH_LRU) {
/* Put the block to the end of the LRU list to wait to be
moved to the free list */
buf_LRU_make_block_old(block);
buf_LRU_make_block_old(bpage);
buf_pool->LRU_flush_ended++;
}
......@@ -409,8 +415,8 @@ buf_flush_buffered_writes(void)
" the doublewrite buffer.\n"
"InnoDB: Page buf fix count %lu,"
" io fix %lu, state %lu\n",
(ulong)block->buf_fix_count,
(ulong)block->io_fix,
(ulong)block->page.buf_fix_count,
(ulong)buf_block_get_io_fix(block),
(ulong)buf_block_get_state(block));
}
......@@ -652,9 +658,9 @@ buf_flush_try_page(
mutex_enter(&block->mutex);
if (flush_type == BUF_FLUSH_LIST
&& buf_flush_ready_for_flush(block, flush_type)) {
&& buf_flush_ready_for_flush(&block->page, flush_type)) {
block->io_fix = BUF_IO_WRITE;
buf_block_set_io_fix(block, BUF_IO_WRITE);
buf_page_set_flush_type(&block->page, flush_type);
......@@ -671,7 +677,7 @@ buf_flush_try_page(
not wait for any latch, as we may end up in a deadlock:
if buf_fix_count == 0, then we know we need not wait */
if (block->buf_fix_count == 0) {
if (block->page.buf_fix_count == 0) {
rw_lock_s_lock_gen(&(block->lock), BUF_IO_WRITE);
locked = TRUE;
......@@ -700,7 +706,7 @@ buf_flush_try_page(
return(1);
} else if (flush_type == BUF_FLUSH_LRU
&& buf_flush_ready_for_flush(block, flush_type)) {
&& buf_flush_ready_for_flush(&block->page, flush_type)) {
/* VERY IMPORTANT:
Because any thread may call the LRU flush, even when owning
......@@ -710,7 +716,7 @@ buf_flush_try_page(
the page not to be bufferfixed (in function
..._ready_for_flush). */
block->io_fix = BUF_IO_WRITE;
buf_block_set_io_fix(block, BUF_IO_WRITE);
buf_page_set_flush_type(&block->page, flush_type);
......@@ -735,9 +741,9 @@ buf_flush_try_page(
return(1);
} else if (flush_type == BUF_FLUSH_SINGLE_PAGE
&& buf_flush_ready_for_flush(block, flush_type)) {
&& buf_flush_ready_for_flush(&block->page, flush_type)) {
block->io_fix = BUF_IO_WRITE;
buf_block_set_io_fix(block, BUF_IO_WRITE);
buf_page_set_flush_type(&block->page, flush_type);
......@@ -823,7 +829,7 @@ buf_flush_try_neighbors(
continue;
} else if (flush_type == BUF_FLUSH_LRU && i != offset
&& !block->old) {
&& !buf_page_is_old(&block->page)) {
/* We avoid flushing 'non-old' blocks in an LRU flush,
because the flushed blocks are soon freed */
......@@ -833,8 +839,8 @@ buf_flush_try_neighbors(
mutex_enter(&block->mutex);
if (buf_flush_ready_for_flush(block, flush_type)
&& (i == offset || block->buf_fix_count == 0)) {
if (buf_flush_ready_for_flush(&block->page, flush_type)
&& (i == offset || !block->page.buf_fix_count)) {
/* We only try to flush those
neighbors != offset where the buf fix count is
zero, as we then know that we probably can
......@@ -895,7 +901,7 @@ buf_flush_batch(
(if their number does not exceed
min_n), otherwise ignored */
{
buf_block_t* block;
buf_page_t* bpage;
ulint page_count = 0;
ulint old_page_count;
ulint space;
......@@ -933,13 +939,13 @@ buf_flush_batch(
block to be flushed. */
if (flush_type == BUF_FLUSH_LRU) {
block = UT_LIST_GET_LAST(buf_pool->LRU);
bpage = UT_LIST_GET_LAST(buf_pool->LRU);
} else {
ut_ad(flush_type == BUF_FLUSH_LIST);
block = UT_LIST_GET_LAST(buf_pool->flush_list);
if (!block
|| block->page.oldest_modification >= lsn_limit) {
bpage = UT_LIST_GET_LAST(buf_pool->flush_list);
if (!bpage
|| bpage->oldest_modification >= lsn_limit) {
/* We have flushed enough */
break;
......@@ -954,19 +960,21 @@ buf_flush_batch(
during the flushing and we cannot safely preserve within this
function a pointer to a block in the list! */
while ((block != NULL) && !found) {
ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
while ((bpage != NULL) && !found) {
mutex_t* block_mutex = buf_page_get_mutex(bpage);
mutex_enter(&block->mutex);
ut_a(buf_page_in_file(bpage));
mutex_enter(block_mutex);
if (buf_flush_ready_for_flush(block, flush_type)) {
if (buf_flush_ready_for_flush(bpage, flush_type)) {
found = TRUE;
space = buf_block_get_space(block);
offset = buf_block_get_page_no(block);
space = buf_page_get_space(bpage);
offset = buf_page_get_page_no(bpage);
mutex_exit(&block->mutex);
mutex_exit(&(buf_pool->mutex));
mutex_exit(block_mutex);
old_page_count = page_count;
......@@ -982,16 +990,15 @@ buf_flush_batch(
} else if (flush_type == BUF_FLUSH_LRU) {
mutex_exit(&block->mutex);
mutex_exit(block_mutex);
block = UT_LIST_GET_PREV(LRU, block);
bpage = UT_LIST_GET_PREV(LRU, bpage);
} else {
ut_ad(flush_type == BUF_FLUSH_LIST);
mutex_exit(&block->mutex);
mutex_exit(block_mutex);
block = UT_LIST_GET_PREV(flush_list,
(&block->page));
bpage = UT_LIST_GET_PREV(flush_list, bpage);
}
}
......@@ -1058,7 +1065,7 @@ buf_flush_LRU_recommendation(void)
/* out: number of blocks which should be flushed
from the end of the LRU list */
{
buf_block_t* block;
buf_page_t* bpage;
ulint n_replaceable;
ulint distance = 0;
......@@ -1066,24 +1073,26 @@ buf_flush_LRU_recommendation(void)
n_replaceable = UT_LIST_GET_LEN(buf_pool->free);
block = UT_LIST_GET_LAST(buf_pool->LRU);
bpage = UT_LIST_GET_LAST(buf_pool->LRU);
while ((block != NULL)
while ((bpage != NULL)
&& (n_replaceable < BUF_FLUSH_FREE_BLOCK_MARGIN
+ BUF_FLUSH_EXTRA_MARGIN)
&& (distance < BUF_LRU_FREE_SEARCH_LEN)) {
mutex_enter(&block->mutex);
mutex_t* block_mutex = buf_page_get_mutex(bpage);
if (buf_flush_ready_for_replace(block)) {
mutex_enter(block_mutex);
if (buf_flush_ready_for_replace(bpage)) {
n_replaceable++;
}
mutex_exit(&block->mutex);
mutex_exit(block_mutex);
distance++;
block = UT_LIST_GET_PREV(LRU, block);
bpage = UT_LIST_GET_PREV(LRU, bpage);
}
mutex_exit(&(buf_pool->mutex));
......
......@@ -54,7 +54,7 @@ static
void
buf_LRU_block_remove_hashed_page(
/*=============================*/
buf_block_t* block); /* in: block, must contain a file page and
buf_page_t* bpage); /* in: block, must contain a file page and
be in a state where it can be freed; there
may or may not be a hash index to the page */
/**********************************************************************
......@@ -63,7 +63,7 @@ static
void
buf_LRU_block_free_hashed_page(
/*===========================*/
buf_block_t* block); /* in: block, must contain a file page and
buf_page_t* block); /* in: block, must contain a file page and
be in a state where it can be freed */
/**********************************************************************
......@@ -75,7 +75,7 @@ buf_LRU_invalidate_tablespace(
/*==========================*/
ulint id) /* in: space id */
{
buf_block_t* block;
buf_page_t* bpage;
ulint page_no;
ibool all_freed;
......@@ -84,43 +84,45 @@ buf_LRU_invalidate_tablespace(
all_freed = TRUE;
block = UT_LIST_GET_LAST(buf_pool->LRU);
bpage = UT_LIST_GET_LAST(buf_pool->LRU);
while (block != NULL) {
buf_block_t* prev_block;
while (bpage != NULL) {
mutex_t* block_mutex = buf_page_get_mutex(bpage);
buf_page_t* prev_bpage;
mutex_enter(&block->mutex);
prev_block = UT_LIST_GET_PREV(LRU, block);
ut_a(buf_page_in_file(bpage));
ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
mutex_enter(block_mutex);
prev_bpage = UT_LIST_GET_PREV(LRU, bpage);
if (buf_block_get_space(block) == id
&& (block->buf_fix_count > 0 || block->io_fix != 0)) {
if (buf_page_get_space(bpage) == id) {
if (bpage->buf_fix_count > 0
|| buf_page_get_io_fix(bpage) != BUF_IO_NONE) {
/* We cannot remove this page during this scan yet;
maybe the system is currently reading it in, or
flushing the modifications to the file */
/* We cannot remove this page during
this scan yet; maybe the system is
currently reading it in, or flushing
the modifications to the file */
all_freed = FALSE;
all_freed = FALSE;
goto next_page;
}
goto next_page;
}
if (buf_block_get_space(block) == id) {
#ifdef UNIV_DEBUG
if (buf_debug_prints) {
fprintf(stderr,
"Dropping space %lu page %lu\n",
(ulong) buf_block_get_space(block),
(ulong) buf_block_get_page_no(block));
(ulong) buf_page_get_space(bpage),
(ulong) buf_page_get_page_no(bpage));
}
#endif
if (block->is_hashed) {
page_no = buf_block_get_page_no(block);
mutex_exit(&block->mutex);
if (buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE
&& ((buf_block_t*) bpage)->is_hashed) {
page_no = buf_page_get_page_no(bpage);
mutex_exit(&(buf_pool->mutex));
mutex_exit(block_mutex);
/* Note that the following call will acquire
an S-latch on the page */
......@@ -130,24 +132,24 @@ buf_LRU_invalidate_tablespace(
goto scan_again;
}
if (block->page.oldest_modification != 0) {
if (bpage->oldest_modification != 0) {
/* Remove from the flush list of modified
blocks */
block->page.oldest_modification = 0;
bpage->oldest_modification = 0;
UT_LIST_REMOVE(flush_list,
buf_pool->flush_list,
&(block->page));
bpage);
}
/* Remove from the LRU list */
buf_LRU_block_remove_hashed_page(block);
buf_LRU_block_free_hashed_page(block);
buf_LRU_block_remove_hashed_page(bpage);
buf_LRU_block_free_hashed_page(bpage);
}
next_page:
mutex_exit(&block->mutex);
block = prev_block;
mutex_exit(block_mutex);
bpage = prev_bpage;
}
mutex_exit(&(buf_pool->mutex));
......@@ -169,9 +171,9 @@ buf_LRU_get_recent_limit(void)
/*==========================*/
/* out: the limit; zero if could not determine it */
{
buf_block_t* block;
ulint len;
ulint limit;
const buf_page_t* bpage;
ulint len;
ulint limit;
mutex_enter(&(buf_pool->mutex));
......@@ -185,9 +187,9 @@ buf_LRU_get_recent_limit(void)
return(0);
}
block = UT_LIST_GET_FIRST(buf_pool->LRU);
bpage = UT_LIST_GET_FIRST(buf_pool->LRU);
limit = block->LRU_position - len / BUF_LRU_INITIAL_RATIO;
limit = buf_page_get_LRU_position(bpage) - len / BUF_LRU_INITIAL_RATIO;
mutex_exit(&(buf_pool->mutex));
......@@ -201,16 +203,19 @@ ibool
buf_LRU_free_block(
/*===============*/
/* out: TRUE if freed */
buf_block_t* block) /* in: block to be freed */
buf_page_t* bpage) /* in: block to be freed */
{
mutex_t* block_mutex = buf_page_get_mutex(bpage);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&buf_pool->mutex));
ut_ad(mutex_own(&block->mutex));
ut_ad(mutex_own(block_mutex));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(block->in_LRU_list);
ut_ad(buf_page_in_file(bpage));
ut_ad(bpage->in_LRU_list);
if (!buf_flush_ready_for_replace(block)) {
if (!buf_flush_ready_for_replace(bpage)) {
return(FALSE);
}
......@@ -218,26 +223,37 @@ buf_LRU_free_block(
#ifdef UNIV_DEBUG
if (buf_debug_prints) {
fprintf(stderr, "Putting space %lu page %lu to free list\n",
(ulong) buf_block_get_space(block),
(ulong) buf_block_get_page_no(block));
(ulong) buf_page_get_space(bpage),
(ulong) buf_page_get_page_no(bpage));
}
#endif /* UNIV_DEBUG */
buf_LRU_block_remove_hashed_page(block);
buf_LRU_block_remove_hashed_page(bpage);
mutex_exit(&(buf_pool->mutex));
mutex_exit(&block->mutex);
switch (buf_page_get_state(bpage)) {
case BUF_BLOCK_FILE_PAGE:
mutex_exit(&(buf_pool->mutex));
mutex_exit(block_mutex);
/* Remove possible adaptive hash index on the page */
/* Remove possible adaptive hash index on the page */
btr_search_drop_page_hash_index(block);
btr_search_drop_page_hash_index((buf_block_t*) bpage);
ut_a(bpage->buf_fix_count == 0);
ut_a(block->buf_fix_count == 0);
mutex_enter(&(buf_pool->mutex));
mutex_enter(block_mutex);
mutex_enter(&(buf_pool->mutex));
mutex_enter(&block->mutex);
buf_LRU_block_free_hashed_page(bpage);
break;
buf_LRU_block_free_hashed_page(block);
case BUF_BLOCK_ZIP_PAGE:
/* TODO: free page_zip */
break;
default:
ut_error;
break;
}
return(TRUE);
}
......@@ -257,26 +273,28 @@ buf_LRU_search_and_free_block(
of pages in the buffer pool] from the end
of the LRU list */
{
buf_block_t* block;
buf_page_t* bpage;
ulint distance = 0;
ibool freed;
mutex_enter(&(buf_pool->mutex));
freed = FALSE;
block = UT_LIST_GET_LAST(buf_pool->LRU);
bpage = UT_LIST_GET_LAST(buf_pool->LRU);
while (block != NULL) {
mutex_enter(&block->mutex);
freed = buf_LRU_free_block(block);
mutex_exit(&block->mutex);
while (bpage != NULL) {
mutex_t* block_mutex = buf_page_get_mutex(bpage);
mutex_enter(block_mutex);
freed = buf_LRU_free_block(bpage);
mutex_exit(block_mutex);
if (freed) {
break;
}
block = UT_LIST_GET_PREV(LRU, block);
bpage = UT_LIST_GET_PREV(LRU, bpage);
distance++;
if (n_iterations <= 10
......@@ -444,7 +462,7 @@ buf_LRU_get_free_block(
UT_LIST_REMOVE(free, buf_pool->free, block);
block->in_free_list = FALSE;
ut_a(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE);
ut_ad(!block->in_LRU_list);
ut_ad(!block->page.in_LRU_list);
if (buf_block_get_zip_size(block) != zip_size) {
page_zip_set_size(&block->page.zip, zip_size);
......@@ -584,12 +602,12 @@ buf_LRU_old_adjust_len(void)
buf_pool->LRU_old = UT_LIST_GET_PREV(
LRU, buf_pool->LRU_old);
buf_pool->LRU_old->old = TRUE;
buf_page_set_old(buf_pool->LRU_old, TRUE);
buf_pool->LRU_old_len++;
} else if (old_len > new_len + BUF_LRU_OLD_TOLERANCE) {
buf_pool->LRU_old->old = FALSE;
buf_page_set_old(buf_pool->LRU_old, FALSE);
buf_pool->LRU_old = UT_LIST_GET_NEXT(
LRU, buf_pool->LRU_old);
buf_pool->LRU_old_len--;
......@@ -609,7 +627,7 @@ void
buf_LRU_old_init(void)
/*==================*/
{
buf_block_t* block;
buf_page_t* bpage;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
......@@ -620,13 +638,12 @@ buf_LRU_old_init(void)
the adjust function to move the LRU_old pointer to the right
position */
block = UT_LIST_GET_FIRST(buf_pool->LRU);
bpage = UT_LIST_GET_FIRST(buf_pool->LRU);
while (block != NULL) {
ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
ut_ad(block->in_LRU_list);
block->old = TRUE;
block = UT_LIST_GET_NEXT(LRU, block);
while (bpage != NULL) {
ut_ad(bpage->in_LRU_list);
buf_page_set_old(bpage, TRUE);
bpage = UT_LIST_GET_NEXT(LRU, bpage);
}
buf_pool->LRU_old = UT_LIST_GET_FIRST(buf_pool->LRU);
......@@ -641,37 +658,38 @@ UNIV_INLINE
void
buf_LRU_remove_block(
/*=================*/
buf_block_t* block) /* in: control block */
buf_page_t* bpage) /* in: control block */
{
ut_ad(buf_pool);
ut_ad(block);
ut_ad(bpage);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
ut_ad(block->in_LRU_list);
ut_a(buf_page_in_file(bpage));
ut_ad(bpage->in_LRU_list);
/* If the LRU_old pointer is defined and points to just this block,
move it backward one step */
if (block == buf_pool->LRU_old) {
if (bpage == buf_pool->LRU_old) {
/* Below: the previous block is guaranteed to exist, because
the LRU_old pointer is only allowed to differ by the
tolerance value from strict 3/8 of the LRU list length. */
buf_pool->LRU_old = UT_LIST_GET_PREV(LRU, block);
buf_pool->LRU_old->old = TRUE;
buf_pool->LRU_old = UT_LIST_GET_PREV(LRU, bpage);
buf_page_set_old(buf_pool->LRU_old, TRUE);
buf_pool->LRU_old_len++;
ut_a(buf_pool->LRU_old);
}
/* Remove the block from the LRU list */
UT_LIST_REMOVE(LRU, buf_pool->LRU, block);
UT_LIST_REMOVE(LRU, buf_pool->LRU, bpage);
#ifdef UNIV_DEBUG
block->in_LRU_list = FALSE;
bpage->in_LRU_list = FALSE;
#endif /* UNIV_DEBUG */
/* If the LRU list is so short that LRU_old not defined, return */
......@@ -685,7 +703,7 @@ buf_LRU_remove_block(
ut_ad(buf_pool->LRU_old);
/* Update the LRU_old_len field if necessary */
if (block->old) {
if (buf_page_is_old(bpage)) {
buf_pool->LRU_old_len--;
}
......@@ -700,32 +718,32 @@ UNIV_INLINE
void
buf_LRU_add_block_to_end_low(
/*=========================*/
buf_block_t* block) /* in: control block */
buf_page_t* bpage) /* in: control block */
{
buf_block_t* last_block;
buf_page_t* last_bpage;
ut_ad(buf_pool);
ut_ad(block);
ut_ad(bpage);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
ut_a(buf_page_in_file(bpage));
block->old = TRUE;
buf_page_set_old(bpage, TRUE);
last_block = UT_LIST_GET_LAST(buf_pool->LRU);
last_bpage = UT_LIST_GET_LAST(buf_pool->LRU);
if (last_block) {
block->LRU_position = last_block->LRU_position;
if (last_bpage) {
bpage->LRU_position = last_bpage->LRU_position;
} else {
block->LRU_position = buf_pool_clock_tic();
bpage->LRU_position = buf_pool_clock_tic();
}
ut_ad(!block->in_LRU_list);
UT_LIST_ADD_LAST(LRU, buf_pool->LRU, block);
ut_ad(!bpage->in_LRU_list);
UT_LIST_ADD_LAST(LRU, buf_pool->LRU, bpage);
#ifdef UNIV_DEBUG
block->in_LRU_list = TRUE;
bpage->in_LRU_list = TRUE;
#endif /* UNIV_DEBUG */
if (UT_LIST_GET_LEN(buf_pool->LRU) >= BUF_LRU_OLD_MIN_LEN) {
......@@ -756,45 +774,42 @@ UNIV_INLINE
void
buf_LRU_add_block_low(
/*==================*/
buf_block_t* block, /* in: control block */
buf_page_t* bpage, /* in: control block */
ibool old) /* in: TRUE if should be put to the old blocks
in the LRU list, else put to the start; if the
LRU list is very short, the block is added to
the start, regardless of this parameter */
{
ulint cl;
ut_ad(buf_pool);
ut_ad(block);
ut_ad(bpage);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
ut_ad(!block->in_LRU_list);
ut_a(buf_page_in_file(bpage));
ut_ad(!bpage->in_LRU_list);
block->old = old;
cl = buf_pool_clock_tic();
buf_page_set_old(bpage, old);
if (!old || (UT_LIST_GET_LEN(buf_pool->LRU) < BUF_LRU_OLD_MIN_LEN)) {
UT_LIST_ADD_FIRST(LRU, buf_pool->LRU, block);
UT_LIST_ADD_FIRST(LRU, buf_pool->LRU, bpage);
block->LRU_position = cl;
block->freed_page_clock = buf_pool->freed_page_clock;
bpage->LRU_position = buf_pool_clock_tic();
bpage->freed_page_clock = buf_pool->freed_page_clock;
} else {
UT_LIST_INSERT_AFTER(LRU, buf_pool->LRU, buf_pool->LRU_old,
block);
bpage);
buf_pool->LRU_old_len++;
/* We copy the LRU position field of the previous block
to the new block */
block->LRU_position = (buf_pool->LRU_old)->LRU_position;
bpage->LRU_position = (buf_pool->LRU_old)->LRU_position;
}
#ifdef UNIV_DEBUG
block->in_LRU_list = TRUE;
bpage->in_LRU_list = TRUE;
#endif /* UNIV_DEBUG */
if (UT_LIST_GET_LEN(buf_pool->LRU) > BUF_LRU_OLD_MIN_LEN) {
......@@ -820,14 +835,14 @@ Adds a block to the LRU list. */
void
buf_LRU_add_block(
/*==============*/
buf_block_t* block, /* in: control block */
buf_page_t* bpage, /* in: control block */
ibool old) /* in: TRUE if should be put to the old
blocks in the LRU list, else put to the start;
if the LRU list is very short, the block is
added to the start, regardless of this
parameter */
{
buf_LRU_add_block_low(block, old);
buf_LRU_add_block_low(bpage, old);
}
/**********************************************************************
......@@ -836,10 +851,10 @@ Moves a block to the start of the LRU list. */
void
buf_LRU_make_block_young(
/*=====================*/
buf_block_t* block) /* in: control block */
buf_page_t* bpage) /* in: control block */
{
buf_LRU_remove_block(block);
buf_LRU_add_block_low(block, FALSE);
buf_LRU_remove_block(bpage);
buf_LRU_add_block_low(bpage, FALSE);
}
/**********************************************************************
......@@ -848,10 +863,10 @@ Moves a block to the end of the LRU list. */
void
buf_LRU_make_block_old(
/*===================*/
buf_block_t* block) /* in: control block */
buf_page_t* bpage) /* in: control block */
{
buf_LRU_remove_block(block);
buf_LRU_add_block_to_end_low(block);
buf_LRU_remove_block(bpage);
buf_LRU_add_block_to_end_low(bpage);
}
/**********************************************************************
......@@ -909,45 +924,51 @@ static
void
buf_LRU_block_remove_hashed_page(
/*=============================*/
buf_block_t* block) /* in: block, must contain a file page and
buf_page_t* bpage) /* in: block, must contain a file page and
be in a state where it can be freed; there
may or may not be a hash index to the page */
{
const buf_block_t* hashed_block;
const buf_page_t* hashed_bpage;
ut_ad(bpage);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
ut_ad(mutex_own(&block->mutex));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(block);
ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
ut_a(block->io_fix == 0);
ut_a(block->buf_fix_count == 0);
ut_a(block->page.oldest_modification == 0);
ut_a(buf_page_get_io_fix(bpage) == BUF_IO_NONE);
ut_a(bpage->buf_fix_count == 0);
ut_a(bpage->oldest_modification == 0);
buf_LRU_remove_block(block);
buf_LRU_remove_block(bpage);
buf_pool->freed_page_clock += 1;
buf_block_modify_clock_inc(block);
switch (buf_page_get_state(bpage)) {
case BUF_BLOCK_FILE_PAGE:
buf_block_modify_clock_inc((buf_block_t*) bpage);
break;
case BUF_BLOCK_ZIP_PAGE:
break;
default:
ut_error;
}
hashed_block = buf_page_hash_get(block->page.space,
block->page.offset);
hashed_bpage = buf_page_hash_get(bpage->space, bpage->offset);
if (UNIV_UNLIKELY(block != hashed_block)) {
if (UNIV_UNLIKELY(bpage != hashed_bpage)) {
fprintf(stderr,
"InnoDB: Error: page %lu %lu not found"
" in the hash table\n",
(ulong) block->page.space,
(ulong) block->page.offset);
if (hashed_block) {
(ulong) bpage->space,
(ulong) bpage->offset);
if (hashed_bpage) {
fprintf(stderr,
"InnoDB: In hash table we find block"
" %p of %lu %lu which is not %p\n",
(const void*) hashed_block,
(ulong) hashed_block->page.space,
(ulong) hashed_block->page.offset,
(void*) block);
(const void*) hashed_bpage,
(ulong) hashed_bpage->space,
(ulong) hashed_bpage->offset,
(const void*) bpage);
}
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
......@@ -960,13 +981,26 @@ buf_LRU_block_remove_hashed_page(
}
HASH_DELETE(buf_page_t, hash, buf_pool->page_hash,
buf_page_address_fold(block->page.space,
block->page.offset),
(&block->page));
memset(block->frame + FIL_PAGE_OFFSET, 0xff, 4);
memset(block->frame + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID, 0xff, 4);
buf_block_set_state(block, BUF_BLOCK_REMOVE_HASH);
buf_page_address_fold(bpage->space, bpage->offset),
bpage);
switch (buf_page_get_state(bpage)) {
case BUF_BLOCK_ZIP_PAGE:
ut_a(bpage->zip.data);
ut_a(buf_page_get_zip_size(bpage));
memset(bpage->zip.data + FIL_PAGE_OFFSET, 0xff, 4);
memset(bpage->zip.data + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID,
0xff, 4);
break;
case BUF_BLOCK_FILE_PAGE:
memset(((buf_block_t*) bpage)->frame
+ FIL_PAGE_OFFSET, 0xff, 4);
memset(((buf_block_t*) bpage)->frame
+ FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID, 0xff, 4);
buf_page_set_state(bpage, BUF_BLOCK_REMOVE_HASH);
break;
default:
ut_error;
}
}
/**********************************************************************
......@@ -975,18 +1009,16 @@ static
void
buf_LRU_block_free_hashed_page(
/*===========================*/
buf_block_t* block) /* in: block, must contain a file page and
buf_page_t* bpage) /* in: block, must contain a file page and
be in a state where it can be freed */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
ut_ad(mutex_own(&block->mutex));
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
#endif /* UNIV_SYNC_DEBUG */
ut_a(buf_block_get_state(block) == BUF_BLOCK_REMOVE_HASH);
buf_block_set_state(block, BUF_BLOCK_MEMORY);
buf_page_set_state(bpage, BUF_BLOCK_MEMORY);
buf_LRU_block_free_non_file_page(block);
buf_LRU_block_free_non_file_page((buf_block_t*) bpage);
}
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
......@@ -997,7 +1029,7 @@ ibool
buf_LRU_validate(void)
/*==================*/
{
buf_block_t* block;
buf_page_t* bpage;
ulint old_len;
ulint new_len;
ulint LRU_pos;
......@@ -1014,33 +1046,33 @@ buf_LRU_validate(void)
ut_a(old_len <= new_len + BUF_LRU_OLD_TOLERANCE);
}
UT_LIST_VALIDATE(LRU, buf_block_t, buf_pool->LRU);
UT_LIST_VALIDATE(LRU, buf_page_t, buf_pool->LRU);
block = UT_LIST_GET_FIRST(buf_pool->LRU);
bpage = UT_LIST_GET_FIRST(buf_pool->LRU);
old_len = 0;
while (block != NULL) {
while (bpage != NULL) {
ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
ut_a(buf_page_in_file(bpage));
if (block->old) {
if (buf_page_is_old(bpage)) {
old_len++;
}
if (buf_pool->LRU_old && (old_len == 1)) {
ut_a(buf_pool->LRU_old == block);
ut_a(buf_pool->LRU_old == bpage);
}
LRU_pos = block->LRU_position;
LRU_pos = buf_page_get_LRU_position(bpage);
block = UT_LIST_GET_NEXT(LRU, block);
bpage = UT_LIST_GET_NEXT(LRU, bpage);
if (block) {
if (bpage) {
/* If the following assert fails, it may
not be an error: just the buf_pool clock
has wrapped around */
ut_a(LRU_pos >= block->LRU_position);
ut_a(LRU_pos >= buf_page_get_LRU_position(bpage));
}
}
......@@ -1050,12 +1082,14 @@ buf_LRU_validate(void)
UT_LIST_VALIDATE(free, buf_block_t, buf_pool->free);
block = UT_LIST_GET_FIRST(buf_pool->free);
{
buf_block_t* block = UT_LIST_GET_FIRST(buf_pool->free);
while (block != NULL) {
ut_a(buf_block_get_state(block) == BUF_BLOCK_NOT_USED);
while (block != NULL) {
ut_a(buf_block_get_state(block) == BUF_BLOCK_NOT_USED);
block = UT_LIST_GET_NEXT(free, block);
block = UT_LIST_GET_NEXT(free, block);
}
}
mutex_exit(&(buf_pool->mutex));
......@@ -1071,8 +1105,7 @@ void
buf_LRU_print(void)
/*===============*/
{
buf_block_t* block;
buf_frame_t* frame;
const buf_page_t* bpage;
ut_ad(buf_pool);
mutex_enter(&(buf_pool->mutex));
......@@ -1080,40 +1113,62 @@ buf_LRU_print(void)
fprintf(stderr, "Pool ulint clock %lu\n",
(ulong) buf_pool->ulint_clock);
block = UT_LIST_GET_FIRST(buf_pool->LRU);
bpage = UT_LIST_GET_FIRST(buf_pool->LRU);
while (block != NULL) {
while (bpage != NULL) {
fprintf(stderr, "BLOCK space %lu page %lu ",
(ulong) buf_block_get_space(block),
(ulong) buf_block_get_page_no(block));
(ulong) buf_page_get_space(bpage),
(ulong) buf_page_get_page_no(bpage));
if (block->old) {
if (buf_page_is_old(bpage)) {
fputs("old ", stderr);
}
if (block->buf_fix_count) {
if (bpage->buf_fix_count) {
fprintf(stderr, "buffix count %lu ",
(ulong) block->buf_fix_count);
(ulong) bpage->buf_fix_count);
}
if (block->io_fix) {
fprintf(stderr, "io_fix %lu ", (ulong) block->io_fix);
if (buf_page_get_io_fix(bpage)) {
fprintf(stderr, "io_fix %lu ",
(ulong) buf_page_get_io_fix(bpage));
}
if (block->page.oldest_modification) {
if (bpage->oldest_modification) {
fputs("modif. ", stderr);
}
frame = buf_block_get_frame(block);
switch (buf_page_get_state(bpage)) {
const byte* frame;
case BUF_BLOCK_FILE_PAGE:
frame = buf_block_get_frame((buf_block_t*) bpage);
fprintf(stderr, "\nLRU pos %lu type %lu"
" index id %lu\n",
(ulong) buf_page_get_LRU_position(bpage),
(ulong) fil_page_get_type(frame),
(ulong) ut_dulint_get_low(
btr_page_get_index_id(frame)));
break;
case BUF_BLOCK_ZIP_PAGE:
frame = bpage->zip.data;
fprintf(stderr, "\nLRU pos %lu type %lu size %lu"
" index id %lu\n",
(ulong) buf_page_get_LRU_position(bpage),
(ulong) fil_page_get_type(frame),
(ulong) buf_page_get_zip_size(bpage),
(ulong) ut_dulint_get_low(
btr_page_get_index_id(frame)));
break;
fprintf(stderr, "\nLRU pos %lu type %lu index id %lu\n",
(ulong) block->LRU_position,
(ulong) fil_page_get_type(frame),
(ulong) ut_dulint_get_low(
btr_page_get_index_id(frame)));
default:
fprintf(stderr, "\nLRU pos %lu !state %lu!\n",
(ulong) buf_page_get_LRU_position(bpage),
(ulong) buf_page_get_state(bpage));
break;
}
block = UT_LIST_GET_NEXT(LRU, block);
bpage = UT_LIST_GET_NEXT(LRU, bpage);
}
mutex_exit(&(buf_pool->mutex));
......
......@@ -175,7 +175,6 @@ buf_read_ahead_random(
wants to access */
{
ib_longlong tablespace_version;
buf_block_t* block;
ulint recent_blocks = 0;
ulint count;
ulint LRU_recent_limit;
......@@ -233,11 +232,11 @@ buf_read_ahead_random(
that is, reside near the start of the LRU list. */
for (i = low; i < high; i++) {
block = buf_page_hash_get(space, i);
const buf_page_t* bpage = buf_page_hash_get(space, i);
if ((block)
&& block->accessed
&& (block->LRU_position > LRU_recent_limit)) {
if (bpage
&& buf_page_is_accessed(bpage)
&& (buf_page_get_LRU_position(bpage) > LRU_recent_limit)) {
recent_blocks++;
......@@ -386,9 +385,9 @@ buf_read_ahead_linear(
must want access to this page (see NOTE 3 above) */
{
ib_longlong tablespace_version;
buf_block_t* block;
buf_page_t* bpage;
buf_frame_t* frame;
buf_block_t* pred_block = NULL;
buf_page_t* pred_bpage = NULL;
ulint pred_offset;
ulint succ_offset;
ulint count;
......@@ -461,20 +460,21 @@ buf_read_ahead_linear(
fail_count = 0;
for (i = low; i < high; i++) {
block = buf_page_hash_get(space, i);
bpage = buf_page_hash_get(space, i);
if ((block == NULL) || !block->accessed) {
if ((bpage == NULL) || !buf_page_is_accessed(bpage)) {
/* Not accessed */
fail_count++;
} else if (pred_block
&& (ut_ulint_cmp(block->LRU_position,
pred_block->LRU_position)
} else if (pred_bpage
&& (ut_ulint_cmp(
buf_page_get_LRU_position(bpage),
buf_page_get_LRU_position(pred_bpage))
!= asc_or_desc)) {
/* Accesses not in the right order */
fail_count++;
pred_block = block;
pred_bpage = bpage;
}
}
......@@ -490,15 +490,25 @@ buf_read_ahead_linear(
/* If we got this far, we know that enough pages in the area have
been accessed in the right order: linear read-ahead can be sensible */
block = buf_page_hash_get(space, offset);
bpage = buf_page_hash_get(space, offset);
if (block == NULL) {
if (bpage == NULL) {
mutex_exit(&(buf_pool->mutex));
return(0);
}
frame = block->frame;
switch (buf_page_get_state(bpage)) {
case BUF_BLOCK_ZIP_PAGE:
frame = bpage->zip.data;
break;
case BUF_BLOCK_FILE_PAGE:
frame = ((buf_block_t*) bpage)->frame;
break;
default:
ut_error;
break;
}
/* Read the natural predecessor and successor page addresses from
the page; NOTE that because the calling thread may have an x-latch
......
......@@ -268,7 +268,7 @@ the buffer pool. */
void
buf_page_make_young(
/*================*/
buf_block_t* block); /* in: buffer block of a file page */
buf_page_t* bpage); /* in: buffer block of a file page */
/************************************************************************
Returns TRUE if the page can be found in the buffer pool hash table. NOTE
that it is possible that the page is not yet read from disk, though. */
......@@ -331,6 +331,25 @@ buf_page_reset_file_page_was_freed(
ulint space, /* in: space id */
ulint offset); /* in: page number */
#endif /* UNIV_DEBUG_FILE_ACCESSES */
/************************************************************************
Reads the freed_page_clock of a buffer block. */
UNIV_INLINE
ulint
buf_page_get_freed_page_clock(
/*==========================*/
/* out: freed_page_clock */
const buf_page_t* bpage) /* in: block */
__attribute__((pure));
/************************************************************************
Reads the freed_page_clock of a buffer block. */
UNIV_INLINE
ulint
buf_block_get_freed_page_clock(
/*===========================*/
/* out: freed_page_clock */
const buf_block_t* block) /* in: block */
__attribute__((pure));
/************************************************************************
Recommends a move of a block to the start of the LRU list if there is danger
of dropping from the buffer pool. NOTE: does not reserve the buffer pool
......@@ -339,8 +358,9 @@ UNIV_INLINE
ibool
buf_block_peek_if_too_old(
/*======================*/
/* out: TRUE if should be made younger */
buf_block_t* block); /* in: block to make younger */
/* out: TRUE if should be made
younger */
const buf_block_t* block); /* in: block to make younger */
/************************************************************************
Returns the current state of is_hashed of a page. FALSE if the page is
not in the pool. NOTE that this operation does not fix the page in the
......@@ -432,7 +452,7 @@ buf_block_get_lock_hash_val(
/*========================*/
/* out: lock hash value */
const buf_block_t* block) /* in: block */
__attribute__((const));
__attribute__((pure));
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/*************************************************************************
Validates the buffer pool data structure. */
......@@ -551,6 +571,14 @@ buf_block_get_state(
Sets the state of a block. */
UNIV_INLINE
void
buf_page_set_state(
/*===============*/
buf_page_t* bpage, /* in/out: pointer to control block */
enum buf_page_state state); /* in: state */
/*************************************************************************
Sets the state of a block. */
UNIV_INLINE
void
buf_block_set_state(
/*================*/
buf_block_t* block, /* in/out: pointer to control block */
......@@ -565,6 +593,26 @@ buf_page_in_file(
const buf_page_t* bpage) /* in: pointer to control block */
__attribute__((pure));
/*************************************************************************
Determine the approximate LRU list position of a block. */
UNIV_INLINE
ulint
buf_page_get_LRU_position(
/*======================*/
/* out: LRU list position */
const buf_page_t* bpage) /* in: control block */
__attribute__((pure));
/*************************************************************************
Gets the mutex of a block. */
UNIV_INLINE
mutex_t*
buf_page_get_mutex(
/*================*/
/* out: pointer to mutex protecting bpage */
buf_page_t* bpage) /* in: pointer to control block */
__attribute__((pure));
/*************************************************************************
Get the flush type of a page. */
UNIV_INLINE
enum buf_flush
......@@ -591,6 +639,76 @@ buf_block_set_file_page(
ulint space, /* in: tablespace id */
ulint page_no);/* in: page number */
/*************************************************************************
Gets the io_fix state of a block. */
UNIV_INLINE
enum buf_io_fix
buf_page_get_io_fix(
/*================*/
/* out: io_fix state */
const buf_page_t* bpage) /* in: pointer to the control block */
__attribute__((pure));
/*************************************************************************
Gets the io_fix state of a block. */
UNIV_INLINE
enum buf_io_fix
buf_block_get_io_fix(
/*================*/
/* out: io_fix state */
const buf_block_t* block) /* in: pointer to the control block */
__attribute__((pure));
/*************************************************************************
Sets the io_fix state of a block. */
UNIV_INLINE
void
buf_page_set_io_fix(
/*================*/
buf_page_t* bpage, /* in/out: control block */
enum buf_io_fix io_fix);/* in: io_fix state */
/*************************************************************************
Sets the io_fix state of a block. */
UNIV_INLINE
void
buf_block_set_io_fix(
/*=================*/
buf_block_t* block, /* in/out: control block */
enum buf_io_fix io_fix);/* in: io_fix state */
/*************************************************************************
Determine if a block has been flagged old. */
UNIV_INLINE
ibool
buf_page_is_old(
/*============*/
/* out: TRUE if old */
const buf_page_t* bpage) /* in: control block */
__attribute__((pure));
/*************************************************************************
Flag a block old. */
UNIV_INLINE
void
buf_page_set_old(
/*=============*/
buf_page_t* bpage, /* in/out: control block */
ibool old); /* in: old */
/*************************************************************************
Determine if a block has been accessed in the buffer pool. */
UNIV_INLINE
ibool
buf_page_is_accessed(
/*=================*/
/* out: TRUE if accessed */
const buf_page_t* bpage) /* in: control block */
__attribute__((pure));
/*************************************************************************
Flag a block accessed. */
UNIV_INLINE
void
buf_page_set_accessed(
/*==================*/
buf_page_t* bpage, /* in/out: control block */
ibool accessed); /* in: accessed */
/*************************************************************************
Gets a pointer to the memory frame of a block. */
UNIV_INLINE
buf_frame_t*
......@@ -598,7 +716,16 @@ buf_block_get_frame(
/*================*/
/* out: pointer to the frame */
buf_block_t* block) /* in: pointer to the control block */
__attribute__((const));
__attribute__((pure));
/*************************************************************************
Gets the space id of a block. */
UNIV_INLINE
ulint
buf_page_get_space(
/*===============*/
/* out: space id */
const buf_page_t* bpage) /* in: pointer to the control block */
__attribute((pure));
/*************************************************************************
Gets the space id of a block. */
UNIV_INLINE
......@@ -607,7 +734,16 @@ buf_block_get_space(
/*================*/
/* out: space id */
const buf_block_t* block) /* in: pointer to the control block */
__attribute((const));
__attribute((pure));
/*************************************************************************
Gets the page number of a block. */
UNIV_INLINE
ulint
buf_page_get_page_no(
/*=================*/
/* out: page number */
const buf_page_t* bpage) /* in: pointer to the control block */
__attribute((pure));
/*************************************************************************
Gets the page number of a block. */
UNIV_INLINE
......@@ -616,7 +752,16 @@ buf_block_get_page_no(
/*==================*/
/* out: page number */
const buf_block_t* block) /* in: pointer to the control block */
__attribute((const));
__attribute((pure));
/*************************************************************************
Gets the compressed page size of a block. */
UNIV_INLINE
ulint
buf_page_get_zip_size(
/*==================*/
/* out: compressed page size, or 0 */
const buf_page_t* bpage) /* in: pointer to the control block */
__attribute((pure));
/*************************************************************************
Gets the compressed page size of a block. */
UNIV_INLINE
......@@ -625,7 +770,7 @@ buf_block_get_zip_size(
/*===================*/
/* out: compressed page size, or 0 */
const buf_block_t* block) /* in: pointer to the control block */
__attribute((const));
__attribute((pure));
/*************************************************************************
Gets the compressed page descriptor corresponding to an uncompressed page
if applicable. */
......@@ -635,7 +780,7 @@ buf_block_get_page_zip(
/*===================*/
/* out: compressed page descriptor, or NULL */
buf_block_t* block) /* in: pointer to the control block */
__attribute((const));
__attribute((pure));
#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
/***********************************************************************
Gets the block to whose frame the pointer is pointing to. */
......@@ -654,7 +799,7 @@ buf_frame_get_page_zip(
/*===================*/
/* out: compressed page descriptor, or NULL */
byte* ptr) /* in: pointer to the page */
__attribute((const));
__attribute((pure));
#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
/************************************************************************
This function is used to get info if there is an io operation
......@@ -664,7 +809,7 @@ ibool
buf_page_io_query(
/*==============*/
/* out: TRUE if io going on */
buf_block_t* block); /* in: pool block, must be bufferfixed */
buf_page_t* bpage); /* in: pool block, must be bufferfixed */
/************************************************************************
Function which inits a page for read to the buffer buf_pool. If the page is
(1) already in buf_pool, or
......@@ -751,6 +896,19 @@ struct buf_page_struct{
ulint flush_type:2; /* if this block is currently being
flushed to disk, this tells the
flush_type (@see enum buf_flush) */
ulint old:1; /* TRUE if the block is in the old
blocks in the LRU list */
ulint accessed:1; /* TRUE if the page has been accessed
while in the buffer pool: read-ahead
may read in pages which have not been
accessed yet; a thread is allowed to
read this for heuristic purposes
without holding any mutex or latch */
ulint io_fix:2; /* type of pending I/O operation
(@see enum buf_io_fix); also
protected by buf_pool->mutex */
ulint buf_fix_count:23;/* count of how manyfold this block
is currently bufferfixed */
page_zip_des_t zip; /* compressed page */
buf_page_t* hash; /* node used in chaining to the page
......@@ -771,6 +929,31 @@ struct buf_page_struct{
modification to this block which has
not yet been flushed on disk; zero if
all modifications are on disk */
/* 3. LRU replacement algorithm fields; protected by buf_pool->mutex
unless otherwise noted*/
UT_LIST_NODE_T(buf_page_t) LRU;
/* node of the LRU list */
#ifdef UNIV_DEBUG
ibool in_LRU_list; /* TRUE of the page is in the LRU list;
used in debugging */
#endif /* UNIV_DEBUG */
ulint LRU_position; /* value which monotonically
decreases (or may stay constant if
the block is in the old blocks) toward
the end of the LRU list, if the pool
ulint_clock has not wrapped around:
NOTE that this value can only be used
in heuristic algorithms, because of
the possibility of a wrap-around! */
ulint freed_page_clock;/* the value of freed_page_clock
of the buffer pool when this block was
the last time put to the head of the
LRU list; protected by buf_pool->mutex;
a thread is allowed to read this for
heuristic purposes without holding any
mutex or latch */
#ifdef UNIV_DEBUG_FILE_ACCESSES
ibool file_page_was_freed;
/* this is set to TRUE when fsp
......@@ -817,44 +1000,6 @@ struct buf_block_struct{
/* node of the free block list */
ibool in_free_list; /* TRUE if in the free list; used in
debugging */
UT_LIST_NODE_T(buf_block_t) LRU;
/* node of the LRU list */
#ifdef UNIV_DEBUG
ibool in_LRU_list; /* TRUE of the page is in the LRU list;
used in debugging */
#endif /* UNIV_DEBUG */
ulint LRU_position; /* value which monotonically
decreases (or may stay constant if
the block is in the old blocks) toward
the end of the LRU list, if the pool
ulint_clock has not wrapped around:
NOTE that this value can only be used
in heuristic algorithms, because of
the possibility of a wrap-around! */
ulint freed_page_clock;/* the value of freed_page_clock
of the buffer pool when this block was
the last time put to the head of the
LRU list; protected by buf_pool->mutex;
a thread is allowed to read this for
heuristic purposes without holding any
mutex or latch */
ulint old:1; /* TRUE if the block is in the old
blocks in the LRU list; protected
by buf_pool->mutex */
ulint accessed:1; /* TRUE if the page has been accessed
while in the buffer pool: read-ahead
may read in pages which have not been
accessed yet; this is protected by
block->mutex; a thread is allowed to
read this for heuristic purposes
without holding any mutex or latch */
ulint io_fix:2; /* if a read is pending to the frame,
io_fix is BUF_IO_READ, in the case
of a write BUF_IO_WRITE, otherwise 0;
this is protected by block->mutex */
ulint buf_fix_count:29;/* count of how manyfold this block
is currently bufferfixed; this is
protected by block->mutex */
/* 4. Optimistic search field */
ib_uint64_t modify_clock; /* this clock is incremented every
......@@ -994,9 +1139,9 @@ struct buf_pool_struct{
UT_LIST_BASE_NODE_T(buf_block_t) free;
/* base node of the free block list */
UT_LIST_BASE_NODE_T(buf_block_t) LRU;
UT_LIST_BASE_NODE_T(buf_page_t) LRU;
/* base node of the LRU list */
buf_block_t* LRU_old; /* pointer to the about 3/8 oldest
buf_page_t* LRU_old; /* pointer to the about 3/8 oldest
blocks in the LRU list; NULL if LRU
length less than BUF_LRU_OLD_MIN_LEN */
ulint LRU_old_len; /* length of the LRU list from
......@@ -1007,10 +1152,6 @@ struct buf_pool_struct{
LRU_old == NULL */
};
/* Io_fix states of a control block; these must be 1..3 */
#define BUF_IO_READ 1
#define BUF_IO_WRITE 2
/************************************************************************
Let us list the consistency conditions for different control block states.
......
......@@ -11,6 +11,30 @@ Created 11/5/1995 Heikki Tuuri
#include "buf0rea.h"
#include "mtr0mtr.h"
/************************************************************************
Reads the freed_page_clock of a buffer block. */
UNIV_INLINE
ulint
buf_page_get_freed_page_clock(
/*==========================*/
/* out: freed_page_clock */
const buf_page_t* bpage) /* in: block */
{
return(bpage->freed_page_clock);
}
/************************************************************************
Reads the freed_page_clock of a buffer block. */
UNIV_INLINE
ulint
buf_block_get_freed_page_clock(
/*===========================*/
/* out: freed_page_clock */
const buf_block_t* block) /* in: block */
{
return(buf_page_get_freed_page_clock(&block->page));
}
/************************************************************************
Recommends a move of a block to the start of the LRU list if there is danger
of dropping from the buffer pool. NOTE: does not reserve the buffer pool
......@@ -19,10 +43,12 @@ UNIV_INLINE
ibool
buf_block_peek_if_too_old(
/*======================*/
/* out: TRUE if should be made younger */
buf_block_t* block) /* in: block to make younger */
/* out: TRUE if should be made
younger */
const buf_block_t* block) /* in: block to make younger */
{
return(buf_pool->freed_page_clock >= block->freed_page_clock
return(buf_pool->freed_page_clock
>= buf_block_get_freed_page_clock(block)
+ 1 + (buf_pool->curr_size / 1024));
}
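/* A hedged illustration of the heuristic above (the numbers are made up,
not taken from the patch): with curr_size = 65536 pages, a block is only
considered "too old" once at least 1 + 65536/1024 = 65 pages have been
freed from the LRU list since the block was last moved to its head.
A minimal caller sketch, assuming buf_page_make_young() as declared in
this patch; the helper name is hypothetical: */
static void
buf_page_access_hint(buf_block_t* block)	/* hypothetical helper */
{
	if (buf_block_peek_if_too_old(block)) {
		/* enough replacement activity has happened since the
		last promotion; move the page to the LRU head */
		buf_page_make_young(&block->page);
	}
}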
......@@ -125,13 +151,13 @@ buf_block_get_state(
Sets the state of a block. */
UNIV_INLINE
void
buf_block_set_state(
/*================*/
buf_block_t* block, /* in/out: pointer to control block */
buf_page_set_state(
/*===============*/
buf_page_t* bpage, /* in/out: pointer to control block */
enum buf_page_state state) /* in: state */
{
#ifdef UNIV_DEBUG
enum buf_page_state old_state = buf_block_get_state(block);
enum buf_page_state old_state = buf_page_get_state(bpage);
switch (old_state) {
case BUF_BLOCK_ZIP_PAGE:
......@@ -157,8 +183,20 @@ buf_block_set_state(
break;
}
#endif /* UNIV_DEBUG */
block->page.state = state;
ut_ad(buf_block_get_state(block) == state);
bpage->state = state;
ut_ad(buf_page_get_state(bpage) == state);
}
/*************************************************************************
Sets the state of a block. */
UNIV_INLINE
void
buf_block_set_state(
/*================*/
buf_block_t* block, /* in/out: pointer to control block */
enum buf_page_state state) /* in: state */
{
buf_page_set_state(&block->page, state);
}
/*************************************************************************
......@@ -184,6 +222,37 @@ buf_page_in_file(
return(FALSE);
}
/*************************************************************************
Determine the approximate LRU list position of a block. */
UNIV_INLINE
ulint
buf_page_get_LRU_position(
/*======================*/
/* out: LRU list position */
const buf_page_t* bpage) /* in: control block */
{
ut_ad(buf_page_in_file(bpage));
return(bpage->LRU_position);
}
/*************************************************************************
Gets the mutex of a block. */
UNIV_INLINE
mutex_t*
buf_page_get_mutex(
/*================*/
/* out: pointer to mutex protecting bpage */
buf_page_t* bpage) /* in: pointer to control block */
{
switch (buf_page_get_state(bpage)) {
case BUF_BLOCK_ZIP_PAGE:
return(NULL); /* TODO: return common mutex for page_zip */
default:
return(&((buf_block_t*) bpage)->mutex);
}
}
/*************************************************************************
Get the flush type of a page. */
UNIV_INLINE
......@@ -236,6 +305,132 @@ buf_block_set_file_page(
block->page.offset = page_no;
}
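/* A hedged sketch of how the state and page-identity accessors might fit
together when a block is assigned to a file page.  It assumes the
signature suggested by the hunk above, buf_block_set_file_page(block,
space, page_no); the wrapper name and the exact call sequence are
illustrative, not part of this patch: */
static void
buf_block_assign_page(buf_block_t* block, ulint space, ulint page_no)
{
	/* expected to set the state to BUF_BLOCK_FILE_PAGE and record
	the page identity in block->page */
	buf_block_set_file_page(block, space, page_no);

	ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
	ut_ad(buf_block_get_space(block) == space);
	ut_ad(buf_block_get_page_no(block) == page_no);
}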
/*************************************************************************
Gets the io_fix state of a block. */
UNIV_INLINE
enum buf_io_fix
buf_page_get_io_fix(
/*================*/
/* out: io_fix state */
const buf_page_t* bpage) /* in: pointer to the control block */
{
enum buf_io_fix io_fix = bpage->io_fix;
#ifdef UNIV_SYNC_DEBUG
ut_a(mutex_own(&buf_pool->mutex)
|| mutex_own(buf_page_get_mutex((buf_page_t*) bpage)));
#endif /* UNIV_SYNC_DEBUG */
#ifdef UNIV_DEBUG
switch (io_fix) {
case BUF_IO_NONE:
case BUF_IO_READ:
case BUF_IO_WRITE:
return(io_fix);
}
ut_error;
#endif /* UNIV_DEBUG */
return(io_fix);
}
/*************************************************************************
Gets the io_fix state of a block. */
UNIV_INLINE
enum buf_io_fix
buf_block_get_io_fix(
/*================*/
/* out: io_fix state */
const buf_block_t* block) /* in: pointer to the control block */
{
return(buf_page_get_io_fix(&block->page));
}
/*************************************************************************
Sets the io_fix state of a block. */
UNIV_INLINE
void
buf_page_set_io_fix(
/*================*/
buf_page_t* bpage, /* in/out: control block */
enum buf_io_fix io_fix) /* in: io_fix state */
{
#ifdef UNIV_SYNC_DEBUG
ut_a(mutex_own(&buf_pool->mutex));
ut_a(mutex_own(buf_page_get_mutex(bpage)));
#endif /* UNIV_SYNC_DEBUG */
bpage->io_fix = io_fix;
ut_ad(buf_page_get_io_fix(bpage) == io_fix);
}
/*************************************************************************
Sets the io_fix state of a block. */
UNIV_INLINE
void
buf_block_set_io_fix(
/*=================*/
buf_block_t* block, /* in/out: control block */
enum buf_io_fix io_fix) /* in: io_fix state */
{
buf_page_set_io_fix(&block->page, io_fix);
}
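/* A hedged sketch of the calling convention implied by the assertions in
buf_page_set_io_fix() above: both buf_pool->mutex and the block mutex
must be held.  The function name and surrounding logic are illustrative
only, and the page is assumed not to be a BUF_BLOCK_ZIP_PAGE so that
buf_page_get_mutex() is non-NULL: */
static void
buf_page_start_read(buf_page_t* bpage)	/* hypothetical helper */
{
	mutex_enter(&buf_pool->mutex);
	mutex_enter(buf_page_get_mutex(bpage));

	ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_NONE);
	buf_page_set_io_fix(bpage, BUF_IO_READ);

	mutex_exit(buf_page_get_mutex(bpage));
	mutex_exit(&buf_pool->mutex);
}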
/*************************************************************************
Determine if a block has been flagged old. */
UNIV_INLINE
ibool
buf_page_is_old(
/*============*/
/* out: TRUE if old */
const buf_page_t* bpage) /* in: control block */
{
ut_ad(buf_page_in_file(bpage));
return(bpage->old);
}
/*************************************************************************
Flag a block old. */
UNIV_INLINE
void
buf_page_set_old(
/*=============*/
buf_page_t* bpage, /* in/out: control block */
ibool old) /* in: old */
{
ut_a(buf_page_in_file(bpage));
#ifdef UNIV_SYNC_DEBUG
ut_a(mutex_own(buf_page_get_mutex(bpage)));
#endif /* UNIV_SYNC_DEBUG */
bpage->old = old;
}
/*************************************************************************
Determine if a block has been accessed in the buffer pool. */
UNIV_INLINE
ibool
buf_page_is_accessed(
/*=================*/
/* out: TRUE if accessed */
const buf_page_t* bpage) /* in: control block */
{
ut_ad(buf_page_in_file(bpage));
return(bpage->accessed);
}
/*************************************************************************
Flag a block accessed. */
UNIV_INLINE
void
buf_page_set_accessed(
/*==================*/
buf_page_t* bpage, /* in/out: control block */
ibool accessed) /* in: accessed */
{
ut_a(buf_page_in_file(bpage));
bpage->accessed = accessed;
}
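/* A small usage sketch for the accessed-flag accessors above.  Note that
buf_page_set_old() asserts that the block mutex is held, while
buf_page_set_accessed() does not; the caller below is illustrative and
not taken from the patch: */
static void
buf_page_note_first_access(buf_page_t* bpage)	/* hypothetical helper */
{
	if (!buf_page_is_accessed(bpage)) {
		/* first access since the page was read into the pool */
		buf_page_set_accessed(bpage, TRUE);
	}
}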
/*************************************************************************
Gets a pointer to the memory frame of a block. */
UNIV_INLINE
......@@ -248,11 +443,26 @@ buf_block_get_frame(
ut_ad(block);
ut_ad(buf_block_get_state(block) != BUF_BLOCK_NOT_USED);
ut_ad(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE
|| (block->buf_fix_count > 0));
|| (block->page.buf_fix_count > 0));
return(block->frame);
}
/*************************************************************************
Gets the space id of a block. */
UNIV_INLINE
ulint
buf_page_get_space(
/*===============*/
/* out: space id */
const buf_page_t* bpage) /* in: pointer to the control block */
{
ut_ad(bpage);
ut_a(buf_page_in_file(bpage));
return(bpage->space);
}
/*************************************************************************
Gets the space id of a block. */
UNIV_INLINE
......@@ -268,6 +478,21 @@ buf_block_get_space(
return(block->page.space);
}
/*************************************************************************
Gets the page number of a block. */
UNIV_INLINE
ulint
buf_page_get_page_no(
/*=================*/
/* out: page number */
const buf_page_t* bpage) /* in: pointer to the control block */
{
ut_ad(bpage);
ut_a(buf_page_in_file(bpage));
return(bpage->offset);
}
/*************************************************************************
Gets the page number of a block. */
UNIV_INLINE
......@@ -283,6 +508,18 @@ buf_block_get_page_no(
return(block->page.offset);
}
/*************************************************************************
Gets the compressed page size of a block. */
UNIV_INLINE
ulint
buf_page_get_zip_size(
/*==================*/
/* out: compressed page size, or 0 */
const buf_page_t* bpage) /* in: pointer to the control block */
{
return(bpage->zip.ssize ? 512 << bpage->zip.ssize : 0);
}
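/* The shift above encodes the compressed page size logarithmically:
zip.ssize == 0 means the page is not compressed, and otherwise the size
is 512 << ssize.  A worked example (the values follow directly from the
expression, not from elsewhere in the patch):

	ssize = 1  ->  1024 bytes
	ssize = 2  ->  2048 bytes
	ssize = 4  ->  8192 bytes
	ssize = 5  -> 16384 bytes
*/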
/*************************************************************************
Gets the compressed page size of a block. */
UNIV_INLINE
......@@ -462,14 +699,14 @@ ibool
buf_page_io_query(
/*==============*/
/* out: TRUE if io going on */
buf_block_t* block) /* in: buf_pool block, must be bufferfixed */
buf_page_t* bpage) /* in: buf_pool block, must be bufferfixed */
{
mutex_enter(&(buf_pool->mutex));
ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
ut_ad(block->buf_fix_count > 0);
ut_ad(buf_page_in_file(bpage));
ut_ad(bpage->buf_fix_count > 0);
if (block->io_fix != 0) {
if (buf_page_get_io_fix(bpage) != BUF_IO_NONE) {
mutex_exit(&(buf_pool->mutex));
return(TRUE);
......@@ -516,7 +753,8 @@ buf_block_modify_clock_inc(
buf_block_t* block) /* in: block */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad((mutex_own(&(buf_pool->mutex)) && (block->buf_fix_count == 0))
ut_ad((mutex_own(&(buf_pool->mutex))
&& (block->page.buf_fix_count == 0))
|| rw_lock_own(&(block->lock), RW_LOCK_EXCLUSIVE));
#endif /* UNIV_SYNC_DEBUG */
......@@ -560,7 +798,7 @@ buf_block_buf_fix_inc_func(
ut_a(ret);
ut_a(mutex_own(&block->mutex));
#endif /* UNIV_SYNC_DEBUG */
block->buf_fix_count++;
block->page.buf_fix_count++;
}
#ifdef UNIV_SYNC_DEBUG
# define buf_block_buf_fix_inc(b,f,l) buf_block_buf_fix_inc_func(f,l,b)
......@@ -658,7 +896,7 @@ buf_page_release(
ut_ad(block);
ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
ut_a(block->buf_fix_count > 0);
ut_a(block->page.buf_fix_count > 0);
if (rw_latch == RW_X_LATCH && mtr->modifications) {
mutex_enter(&buf_pool->mutex);
......@@ -671,7 +909,7 @@ buf_page_release(
#ifdef UNIV_SYNC_DEBUG
rw_lock_s_unlock(&(block->debug_latch));
#endif
block->buf_fix_count--;
block->page.buf_fix_count--;
mutex_exit(&block->mutex);
......
......@@ -20,7 +20,7 @@ Updates the flush system data structures when a write is completed. */
void
buf_flush_write_complete(
/*=====================*/
buf_block_t* block); /* in: pointer to the block in question */
buf_page_t* bpage); /* in: pointer to the block in question */
/*************************************************************************
Flushes pages from the end of the LRU list if there is too small
a margin of replaceable pages there. */
......@@ -99,8 +99,8 @@ ibool
buf_flush_ready_for_replace(
/*========================*/
/* out: TRUE if can replace immediately */
buf_block_t* block); /* in: buffer control block, must
be in state BUF_BLOCK_FILE_PAGE
buf_page_t* bpage); /* in: buffer control block, must be in state
BUF_BLOCK_FILE_PAGE or BUF_BLOCK_ZIP_PAGE
and in the LRU list */
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/**********************************************************************
......
......@@ -39,7 +39,7 @@ buf_flush_note_modification(
{
ut_ad(block);
ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
ut_ad(block->buf_fix_count > 0);
ut_ad(block->page.buf_fix_count > 0);
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
ut_ad(mutex_own(&(buf_pool->mutex)));
......@@ -78,7 +78,7 @@ buf_flush_recv_note_modification(
{
ut_ad(block);
ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
ut_ad(block->buf_fix_count > 0);
ut_ad(block->page.buf_fix_count > 0);
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
......
......@@ -72,7 +72,7 @@ ibool
buf_LRU_free_block(
/*===============*/
/* out: TRUE if freed */
buf_block_t* block); /* in: block to be freed */
buf_page_t* bpage); /* in: block to be freed */
/**********************************************************************
Look for a replaceable block from the end of the LRU list and put it to
the free list if found. */
......@@ -112,7 +112,7 @@ Adds a block to the LRU list. */
void
buf_LRU_add_block(
/*==============*/
buf_block_t* block, /* in: control block */
buf_page_t* bpage, /* in: control block */
ibool old); /* in: TRUE if should be put to the old
blocks in the LRU list, else put to the
start; if the LRU list is very short, added to
......@@ -123,14 +123,14 @@ Moves a block to the start of the LRU list. */
void
buf_LRU_make_block_young(
/*=====================*/
buf_block_t* block); /* in: control block */
buf_page_t* bpage); /* in: control block */
/**********************************************************************
Moves a block to the end of the LRU list. */
void
buf_LRU_make_block_old(
/*===================*/
buf_block_t* block); /* in: control block */
buf_page_t* bpage); /* in: control block */
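/* A hedged sketch (not from the patch) of how an LRU scan might combine
buf_flush_ready_for_replace() with the two declarations above.  It
assumes the caller holds buf_pool->mutex and whatever block mutex these
functions require; the helper name is hypothetical: */
static void
buf_LRU_try_evict(buf_page_t* bpage)	/* hypothetical helper */
{
	ut_ad(mutex_own(&buf_pool->mutex));

	if (buf_flush_ready_for_replace(bpage)) {
		/* the block can be replaced immediately */
		buf_LRU_free_block(bpage);
	} else {
		/* cannot be replaced yet; move it to the end of the
		LRU list so it becomes a candidate on a later scan */
		buf_LRU_make_block_old(bpage);
	}
}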
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/**************************************************************************
Validates the LRU list. */
......
......@@ -25,5 +25,12 @@ enum buf_flush {
BUF_FLUSH_N_TYPES /* index of last element + 1 */
};
/* Flags for io_fix types */
enum buf_io_fix {
BUF_IO_NONE = 0, /**< no pending I/O */
BUF_IO_READ, /**< read pending */
BUF_IO_WRITE /**< write pending */
};
#endif
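/* Replacing the old BUF_IO_READ/BUF_IO_WRITE #defines with an enum that
has an explicit BUF_IO_NONE = 0 lets debug code switch over the value
exhaustively (as buf_page_get_io_fix() does under UNIV_DEBUG) and lets
compilers warn about unhandled states.  A minimal sketch assuming only
the enum declared above; the helper is hypothetical: */
static const char*
buf_io_fix_str(enum buf_io_fix io_fix)	/* hypothetical helper */
{
	switch (io_fix) {
	case BUF_IO_NONE:
		return("none");
	case BUF_IO_READ:
		return("read");
	case BUF_IO_WRITE:
		return("write");
	}

	ut_error;
	return("?");
}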
......@@ -223,7 +223,7 @@ ibuf_update_free_bits_if_full(
cannot make inserts using the insert buffer from slipping
out of the buffer pool */
buf_page_make_young(block);
buf_page_make_young(&block->page);
}
if (before > after) {
......
......@@ -3805,7 +3805,7 @@ row_search_for_mysql(
" buf block fix count %lu\n",
(void*) rec, (ulong)
btr_cur_get_block(btr_pcur_get_btr_cur(pcur))
->buf_fix_count);
->page.buf_fix_count);
fprintf(stderr,
"InnoDB: Index corruption: rec offs %lu"
" next offs %lu, page no %lu,\n"
......