Commit 3d9f3eed authored by marko's avatar marko

branches/innodb+: Implement the buf_pool_watch for DeleteBuffering in

the page hash table. This serves two purposes. It allows multiple
watches to be set at the same time (by multiple purge threads) and it
removes a race condition when the read of a block completes about the
time the buffer pool watch is being set.

buf_pool_watch_clear(): Rename to buf_pool_watch_unset(). Add
parameters space, offset.

buf_pool_watch_remove(): A helper function for removing the watch.

buf_pool_watch_is(): A predicate for testing if a block descriptor is
a sentinel for the buffer pool watch.

buf_pool_watch[BUF_POOL_WATCH_SIZE]: An array of sentinel block descriptors.

buf_pool_watch_set(): Add a parameter for the fold value, and return
the block if the block is in the buffer pool. Allocate the sentinel
from buf_pool_watch[] if needed. Use buf_fix_count for
reference-counting.

enum buf_block_state: Add BUF_BLOCK_POOL_WATCH as a state alias that
is shared with BUF_BLOCK_ZIP_FREE.

buf_page_hash_get_low(): A low-level variant of buf_page_hash_get()
that takes the fold value as a parameter and may return a watch
sentinel block. In callers, test the return value for
buf_pool_watch_is() [impossible cases with ut_ad(), possible ones with if].
When needed, invoke buf_pool_watch_remove() but preserve the buf_fix_count.

buf_page_hash_get(), buf_block_hash_get(): Return NULL for watch
sentinel blocks, to keep existing behaviour.

buf_page_init(): Add a parameter for the fold value.

ibuf_insert(): If a buffer pool watch exists for the block, refuse to
buffer subsequent operations, so that the purge that is being buffered
will not "overtake" later requests. Previously, we would notify the
watch in this case. Either way, the block would be read to the buffer
pool. In the current design, we can only notify the watch by actually
setting up a real block in buf_pool->page_hash.

rb://263 approved by Inaam Rana
parent 227df33e
......@@ -645,11 +645,11 @@ retry_page_get:
cursor->flag = BTR_CUR_DELETE_IBUF;
} else {
/* The purge could not be buffered. */
buf_pool_watch_clear();
buf_pool_watch_unset(space, page_no);
break;
}
buf_pool_watch_clear();
buf_pool_watch_unset(space, page_no);
goto func_exit;
default:
......
......@@ -457,6 +457,8 @@ buf_buddy_relocate(
return(FALSE);
}
ut_ad(!buf_pool_watch_is(bpage));
if (page_zip_get_size(&bpage->zip) != size) {
/* The block is of different size. We would
have to relocate all blocks covered by src.
......
......@@ -1138,6 +1138,7 @@ buf_relocate(
ut_ad(!bpage->in_zip_hash);
ut_ad(bpage->in_page_hash);
ut_ad(bpage == buf_page_hash_get(bpage->space, bpage->offset));
ut_ad(!buf_pool_watch_is(bpage));
#ifdef UNIV_DEBUG
switch (buf_page_get_state(bpage)) {
case BUF_BLOCK_ZIP_FREE:
......@@ -1502,63 +1503,191 @@ buf_pool_resize(void)
buf_pool_page_hash_rebuild();
}
/** Maximum number of concurrent buffer pool watches */
#define BUF_POOL_WATCH_SIZE 1
/** Sentinel records for buffer pool watches. Protected by buf_pool_mutex. */
static buf_page_t buf_pool_watch[BUF_POOL_WATCH_SIZE];
/********************************************************************
Determine if a block is a sentinel for a buffer pool watch.
@return TRUE if a sentinel for a buffer pool watch, FALSE if not */
UNIV_INTERN
ibool
buf_pool_watch_is(
/*==============*/
	const buf_page_t*	bpage)	/*!< in: block */
{
	/* Watch sentinels are the elements of the static array
	buf_pool_watch[]; a simple address-range test identifies them. */
	const buf_page_t* const	watch_begin = &buf_pool_watch[0];
	const buf_page_t* const	watch_end
		= &buf_pool_watch[BUF_POOL_WATCH_SIZE];

	ut_ad(buf_page_in_file(bpage));

	if (UNIV_UNLIKELY(bpage >= watch_begin && bpage < watch_end)) {
		/* An active sentinel masquerades as a clean compressed
		page, but carries no compressed page frame and is
		reference-counted while it is in the page hash. */
		ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_PAGE);
		ut_ad(!bpage->in_zip_hash);
		ut_ad(bpage->in_page_hash);
		ut_ad(bpage->zip.data == NULL);
		ut_ad(bpage->buf_fix_count > 0);

		return(TRUE);
	}

	/* A real BUF_BLOCK_ZIP_PAGE descriptor must have a compressed
	page frame attached; only sentinels may lack one. */
	ut_ad(buf_page_get_state(bpage) != BUF_BLOCK_ZIP_PAGE
	      || bpage->zip.data != NULL);

	return(FALSE);
}
/****************************************************************//**
Add watch for the given page to be read in. Caller must have the buffer pool
mutex reserved. */
static
void
mutex reserved.
@return NULL if watch set, block if the page is in the buffer pool */
UNIV_INTERN
buf_page_t*
buf_pool_watch_set(
/*===============*/
ulint space, /*!< in: space id */
ulint page_no) /*!< in: page number */
ulint offset, /*!< in: page number */
ulint fold) /*!< in: buf_page_address_fold(space, offset) */
{
buf_page_t* bpage;
ulint i;
ut_ad(buf_pool_mutex_own());
/* There can't be multiple watches at the same time. */
ut_a(!buf_pool->watch_active);
bpage = buf_page_hash_get_low(space, offset, fold);
if (UNIV_LIKELY_NULL(bpage)) {
if (!buf_pool_watch_is(bpage)) {
/* The page was loaded meanwhile. */
return(bpage);
}
/* Add to an existing watch. */
bpage->buf_fix_count++;
return(NULL);
}
for (i = 0; i < BUF_POOL_WATCH_SIZE; i++) {
bpage = &buf_pool_watch[i];
buf_pool->watch_active = TRUE;
buf_pool->watch_space = space;
buf_pool->watch_occurred = FALSE;
buf_pool->watch_page_no = page_no;
ut_ad(bpage->access_time == 0);
ut_ad(bpage->newest_modification == 0);
ut_ad(bpage->oldest_modification == 0);
ut_ad(bpage->zip.data == NULL);
ut_ad(!bpage->in_zip_hash);
switch (bpage->state) {
case BUF_BLOCK_POOL_WATCH:
ut_ad(!bpage->in_page_hash);
ut_ad(bpage->buf_fix_count == 0);
/* bpage is pointing to buf_pool_watch[],
which is protected by buf_pool_mutex.
Normally, buf_page_t objects are protected by
buf_block_t::mutex or buf_pool_zip_mutex or both. */
bpage->state = BUF_BLOCK_ZIP_PAGE;
bpage->space = space;
bpage->offset = offset;
bpage->buf_fix_count = 1;
ut_d(bpage->in_page_hash = TRUE);
HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
fold, bpage);
return(NULL);
case BUF_BLOCK_ZIP_PAGE:
ut_ad(bpage->in_page_hash);
ut_ad(bpage->buf_fix_count > 0);
break;
default:
ut_error;
}
}
/* Allocation failed. Either the maximum number of purge
threads should never exceed BUF_POOL_WATCH_SIZE, or this code
should be modified to return a special non-NULL value and the
caller should purge the record directly. */
ut_error;
}
/****************************************************************//**
Stop watching if the marked page is read in. */
Remove the sentinel block for the watch before replacing it with a real block.
buf_pool_watch_unset() or buf_pool_watch_occurred() will notice that
the block has been replaced with the real block.
@return reference count, to be added to the replacement block */
static
void
buf_pool_watch_remove(
/*==================*/
	ulint		fold,	/*!< in: buf_page_address_fold(space, offset) */
	buf_page_t*	watch)	/*!< in/out: sentinel for watch */
{
	/* The sentinel lives in buf_pool_watch[], which is protected
	by buf_pool_mutex. */
	ut_ad(buf_pool_mutex_own());

	/* Unlink the sentinel from buf_pool->page_hash so that the
	real block for (space, offset) can take its place. */
	HASH_DELETE(buf_page_t, hash, buf_pool->page_hash, fold, watch);
	ut_d(watch->in_page_hash = FALSE);

	/* Release the slot for reuse by buf_pool_watch_set(). Callers
	(buf_page_init(), buf_page_init_for_read()) transfer
	watch->buf_fix_count to the replacement block before invoking
	this function, so the count can be cleared here. */
	watch->buf_fix_count = 0;
	watch->state = BUF_BLOCK_POOL_WATCH;
}
/****************************************************************//**
Stop watching if the page has been read in.
buf_pool_watch_set(space,offset) must have returned NULL before. */
UNIV_INTERN
void
buf_pool_watch_clear(void)
/*======================*/
buf_pool_watch_unset(
/*=================*/
ulint space, /*!< in: space id */
ulint offset) /*!< in: page number */
{
buf_pool_mutex_enter();
buf_page_t* bpage;
ulint fold = buf_page_address_fold(space, offset);
ut_ad(buf_pool->watch_active);
buf_pool_mutex_enter();
bpage = buf_page_hash_get_low(space, offset, fold);
/* The page must exist because buf_pool_watch_set()
increments buf_fix_count. */
ut_a(bpage);
if (UNIV_UNLIKELY(!buf_pool_watch_is(bpage))) {
mutex_t* mutex = buf_page_get_mutex(bpage);
mutex_enter(mutex);
ut_a(bpage->buf_fix_count > 0);
bpage->buf_fix_count--;
mutex_exit(mutex);
} else {
ut_a(bpage->buf_fix_count > 0);
buf_pool->watch_active = FALSE;
if (UNIV_LIKELY(!--bpage->buf_fix_count)) {
buf_pool_watch_remove(fold, bpage);
}
}
buf_pool_mutex_exit();
}
/****************************************************************//**
Check if the given page is being watched and has been read to the buffer
pool.
@return TRUE if the given page is being watched and it has been read in */
Check if the page has been read in.
This may only be called after buf_pool_watch_set(space,offset)
has returned NULL and before invoking buf_pool_watch_unset(space,offset).
@return FALSE if the given page was not read in, TRUE if it was */
UNIV_INTERN
ibool
buf_pool_watch_occurred(
/*====================*/
ulint space, /*!< in: space id */
ulint page_no) /*!< in: page number */
ulint offset) /*!< in: page number */
{
ulint ret;
buf_page_t* bpage;
ulint fold = buf_page_address_fold(space, offset);
ibool ret;
buf_pool_mutex_enter();
ret = buf_pool->watch_active
&& space == buf_pool->watch_space
&& page_no == buf_pool->watch_page_no
&& buf_pool->watch_occurred;
bpage = buf_page_hash_get_low(space, offset, fold);
/* The page must exist because buf_pool_watch_set()
increments buf_fix_count. */
ut_a(bpage);
ret = !buf_pool_watch_is(bpage);
buf_pool_mutex_exit();
return(ret);
......@@ -1630,6 +1759,7 @@ buf_reset_check_index_page_at_flush(
block = (buf_block_t*) buf_page_hash_get(space, offset);
if (block && buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE) {
ut_ad(!buf_pool_watch_is(&block->page));
block->check_index_page_at_flush = FALSE;
}
......@@ -1658,6 +1788,7 @@ buf_page_peek_if_search_hashed(
if (!block || buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE) {
is_hashed = FALSE;
} else {
ut_ad(!buf_pool_watch_is(&block->page));
is_hashed = block->is_hashed;
}
......@@ -1686,7 +1817,7 @@ buf_page_set_file_page_was_freed(
bpage = buf_page_hash_get(space, offset);
if (bpage) {
if (bpage && !buf_pool_watch_is(bpage)) {
bpage->file_page_was_freed = TRUE;
}
......@@ -1714,7 +1845,7 @@ buf_page_reset_file_page_was_freed(
bpage = buf_page_hash_get(space, offset);
if (bpage) {
if (bpage && !buf_pool_watch_is(bpage)) {
bpage->file_page_was_freed = FALSE;
}
......@@ -1755,7 +1886,7 @@ buf_page_get_zip(
buf_pool_mutex_enter();
lookup:
bpage = buf_page_hash_get(space, offset);
if (bpage) {
if (bpage && !buf_pool_watch_is(bpage)) {
break;
}
......@@ -1777,6 +1908,8 @@ err_exit:
return(NULL);
}
ut_ad(!buf_pool_watch_is(bpage));
switch (buf_page_get_state(bpage)) {
case BUF_BLOCK_NOT_USED:
case BUF_BLOCK_READY_FOR_USE:
......@@ -2100,6 +2233,7 @@ buf_page_get_gen(
mtr_t* mtr) /*!< in: mini-transaction */
{
buf_block_t* block;
ulint fold;
unsigned access_time;
ulint fix_type;
ibool must_read;
......@@ -2120,6 +2254,7 @@ buf_page_get_gen(
ut_ad(!ibuf_inside() || ibuf_page(space, zip_size, offset, NULL));
#endif
buf_pool->stat.n_page_gets++;
fold = buf_page_address_fold(space, offset);
loop:
block = guess;
buf_pool_mutex_enter();
......@@ -2146,15 +2281,26 @@ loop:
}
if (block == NULL) {
block = (buf_block_t*) buf_page_hash_get(space, offset);
block = (buf_block_t*) buf_page_hash_get_low(space, offset,
fold);
}
loop2:
if (block && buf_pool_watch_is(&block->page)) {
block = NULL;
}
if (block == NULL) {
/* Page not in buf_pool: needs to be read from file */
if (mode == BUF_GET_IF_IN_POOL_OR_WATCH) {
buf_pool_watch_set(space, offset);
block = (buf_block_t*) buf_pool_watch_set(
space, offset, fold);
if (UNIV_LIKELY_NULL(block)) {
goto got_block;
}
}
buf_pool_mutex_exit();
......@@ -2195,23 +2341,16 @@ loop2:
goto loop;
}
got_block:
ut_ad(page_zip_get_size(&block->page.zip) == zip_size);
must_read = buf_block_get_io_fix(block) == BUF_IO_READ;
if (must_read
&& (mode == BUF_GET_IF_IN_POOL
|| mode == BUF_GET_IF_IN_POOL_OR_WATCH)) {
if (must_read && mode == BUF_GET_IF_IN_POOL) {
/* The page is being read to buffer pool,
but we cannot wait around for the read to
complete. */
if (mode == BUF_GET_IF_IN_POOL_OR_WATCH) {
buf_pool_watch_set(space, offset);
}
/* The page is only being read to buffer */
buf_pool_mutex_exit();
return(NULL);
......@@ -2257,7 +2396,7 @@ wait_until_unfixed:
{
buf_page_t* hash_bpage
= buf_page_hash_get(space, offset);
= buf_page_hash_get_low(space, offset, fold);
if (UNIV_UNLIKELY(bpage != hash_bpage)) {
/* The buf_pool->page_hash was modified
......@@ -2683,11 +2822,13 @@ buf_page_try_get_func(
buf_pool_mutex_enter();
block = buf_block_hash_get(space_id, page_no);
if (!block) {
if (!block || buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE) {
buf_pool_mutex_exit();
return(NULL);
}
ut_ad(!buf_pool_watch_is(&block->page));
mutex_enter(&block->mutex);
buf_pool_mutex_exit();
......@@ -2763,25 +2904,6 @@ buf_page_init_low(
#endif /* UNIV_DEBUG_FILE_ACCESSES */
}
/********************************************************************//**
Set watch occurred flag. */
UNIV_INTERN
void
buf_pool_watch_notify(
/*==================*/
ulint space, /*!< in: space id of page read in */
ulint offset) /*!< in: offset of page read in */
{
ut_ad(buf_pool_mutex_own());
if (buf_pool->watch_active
&& space == buf_pool->watch_space
&& offset == buf_pool->watch_page_no) {
buf_pool->watch_occurred = TRUE;
}
}
/********************************************************************//**
Inits a page to the buffer buf_pool. */
static
......@@ -2791,6 +2913,7 @@ buf_page_init(
ulint space, /*!< in: space id */
ulint offset, /*!< in: offset of the page within space
in units of a page */
ulint fold, /*!< in: buf_page_address_fold(space,offset) */
buf_block_t* block) /*!< in: block to init */
{
buf_page_t* hash_page;
......@@ -2815,11 +2938,20 @@ buf_page_init(
block->lock_hash_val = lock_rec_hash(space, offset);
buf_page_init_low(&block->page);
/* Insert into the hash table of file pages */
hash_page = buf_page_hash_get(space, offset);
hash_page = buf_page_hash_get_low(space, offset, fold);
if (UNIV_LIKELY_NULL(hash_page)) {
if (UNIV_LIKELY(!hash_page)) {
} else if (UNIV_LIKELY(buf_pool_watch_is(hash_page))) {
/* Preserve the reference count. */
ulint buf_fix_count = hash_page->buf_fix_count;
ut_a(buf_fix_count > 0);
block->page.buf_fix_count += buf_fix_count;
buf_pool_watch_remove(fold, hash_page);
} else {
fprintf(stderr,
"InnoDB: Error: page %lu %lu already found"
" in the hash table: %p, %p\n",
......@@ -2837,14 +2969,11 @@ buf_page_init(
ut_error;
}
buf_page_init_low(&block->page);
buf_pool_watch_notify(space, offset);
ut_ad(!block->page.in_zip_hash);
ut_ad(!block->page.in_page_hash);
ut_d(block->page.in_page_hash = TRUE);
HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
buf_page_address_fold(space, offset), &block->page);
fold, &block->page);
}
/********************************************************************//**
......@@ -2872,8 +3001,10 @@ buf_page_init_for_read(
ulint offset) /*!< in: page number */
{
buf_block_t* block;
buf_page_t* bpage;
buf_page_t* bpage = NULL;
buf_page_t* watch_page;
mtr_t mtr;
ulint fold;
ibool lru = FALSE;
void* data;
......@@ -2908,10 +3039,14 @@ buf_page_init_for_read(
ut_ad(block);
}
fold = buf_page_address_fold(space, offset);
buf_pool_mutex_enter();
if (buf_page_hash_get(space, offset)) {
watch_page = buf_page_hash_get_low(space, offset, fold);
if (watch_page && !buf_pool_watch_is(watch_page)) {
/* The page is already in the buffer pool. */
watch_page = NULL;
err_exit:
if (block) {
mutex_enter(&block->mutex);
......@@ -2936,7 +3071,7 @@ err_exit:
bpage = &block->page;
mutex_enter(&block->mutex);
buf_page_init(space, offset, block);
buf_page_init(space, offset, fold, block);
/* The block must be put to the LRU list, to the old blocks */
buf_LRU_add_block(bpage, TRUE/* to old blocks */);
......@@ -2995,16 +3130,20 @@ err_exit:
/* If buf_buddy_alloc() allocated storage from the LRU list,
it released and reacquired buf_pool_mutex. Thus, we must
check the page_hash again, as it may have been modified. */
if (UNIV_UNLIKELY(lru)
&& UNIV_LIKELY_NULL(buf_page_hash_get(space, offset))) {
if (UNIV_UNLIKELY(lru)) {
watch_page = buf_page_hash_get_low(space, offset, fold);
if (UNIV_UNLIKELY
(watch_page && !buf_pool_watch_is(watch_page))) {
/* The block was added by some other thread. */
watch_page = NULL;
buf_buddy_free(bpage, sizeof *bpage);
buf_buddy_free(data, zip_size);
bpage = NULL;
goto func_exit;
}
}
page_zip_des_init(&bpage->zip);
page_zip_set_size(&bpage->zip, zip_size);
......@@ -3015,7 +3154,6 @@ err_exit:
page_zip_get_size(&bpage->zip), bpage);
buf_page_init_low(bpage);
buf_pool_watch_notify(space, offset);
bpage->state = BUF_BLOCK_ZIP_PAGE;
bpage->space = space;
......@@ -3031,8 +3169,18 @@ err_exit:
#endif /* UNIV_DEBUG */
ut_d(bpage->in_page_hash = TRUE);
HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
buf_page_address_fold(space, offset), bpage);
if (UNIV_LIKELY_NULL(watch_page)) {
/* Preserve the reference count. */
ulint buf_fix_count = watch_page->buf_fix_count;
ut_a(buf_fix_count > 0);
block->page.buf_fix_count += buf_fix_count;
ut_ad(buf_pool_watch_is(watch_page));
buf_pool_watch_remove(fold, watch_page);
}
HASH_INSERT(buf_page_t, hash, buf_pool->page_hash, fold,
bpage);
/* The block must be put to the LRU list, to the old blocks */
buf_LRU_add_block(bpage, TRUE/* to old blocks */);
......@@ -3076,17 +3224,21 @@ buf_page_create(
buf_block_t* block;
buf_block_t* free_block = NULL;
ulint time_ms = ut_time_ms();
ulint fold;
ut_ad(mtr);
ut_ad(space || !zip_size);
free_block = buf_LRU_get_free_block(0);
fold = buf_page_address_fold(space, offset);
buf_pool_mutex_enter();
block = (buf_block_t*) buf_page_hash_get(space, offset);
block = (buf_block_t*) buf_page_hash_get_low(space, offset, fold);
if (block && buf_page_in_file(&block->page)) {
if (block && buf_page_in_file(&block->page)
&& !buf_pool_watch_is(&block->page)) {
#ifdef UNIV_IBUF_COUNT_DEBUG
ut_a(ibuf_count_get(space, offset) == 0);
#endif
......@@ -3116,7 +3268,7 @@ buf_page_create(
mutex_enter(&block->mutex);
buf_page_init(space, offset, block);
buf_page_init(space, offset, fold, block);
/* The block must be put to the LRU list */
buf_LRU_add_block(&block->page, FALSE);
......
......@@ -1454,8 +1454,10 @@ alloc:
buf_page_t* prev_b = UT_LIST_GET_PREV(LRU, b);
const ulint fold = buf_page_address_fold(
bpage->space, bpage->offset);
buf_page_t* hash_b = buf_page_hash_get_low(
bpage->space, bpage->offset, fold);
ut_a(!buf_page_hash_get(bpage->space, bpage->offset));
ut_a(!hash_b);
b->state = b->oldest_modification
? BUF_BLOCK_ZIP_DIRTY
......@@ -1680,6 +1682,7 @@ buf_LRU_block_remove_hashed_page(
ibool zip) /*!< in: TRUE if should remove also the
compressed page of an uncompressed page */
{
ulint fold;
const buf_page_t* hashed_bpage;
ut_ad(bpage);
ut_ad(buf_pool_mutex_own());
......@@ -1763,7 +1766,9 @@ buf_LRU_block_remove_hashed_page(
break;
}
hashed_bpage = buf_page_hash_get(bpage->space, bpage->offset);
fold = buf_page_address_fold(bpage->space, bpage->offset);
hashed_bpage = buf_page_hash_get_low(bpage->space, bpage->offset,
fold);
if (UNIV_UNLIKELY(bpage != hashed_bpage)) {
fprintf(stderr,
......@@ -1795,9 +1800,7 @@ buf_LRU_block_remove_hashed_page(
ut_ad(!bpage->in_zip_hash);
ut_ad(bpage->in_page_hash);
ut_d(bpage->in_page_hash = FALSE);
HASH_DELETE(buf_page_t, hash, buf_pool->page_hash,
buf_page_address_fold(bpage->space, bpage->offset),
bpage);
HASH_DELETE(buf_page_t, hash, buf_pool->page_hash, fold, bpage);
switch (buf_page_get_state(bpage)) {
case BUF_BLOCK_ZIP_PAGE:
ut_ad(!bpage->in_free_list);
......
......@@ -3399,15 +3399,14 @@ ibuf_insert_low(
goto function_exit;
}
/* After this point, buf_pool_watch_occurred(space, page_no)
may still become true, but we do not have to care about it,
since we are holding a latch on the insert buffer leaf page
that contains buffered changes for (space, page_no). If
buf_pool_watch_occurred(space, page_no) becomes true,
buf_page_io_complete() for (space, page_no) will have to
acquire a latch on the same insert buffer leaf page, which it
cannot do until we have buffered the IBUF_OP_DELETE and done
mtr_commit(&mtr) to release the latch. */
/* After this point, the page could still be loaded to the
buffer pool, but we do not have to care about it, since we are
holding a latch on the insert buffer leaf page that contains
buffered changes for (space, page_no). If the page enters the
buffer pool, buf_page_io_complete() for (space, page_no) will
have to acquire a latch on the same insert buffer leaf page,
which it cannot do until we have buffered the IBUF_OP_DELETE
and done mtr_commit(&mtr) to release the latch. */
#ifdef UNIV_IBUF_COUNT_DEBUG
ut_a((buffered == 0) || ibuf_count_get(space, page_no));
......@@ -3602,7 +3601,7 @@ ibuf_insert(
case IBUF_USE_INSERT:
case IBUF_USE_INSERT_DELETE_MARK:
case IBUF_USE_ALL:
goto notify;
goto check_watch;
case IBUF_USE_COUNT:
break;
}
......@@ -3617,7 +3616,7 @@ ibuf_insert(
case IBUF_USE_INSERT_DELETE_MARK:
case IBUF_USE_ALL:
ut_ad(!no_counter);
goto notify;
goto check_watch;
case IBUF_USE_COUNT:
break;
}
......@@ -3632,7 +3631,7 @@ ibuf_insert(
case IBUF_USE_DELETE:
case IBUF_USE_ALL:
ut_ad(!no_counter);
goto skip_notify;
goto skip_watch;
case IBUF_USE_COUNT:
break;
}
......@@ -3644,23 +3643,39 @@ ibuf_insert(
/* unknown op or use */
ut_error;
notify:
/* If another thread buffers an insert on a page while
the purge is in progress, the purge for the same page
must not be buffered, because it could remove a record
that was re-inserted later.
check_watch:
/* If a thread attempts to buffer an insert on a page while a
purge is in progress on the same page, the purge must not be
buffered, because it could remove a record that was
re-inserted later. For simplicity, we block the buffering of
all operations on a page that has a purge pending.
We do not call this in the IBUF_OP_DELETE case,
because that would always trigger the buffer pool
watch during purge and thus prevent the buffering of
delete operations. We assume that IBUF_OP_DELETE
operations are only issued by the purge thread. */
We do not check this in the IBUF_OP_DELETE case, because that
would always trigger the buffer pool watch during purge and
thus prevent the buffering of delete operations. We assume
that the issuer of IBUF_OP_DELETE has called
buf_pool_watch_set(space, page_no). */
{
buf_page_t* bpage;
ulint fold = buf_page_address_fold(space, page_no);
buf_pool_mutex_enter();
buf_pool_watch_notify(space, page_no);
bpage = buf_page_hash_get_low(space, page_no, fold);
buf_pool_mutex_exit();
skip_notify:
if (UNIV_LIKELY_NULL(bpage)) {
/* A buffer pool watch has been set or the
page has been read into the buffer pool.
Do not buffer the request. If a purge operation
is being buffered, have this request executed
directly on the page in the buffer pool after the
buffered entries for this page have been merged. */
return(FALSE);
}
}
skip_watch:
entry_size = rec_get_converted_size(index, entry, 0);
if (entry_size
......
......@@ -86,6 +86,8 @@ The enumeration values must be 0..7. */
enum buf_page_state {
BUF_BLOCK_ZIP_FREE = 0, /*!< contains a free
compressed page */
BUF_BLOCK_POOL_WATCH = 0, /*!< a sentinel for the buffer pool
watch, element of buf_pool_watch[] */
BUF_BLOCK_ZIP_PAGE, /*!< contains a clean
compressed page */
BUF_BLOCK_ZIP_DIRTY, /*!< contains a compressed
......@@ -290,8 +292,8 @@ buf_page_get_gen(
ulint rw_latch,/*!< in: RW_S_LATCH, RW_X_LATCH, RW_NO_LATCH */
buf_block_t* guess, /*!< in: guessed block or NULL */
ulint mode, /*!< in: BUF_GET, BUF_GET_IF_IN_POOL,
BUF_GET_NO_LATCH, BUF_GET_NOWAIT or
BUF_GET_IF_IN_POOL_WATCH */
BUF_GET_NO_LATCH or
BUF_GET_IF_IN_POOL_OR_WATCH */
const char* file, /*!< in: file name */
ulint line, /*!< in: line where called */
mtr_t* mtr); /*!< in: mini-transaction */
......@@ -994,6 +996,16 @@ Returns the control block of a file page, NULL if not found.
@return block, NULL if not found */
UNIV_INLINE
buf_page_t*
buf_page_hash_get_low(
/*==================*/
ulint space, /*!< in: space id */
ulint offset, /*!< in: offset of the page within space */
ulint fold); /*!< in: buf_page_address_fold(space, offset) */
/******************************************************************//**
Returns the control block of a file page, NULL if not found.
@return block, NULL if not found or not a real control block */
UNIV_INLINE
buf_page_t*
buf_page_hash_get(
/*==============*/
ulint space, /*!< in: space id */
......@@ -1015,30 +1027,48 @@ UNIV_INTERN
ulint
buf_get_free_list_len(void);
/*=======================*/
/********************************************************************
Stop watching if the marked page is read in. */
Determine if a block is a sentinel for a buffer pool watch.
@return TRUE if a sentinel for a buffer pool watch, FALSE if not */
UNIV_INTERN
void
buf_pool_watch_clear(void);
/*======================*/
/************************************************************************
Set watch occurred flag. */
ibool
buf_pool_watch_is(
/*==============*/
const buf_page_t* bpage) /*!< in: block */
__attribute__((nonnull, warn_unused_result));
/****************************************************************//**
Add watch for the given page to be read in. Caller must have the buffer pool
@return NULL if watch set, block if the page is in the buffer pool */
UNIV_INTERN
buf_page_t*
buf_pool_watch_set(
/*===============*/
ulint space, /*!< in: space id */
ulint offset, /*!< in: page number */
ulint fold) /*!< in: buf_page_address_fold(space, offset) */
__attribute__((warn_unused_result));
/****************************************************************//**
Stop watching if the page has been read in.
buf_pool_watch_set(space,offset) must have returned NULL before. */
UNIV_INTERN
void
buf_pool_watch_notify(
/*==================*/
ulint space, /*!< in: space id of page read in */
ulint offset);/*!< in: offset of page read in */
/********************************************************************
Check if the given page is being watched and has been read to the buffer
pool.
@return TRUE if the given page is being watched and it has been read in */
buf_pool_watch_unset(
/*=================*/
ulint space, /*!< in: space id */
ulint offset);/*!< in: page number */
/****************************************************************//**
Check if the page has been read in.
This may only be called after buf_pool_watch_set(space,offset)
has returned NULL and before invoking buf_pool_watch_unset(space,offset).
@return FALSE if the given page was not read in, TRUE if it was */
UNIV_INTERN
ibool
buf_pool_watch_occurred(
/*====================*/
ulint space, /*!< in: space id */
ulint page_no); /*!< in: page number */
ulint offset) /*!< in: page number */
__attribute__((warn_unused_result));
#endif /* !UNIV_HOTBACKUP */
/** The common buffer control block structure
......@@ -1079,7 +1109,10 @@ struct buf_page_struct{
#endif /* !UNIV_HOTBACKUP */
page_zip_des_t zip; /*!< compressed page; zip.data
(but not the data it points to) is
also protected by buf_pool_mutex */
also protected by buf_pool_mutex;
state == BUF_BLOCK_ZIP_PAGE and
zip.data == NULL means an active
buf_pool_watch */
#ifndef UNIV_HOTBACKUP
buf_page_t* hash; /*!< node used in chaining to
buf_pool->page_hash or
......@@ -1434,18 +1467,7 @@ struct buf_pool_struct{
set to zero when a buffer block is
allocated */
/* @} */
/** @name Buffer pool watch
This is needed for implementing delete buffering. */
/* @{ */
/*--------------------------*/
ibool watch_active; /* if TRUE, set watch_occurred
when watch_space, watch_page_no
is read in. */
ulint watch_space; /* space id of watched page */
ulint watch_page_no; /* page number of watched page */
ibool watch_occurred; /* has watched page been read in */
/*--------------------------*/
/* @} */
/** @name LRU replacement algorithm fields */
/* @{ */
......
......@@ -902,21 +902,20 @@ Returns the control block of a file page, NULL if not found.
@return block, NULL if not found */
UNIV_INLINE
buf_page_t*
buf_page_hash_get(
/*==============*/
buf_page_hash_get_low(
/*==================*/
ulint space, /*!< in: space id */
ulint offset) /*!< in: offset of the page within space */
ulint offset, /*!< in: offset of the page within space */
ulint fold) /*!< in: buf_page_address_fold(space, offset) */
{
buf_page_t* bpage;
ulint fold;
ut_ad(buf_pool);
ut_ad(buf_pool_mutex_own());
ut_ad(fold == buf_page_address_fold(space, offset));
/* Look for the page in the hash table */
fold = buf_page_address_fold(space, offset);
HASH_SEARCH(hash, buf_pool->page_hash, fold, buf_page_t*, bpage,
ut_ad(bpage->in_page_hash && !bpage->in_zip_hash
&& buf_page_in_file(bpage)),
......@@ -931,6 +930,26 @@ buf_page_hash_get(
return(bpage);
}
/******************************************************************//**
Returns the control block of a file page, NULL if not found.
@return block, NULL if not found or not a real control block */
UNIV_INLINE
buf_page_t*
buf_page_hash_get(
/*==============*/
	ulint	space,	/*!< in: space id */
	ulint	offset)	/*!< in: offset of the page within space */
{
	const ulint	fold = buf_page_address_fold(space, offset);
	buf_page_t*	bpage = buf_page_hash_get_low(space, offset, fold);

	if (bpage == NULL) {
		return(NULL);
	}

	/* Hide buffer pool watch sentinels from callers of this
	high-level lookup, to keep the pre-watch behaviour. */
	return(UNIV_UNLIKELY(buf_pool_watch_is(bpage)) ? NULL : bpage);
}
/******************************************************************//**
Returns the control block of a file page, NULL if not found
or an uncompressed page frame does not exist.
......@@ -942,7 +961,11 @@ buf_block_hash_get(
ulint space, /*!< in: space id */
ulint offset) /*!< in: offset of the page within space */
{
return(buf_page_get_block(buf_page_hash_get(space, offset)));
buf_block_t* block;
block = buf_page_get_block(buf_page_hash_get(space, offset));
return(block);
}
/********************************************************************//**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment