Commit 0011a8a9 authored by marko

Reduce buffer pool mutex contention under >= 4 big concurrent CPU-bound SELECT queries. (Bug #15815)

Fix: replace the mutex by one mutex protecting the 'flush list'
(and the free list) and several mutexes protecting portions of the
buffer pool, where we keep several individual LRU lists of pages.

This patch is from Sunny Bains and Heikki Tuuri.
parent b4b9a107
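The core of the change, reduced to a standalone sketch: the buffer pool mutex is held only while navigating the shared lists, and the per-block fields (io_fix, buf_fix_count, accessed, state) move under a new block-local mutex. The sketch below uses pthreads and invented names (blk_t, pool_mutex, fix_block) purely for illustration; it is not InnoDB code.

#include <pthread.h>

typedef struct blk {
	pthread_mutex_t	mutex;		/* stands in for block->mutex */
	int		io_fix;		/* nonzero while I/O is pending */
	unsigned long	buf_fix_count;	/* number of threads pinning it */
} blk_t;

static pthread_mutex_t	pool_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Pin a block: the pool mutex covers only the (elided) list or hash
lookup; the per-block fields are then updated under the block's own
mutex, so other threads can keep using the rest of the pool. */
static void
fix_block(blk_t* b)
{
	pthread_mutex_lock(&pool_mutex);	/* lookup happens here */
	pthread_mutex_lock(&b->mutex);		/* order: pool, then block */
	pthread_mutex_unlock(&pool_mutex);	/* drop the pool mutex early */

	b->buf_fix_count++;
	pthread_mutex_unlock(&b->mutex);
}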
@@ -113,6 +113,7 @@ buf_flush_ready_for_replace(
 {
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(mutex_own(&(buf_pool->mutex)));
+	ut_ad(mutex_own(&block->mutex));
 #endif /* UNIV_SYNC_DEBUG */
 	if (block->state != BUF_BLOCK_FILE_PAGE) {
 		ut_print_timestamp(stderr);
@@ -148,6 +149,7 @@ buf_flush_ready_for_flush(
 {
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(mutex_own(&(buf_pool->mutex)));
+	ut_ad(mutex_own(&(block->mutex)));
 #endif /* UNIV_SYNC_DEBUG */
 
 	ut_a(block->state == BUF_BLOCK_FILE_PAGE);
@@ -559,8 +561,15 @@ buf_flush_try_page(
 	ut_a(!block || block->state == BUF_BLOCK_FILE_PAGE);
 
+	if (!block) {
+		mutex_exit(&(buf_pool->mutex));
+		return(0);
+	}
+
+	mutex_enter(&block->mutex);
+
 	if (flush_type == BUF_FLUSH_LIST
-	    && block && buf_flush_ready_for_flush(block, flush_type)) {
+	    && buf_flush_ready_for_flush(block, flush_type)) {
 
 		block->io_fix = BUF_IO_WRITE;
@@ -598,6 +607,7 @@ buf_flush_try_page(
 			locked = TRUE;
 		}
 
+		mutex_exit(&block->mutex);
 		mutex_exit(&(buf_pool->mutex));
 
 		if (!locked) {
@@ -618,7 +628,7 @@ buf_flush_try_page(
 		return(1);
 
-	} else if (flush_type == BUF_FLUSH_LRU && block
+	} else if (flush_type == BUF_FLUSH_LRU
 		   && buf_flush_ready_for_flush(block, flush_type)) {
 
 		/* VERY IMPORTANT:
@@ -659,13 +669,14 @@ buf_flush_try_page(
 		buf_pool mutex: this ensures that the latch is acquired
 		immediately. */
 
+		mutex_exit(&block->mutex);
 		mutex_exit(&(buf_pool->mutex));
 
 		buf_flush_write_block_low(block);
 
 		return(1);
 
-	} else if (flush_type == BUF_FLUSH_SINGLE_PAGE && block
+	} else if (flush_type == BUF_FLUSH_SINGLE_PAGE
 		   && buf_flush_ready_for_flush(block, flush_type)) {
 
 		block->io_fix = BUF_IO_WRITE;
@@ -692,6 +703,7 @@ buf_flush_try_page(
 		(buf_pool->n_flush[flush_type])++;
 
+		mutex_exit(&block->mutex);
 		mutex_exit(&(buf_pool->mutex));
 
 		rw_lock_s_lock_gen(&(block->lock), BUF_IO_WRITE);
@@ -709,11 +721,12 @@ buf_flush_try_page(
 		buf_flush_write_block_low(block);
 
 		return(1);
-	} else {
-		mutex_exit(&(buf_pool->mutex));
 
-		return(0);
 	}
+
+	mutex_exit(&block->mutex);
+	mutex_exit(&(buf_pool->mutex));
+
+	return(0);
 }
 
 /***************************************************************
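In buf_flush_try_page above, the block is claimed for the write (io_fix = BUF_IO_WRITE) while both mutexes are held, and both mutexes are released before the write itself, so no mutex is ever held across disk I/O. The same shape in the sketch vocabulary introduced after the commit message (blk_ready_for_flush and blk_write are stand-ins, not InnoDB functions):

static int
blk_ready_for_flush(blk_t* b)	/* stand-in for buf_flush_ready_for_flush */
{
	return(b->io_fix == 0);
}

static void
blk_write(blk_t* b)		/* stand-in for the actual disk write */
{
	(void) b;
}

static int
try_flush(blk_t* b)
{
	pthread_mutex_lock(&pool_mutex);
	pthread_mutex_lock(&b->mutex);

	if (!blk_ready_for_flush(b)) {
		pthread_mutex_unlock(&b->mutex);
		pthread_mutex_unlock(&pool_mutex);
		return(0);
	}

	b->io_fix = 1;			/* claim the block, like BUF_IO_WRITE */

	pthread_mutex_unlock(&b->mutex);
	pthread_mutex_unlock(&pool_mutex);

	blk_write(b);			/* no mutex held across the I/O */
	return(1);
}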
@@ -758,34 +771,48 @@ buf_flush_try_neighbors(
 		block = buf_page_hash_get(space, i);
 		ut_a(!block || block->state == BUF_BLOCK_FILE_PAGE);
 
-		if (block && flush_type == BUF_FLUSH_LRU && i != offset
-		    && !block->old) {
+		if (!block) {
+
+			continue;
+
+		} else if (flush_type == BUF_FLUSH_LRU && i != offset
+			   && !block->old) {
 
 			/* We avoid flushing 'non-old' blocks in an LRU flush,
 			because the flushed blocks are soon freed */
 
 			continue;
-		}
+		} else {
 
-		if (block && buf_flush_ready_for_flush(block, flush_type)
-		    && (i == offset || block->buf_fix_count == 0)) {
-			/* We only try to flush those neighbors != offset
-			where the buf fix count is zero, as we then know that
-			we probably can latch the page without a semaphore
-			wait. Semaphore waits are expensive because we must
-			flush the doublewrite buffer before we start
-			waiting. */
+			mutex_enter(&block->mutex);
 
-			mutex_exit(&(buf_pool->mutex));
+			if (buf_flush_ready_for_flush(block, flush_type)
+			    && (i == offset || block->buf_fix_count == 0)) {
+				/* We only try to flush those
+				neighbors != offset where the buf fix count is
+				zero, as we then know that we probably can
+				latch the page without a semaphore wait.
+				Semaphore waits are expensive because we must
+				flush the doublewrite buffer before we start
+				waiting. */
 
-			/* Note: as we release the buf_pool mutex above, in
-			buf_flush_try_page we cannot be sure the page is still
-			in a flushable state: therefore we check it again
-			inside that function. */
+				mutex_exit(&block->mutex);
+				mutex_exit(&(buf_pool->mutex));
 
-			count += buf_flush_try_page(space, i, flush_type);
+				/* Note: as we release the buf_pool mutex
+				above, in buf_flush_try_page we cannot be sure
+				the page is still in a flushable state:
+				therefore we check it again inside that
+				function. */
 
-			mutex_enter(&(buf_pool->mutex));
+				count += buf_flush_try_page(space, i,
+							    flush_type);
+
+				mutex_enter(&(buf_pool->mutex));
+			} else {
+				mutex_exit(&block->mutex);
+			}
 		}
 	}
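The buf_flush_try_neighbors hunk above is the release-and-recheck pattern that the new comment spells out: the buf_pool mutex is dropped before each per-page flush attempt, the page may stop being flushable in the gap, and buf_flush_try_page therefore re-checks under its own locks. Reduced to its shape, continuing the sketch definitions from above:

static int
flush_neighbors(blk_t** pages, int n)
{
	int	count = 0;
	int	i;

	pthread_mutex_lock(&pool_mutex);

	for (i = 0; i < n; i++) {
		blk_t*	b = pages[i];
		int	candidate;

		pthread_mutex_lock(&b->mutex);
		candidate = blk_ready_for_flush(b) && b->buf_fix_count == 0;
		pthread_mutex_unlock(&b->mutex);

		if (candidate) {
			/* Drop the pool mutex for the flush attempt;
			try_flush() re-checks flushability itself. */
			pthread_mutex_unlock(&pool_mutex);
			count += try_flush(b);
			pthread_mutex_lock(&pool_mutex);
		}
	}

	pthread_mutex_unlock(&pool_mutex);
	return(count);
}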
@@ -879,12 +906,15 @@ buf_flush_batch(
 		while ((block != NULL) && !found) {
 			ut_a(block->state == BUF_BLOCK_FILE_PAGE);
 
+			mutex_enter(&block->mutex);
+
 			if (buf_flush_ready_for_flush(block, flush_type)) {
 
 				found = TRUE;
 				space = block->space;
 				offset = block->offset;
 
+				mutex_exit(&block->mutex);
 				mutex_exit(&(buf_pool->mutex));
 
 				old_page_count = page_count;
@@ -901,10 +931,14 @@ buf_flush_batch(
 			} else if (flush_type == BUF_FLUSH_LRU) {
 
+				mutex_exit(&block->mutex);
+
 				block = UT_LIST_GET_PREV(LRU, block);
 			} else {
 				ut_ad(flush_type == BUF_FLUSH_LIST);
 
+				mutex_exit(&block->mutex);
+
 				block = UT_LIST_GET_PREV(flush_list, block);
 			}
 		}
@@ -986,10 +1020,14 @@ buf_flush_LRU_recommendation(void)
 	       + BUF_FLUSH_EXTRA_MARGIN)
 	       && (distance < BUF_LRU_FREE_SEARCH_LEN)) {
 
+		mutex_enter(&block->mutex);
+
 		if (buf_flush_ready_for_replace(block)) {
 			n_replaceable++;
 		}
 
+		mutex_exit(&block->mutex);
+
 		distance++;
 
 		block = UT_LIST_GET_PREV(LRU, block);
......
@@ -86,6 +86,9 @@ scan_again:
 	block = UT_LIST_GET_LAST(buf_pool->LRU);
 
 	while (block != NULL) {
+		mutex_enter(&block->mutex);
+
 		ut_a(block->state == BUF_BLOCK_FILE_PAGE);
 
 		if (block->space == id
@@ -112,6 +115,8 @@ scan_again:
 			if (block->is_hashed) {
 				page_no = block->offset;
 
+				mutex_exit(&block->mutex);
+
 				mutex_exit(&(buf_pool->mutex));
 
 				/* Note that the following call will acquire
@@ -138,6 +143,7 @@ scan_again:
 			buf_LRU_block_free_hashed_page(block);
 		}
 next_page:
+		mutex_exit(&block->mutex);
 		block = UT_LIST_GET_PREV(LRU, block);
 	}
@@ -211,6 +217,9 @@ buf_LRU_search_and_free_block(
 	while (block != NULL) {
 		ut_a(block->in_LRU_list);
 
+		mutex_enter(&block->mutex);
+
 		if (buf_flush_ready_for_replace(block)) {
 
 #ifdef UNIV_DEBUG
@@ -226,6 +235,7 @@ buf_LRU_search_and_free_block(
 			buf_LRU_block_remove_hashed_page(block);
 
 			mutex_exit(&(buf_pool->mutex));
+			mutex_exit(&block->mutex);
 
 			/* Remove possible adaptive hash index built on the
 			page; in the case of AWE the block may not have a
@@ -234,15 +244,21 @@ buf_LRU_search_and_free_block(
 			if (block->frame) {
 				btr_search_drop_page_hash_index(block->frame);
 			}
 
-			mutex_enter(&(buf_pool->mutex));
-
 			ut_a(block->buf_fix_count == 0);
 
+			mutex_enter(&(buf_pool->mutex));
+			mutex_enter(&block->mutex);
+
 			buf_LRU_block_free_hashed_page(block);
 			freed = TRUE;
+			mutex_exit(&block->mutex);
 
 			break;
 		}
 
+		mutex_exit(&block->mutex);
+
 		block = UT_LIST_GET_PREV(LRU, block);
 		distance++;
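The buf_LRU_search_and_free_block hunk above also illustrates the re-acquisition rule: after both mutexes are dropped to call btr_search_drop_page_hash_index(), they are re-taken in the fixed order, buffer pool mutex first, then block mutex. In sketch form (comments mark where the elided InnoDB work would go):

static void
free_block_sketch(blk_t* b)
{
	/* entered with pool_mutex and b->mutex both held,
	block already removed from the page hash */

	pthread_mutex_unlock(&pool_mutex);
	pthread_mutex_unlock(&b->mutex);

	/* work that may take other latches would run here,
	e.g. dropping the adaptive hash index for the page */

	pthread_mutex_lock(&pool_mutex);	/* re-acquire in the fixed */
	pthread_mutex_lock(&b->mutex);		/* order: pool, then block */

	/* free the block back to the pool here */

	pthread_mutex_unlock(&b->mutex);
	pthread_mutex_unlock(&pool_mutex);
}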
@@ -428,8 +444,12 @@ loop:
 		}
 	}
 
+	mutex_enter(&block->mutex);
+
 	block->state = BUF_BLOCK_READY_FOR_USE;
 
+	mutex_exit(&block->mutex);
+
 	mutex_exit(&(buf_pool->mutex));
 
 	if (started_monitor) {
@@ -838,6 +858,7 @@ buf_LRU_block_free_non_file_page(
 {
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(mutex_own(&(buf_pool->mutex)));
+	ut_ad(mutex_own(&block->mutex));
 #endif /* UNIV_SYNC_DEBUG */
 
 	ut_ad(block);
@@ -877,6 +898,7 @@ buf_LRU_block_remove_hashed_page(
 {
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(mutex_own(&(buf_pool->mutex)));
+	ut_ad(mutex_own(&block->mutex));
 #endif /* UNIV_SYNC_DEBUG */
 
 	ut_ad(block);
@@ -939,6 +961,7 @@ buf_LRU_block_free_hashed_page(
 {
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(mutex_own(&(buf_pool->mutex)));
+	ut_ad(mutex_own(&block->mutex));
 #endif /* UNIV_SYNC_DEBUG */
 
 	ut_a(block->state == BUF_BLOCK_REMOVE_HASH);
......
@@ -461,8 +461,8 @@ Gets the mutex number protecting the page record lock hash chain in the lock
 table. */
 UNIV_INLINE
 mutex_t*
-buf_frame_get_lock_mutex(
-/*=====================*/
+buf_frame_get_mutex(
+/*================*/
 	/* out: mutex */
 	byte*	ptr);	/* in: pointer to within a buffer frame */
 /***********************************************************************
@@ -713,7 +713,10 @@ struct buf_block_struct{
 	ulint		magic_n;	/* magic number to check */
 	ulint		state;		/* state of the control block:
-					BUF_BLOCK_NOT_USED, ... */
+					BUF_BLOCK_NOT_USED, ...; changing
+					this is only allowed when a thread
+					has BOTH the buffer pool mutex AND
+					block->mutex locked */
 	byte*		frame;		/* pointer to buffer frame which
 					is of size UNIV_PAGE_SIZE, and
 					aligned to an address divisible by
@@ -731,8 +734,12 @@ struct buf_block_struct{
 	ulint		offset;		/* page number within the space */
 	ulint		lock_hash_val;	/* hashed value of the page address
 					in the record lock hash table */
-	mutex_t*	lock_mutex;	/* mutex protecting the chain in the
-					record lock hash table */
+	mutex_t		mutex;		/* mutex protecting this block:
+					state (also protected by the buffer
+					pool mutex), io_fix, buf_fix_count,
+					and accessed; we introduce this new
+					mutex in InnoDB-5.1 to relieve
+					contention on the buffer pool mutex */
 	rw_lock_t	lock;		/* read-write lock of the buffer
 					frame */
 	buf_block_t*	hash;		/* node used in chaining to the page
@@ -788,20 +795,27 @@ struct buf_block_struct{
 					in heuristic algorithms, because of
 					the possibility of a wrap-around! */
 	ulint		freed_page_clock;/* the value of freed_page_clock
-					buffer pool when this block was
-					last time put to the head of the
-					LRU list */
+					of the buffer pool when this block was
+					the last time put to the head of the
+					LRU list; a thread is allowed to
+					read this for heuristic purposes
+					without holding any mutex or latch */
 	ibool		old;		/* TRUE if the block is in the old
 					blocks in the LRU list */
 	ibool		accessed;	/* TRUE if the page has been accessed
 					while in the buffer pool: read-ahead
 					may read in pages which have not been
-					accessed yet */
+					accessed yet; this is protected by
+					block->mutex; a thread is allowed to
+					read this for heuristic purposes
+					without holding any mutex or latch */
 	ulint		buf_fix_count;	/* count of how manyfold this block
-					is currently bufferfixed */
+					is currently bufferfixed; this is
+					protected by block->mutex */
 	ulint		io_fix;		/* if a read is pending to the frame,
 					io_fix is BUF_IO_READ, in the case
-					of a write BUF_IO_WRITE, otherwise 0 */
+					of a write BUF_IO_WRITE, otherwise 0;
+					this is protected by block->mutex */
 	/* 4. Optimistic search field */
 
 	dulint		modify_clock;	/* this clock is incremented every
@@ -959,7 +973,9 @@ struct buf_pool_struct{
 					number of buffer blocks removed from
 					the end of the LRU list; NOTE that
 					this counter may wrap around at 4
-					billion! */
+					billion! A thread is allowed to
+					read this for heuristic purposes
+					without holding any mutex or latch */
 	ulint		LRU_flush_ended;/* when an LRU flush ends for a page,
 					this is incremented by one; this is
 					set to zero when a buffer block is
......
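The struct comments in this header amount to a small protection table. Restated as an annotated sketch, continuing the pthread-based vocabulary from above (types simplified, names invented; "unlocked reads tolerated" is shorthand for the "a thread is allowed to read this for heuristic purposes" wording, and the read-under-either-mutex note on state is the standard corollary of its write rule, not a claim from the patch):

typedef struct annotated_blk {
	pthread_mutex_t	mutex;		/* the new per-block mutex */
	int		state;		/* changing it requires BOTH the
					buffer pool mutex and this mutex;
					holding either one then suffices
					for a consistent read */
	int		io_fix;		/* protected by this mutex */
	unsigned long	buf_fix_count;	/* protected by this mutex */
	int		accessed;	/* protected by this mutex; unlocked
					reads tolerated for heuristics */
	unsigned long	freed_page_clock;/* unlocked reads tolerated for
					heuristics */
} annotated_blk_t;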
@@ -337,8 +337,8 @@ Gets the mutex number protecting the page record lock hash chain in the lock
 table. */
 UNIV_INLINE
 mutex_t*
-buf_frame_get_lock_mutex(
-/*=====================*/
+buf_frame_get_mutex(
+/*================*/
 	/* out: mutex */
 	byte*	ptr)	/* in: pointer to within a buffer frame */
 {
@@ -346,7 +346,7 @@ buf_frame_get_lock_mutex(
 	block = buf_block_align(ptr);
 
-	return(block->lock_mutex);
+	return(&block->mutex);
 }
 
 /*************************************************************************
@@ -519,6 +519,7 @@ buf_block_buf_fix_inc_debug(
 	ret = rw_lock_s_lock_func_nowait(&(block->debug_latch), file, line);
 
 	ut_ad(ret == TRUE);
+	ut_ad(mutex_own(&block->mutex));
 #endif
 	block->buf_fix_count++;
 }
@@ -531,6 +532,9 @@ buf_block_buf_fix_inc(
 /*==================*/
 	buf_block_t*	block)	/* in: block to bufferfix */
 {
+#ifdef UNIV_SYNC_DEBUG
+	ut_ad(mutex_own(&block->mutex));
+#endif
 	block->buf_fix_count++;
 }
 #endif /* UNIV_SYNC_DEBUG */
@@ -625,23 +629,24 @@ buf_page_release(
 	ut_ad(block);
 
-	mutex_enter_fast(&(buf_pool->mutex));
-
 	ut_a(block->state == BUF_BLOCK_FILE_PAGE);
 	ut_a(block->buf_fix_count > 0);
 
 	if (rw_latch == RW_X_LATCH && mtr->modifications) {
+		mutex_enter(&buf_pool->mutex);
 		buf_flush_note_modification(block, mtr);
+		mutex_exit(&buf_pool->mutex);
 	}
 
+	mutex_enter(&block->mutex);
+
 #ifdef UNIV_SYNC_DEBUG
 	rw_lock_s_unlock(&(block->debug_latch));
 #endif
 	buf_fix_count = block->buf_fix_count;
 	block->buf_fix_count = buf_fix_count - 1;
 
-	mutex_exit(&(buf_pool->mutex));
+	mutex_exit(&block->mutex);
 
 	if (rw_latch == RW_S_LATCH) {
 		rw_lock_s_unlock(&(block->lock));
......
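The buf_page_release hunk above narrows the locking the same way: the buf_pool mutex is now taken only when the mtr actually modified the page (around buf_flush_note_modification), and the unfix itself needs only the block's own mutex. In sketch form, with the modified flag standing in for the rw_latch == RW_X_LATCH && mtr->modifications test:

static void
page_release(blk_t* b, int modified)
{
	if (modified) {
		pthread_mutex_lock(&pool_mutex);
		/* note the modification on the flush list here */
		pthread_mutex_unlock(&pool_mutex);
	}

	pthread_mutex_lock(&b->mutex);
	b->buf_fix_count--;	/* the unfix needs only the block mutex */
	pthread_mutex_unlock(&b->mutex);
}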