Commit cdb5b464 authored by marko

branches/zip: Implement wrappers for all operations on the buffer pool mutex.

buf_pool->mutex: Rename to buf_pool_mutex, so that the wrappers will have
to be used when changes are merged from other source trees.

buf_pool->zip_mutex: Rename to buf_pool_zip_mutex.

buf_pool_mutex_own(), buf_pool_mutex_enter(), buf_pool_mutex_exit():
Wrappers for buf_pool_mutex.
parent 4c8c6f37
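
The header-side changes that define the three wrappers are not part of this excerpt. As a minimal sketch of what they amount to, assuming plain forwarding macros over the renamed global mutex (the actual definitions live in the buffer pool header and may differ):

/* Sketch only: assumed forwarding macros for the renamed buf_pool_mutex;
   the real definitions are in the buffer pool header, not in this diff. */
#define buf_pool_mutex_own()   mutex_own(&buf_pool_mutex)   /* TRUE iff the current thread holds it */
#define buf_pool_mutex_enter() mutex_enter(&buf_pool_mutex) /* acquire the buffer pool mutex */
#define buf_pool_mutex_exit()  mutex_exit(&buf_pool_mutex)  /* release the buffer pool mutex */

Renaming the struct field to the global buf_pool_mutex means that any merged code still written against buf_pool->mutex fails to compile and has to be converted to the wrappers.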
@@ -3619,7 +3619,7 @@ btr_blob_free(
 mtr_commit(mtr);
-mutex_enter(&buf_pool->mutex);
+buf_pool_mutex_enter();
 mutex_enter(&block->mutex);
 /* Only free the block if it is still allocated to
@@ -3639,7 +3639,7 @@ btr_blob_free(
 }
 }
-mutex_exit(&buf_pool->mutex);
+buf_pool_mutex_exit();
 mutex_exit(&block->mutex);
 }
...
@@ -781,9 +781,9 @@ btr_search_guess_on_hash(
 ulint page_no = page_get_page_no(page);
 ulint space_id = page_get_space_id(page);
-mutex_enter(&buf_pool->mutex);
+buf_pool_mutex_enter();
 block = (buf_block_t*) buf_page_hash_get(space_id, page_no);
-mutex_exit(&buf_pool->mutex);
+buf_pool_mutex_exit();
 }
 if (UNIV_UNLIKELY(!block)
@@ -1650,7 +1650,7 @@ btr_search_validate(void)
 rec_offs_init(offsets_);
 rw_lock_x_lock(&btr_search_latch);
-mutex_enter(&buf_pool->mutex);
+buf_pool_mutex_enter();
 cell_count = hash_get_n_cells(btr_search_sys->hash_index);
@@ -1658,11 +1658,11 @@ btr_search_validate(void)
 /* We release btr_search_latch every once in a while to
 give other queries a chance to run. */
 if ((i != 0) && ((i % chunk_size) == 0)) {
-mutex_exit(&buf_pool->mutex);
+buf_pool_mutex_exit();
 rw_lock_x_unlock(&btr_search_latch);
 os_thread_yield();
 rw_lock_x_lock(&btr_search_latch);
-mutex_enter(&buf_pool->mutex);
+buf_pool_mutex_enter();
 }
 node = hash_get_nth_cell(btr_search_sys->hash_index, i)->node;
@@ -1753,11 +1753,11 @@ btr_search_validate(void)
 /* We release btr_search_latch every once in a while to
 give other queries a chance to run. */
 if (i != 0) {
-mutex_exit(&buf_pool->mutex);
+buf_pool_mutex_exit();
 rw_lock_x_unlock(&btr_search_latch);
 os_thread_yield();
 rw_lock_x_lock(&btr_search_latch);
-mutex_enter(&buf_pool->mutex);
+buf_pool_mutex_enter();
 }
 if (!ha_validate(btr_search_sys->hash_index, i, end_index)) {
@@ -1765,7 +1765,7 @@ btr_search_validate(void)
 }
 }
-mutex_exit(&buf_pool->mutex);
+buf_pool_mutex_exit();
 rw_lock_x_unlock(&btr_search_latch);
 if (UNIV_LIKELY_NULL(heap)) {
 mem_heap_free(heap);
...
@@ -20,24 +20,24 @@ Created December 2006 by Marko Makela
 /* Statistic counters */
 /** Number of frames allocated from the buffer pool to the buddy system.
-Protected by buf_pool->mutex. */
+Protected by buf_pool_mutex. */
 ulint buf_buddy_n_frames;
 /** Counts of blocks allocated from the buddy system.
-Protected by buf_pool->mutex. */
+Protected by buf_pool_mutex. */
 ulint buf_buddy_used[BUF_BUDDY_SIZES + 1];
 /** Counts of blocks relocated by the buddy system.
-Protected by buf_pool->mutex. */
+Protected by buf_pool_mutex. */
 ib_uint64_t buf_buddy_relocated[BUF_BUDDY_SIZES + 1];
 /** Preferred minimum number of frames allocated from the buffer pool
 to the buddy system. Unless this number is exceeded or the buffer
 pool is scarce, the LRU algorithm will not free compressed-only pages
-in order to satisfy an allocation request. Protected by buf_pool->mutex. */
+in order to satisfy an allocation request. Protected by buf_pool_mutex. */
 ulint buf_buddy_min_n_frames = 0;
 /** Preferred maximum number of frames allocated from the buffer pool
 to the buddy system. Unless this number is exceeded, the buddy allocator
 will not try to free clean compressed-only pages before falling back
-to the LRU algorithm. Protected by buf_pool->mutex. */
+to the LRU algorithm. Protected by buf_pool_mutex. */
 ulint buf_buddy_max_n_frames = ULINT_UNDEFINED;
 /**************************************************************************
@@ -127,7 +127,7 @@ buf_buddy_alloc_zip(
 {
 buf_page_t* bpage;
-ut_ad(mutex_own(&buf_pool->mutex));
+ut_ad(buf_pool_mutex_own());
 ut_a(i < BUF_BUDDY_SIZES);
 #if defined UNIV_DEBUG && !defined UNIV_DEBUG_VALGRIND
@@ -179,8 +179,8 @@ buf_buddy_block_free(
 buf_page_t* bpage;
 buf_block_t* block;
-ut_ad(mutex_own(&buf_pool->mutex));
-ut_ad(!mutex_own(&buf_pool->zip_mutex));
+ut_ad(buf_pool_mutex_own());
+ut_ad(!mutex_own(&buf_pool_zip_mutex));
 ut_a(!ut_align_offset(buf, UNIV_PAGE_SIZE));
 HASH_SEARCH(hash, buf_pool->zip_hash, fold, buf_page_t*, bpage,
@@ -213,8 +213,8 @@ buf_buddy_block_register(
 buf_block_t* block) /* in: buffer frame to allocate */
 {
 const ulint fold = BUF_POOL_ZIP_FOLD(block);
-ut_ad(mutex_own(&buf_pool->mutex));
-ut_ad(!mutex_own(&buf_pool->zip_mutex));
+ut_ad(buf_pool_mutex_own());
+ut_ad(!mutex_own(&buf_pool_zip_mutex));
 buf_block_set_state(block, BUF_BLOCK_MEMORY);
@@ -276,12 +276,12 @@ buf_buddy_alloc_clean(
 ulint i, /* in: index of buf_pool->zip_free[] */
 ibool* lru) /* in: pointer to a variable that will be assigned
 TRUE if storage was allocated from the LRU list
-and buf_pool->mutex was temporarily released */
+and buf_pool_mutex was temporarily released */
 {
 buf_page_t* bpage;
-ut_ad(mutex_own(&buf_pool->mutex));
-ut_ad(!mutex_own(&buf_pool->zip_mutex));
+ut_ad(buf_pool_mutex_own());
+ut_ad(!mutex_own(&buf_pool_zip_mutex));
 if (buf_buddy_n_frames < buf_buddy_max_n_frames) {
@@ -301,7 +301,7 @@ buf_buddy_alloc_clean(
 j = ut_min(UT_LIST_GET_LEN(buf_pool->zip_clean), 100);
 bpage = UT_LIST_GET_FIRST(buf_pool->zip_clean);
-mutex_enter(&buf_pool->zip_mutex);
+mutex_enter(&buf_pool_zip_mutex);
 for (; j--; bpage = UT_LIST_GET_NEXT(list, bpage)) {
 if (bpage->zip.ssize != dummy_zip.ssize
@@ -312,7 +312,7 @@ buf_buddy_alloc_clean(
 /* Reuse the block. */
-mutex_exit(&buf_pool->zip_mutex);
+mutex_exit(&buf_pool_zip_mutex);
 bpage = buf_buddy_alloc_zip(i);
 /* bpage may be NULL if buf_buddy_free()
@@ -320,7 +320,7 @@ buf_buddy_alloc_clean(
 buf_LRU_block_remove_hashed_page()]
 recombines blocks and invokes
 buf_buddy_block_free(). Because
-buf_pool->mutex will not be released
+buf_pool_mutex will not be released
 after buf_buddy_block_free(), there will
 be at least one block available in the
 buffer pool, and thus it does not make sense
@@ -329,7 +329,7 @@ buf_buddy_alloc_clean(
 return(bpage);
 }
-mutex_exit(&buf_pool->zip_mutex);
+mutex_exit(&buf_pool_zip_mutex);
 }
 /* Free blocks from the end of the LRU list until enough space
@@ -381,7 +381,7 @@ buf_buddy_alloc_clean(
 }
 /* A successful buf_LRU_free_block() may release and
-reacquire buf_pool->mutex, and thus bpage->LRU of
+reacquire buf_pool_mutex, and thus bpage->LRU of
 an uncompressed page may point to garbage. Furthermore,
 if bpage were a compressed page descriptor, it would
 have been deallocated by buf_LRU_free_block().
@@ -396,8 +396,8 @@ buf_buddy_alloc_clean(
 /**************************************************************************
 Allocate a block. The thread calling this function must hold
-buf_pool->mutex and must not hold buf_pool->zip_mutex or any block->mutex.
-The buf_pool->mutex may only be released and reacquired if lru != NULL. */
+buf_pool_mutex and must not hold buf_pool_zip_mutex or any block->mutex.
+The buf_pool_mutex may only be released and reacquired if lru != NULL. */
 void*
 buf_buddy_alloc_low(
@@ -408,13 +408,13 @@ buf_buddy_alloc_low(
 or BUF_BUDDY_SIZES */
 ibool* lru) /* in: pointer to a variable that will be assigned
 TRUE if storage was allocated from the LRU list
-and buf_pool->mutex was temporarily released,
+and buf_pool_mutex was temporarily released,
 or NULL if the LRU list should not be used */
 {
 buf_block_t* block;
-ut_ad(mutex_own(&buf_pool->mutex));
-ut_ad(!mutex_own(&buf_pool->zip_mutex));
+ut_ad(buf_pool_mutex_own());
+ut_ad(!mutex_own(&buf_pool_zip_mutex));
 if (i < BUF_BUDDY_SIZES) {
 /* Try to allocate from the buddy system. */
@@ -449,10 +449,10 @@ buf_buddy_alloc_low(
 }
 /* Try replacing an uncompressed page in the buffer pool. */
-mutex_exit(&buf_pool->mutex);
+buf_pool_mutex_exit();
 block = buf_LRU_get_free_block(0);
 *lru = TRUE;
-mutex_enter(&buf_pool->mutex);
+buf_pool_mutex_enter();
 alloc_big:
 buf_buddy_block_register(block);
@@ -476,7 +476,7 @@ buf_buddy_relocate_block(
 {
 buf_page_t* b;
-ut_ad(mutex_own(&buf_pool->mutex));
+ut_ad(buf_pool_mutex_own());
 switch (buf_page_get_state(bpage)) {
 case BUF_BLOCK_ZIP_FREE:
@@ -494,10 +494,10 @@ buf_buddy_relocate_block(
 break;
 }
-mutex_enter(&buf_pool->zip_mutex);
+mutex_enter(&buf_pool_zip_mutex);
 if (!buf_page_can_relocate(bpage)) {
-mutex_exit(&buf_pool->zip_mutex);
+mutex_exit(&buf_pool_zip_mutex);
 return(FALSE);
 }
@@ -514,7 +514,7 @@ buf_buddy_relocate_block(
 UT_LIST_ADD_FIRST(list, buf_pool->zip_clean, dpage);
 }
-mutex_exit(&buf_pool->zip_mutex);
+mutex_exit(&buf_pool_zip_mutex);
 return(TRUE);
 }
@@ -532,8 +532,8 @@ buf_buddy_relocate(
 buf_page_t* bpage;
 const ulint size = BUF_BUDDY_LOW << i;
-ut_ad(mutex_own(&buf_pool->mutex));
-ut_ad(!mutex_own(&buf_pool->zip_mutex));
+ut_ad(buf_pool_mutex_own());
+ut_ad(!mutex_own(&buf_pool_zip_mutex));
 ut_ad(!ut_align_offset(src, size));
 ut_ad(!ut_align_offset(dst, size));
 UNIV_MEM_ASSERT_W(dst, size);
@@ -632,8 +632,8 @@ buf_buddy_free_low(
 buf_page_t* bpage;
 buf_page_t* buddy;
-ut_ad(mutex_own(&buf_pool->mutex));
-ut_ad(!mutex_own(&buf_pool->zip_mutex));
+ut_ad(buf_pool_mutex_own());
+ut_ad(!mutex_own(&buf_pool_zip_mutex));
 ut_ad(i <= BUF_BUDDY_SIZES);
 ut_ad(buf_buddy_used[i] > 0);
...
@@ -224,6 +224,13 @@ static const int WAIT_FOR_READ = 5000;
 buf_pool_t* buf_pool = NULL; /* The buffer buf_pool of the database */
+/* mutex protecting the buffer pool struct and control blocks, except the
+read-write lock in them */
+mutex_t buf_pool_mutex;
+/* mutex protecting the control blocks of compressed-only pages
+(of type buf_page_t, not buf_block_t) */
+mutex_t buf_pool_zip_mutex;
 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
 static ulint buf_dbg_counter = 0; /* This is used to insert validation
 operations in excution in the
@@ -746,7 +753,7 @@ buf_chunk_contains_zip(
 ulint i;
 ut_ad(buf_pool);
-ut_ad(mutex_own(&buf_pool->mutex));
+ut_ad(buf_pool_mutex_own());
 block = chunk->blocks;
@@ -800,7 +807,7 @@ buf_chunk_not_freed(
 ulint i;
 ut_ad(buf_pool);
-ut_ad(mutex_own(&(buf_pool->mutex)));
+ut_ad(buf_pool_mutex_own());
 block = chunk->blocks;
@@ -833,7 +840,7 @@ buf_chunk_all_free(
 ulint i;
 ut_ad(buf_pool);
-ut_ad(mutex_own(&(buf_pool->mutex)));
+ut_ad(buf_pool_mutex_own());
 block = chunk->blocks;
@@ -859,7 +866,7 @@ buf_chunk_free(
 buf_block_t* block;
 const buf_block_t* block_end;
-ut_ad(mutex_own(&(buf_pool->mutex)));
+ut_ad(buf_pool_mutex_own());
 block_end = chunk->blocks + chunk->size;
@@ -901,11 +908,11 @@ buf_pool_init(void)
 /* 1. Initialize general fields
 ------------------------------- */
-mutex_create(&buf_pool->mutex, SYNC_BUF_POOL);
-mutex_enter(&(buf_pool->mutex));
-mutex_create(&buf_pool->zip_mutex, SYNC_BUF_BLOCK);
+mutex_create(&buf_pool_mutex, SYNC_BUF_POOL);
+buf_pool_mutex_enter();
+mutex_create(&buf_pool_zip_mutex, SYNC_BUF_BLOCK);
 buf_pool->n_chunks = 1;
 buf_pool->chunks = chunk = mem_alloc(sizeof *chunk);
@@ -941,7 +948,7 @@ buf_pool_init(void)
 --------------------------- */
 /* All fields are initialized by mem_zalloc(). */
-mutex_exit(&(buf_pool->mutex));
+buf_pool_mutex_exit();
 btr_search_sys_create(buf_pool->curr_size
 * UNIV_PAGE_SIZE / sizeof(void*) / 64);
@@ -965,7 +972,7 @@ buf_relocate(
 buf_page_t* b;
 ulint fold;
-ut_ad(mutex_own(&buf_pool->mutex));
+ut_ad(buf_pool_mutex_own());
 ut_ad(mutex_own(buf_page_get_mutex(bpage)));
 ut_a(buf_page_get_io_fix(bpage) == BUF_IO_NONE);
 ut_a(bpage->buf_fix_count == 0);
@@ -1021,11 +1028,11 @@ buf_pool_shrink(
 buf_chunk_t* max_chunk;
 buf_chunk_t* max_free_chunk;
-ut_ad(!mutex_own(&buf_pool->mutex));
+ut_ad(!buf_pool_mutex_own());
 try_again:
 btr_search_disable(); /* Empty the adaptive hash index again */
-mutex_enter(&buf_pool->mutex);
+buf_pool_mutex_enter();
 shrink_again:
 if (buf_pool->n_chunks <= 1) {
@@ -1088,7 +1095,7 @@ buf_pool_shrink(
 mutex_enter(&block->mutex);
 /* The following calls will temporarily
-release block->mutex and buf_pool->mutex.
+release block->mutex and buf_pool_mutex.
 Therefore, we have to always retry,
 even if !dirty && !nonfree. */
@@ -1104,7 +1111,7 @@ buf_pool_shrink(
 mutex_exit(&block->mutex);
 }
-mutex_exit(&buf_pool->mutex);
+buf_pool_mutex_exit();
 /* Request for a flush of the chunk if it helps.
 Do not flush if there are non-free blocks, since
@@ -1153,7 +1160,7 @@ buf_pool_shrink(
 func_done:
 srv_buf_pool_old_size = srv_buf_pool_size;
 func_exit:
-mutex_exit(&buf_pool->mutex);
+buf_pool_mutex_exit();
 btr_search_enable();
 }
@@ -1171,7 +1178,7 @@ buf_pool_page_hash_rebuild(void)
 hash_table_t* zip_hash;
 buf_page_t* b;
-mutex_enter(&buf_pool->mutex);
+buf_pool_mutex_enter();
 /* Free, create, and populate the hash table. */
 hash_table_free(buf_pool->page_hash);
@@ -1251,7 +1258,7 @@ buf_pool_page_hash_rebuild(void)
 }
 }
-mutex_exit(&buf_pool->mutex);
+buf_pool_mutex_exit();
 }
 /************************************************************************
@@ -1261,17 +1268,17 @@ void
 buf_pool_resize(void)
 /*=================*/
 {
-mutex_enter(&buf_pool->mutex);
+buf_pool_mutex_enter();
 if (srv_buf_pool_old_size == srv_buf_pool_size) {
-mutex_exit(&buf_pool->mutex);
+buf_pool_mutex_exit();
 return;
 }
 if (srv_buf_pool_curr_size + 1048576 > srv_buf_pool_size) {
-mutex_exit(&buf_pool->mutex);
+buf_pool_mutex_exit();
 /* Disable adaptive hash indexes and empty the index
 in order to free up memory in the buffer pool chunks. */
@@ -1305,7 +1312,7 @@ buf_pool_resize(void)
 }
 srv_buf_pool_old_size = srv_buf_pool_size;
-mutex_exit(&buf_pool->mutex);
+buf_pool_mutex_exit();
 }
 buf_pool_page_hash_rebuild();
@@ -1320,19 +1327,19 @@ buf_block_make_young(
 /*=================*/
 buf_page_t* bpage) /* in: block to make younger */
 {
-ut_ad(!mutex_own(&(buf_pool->mutex)));
+ut_ad(!buf_pool_mutex_own());
 /* Note that we read freed_page_clock's without holding any mutex:
 this is allowed since the result is used only in heuristics */
 if (buf_page_peek_if_too_old(bpage)) {
-mutex_enter(&buf_pool->mutex);
+buf_pool_mutex_enter();
 /* There has been freeing activity in the LRU list:
 best to move to the head of the LRU list */
 buf_LRU_make_block_young(bpage);
-mutex_exit(&buf_pool->mutex);
+buf_pool_mutex_exit();
 }
 }
@@ -1346,13 +1353,13 @@ buf_page_make_young(
 /*================*/
 buf_page_t* bpage) /* in: buffer block of a file page */
 {
-mutex_enter(&(buf_pool->mutex));
+buf_pool_mutex_enter();
 ut_a(buf_page_in_file(bpage));
 buf_LRU_make_block_young(bpage);
-mutex_exit(&(buf_pool->mutex));
+buf_pool_mutex_exit();
 }
 /************************************************************************
@@ -1367,7 +1374,7 @@ buf_reset_check_index_page_at_flush(
 {
 buf_block_t* block;
-mutex_enter_fast(&(buf_pool->mutex));
+buf_pool_mutex_enter();
 block = (buf_block_t*) buf_page_hash_get(space, offset);
@@ -1375,7 +1382,7 @@ buf_reset_check_index_page_at_flush(
 block->check_index_page_at_flush = FALSE;
 }
-mutex_exit(&(buf_pool->mutex));
+buf_pool_mutex_exit();
 }
 /************************************************************************
@@ -1394,7 +1401,7 @@ buf_page_peek_if_search_hashed(
 buf_block_t* block;
 ibool is_hashed;
-mutex_enter_fast(&(buf_pool->mutex));
+buf_pool_mutex_enter();
 block = (buf_block_t*) buf_page_hash_get(space, offset);
@@ -1404,7 +1411,7 @@ buf_page_peek_if_search_hashed(
 is_hashed = block->is_hashed;
 }
-mutex_exit(&(buf_pool->mutex));
+buf_pool_mutex_exit();
 return(is_hashed);
 }
@@ -1426,7 +1433,7 @@ buf_page_set_file_page_was_freed(
 {
 buf_page_t* bpage;
-mutex_enter_fast(&(buf_pool->mutex));
+buf_pool_mutex_enter();
 bpage = buf_page_hash_get(space, offset);
@@ -1434,7 +1441,7 @@ buf_page_set_file_page_was_freed(
 bpage->file_page_was_freed = TRUE;
 }
-mutex_exit(&(buf_pool->mutex));
+buf_pool_mutex_exit();
 return(bpage);
 }
@@ -1455,7 +1462,7 @@ buf_page_reset_file_page_was_freed(
 {
 buf_page_t* bpage;
-mutex_enter_fast(&(buf_pool->mutex));
+buf_pool_mutex_enter();
 bpage = buf_page_hash_get(space, offset);
@@ -1463,7 +1470,7 @@ buf_page_reset_file_page_was_freed(
 bpage->file_page_was_freed = FALSE;
 }
-mutex_exit(&(buf_pool->mutex));
+buf_pool_mutex_exit();
 return(bpage);
 }
@@ -1490,7 +1497,7 @@ buf_page_get_zip(
 buf_pool->n_page_gets++;
 for (;;) {
-mutex_enter_fast(&buf_pool->mutex);
+buf_pool_mutex_enter();
 lookup:
 bpage = buf_page_hash_get(space, offset);
 if (bpage) {
@@ -1499,7 +1506,7 @@ buf_page_get_zip(
 /* Page not in buf_pool: needs to be read from file */
-mutex_exit(&buf_pool->mutex);
+buf_pool_mutex_exit();
 buf_read_page(space, zip_size, offset);
@@ -1510,7 +1517,7 @@ buf_page_get_zip(
 if (UNIV_UNLIKELY(!bpage->zip.data)) {
 /* There is no compressed page. */
-mutex_exit(&buf_pool->mutex);
+buf_pool_mutex_exit();
 return(NULL);
 }
@@ -1544,7 +1551,7 @@ buf_page_get_zip(
 must_read = buf_page_get_io_fix(bpage) == BUF_IO_READ;
-mutex_exit(&buf_pool->mutex);
+buf_pool_mutex_exit();
 buf_page_set_accessed(bpage, TRUE);
@@ -1687,7 +1694,7 @@ buf_block_is_uncompressed(
 const buf_chunk_t* chunk = buf_pool->chunks;
 const buf_chunk_t* const echunk = chunk + buf_pool->n_chunks;
-ut_ad(mutex_own(&buf_pool->mutex));
+ut_ad(buf_pool_mutex_own());
 if (UNIV_UNLIKELY((((ulint) block) % sizeof *block) != 0)) {
 /* The pointer should be aligned. */
@@ -1745,7 +1752,7 @@ buf_page_get_gen(
 buf_pool->n_page_gets++;
 loop:
 block = guess;
-mutex_enter_fast(&(buf_pool->mutex));
+buf_pool_mutex_enter();
 if (block) {
 /* If the guess is a compressed page descriptor that
@@ -1776,7 +1783,7 @@ buf_page_get_gen(
 if (block == NULL) {
 /* Page not in buf_pool: needs to be read from file */
-mutex_exit(&(buf_pool->mutex));
+buf_pool_mutex_exit();
 if (mode == BUF_GET_IF_IN_POOL) {
@@ -1797,7 +1804,7 @@ buf_page_get_gen(
 if (must_read && mode == BUF_GET_IF_IN_POOL) {
 /* The page is only being read to buffer */
-mutex_exit(&buf_pool->mutex);
+buf_pool_mutex_exit();
 return(NULL);
 }
@@ -1821,19 +1828,19 @@ buf_page_get_gen(
 wait_until_unfixed:
 /* The block is buffer-fixed or I/O-fixed.
 Try again later. */
-mutex_exit(&buf_pool->mutex);
+buf_pool_mutex_exit();
 os_thread_sleep(WAIT_FOR_READ);
 goto loop;
 }
 /* Allocate an uncompressed page. */
-mutex_exit(&buf_pool->mutex);
+buf_pool_mutex_exit();
 block = buf_LRU_get_free_block(0);
 ut_a(block);
-mutex_enter(&buf_pool->mutex);
+buf_pool_mutex_enter();
 mutex_enter(&block->mutex);
 {
@@ -1842,7 +1849,7 @@ buf_page_get_gen(
 if (UNIV_UNLIKELY(bpage != hash_bpage)) {
 /* The buf_pool->page_hash was modified
-while buf_pool->mutex was released.
+while buf_pool_mutex was released.
 Free the block that was allocated. */
 buf_LRU_block_free_non_file_page(block);
@@ -1858,7 +1865,7 @@ buf_page_get_gen(
 || buf_page_get_io_fix(bpage) != BUF_IO_NONE)) {
 /* The block was buffer-fixed or I/O-fixed
-while buf_pool->mutex was not held by this thread.
+while buf_pool_mutex was not held by this thread.
 Free the block that was allocated and try again.
 This should be extremely unlikely. */
@@ -1871,7 +1878,7 @@ buf_page_get_gen(
 /* Move the compressed page from bpage to block,
 and uncompress it. */
-mutex_enter(&buf_pool->zip_mutex);
+mutex_enter(&buf_pool_zip_mutex);
 buf_relocate(bpage, &block->page);
 buf_block_init_low(block);
@@ -1913,14 +1920,14 @@ buf_page_get_gen(
 buf_pool->n_pend_unzip++;
 rw_lock_x_lock(&block->lock);
 mutex_exit(&block->mutex);
-mutex_exit(&buf_pool->zip_mutex);
+mutex_exit(&buf_pool_zip_mutex);
 buf_buddy_free(bpage, sizeof *bpage);
-mutex_exit(&buf_pool->mutex);
+buf_pool_mutex_exit();
 /* Decompress the page and apply buffered operations
-while not holding buf_pool->mutex or block->mutex. */
+while not holding buf_pool_mutex or block->mutex. */
 success = buf_zip_decompress(block, srv_use_checksums);
 if (UNIV_LIKELY(success)) {
@@ -1929,7 +1936,7 @@ buf_page_get_gen(
 }
 /* Unfix and unlatch the block. */
-mutex_enter(&buf_pool->mutex);
+buf_pool_mutex_enter();
 mutex_enter(&block->mutex);
 buf_pool->n_pend_unzip--;
 block->page.buf_fix_count--;
@@ -1939,7 +1946,7 @@ buf_page_get_gen(
 if (UNIV_UNLIKELY(!success)) {
-mutex_exit(&buf_pool->mutex);
+buf_pool_mutex_exit();
 return(NULL);
 }
@@ -1960,7 +1967,7 @@ buf_page_get_gen(
 UNIV_MEM_ASSERT_RW(&block->page, sizeof block->page);
 buf_block_buf_fix_inc(block, file, line);
-mutex_exit(&buf_pool->mutex);
+buf_pool_mutex_exit();
 /* Check if this is the first access to the page */
@@ -2271,16 +2278,16 @@ buf_page_try_get_func(
 ibool success;
 ulint fix_type;
-mutex_enter(&buf_pool->mutex);
+buf_pool_mutex_enter();
 block = buf_block_hash_get(space_id, page_no);
 if (!block) {
-mutex_exit(&buf_pool->mutex);
+buf_pool_mutex_exit();
 return(NULL);
 }
 mutex_enter(&block->mutex);
-mutex_exit(&buf_pool->mutex);
+buf_pool_mutex_exit();
 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
 ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
@@ -2396,7 +2403,7 @@ buf_page_init(
 {
 buf_page_t* hash_page;
-ut_ad(mutex_own(&(buf_pool->mutex)));
+ut_ad(buf_pool_mutex_own());
 ut_ad(mutex_own(&(block->mutex)));
 ut_a(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE);
@@ -2429,7 +2436,7 @@ buf_page_init(
 (const void*) hash_page, (const void*) block);
 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
 mutex_exit(&block->mutex);
-mutex_exit(&buf_pool->mutex);
+buf_pool_mutex_exit();
 buf_print();
 buf_LRU_print();
 buf_validate();
@@ -2507,7 +2514,7 @@ buf_page_init_for_read(
 ut_ad(block);
 }
-mutex_enter(&buf_pool->mutex);
+buf_pool_mutex_enter();
 if (buf_page_hash_get(space, offset)) {
 /* The page is already in the buffer pool. */
@@ -2519,7 +2526,7 @@ buf_page_init_for_read(
 }
 err_exit2:
-mutex_exit(&buf_pool->mutex);
+buf_pool_mutex_exit();
 if (mode == BUF_READ_IBUF_PAGES_ONLY) {
@@ -2561,11 +2568,11 @@ buf_page_init_for_read(
 if (UNIV_UNLIKELY(zip_size)) {
 page_zip_set_size(&block->page.zip, zip_size);
-/* buf_pool->mutex may be released and
+/* buf_pool_mutex may be released and
 reacquired by buf_buddy_alloc(). Thus, we
 must release block->mutex in order not to
 break the latching order in the reacquisition
-of buf_pool->mutex. We also must defer this
+of buf_pool_mutex. We also must defer this
 operation until after the block descriptor has
 been added to buf_pool->LRU and
 buf_pool->page_hash. */
@@ -2590,7 +2597,7 @@ buf_page_init_for_read(
 bpage = buf_buddy_alloc(sizeof *bpage, &lru);
 /* If buf_buddy_alloc() allocated storage from the LRU list,
-it released and reacquired buf_pool->mutex. Thus, we must
+it released and reacquired buf_pool_mutex. Thus, we must
 check the page_hash again, as it may have been modified. */
 if (UNIV_UNLIKELY(lru)
 && UNIV_LIKELY_NULL(buf_page_hash_get(space, offset))) {
@@ -2605,7 +2612,7 @@ buf_page_init_for_read(
 page_zip_set_size(&bpage->zip, zip_size);
 bpage->zip.data = data;
-mutex_enter(&buf_pool->zip_mutex);
+mutex_enter(&buf_pool_zip_mutex);
 UNIV_MEM_DESC(bpage->zip.data,
 page_zip_get_size(&bpage->zip), bpage);
 buf_page_init_low(bpage);
@@ -2631,11 +2638,11 @@ buf_page_init_for_read(
 buf_page_set_io_fix(bpage, BUF_IO_READ);
-mutex_exit(&buf_pool->zip_mutex);
+mutex_exit(&buf_pool_zip_mutex);
 }
 buf_pool->n_pend_reads++;
-mutex_exit(&buf_pool->mutex);
+buf_pool_mutex_exit();
 if (mode == BUF_READ_IBUF_PAGES_ONLY) {
@@ -2671,7 +2678,7 @@ buf_page_create(
 free_block = buf_LRU_get_free_block(0);
-mutex_enter(&(buf_pool->mutex));
+buf_pool_mutex_enter();
 block = (buf_block_t*) buf_page_hash_get(space, offset);
@@ -2684,7 +2691,7 @@ buf_page_create(
 #endif /* UNIV_DEBUG_FILE_ACCESSES */
 /* Page can be found in buf_pool */
-mutex_exit(&(buf_pool->mutex));
+buf_pool_mutex_exit();
 buf_block_free(free_block);
@@ -2718,7 +2725,7 @@ buf_page_create(
 ibool lru;
 /* Prevent race conditions during buf_buddy_alloc(),
-which may release and reacquire buf_pool->mutex,
+which may release and reacquire buf_pool_mutex,
 by IO-fixing and X-latching the block. */
 buf_page_set_io_fix(&block->page, BUF_IO_READ);
@@ -2726,10 +2733,10 @@ buf_page_create(
 page_zip_set_size(&block->page.zip, zip_size);
 mutex_exit(&block->mutex);
-/* buf_pool->mutex may be released and reacquired by
+/* buf_pool_mutex may be released and reacquired by
 buf_buddy_alloc(). Thus, we must release block->mutex
 in order not to break the latching order in
-the reacquisition of buf_pool->mutex. We also must
+the reacquisition of buf_pool_mutex. We also must
 defer this operation until after the block descriptor
 has been added to buf_pool->LRU and buf_pool->page_hash. */
 data = buf_buddy_alloc(zip_size, &lru);
@@ -2740,7 +2747,7 @@ buf_page_create(
 rw_lock_x_unlock(&block->lock);
 }
-mutex_exit(&(buf_pool->mutex));
+buf_pool_mutex_exit();
 mtr_memo_push(mtr, block, MTR_MEMO_BUF_FIX);
@@ -2926,7 +2933,7 @@ buf_page_io_complete(
 }
 }
-mutex_enter(&(buf_pool->mutex));
+buf_pool_mutex_enter();
 mutex_enter(buf_page_get_mutex(bpage));
 #ifdef UNIV_IBUF_COUNT_DEBUG
@@ -2981,7 +2988,7 @@ buf_page_io_complete(
 }
 mutex_exit(buf_page_get_mutex(bpage));
-mutex_exit(&(buf_pool->mutex));
+buf_pool_mutex_exit();
 #ifdef UNIV_DEBUG
 if (buf_debug_prints) {
@@ -3012,11 +3019,11 @@ buf_pool_invalidate(void)
 freed = buf_LRU_search_and_free_block(100);
 }
-mutex_enter(&(buf_pool->mutex));
+buf_pool_mutex_enter();
 ut_ad(UT_LIST_GET_LEN(buf_pool->LRU) == 0);
-mutex_exit(&(buf_pool->mutex));
+buf_pool_mutex_exit();
 }
 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
@@ -3040,7 +3047,7 @@ buf_validate(void)
 ut_ad(buf_pool);
-mutex_enter(&(buf_pool->mutex));
+buf_pool_mutex_enter();
 chunk = buf_pool->chunks;
@@ -3134,7 +3141,7 @@ buf_validate(void)
 }
 }
-mutex_enter(&buf_pool->zip_mutex);
+mutex_enter(&buf_pool_zip_mutex);
 /* Check clean compressed-only blocks. */
@@ -3211,7 +3218,7 @@ buf_validate(void)
 ut_a(buf_page_hash_get(b->space, b->offset) == b);
 }
-mutex_exit(&buf_pool->zip_mutex);
+mutex_exit(&buf_pool_zip_mutex);
 if (n_lru + n_free > buf_pool->curr_size + n_zip) {
 fprintf(stderr, "n LRU %lu, n free %lu, pool %lu zip %lu\n",
@@ -3233,7 +3240,7 @@ buf_validate(void)
 ut_a(buf_pool->n_flush[BUF_FLUSH_LIST] == n_list_flush);
 ut_a(buf_pool->n_flush[BUF_FLUSH_LRU] == n_lru_flush);
-mutex_exit(&(buf_pool->mutex));
+buf_pool_mutex_exit();
 ut_a(buf_LRU_validate());
 ut_a(buf_flush_validate());
@@ -3267,7 +3274,7 @@ buf_print(void)
 index_ids = mem_alloc(sizeof(dulint) * size);
 counts = mem_alloc(sizeof(ulint) * size);
-mutex_enter(&(buf_pool->mutex));
+buf_pool_mutex_enter();
 fprintf(stderr,
 "buf_pool size %lu\n"
@@ -3330,7 +3337,7 @@ buf_print(void)
 }
 }
-mutex_exit(&(buf_pool->mutex));
+buf_pool_mutex_exit();
 for (i = 0; i < n_found; i++) {
 index = dict_index_get_if_in_cache(index_ids[i]);
@@ -3367,7 +3374,7 @@ buf_get_latched_pages_number(void)
 ulint i;
 ulint fixed_pages_number = 0;
-mutex_enter(&(buf_pool->mutex));
+buf_pool_mutex_enter();
 chunk = buf_pool->chunks;
@@ -3396,7 +3403,7 @@ buf_get_latched_pages_number(void)
 }
 }
-mutex_enter(&buf_pool->zip_mutex);
+mutex_enter(&buf_pool_zip_mutex);
 /* Traverse the lists of clean and dirty compressed-only blocks. */
@@ -3436,8 +3443,8 @@ buf_get_latched_pages_number(void)
 }
 }
-mutex_exit(&buf_pool->zip_mutex);
-mutex_exit(&(buf_pool->mutex));
+mutex_exit(&buf_pool_zip_mutex);
+buf_pool_mutex_exit();
 return(fixed_pages_number);
 }
@@ -3465,7 +3472,7 @@ buf_get_modified_ratio_pct(void)
 {
 ulint ratio;
-mutex_enter(&(buf_pool->mutex));
+buf_pool_mutex_enter();
 ratio = (100 * UT_LIST_GET_LEN(buf_pool->flush_list))
 / (1 + UT_LIST_GET_LEN(buf_pool->LRU)
@@ -3473,7 +3480,7 @@ buf_get_modified_ratio_pct(void)
 /* 1 + is there to avoid division by zero */
-mutex_exit(&(buf_pool->mutex));
+buf_pool_mutex_exit();
 return(ratio);
 }
@@ -3493,7 +3500,7 @@ buf_print_io(
 ut_ad(buf_pool);
 size = buf_pool->curr_size;
-mutex_enter(&(buf_pool->mutex));
+buf_pool_mutex_enter();
 fprintf(file,
 "Buffer pool size %lu\n"
@@ -3548,7 +3555,7 @@ buf_print_io(
 buf_pool->n_pages_created_old = buf_pool->n_pages_created;
 buf_pool->n_pages_written_old = buf_pool->n_pages_written;
-mutex_exit(&(buf_pool->mutex));
+buf_pool_mutex_exit();
 }
 /**************************************************************************
@@ -3577,7 +3584,7 @@ buf_all_freed(void)
 ut_ad(buf_pool);
-mutex_enter(&(buf_pool->mutex));
+buf_pool_mutex_enter();
 chunk = buf_pool->chunks;
@@ -3594,7 +3601,7 @@ buf_all_freed(void)
 }
 }
-mutex_exit(&(buf_pool->mutex));
+buf_pool_mutex_exit();
 return(TRUE);
 }
@@ -3610,7 +3617,7 @@ buf_pool_check_no_pending_io(void)
 {
 ibool ret;
-mutex_enter(&(buf_pool->mutex));
+buf_pool_mutex_enter();
 if (buf_pool->n_pend_reads + buf_pool->n_flush[BUF_FLUSH_LRU]
 + buf_pool->n_flush[BUF_FLUSH_LIST]
@@ -3620,7 +3627,7 @@ buf_pool_check_no_pending_io(void)
 ret = TRUE;
 }
-mutex_exit(&(buf_pool->mutex));
+buf_pool_mutex_exit();
 return(ret);
 }
@@ -3634,11 +3641,11 @@ buf_get_free_list_len(void)
 {
 ulint len;
-mutex_enter(&(buf_pool->mutex));
+buf_pool_mutex_enter();
 len = UT_LIST_GET_LEN(buf_pool->free);
-mutex_exit(&(buf_pool->mutex));
+buf_pool_mutex_exit();
 return(len);
 }
...@@ -51,16 +51,16 @@ buf_flush_insert_into_flush_list( ...@@ -51,16 +51,16 @@ buf_flush_insert_into_flush_list(
/*=============================*/ /*=============================*/
buf_page_t* bpage) /* in: block which is modified */ buf_page_t* bpage) /* in: block which is modified */
{ {
ut_ad(mutex_own(&(buf_pool->mutex))); ut_ad(buf_pool_mutex_own());
ut_ad((UT_LIST_GET_FIRST(buf_pool->flush_list) == NULL) ut_ad((UT_LIST_GET_FIRST(buf_pool->flush_list) == NULL)
|| (UT_LIST_GET_FIRST(buf_pool->flush_list)->oldest_modification || (UT_LIST_GET_FIRST(buf_pool->flush_list)->oldest_modification
<= bpage->oldest_modification)); <= bpage->oldest_modification));
switch (buf_page_get_state(bpage)) { switch (buf_page_get_state(bpage)) {
case BUF_BLOCK_ZIP_PAGE: case BUF_BLOCK_ZIP_PAGE:
mutex_enter(&buf_pool->zip_mutex); mutex_enter(&buf_pool_zip_mutex);
buf_page_set_state(bpage, BUF_BLOCK_ZIP_DIRTY); buf_page_set_state(bpage, BUF_BLOCK_ZIP_DIRTY);
mutex_exit(&buf_pool->zip_mutex); mutex_exit(&buf_pool_zip_mutex);
UT_LIST_REMOVE(list, buf_pool->zip_clean, bpage); UT_LIST_REMOVE(list, buf_pool->zip_clean, bpage);
/* fall through */ /* fall through */
case BUF_BLOCK_ZIP_DIRTY: case BUF_BLOCK_ZIP_DIRTY:
...@@ -99,13 +99,13 @@ buf_flush_insert_sorted_into_flush_list( ...@@ -99,13 +99,13 @@ buf_flush_insert_sorted_into_flush_list(
buf_page_t* prev_b; buf_page_t* prev_b;
buf_page_t* b; buf_page_t* b;
ut_ad(mutex_own(&(buf_pool->mutex))); ut_ad(buf_pool_mutex_own());
switch (buf_page_get_state(bpage)) { switch (buf_page_get_state(bpage)) {
case BUF_BLOCK_ZIP_PAGE: case BUF_BLOCK_ZIP_PAGE:
mutex_enter(&buf_pool->zip_mutex); mutex_enter(&buf_pool_zip_mutex);
buf_page_set_state(bpage, BUF_BLOCK_ZIP_DIRTY); buf_page_set_state(bpage, BUF_BLOCK_ZIP_DIRTY);
mutex_exit(&buf_pool->zip_mutex); mutex_exit(&buf_pool_zip_mutex);
UT_LIST_REMOVE(list, buf_pool->zip_clean, bpage); UT_LIST_REMOVE(list, buf_pool->zip_clean, bpage);
/* fall through */ /* fall through */
case BUF_BLOCK_ZIP_DIRTY: case BUF_BLOCK_ZIP_DIRTY:
...@@ -157,7 +157,7 @@ buf_flush_ready_for_replace( ...@@ -157,7 +157,7 @@ buf_flush_ready_for_replace(
buf_page_t* bpage) /* in: buffer control block, must be buf_page_t* bpage) /* in: buffer control block, must be
buf_page_in_file(bpage) and in the LRU list */ buf_page_in_file(bpage) and in the LRU list */
{ {
ut_ad(mutex_own(&(buf_pool->mutex))); ut_ad(buf_pool_mutex_own());
ut_ad(mutex_own(buf_page_get_mutex(bpage))); ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_ad(bpage->in_LRU_list); ut_ad(bpage->in_LRU_list);
...@@ -190,7 +190,7 @@ buf_flush_ready_for_flush( ...@@ -190,7 +190,7 @@ buf_flush_ready_for_flush(
enum buf_flush flush_type)/* in: BUF_FLUSH_LRU or BUF_FLUSH_LIST */ enum buf_flush flush_type)/* in: BUF_FLUSH_LRU or BUF_FLUSH_LIST */
{ {
ut_a(buf_page_in_file(bpage)); ut_a(buf_page_in_file(bpage));
ut_ad(mutex_own(&(buf_pool->mutex))); ut_ad(buf_pool_mutex_own());
ut_ad(mutex_own(buf_page_get_mutex(bpage))); ut_ad(mutex_own(buf_page_get_mutex(bpage)));
if (bpage->oldest_modification != 0 if (bpage->oldest_modification != 0
...@@ -222,7 +222,7 @@ buf_flush_remove( ...@@ -222,7 +222,7 @@ buf_flush_remove(
/*=============*/ /*=============*/
buf_page_t* bpage) /* in: pointer to the block in question */ buf_page_t* bpage) /* in: pointer to the block in question */
{ {
ut_ad(mutex_own(&buf_pool->mutex)); ut_ad(buf_pool_mutex_own());
ut_ad(mutex_own(buf_page_get_mutex(bpage))); ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_ad(bpage->in_flush_list); ut_ad(bpage->in_flush_list);
ut_d(bpage->in_flush_list = FALSE); ut_d(bpage->in_flush_list = FALSE);
...@@ -758,12 +758,12 @@ buf_flush_try_page( ...@@ -758,12 +758,12 @@ buf_flush_try_page(
ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST
|| flush_type == BUF_FLUSH_SINGLE_PAGE); || flush_type == BUF_FLUSH_SINGLE_PAGE);
mutex_enter(&(buf_pool->mutex)); buf_pool_mutex_enter();
bpage = buf_page_hash_get(space, offset); bpage = buf_page_hash_get(space, offset);
if (!bpage) { if (!bpage) {
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
return(0); return(0);
} }
...@@ -774,7 +774,7 @@ buf_flush_try_page( ...@@ -774,7 +774,7 @@ buf_flush_try_page(
if (!buf_flush_ready_for_flush(bpage, flush_type)) { if (!buf_flush_ready_for_flush(bpage, flush_type)) {
mutex_exit(block_mutex); mutex_exit(block_mutex);
mutex_exit(&buf_pool->mutex); buf_pool_mutex_exit();
return(0); return(0);
} }
...@@ -803,7 +803,7 @@ buf_flush_try_page( ...@@ -803,7 +803,7 @@ buf_flush_try_page(
} }
mutex_exit(block_mutex); mutex_exit(block_mutex);
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
if (!locked) { if (!locked) {
buf_flush_buffered_writes(); buf_flush_buffered_writes();
...@@ -846,7 +846,7 @@ buf_flush_try_page( ...@@ -846,7 +846,7 @@ buf_flush_try_page(
immediately. */ immediately. */
mutex_exit(block_mutex); mutex_exit(block_mutex);
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
break; break;
case BUF_FLUSH_SINGLE_PAGE: case BUF_FLUSH_SINGLE_PAGE:
...@@ -862,7 +862,7 @@ buf_flush_try_page( ...@@ -862,7 +862,7 @@ buf_flush_try_page(
buf_pool->n_flush[flush_type]++; buf_pool->n_flush[flush_type]++;
mutex_exit(block_mutex); mutex_exit(block_mutex);
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
if (buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE) { if (buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE) {
rw_lock_s_lock_gen(&((buf_block_t*) bpage)->lock, rw_lock_s_lock_gen(&((buf_block_t*) bpage)->lock,
...@@ -922,7 +922,7 @@ buf_flush_try_neighbors( ...@@ -922,7 +922,7 @@ buf_flush_try_neighbors(
high = fil_space_get_size(space); high = fil_space_get_size(space);
} }
mutex_enter(&(buf_pool->mutex)); buf_pool_mutex_enter();
for (i = low; i < high; i++) { for (i = low; i < high; i++) {
...@@ -956,7 +956,7 @@ buf_flush_try_neighbors( ...@@ -956,7 +956,7 @@ buf_flush_try_neighbors(
flush the doublewrite buffer before we start flush the doublewrite buffer before we start
waiting. */ waiting. */
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
mutex_exit(block_mutex); mutex_exit(block_mutex);
...@@ -969,14 +969,14 @@ buf_flush_try_neighbors( ...@@ -969,14 +969,14 @@ buf_flush_try_neighbors(
count += buf_flush_try_page(space, i, count += buf_flush_try_page(space, i,
flush_type); flush_type);
mutex_enter(&(buf_pool->mutex)); buf_pool_mutex_enter();
} else { } else {
mutex_exit(block_mutex); mutex_exit(block_mutex);
} }
} }
} }
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
return(count); return(count);
} }
...@@ -1020,14 +1020,14 @@ buf_flush_batch( ...@@ -1020,14 +1020,14 @@ buf_flush_batch(
ut_ad((flush_type != BUF_FLUSH_LIST) ut_ad((flush_type != BUF_FLUSH_LIST)
|| sync_thread_levels_empty_gen(TRUE)); || sync_thread_levels_empty_gen(TRUE));
#endif /* UNIV_SYNC_DEBUG */ #endif /* UNIV_SYNC_DEBUG */
mutex_enter(&(buf_pool->mutex)); buf_pool_mutex_enter();
if ((buf_pool->n_flush[flush_type] > 0) if ((buf_pool->n_flush[flush_type] > 0)
|| (buf_pool->init_flush[flush_type] == TRUE)) { || (buf_pool->init_flush[flush_type] == TRUE)) {
/* There is already a flush batch of the same type running */ /* There is already a flush batch of the same type running */
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
return(ULINT_UNDEFINED); return(ULINT_UNDEFINED);
} }
...@@ -1078,7 +1078,7 @@ buf_flush_batch( ...@@ -1078,7 +1078,7 @@ buf_flush_batch(
space = buf_page_get_space(bpage); space = buf_page_get_space(bpage);
offset = buf_page_get_page_no(bpage); offset = buf_page_get_page_no(bpage);
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
mutex_exit(block_mutex); mutex_exit(block_mutex);
old_page_count = page_count; old_page_count = page_count;
...@@ -1091,7 +1091,7 @@ buf_flush_batch( ...@@ -1091,7 +1091,7 @@ buf_flush_batch(
flush_type, offset, flush_type, offset,
page_count - old_page_count); */ page_count - old_page_count); */
mutex_enter(&(buf_pool->mutex)); buf_pool_mutex_enter();
goto flush_next; goto flush_next;
} else if (flush_type == BUF_FLUSH_LRU) { } else if (flush_type == BUF_FLUSH_LRU) {
...@@ -1124,7 +1124,7 @@ buf_flush_batch( ...@@ -1124,7 +1124,7 @@ buf_flush_batch(
os_event_set(buf_pool->no_flush[flush_type]); os_event_set(buf_pool->no_flush[flush_type]);
} }
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
buf_flush_buffered_writes(); buf_flush_buffered_writes();
...@@ -1172,7 +1172,7 @@ buf_flush_LRU_recommendation(void) ...@@ -1172,7 +1172,7 @@ buf_flush_LRU_recommendation(void)
ulint n_replaceable; ulint n_replaceable;
ulint distance = 0; ulint distance = 0;
mutex_enter(&(buf_pool->mutex)); buf_pool_mutex_enter();
n_replaceable = UT_LIST_GET_LEN(buf_pool->free); n_replaceable = UT_LIST_GET_LEN(buf_pool->free);
...@@ -1198,7 +1198,7 @@ buf_flush_LRU_recommendation(void) ...@@ -1198,7 +1198,7 @@ buf_flush_LRU_recommendation(void)
bpage = UT_LIST_GET_PREV(LRU, bpage); bpage = UT_LIST_GET_PREV(LRU, bpage);
} }
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
if (n_replaceable >= BUF_FLUSH_FREE_BLOCK_MARGIN) { if (n_replaceable >= BUF_FLUSH_FREE_BLOCK_MARGIN) {
...@@ -1275,11 +1275,11 @@ buf_flush_validate(void) ...@@ -1275,11 +1275,11 @@ buf_flush_validate(void)
{ {
ibool ret; ibool ret;
mutex_enter(&(buf_pool->mutex)); buf_pool_mutex_enter();
ret = buf_flush_validate_low(); ret = buf_flush_validate_low();
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
return(ret); return(ret);
} }
......
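For orientation only: a minimal sketch of the enter/check/exit shape that the buf0flu.c call sites above now take, written against the wrappers introduced by this commit. It assumes the InnoDB source tree (buf0buf.h and what it pulls in); the function name example_flush_guard() is illustrative and not part of the change.

#include "buf0buf.h"

static
ulint
example_flush_guard(
/*================*/
					/* out: ULINT_UNDEFINED if a flush
					batch of the same type is already
					running, else 0 */
	enum buf_flush	flush_type)	/* in: BUF_FLUSH_LRU or
					BUF_FLUSH_LIST */
{
	buf_pool_mutex_enter();

	if (buf_pool->n_flush[flush_type] > 0
	    || buf_pool->init_flush[flush_type] == TRUE) {
		/* Another batch of the same type is running;
		back out exactly as buf_flush_batch() does above. */
		buf_pool_mutex_exit();

		return(ULINT_UNDEFINED);
	}

	/* ... the real function would start the batch here,
	still holding buf_pool_mutex ... */

	buf_pool_mutex_exit();

	return(0);
}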
...@@ -51,7 +51,7 @@ ibool buf_lru_switched_on_innodb_mon = FALSE; ...@@ -51,7 +51,7 @@ ibool buf_lru_switched_on_innodb_mon = FALSE;
/********************************************************************** /**********************************************************************
Takes a block out of the LRU list and page hash table. Takes a block out of the LRU list and page hash table.
If the block is compressed-only (BUF_BLOCK_ZIP_PAGE), If the block is compressed-only (BUF_BLOCK_ZIP_PAGE),
the object will be freed and buf_pool->zip_mutex will be released. the object will be freed and buf_pool_zip_mutex will be released.
If a compressed page or a compressed-only block descriptor is freed, If a compressed page or a compressed-only block descriptor is freed,
other compressed pages or compressed-only block descriptors may be other compressed pages or compressed-only block descriptors may be
...@@ -92,7 +92,7 @@ buf_LRU_invalidate_tablespace( ...@@ -92,7 +92,7 @@ buf_LRU_invalidate_tablespace(
ibool all_freed; ibool all_freed;
scan_again: scan_again:
mutex_enter(&(buf_pool->mutex)); buf_pool_mutex_enter();
all_freed = TRUE; all_freed = TRUE;
...@@ -133,7 +133,7 @@ buf_LRU_invalidate_tablespace( ...@@ -133,7 +133,7 @@ buf_LRU_invalidate_tablespace(
&& ((buf_block_t*) bpage)->is_hashed) { && ((buf_block_t*) bpage)->is_hashed) {
page_no = buf_page_get_page_no(bpage); page_no = buf_page_get_page_no(bpage);
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
mutex_exit(block_mutex); mutex_exit(block_mutex);
/* Note that the following call will acquire /* Note that the following call will acquire
...@@ -172,7 +172,7 @@ buf_LRU_invalidate_tablespace( ...@@ -172,7 +172,7 @@ buf_LRU_invalidate_tablespace(
bpage = prev_bpage; bpage = prev_bpage;
} }
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
if (!all_freed) { if (!all_freed) {
os_thread_sleep(20000); os_thread_sleep(20000);
...@@ -195,14 +195,14 @@ buf_LRU_get_recent_limit(void) ...@@ -195,14 +195,14 @@ buf_LRU_get_recent_limit(void)
ulint len; ulint len;
ulint limit; ulint limit;
mutex_enter(&(buf_pool->mutex)); buf_pool_mutex_enter();
len = UT_LIST_GET_LEN(buf_pool->LRU); len = UT_LIST_GET_LEN(buf_pool->LRU);
if (len < BUF_LRU_OLD_MIN_LEN) { if (len < BUF_LRU_OLD_MIN_LEN) {
/* The LRU list is too short to do read-ahead */ /* The LRU list is too short to do read-ahead */
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
return(0); return(0);
} }
...@@ -211,7 +211,7 @@ buf_LRU_get_recent_limit(void) ...@@ -211,7 +211,7 @@ buf_LRU_get_recent_limit(void)
limit = buf_page_get_LRU_position(bpage) - len / BUF_LRU_INITIAL_RATIO; limit = buf_page_get_LRU_position(bpage) - len / BUF_LRU_INITIAL_RATIO;
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
return(limit); return(limit);
} }
...@@ -226,7 +226,7 @@ buf_LRU_insert_zip_clean( ...@@ -226,7 +226,7 @@ buf_LRU_insert_zip_clean(
{ {
buf_page_t* b; buf_page_t* b;
ut_ad(mutex_own(&buf_pool->mutex)); ut_ad(buf_pool_mutex_own());
ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_PAGE); ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_PAGE);
/* Find the first successor of bpage in the LRU list /* Find the first successor of bpage in the LRU list
...@@ -266,7 +266,7 @@ buf_LRU_search_and_free_block( ...@@ -266,7 +266,7 @@ buf_LRU_search_and_free_block(
buf_page_t* bpage; buf_page_t* bpage;
ibool freed; ibool freed;
mutex_enter(&(buf_pool->mutex)); buf_pool_mutex_enter();
freed = FALSE; freed = FALSE;
bpage = UT_LIST_GET_LAST(buf_pool->LRU); bpage = UT_LIST_GET_LAST(buf_pool->LRU);
...@@ -355,7 +355,7 @@ buf_LRU_search_and_free_block( ...@@ -355,7 +355,7 @@ buf_LRU_search_and_free_block(
if (!freed) { if (!freed) {
buf_pool->LRU_flush_ended = 0; buf_pool->LRU_flush_ended = 0;
} }
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
return(freed); return(freed);
} }
...@@ -373,18 +373,18 @@ void ...@@ -373,18 +373,18 @@ void
buf_LRU_try_free_flushed_blocks(void) buf_LRU_try_free_flushed_blocks(void)
/*=================================*/ /*=================================*/
{ {
mutex_enter(&(buf_pool->mutex)); buf_pool_mutex_enter();
while (buf_pool->LRU_flush_ended > 0) { while (buf_pool->LRU_flush_ended > 0) {
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
buf_LRU_search_and_free_block(1); buf_LRU_search_and_free_block(1);
mutex_enter(&(buf_pool->mutex)); buf_pool_mutex_enter();
} }
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
} }
/********************************************************************** /**********************************************************************
...@@ -400,7 +400,7 @@ buf_LRU_buf_pool_running_out(void) ...@@ -400,7 +400,7 @@ buf_LRU_buf_pool_running_out(void)
{ {
ibool ret = FALSE; ibool ret = FALSE;
mutex_enter(&(buf_pool->mutex)); buf_pool_mutex_enter();
if (!recv_recovery_on && UT_LIST_GET_LEN(buf_pool->free) if (!recv_recovery_on && UT_LIST_GET_LEN(buf_pool->free)
+ UT_LIST_GET_LEN(buf_pool->LRU) < buf_pool->curr_size / 4) { + UT_LIST_GET_LEN(buf_pool->LRU) < buf_pool->curr_size / 4) {
...@@ -408,7 +408,7 @@ buf_LRU_buf_pool_running_out(void) ...@@ -408,7 +408,7 @@ buf_LRU_buf_pool_running_out(void)
ret = TRUE; ret = TRUE;
} }
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
return(ret); return(ret);
} }
...@@ -425,7 +425,7 @@ buf_LRU_get_free_only(void) ...@@ -425,7 +425,7 @@ buf_LRU_get_free_only(void)
{ {
buf_block_t* block; buf_block_t* block;
ut_ad(mutex_own(&buf_pool->mutex)); ut_ad(buf_pool_mutex_own());
block = (buf_block_t*) UT_LIST_GET_FIRST(buf_pool->free); block = (buf_block_t*) UT_LIST_GET_FIRST(buf_pool->free);
...@@ -467,7 +467,7 @@ buf_LRU_get_free_block( ...@@ -467,7 +467,7 @@ buf_LRU_get_free_block(
ibool mon_value_was = FALSE; ibool mon_value_was = FALSE;
ibool started_monitor = FALSE; ibool started_monitor = FALSE;
loop: loop:
mutex_enter(&(buf_pool->mutex)); buf_pool_mutex_enter();
if (!recv_recovery_on && UT_LIST_GET_LEN(buf_pool->free) if (!recv_recovery_on && UT_LIST_GET_LEN(buf_pool->free)
+ UT_LIST_GET_LEN(buf_pool->LRU) < buf_pool->curr_size / 20) { + UT_LIST_GET_LEN(buf_pool->LRU) < buf_pool->curr_size / 20) {
...@@ -554,7 +554,7 @@ buf_LRU_get_free_block( ...@@ -554,7 +554,7 @@ buf_LRU_get_free_block(
block->page.zip.data = NULL; block->page.zip.data = NULL;
} }
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
if (started_monitor) { if (started_monitor) {
srv_print_innodb_monitor = mon_value_was; srv_print_innodb_monitor = mon_value_was;
...@@ -566,7 +566,7 @@ buf_LRU_get_free_block( ...@@ -566,7 +566,7 @@ buf_LRU_get_free_block(
/* If no block was in the free list, search from the end of the LRU /* If no block was in the free list, search from the end of the LRU
list and try to free a block there */ list and try to free a block there */
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
freed = buf_LRU_search_and_free_block(n_iterations); freed = buf_LRU_search_and_free_block(n_iterations);
...@@ -615,18 +615,18 @@ buf_LRU_get_free_block( ...@@ -615,18 +615,18 @@ buf_LRU_get_free_block(
os_aio_simulated_wake_handler_threads(); os_aio_simulated_wake_handler_threads();
mutex_enter(&(buf_pool->mutex)); buf_pool_mutex_enter();
if (buf_pool->LRU_flush_ended > 0) { if (buf_pool->LRU_flush_ended > 0) {
/* We have written pages in an LRU flush. To make the insert /* We have written pages in an LRU flush. To make the insert
buffer more efficient, we try to move these pages to the free buffer more efficient, we try to move these pages to the free
list. */ list. */
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
buf_LRU_try_free_flushed_blocks(); buf_LRU_try_free_flushed_blocks();
} else { } else {
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
} }
if (n_iterations > 10) { if (n_iterations > 10) {
...@@ -651,7 +651,7 @@ buf_LRU_old_adjust_len(void) ...@@ -651,7 +651,7 @@ buf_LRU_old_adjust_len(void)
ulint new_len; ulint new_len;
ut_a(buf_pool->LRU_old); ut_a(buf_pool->LRU_old);
ut_ad(mutex_own(&(buf_pool->mutex))); ut_ad(buf_pool_mutex_own());
ut_ad(3 * (BUF_LRU_OLD_MIN_LEN / 8) > BUF_LRU_OLD_TOLERANCE + 5); ut_ad(3 * (BUF_LRU_OLD_MIN_LEN / 8) > BUF_LRU_OLD_TOLERANCE + 5);
for (;;) { for (;;) {
...@@ -693,7 +693,7 @@ buf_LRU_old_init(void) ...@@ -693,7 +693,7 @@ buf_LRU_old_init(void)
{ {
buf_page_t* bpage; buf_page_t* bpage;
ut_ad(mutex_own(&(buf_pool->mutex))); ut_ad(buf_pool_mutex_own());
ut_a(UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN); ut_a(UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN);
/* We first initialize all blocks in the LRU list as old and then use /* We first initialize all blocks in the LRU list as old and then use
...@@ -724,7 +724,7 @@ buf_LRU_remove_block( ...@@ -724,7 +724,7 @@ buf_LRU_remove_block(
{ {
ut_ad(buf_pool); ut_ad(buf_pool);
ut_ad(bpage); ut_ad(bpage);
ut_ad(mutex_own(&(buf_pool->mutex))); ut_ad(buf_pool_mutex_own());
ut_a(buf_page_in_file(bpage)); ut_a(buf_page_in_file(bpage));
...@@ -784,7 +784,7 @@ buf_LRU_add_block_to_end_low( ...@@ -784,7 +784,7 @@ buf_LRU_add_block_to_end_low(
ut_ad(buf_pool); ut_ad(buf_pool);
ut_ad(bpage); ut_ad(bpage);
ut_ad(mutex_own(&(buf_pool->mutex))); ut_ad(buf_pool_mutex_own());
ut_a(buf_page_in_file(bpage)); ut_a(buf_page_in_file(bpage));
...@@ -840,7 +840,7 @@ buf_LRU_add_block_low( ...@@ -840,7 +840,7 @@ buf_LRU_add_block_low(
{ {
ut_ad(buf_pool); ut_ad(buf_pool);
ut_ad(bpage); ut_ad(bpage);
ut_ad(mutex_own(&(buf_pool->mutex))); ut_ad(buf_pool_mutex_own());
ut_a(buf_page_in_file(bpage)); ut_a(buf_page_in_file(bpage));
ut_ad(!bpage->in_LRU_list); ut_ad(!bpage->in_LRU_list);
...@@ -936,19 +936,19 @@ buf_LRU_free_block( ...@@ -936,19 +936,19 @@ buf_LRU_free_block(
the descriptor object will be freed the descriptor object will be freed
as well. If this function returns FALSE, as well. If this function returns FALSE,
it will not temporarily release it will not temporarily release
buf_pool->mutex. */ buf_pool_mutex. */
buf_page_t* bpage, /* in: block to be freed */ buf_page_t* bpage, /* in: block to be freed */
ibool zip, /* in: TRUE if should remove also the ibool zip, /* in: TRUE if should remove also the
compressed page of an uncompressed page */ compressed page of an uncompressed page */
ibool* buf_pool_mutex_released) ibool* buf_pool_mutex_released)
/* in: pointer to a variable that will /* in: pointer to a variable that will
be assigned TRUE if buf_pool->mutex be assigned TRUE if buf_pool_mutex
was temporarily released, or NULL */ was temporarily released, or NULL */
{ {
buf_page_t* b = NULL; buf_page_t* b = NULL;
mutex_t* block_mutex = buf_page_get_mutex(bpage); mutex_t* block_mutex = buf_page_get_mutex(bpage);
ut_ad(mutex_own(&buf_pool->mutex)); ut_ad(buf_pool_mutex_own());
ut_ad(mutex_own(block_mutex)); ut_ad(mutex_own(block_mutex));
ut_ad(buf_page_in_file(bpage)); ut_ad(buf_page_in_file(bpage));
ut_ad(bpage->in_LRU_list); ut_ad(bpage->in_LRU_list);
...@@ -1053,7 +1053,7 @@ buf_LRU_free_block( ...@@ -1053,7 +1053,7 @@ buf_LRU_free_block(
/* Prevent buf_page_get_gen() from /* Prevent buf_page_get_gen() from
decompressing the block while we release decompressing the block while we release
buf_pool->mutex and block_mutex. */ buf_pool_mutex and block_mutex. */
b->buf_fix_count++; b->buf_fix_count++;
b->io_fix = BUF_IO_READ; b->io_fix = BUF_IO_READ;
} }
...@@ -1062,7 +1062,7 @@ buf_LRU_free_block( ...@@ -1062,7 +1062,7 @@ buf_LRU_free_block(
*buf_pool_mutex_released = TRUE; *buf_pool_mutex_released = TRUE;
} }
mutex_exit(&buf_pool->mutex); buf_pool_mutex_exit();
mutex_exit(block_mutex); mutex_exit(block_mutex);
/* Remove possible adaptive hash index on the page. /* Remove possible adaptive hash index on the page.
...@@ -1094,14 +1094,14 @@ buf_LRU_free_block( ...@@ -1094,14 +1094,14 @@ buf_LRU_free_block(
: BUF_NO_CHECKSUM_MAGIC); : BUF_NO_CHECKSUM_MAGIC);
} }
mutex_enter(&buf_pool->mutex); buf_pool_mutex_enter();
mutex_enter(block_mutex); mutex_enter(block_mutex);
if (b) { if (b) {
mutex_enter(&buf_pool->zip_mutex); mutex_enter(&buf_pool_zip_mutex);
b->buf_fix_count--; b->buf_fix_count--;
buf_page_set_io_fix(b, BUF_IO_NONE); buf_page_set_io_fix(b, BUF_IO_NONE);
mutex_exit(&buf_pool->zip_mutex); mutex_exit(&buf_pool_zip_mutex);
} }
buf_LRU_block_free_hashed_page((buf_block_t*) bpage); buf_LRU_block_free_hashed_page((buf_block_t*) bpage);
...@@ -1122,7 +1122,7 @@ buf_LRU_block_free_non_file_page( ...@@ -1122,7 +1122,7 @@ buf_LRU_block_free_non_file_page(
{ {
void* data; void* data;
ut_ad(mutex_own(&(buf_pool->mutex))); ut_ad(buf_pool_mutex_own());
ut_ad(mutex_own(&block->mutex)); ut_ad(mutex_own(&block->mutex));
ut_ad(block); ut_ad(block);
...@@ -1169,7 +1169,7 @@ buf_LRU_block_free_non_file_page( ...@@ -1169,7 +1169,7 @@ buf_LRU_block_free_non_file_page(
/********************************************************************** /**********************************************************************
Takes a block out of the LRU list and page hash table. Takes a block out of the LRU list and page hash table.
If the block is compressed-only (BUF_BLOCK_ZIP_PAGE), If the block is compressed-only (BUF_BLOCK_ZIP_PAGE),
the object will be freed and buf_pool->zip_mutex will be released. the object will be freed and buf_pool_zip_mutex will be released.
If a compressed page or a compressed-only block descriptor is freed, If a compressed page or a compressed-only block descriptor is freed,
other compressed pages or compressed-only block descriptors may be other compressed pages or compressed-only block descriptors may be
...@@ -1190,7 +1190,7 @@ buf_LRU_block_remove_hashed_page( ...@@ -1190,7 +1190,7 @@ buf_LRU_block_remove_hashed_page(
{ {
const buf_page_t* hashed_bpage; const buf_page_t* hashed_bpage;
ut_ad(bpage); ut_ad(bpage);
ut_ad(mutex_own(&(buf_pool->mutex))); ut_ad(buf_pool_mutex_own());
ut_ad(mutex_own(buf_page_get_mutex(bpage))); ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_a(buf_page_get_io_fix(bpage) == BUF_IO_NONE); ut_a(buf_page_get_io_fix(bpage) == BUF_IO_NONE);
...@@ -1279,7 +1279,7 @@ buf_LRU_block_remove_hashed_page( ...@@ -1279,7 +1279,7 @@ buf_LRU_block_remove_hashed_page(
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
mutex_exit(buf_page_get_mutex(bpage)); mutex_exit(buf_page_get_mutex(bpage));
mutex_exit(&buf_pool->mutex); buf_pool_mutex_exit();
buf_print(); buf_print();
buf_LRU_print(); buf_LRU_print();
buf_validate(); buf_validate();
...@@ -1304,7 +1304,7 @@ buf_LRU_block_remove_hashed_page( ...@@ -1304,7 +1304,7 @@ buf_LRU_block_remove_hashed_page(
UT_LIST_REMOVE(list, buf_pool->zip_clean, bpage); UT_LIST_REMOVE(list, buf_pool->zip_clean, bpage);
mutex_exit(&buf_pool->zip_mutex); mutex_exit(&buf_pool_zip_mutex);
buf_buddy_free(bpage->zip.data, buf_buddy_free(bpage->zip.data,
page_zip_get_size(&bpage->zip)); page_zip_get_size(&bpage->zip));
buf_buddy_free(bpage, sizeof(*bpage)); buf_buddy_free(bpage, sizeof(*bpage));
...@@ -1355,7 +1355,7 @@ buf_LRU_block_free_hashed_page( ...@@ -1355,7 +1355,7 @@ buf_LRU_block_free_hashed_page(
buf_block_t* block) /* in: block, must contain a file page and buf_block_t* block) /* in: block, must contain a file page and
be in a state where it can be freed */ be in a state where it can be freed */
{ {
ut_ad(mutex_own(&(buf_pool->mutex))); ut_ad(buf_pool_mutex_own());
ut_ad(mutex_own(&block->mutex)); ut_ad(mutex_own(&block->mutex));
buf_block_set_state(block, BUF_BLOCK_MEMORY); buf_block_set_state(block, BUF_BLOCK_MEMORY);
...@@ -1377,7 +1377,7 @@ buf_LRU_validate(void) ...@@ -1377,7 +1377,7 @@ buf_LRU_validate(void)
ulint LRU_pos; ulint LRU_pos;
ut_ad(buf_pool); ut_ad(buf_pool);
mutex_enter(&(buf_pool->mutex)); buf_pool_mutex_enter();
if (UT_LIST_GET_LEN(buf_pool->LRU) >= BUF_LRU_OLD_MIN_LEN) { if (UT_LIST_GET_LEN(buf_pool->LRU) >= BUF_LRU_OLD_MIN_LEN) {
...@@ -1431,7 +1431,7 @@ buf_LRU_validate(void) ...@@ -1431,7 +1431,7 @@ buf_LRU_validate(void)
ut_a(buf_page_get_state(bpage) == BUF_BLOCK_NOT_USED); ut_a(buf_page_get_state(bpage) == BUF_BLOCK_NOT_USED);
} }
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
return(TRUE); return(TRUE);
} }
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
...@@ -1447,7 +1447,7 @@ buf_LRU_print(void) ...@@ -1447,7 +1447,7 @@ buf_LRU_print(void)
const buf_page_t* bpage; const buf_page_t* bpage;
ut_ad(buf_pool); ut_ad(buf_pool);
mutex_enter(&(buf_pool->mutex)); buf_pool_mutex_enter();
fprintf(stderr, "Pool ulint clock %lu\n", fprintf(stderr, "Pool ulint clock %lu\n",
(ulong) buf_pool->ulint_clock); (ulong) buf_pool->ulint_clock);
...@@ -1510,6 +1510,6 @@ buf_LRU_print(void) ...@@ -1510,6 +1510,6 @@ buf_LRU_print(void)
bpage = UT_LIST_GET_NEXT(LRU, bpage); bpage = UT_LIST_GET_NEXT(LRU, bpage);
} }
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
} }
#endif /* UNIV_DEBUG_PRINT || UNIV_DEBUG || UNIV_BUF_DEBUG */ #endif /* UNIV_DEBUG_PRINT || UNIV_DEBUG || UNIV_BUF_DEBUG */
...@@ -222,11 +222,11 @@ buf_read_ahead_random( ...@@ -222,11 +222,11 @@ buf_read_ahead_random(
LRU_recent_limit = buf_LRU_get_recent_limit(); LRU_recent_limit = buf_LRU_get_recent_limit();
mutex_enter(&(buf_pool->mutex)); buf_pool_mutex_enter();
if (buf_pool->n_pend_reads if (buf_pool->n_pend_reads
> buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) { > buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
return(0); return(0);
} }
...@@ -245,13 +245,13 @@ buf_read_ahead_random( ...@@ -245,13 +245,13 @@ buf_read_ahead_random(
if (recent_blocks >= BUF_READ_AHEAD_RANDOM_THRESHOLD) { if (recent_blocks >= BUF_READ_AHEAD_RANDOM_THRESHOLD) {
mutex_exit(&buf_pool->mutex); buf_pool_mutex_exit();
goto read_ahead; goto read_ahead;
} }
} }
} }
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
/* Do nothing */ /* Do nothing */
return(0); return(0);
...@@ -436,10 +436,10 @@ buf_read_ahead_linear( ...@@ -436,10 +436,10 @@ buf_read_ahead_linear(
tablespace_version = fil_space_get_version(space); tablespace_version = fil_space_get_version(space);
mutex_enter(&(buf_pool->mutex)); buf_pool_mutex_enter();
if (high > fil_space_get_size(space)) { if (high > fil_space_get_size(space)) {
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
/* The area is not whole, return */ /* The area is not whole, return */
return(0); return(0);
...@@ -447,7 +447,7 @@ buf_read_ahead_linear( ...@@ -447,7 +447,7 @@ buf_read_ahead_linear(
if (buf_pool->n_pend_reads if (buf_pool->n_pend_reads
> buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) { > buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
return(0); return(0);
} }
...@@ -487,7 +487,7 @@ buf_read_ahead_linear( ...@@ -487,7 +487,7 @@ buf_read_ahead_linear(
- BUF_READ_AHEAD_LINEAR_THRESHOLD) { - BUF_READ_AHEAD_LINEAR_THRESHOLD) {
/* Too many failures: return */ /* Too many failures: return */
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
return(0); return(0);
} }
...@@ -498,7 +498,7 @@ buf_read_ahead_linear( ...@@ -498,7 +498,7 @@ buf_read_ahead_linear(
bpage = buf_page_hash_get(space, offset); bpage = buf_page_hash_get(space, offset);
if (bpage == NULL) { if (bpage == NULL) {
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
return(0); return(0);
} }
...@@ -524,7 +524,7 @@ buf_read_ahead_linear( ...@@ -524,7 +524,7 @@ buf_read_ahead_linear(
pred_offset = fil_page_get_prev(frame); pred_offset = fil_page_get_prev(frame);
succ_offset = fil_page_get_next(frame); succ_offset = fil_page_get_next(frame);
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
if ((offset == low) && (succ_offset == offset + 1)) { if ((offset == low) && (succ_offset == offset + 1)) {
......
...@@ -1064,13 +1064,13 @@ i_s_zip_fill_low( ...@@ -1064,13 +1064,13 @@ i_s_zip_fill_low(
/* Determine log2(PAGE_ZIP_MIN_SIZE / 2 / BUF_BUDDY_LOW). */ /* Determine log2(PAGE_ZIP_MIN_SIZE / 2 / BUF_BUDDY_LOW). */
for (uint r = PAGE_ZIP_MIN_SIZE / 2 / BUF_BUDDY_LOW; r >>= 1; y++); for (uint r = PAGE_ZIP_MIN_SIZE / 2 / BUF_BUDDY_LOW; r >>= 1; y++);
mutex_enter(&buf_pool->mutex); buf_pool_mutex_enter();
for (uint x = 0; x <= BUF_BUDDY_SIZES; x++) { for (uint x = 0; x <= BUF_BUDDY_SIZES; x++) {
table->field[0]->store(BUF_BUDDY_LOW << x); table->field[0]->store(BUF_BUDDY_LOW << x);
table->field[1]->store(buf_buddy_relocated[x]); table->field[1]->store(buf_buddy_relocated[x]);
if (reset) { if (reset) {
/* This is protected by buf_pool->mutex. */ /* This is protected by buf_pool_mutex. */
buf_buddy_relocated[x] = 0; buf_buddy_relocated[x] = 0;
} }
...@@ -1103,7 +1103,7 @@ i_s_zip_fill_low( ...@@ -1103,7 +1103,7 @@ i_s_zip_fill_low(
} }
} }
mutex_exit(&buf_pool->mutex); buf_pool_mutex_exit();
DBUG_RETURN(status); DBUG_RETURN(status);
} }
......
...@@ -19,13 +19,13 @@ Created December 2006 by Marko Makela ...@@ -19,13 +19,13 @@ Created December 2006 by Marko Makela
/************************************************************************** /**************************************************************************
Allocate a block. The thread calling this function must hold Allocate a block. The thread calling this function must hold
buf_pool->mutex and must not hold buf_pool->zip_mutex or any buf_pool_mutex and must not hold buf_pool_zip_mutex or any
block->mutex. The buf_pool->mutex may only be released and reacquired block->mutex. The buf_pool_mutex may only be released and reacquired
if lru == BUF_BUDDY_USE_LRU. This function should only be used for if lru == BUF_BUDDY_USE_LRU. This function should only be used for
allocating compressed page frames or control blocks (buf_page_t). allocating compressed page frames or control blocks (buf_page_t).
Allocated control blocks must be properly initialized immediately Allocated control blocks must be properly initialized immediately
after buf_buddy_alloc() has returned the memory, before releasing after buf_buddy_alloc() has returned the memory, before releasing
buf_pool->mutex. */ buf_pool_mutex. */
UNIV_INLINE UNIV_INLINE
void* void*
buf_buddy_alloc( buf_buddy_alloc(
...@@ -35,7 +35,7 @@ buf_buddy_alloc( ...@@ -35,7 +35,7 @@ buf_buddy_alloc(
ulint size, /* in: block size, up to UNIV_PAGE_SIZE */ ulint size, /* in: block size, up to UNIV_PAGE_SIZE */
ibool* lru) /* in: pointer to a variable that will be assigned ibool* lru) /* in: pointer to a variable that will be assigned
TRUE if storage was allocated from the LRU list TRUE if storage was allocated from the LRU list
and buf_pool->mutex was temporarily released, and buf_pool_mutex was temporarily released,
or NULL if the LRU list should not be used */ or NULL if the LRU list should not be used */
__attribute__((malloc)); __attribute__((malloc));
...@@ -51,23 +51,23 @@ buf_buddy_free( ...@@ -51,23 +51,23 @@ buf_buddy_free(
__attribute__((nonnull)); __attribute__((nonnull));
/** Number of frames allocated from the buffer pool to the buddy system. /** Number of frames allocated from the buffer pool to the buddy system.
Protected by buf_pool->mutex. */ Protected by buf_pool_mutex. */
extern ulint buf_buddy_n_frames; extern ulint buf_buddy_n_frames;
/** Preferred minimum number of frames allocated from the buffer pool /** Preferred minimum number of frames allocated from the buffer pool
to the buddy system. Unless this number is exceeded or the buffer to the buddy system. Unless this number is exceeded or the buffer
pool is scarce, the LRU algorithm will not free compressed-only pages pool is scarce, the LRU algorithm will not free compressed-only pages
in order to satisfy an allocation request. Protected by buf_pool->mutex. */ in order to satisfy an allocation request. Protected by buf_pool_mutex. */
extern ulint buf_buddy_min_n_frames; extern ulint buf_buddy_min_n_frames;
/** Preferred maximum number of frames allocated from the buffer pool /** Preferred maximum number of frames allocated from the buffer pool
to the buddy system. Unless this number is exceeded, the buddy allocator to the buddy system. Unless this number is exceeded, the buddy allocator
will not try to free clean compressed-only pages before falling back will not try to free clean compressed-only pages before falling back
to the LRU algorithm. Protected by buf_pool->mutex. */ to the LRU algorithm. Protected by buf_pool_mutex. */
extern ulint buf_buddy_max_n_frames; extern ulint buf_buddy_max_n_frames;
/** Counts of blocks allocated from the buddy system. /** Counts of blocks allocated from the buddy system.
Protected by buf_pool->mutex. */ Protected by buf_pool_mutex. */
extern ulint buf_buddy_used[BUF_BUDDY_SIZES + 1]; extern ulint buf_buddy_used[BUF_BUDDY_SIZES + 1];
/** Counts of blocks relocated by the buddy system. /** Counts of blocks relocated by the buddy system.
Protected by buf_pool->mutex. */ Protected by buf_pool_mutex. */
extern ib_uint64_t buf_buddy_relocated[BUF_BUDDY_SIZES + 1]; extern ib_uint64_t buf_buddy_relocated[BUF_BUDDY_SIZES + 1];
#ifndef UNIV_NONINL #ifndef UNIV_NONINL
......
...@@ -18,8 +18,8 @@ Created December 2006 by Marko Makela ...@@ -18,8 +18,8 @@ Created December 2006 by Marko Makela
/************************************************************************** /**************************************************************************
Allocate a block. The thread calling this function must hold Allocate a block. The thread calling this function must hold
buf_pool->mutex and must not hold buf_pool->zip_mutex or any block->mutex. buf_pool_mutex and must not hold buf_pool_zip_mutex or any block->mutex.
The buf_pool->mutex may only be released and reacquired if The buf_pool_mutex may only be released and reacquired if
lru == BUF_BUDDY_USE_LRU. */ lru == BUF_BUDDY_USE_LRU. */
void* void*
...@@ -31,7 +31,7 @@ buf_buddy_alloc_low( ...@@ -31,7 +31,7 @@ buf_buddy_alloc_low(
or BUF_BUDDY_SIZES */ or BUF_BUDDY_SIZES */
ibool* lru) /* in: pointer to a variable that will be assigned ibool* lru) /* in: pointer to a variable that will be assigned
TRUE if storage was allocated from the LRU list TRUE if storage was allocated from the LRU list
and buf_pool->mutex was temporarily released, and buf_pool_mutex was temporarily released,
or NULL if the LRU list should not be used */ or NULL if the LRU list should not be used */
__attribute__((malloc)); __attribute__((malloc));
...@@ -68,13 +68,13 @@ buf_buddy_get_slot( ...@@ -68,13 +68,13 @@ buf_buddy_get_slot(
/************************************************************************** /**************************************************************************
Allocate a block. The thread calling this function must hold Allocate a block. The thread calling this function must hold
buf_pool->mutex and must not hold buf_pool->zip_mutex or any buf_pool_mutex and must not hold buf_pool_zip_mutex or any
block->mutex. The buf_pool->mutex may only be released and reacquired block->mutex. The buf_pool_mutex may only be released and reacquired
if lru == BUF_BUDDY_USE_LRU. This function should only be used for if lru == BUF_BUDDY_USE_LRU. This function should only be used for
allocating compressed page frames or control blocks (buf_page_t). allocating compressed page frames or control blocks (buf_page_t).
Allocated control blocks must be properly initialized immediately Allocated control blocks must be properly initialized immediately
after buf_buddy_alloc() has returned the memory, before releasing after buf_buddy_alloc() has returned the memory, before releasing
buf_pool->mutex. */ buf_pool_mutex. */
UNIV_INLINE UNIV_INLINE
void* void*
buf_buddy_alloc( buf_buddy_alloc(
...@@ -84,10 +84,10 @@ buf_buddy_alloc( ...@@ -84,10 +84,10 @@ buf_buddy_alloc(
ulint size, /* in: block size, up to UNIV_PAGE_SIZE */ ulint size, /* in: block size, up to UNIV_PAGE_SIZE */
ibool* lru) /* in: pointer to a variable that will be assigned ibool* lru) /* in: pointer to a variable that will be assigned
TRUE if storage was allocated from the LRU list TRUE if storage was allocated from the LRU list
and buf_pool->mutex was temporarily released, and buf_pool_mutex was temporarily released,
or NULL if the LRU list should not be used */ or NULL if the LRU list should not be used */
{ {
ut_ad(mutex_own(&buf_pool->mutex)); ut_ad(buf_pool_mutex_own());
return(buf_buddy_alloc_low(buf_buddy_get_slot(size), lru)); return(buf_buddy_alloc_low(buf_buddy_get_slot(size), lru));
} }
...@@ -102,7 +102,7 @@ buf_buddy_free( ...@@ -102,7 +102,7 @@ buf_buddy_free(
pointed to by the buffer pool */ pointed to by the buffer pool */
ulint size) /* in: block size, up to UNIV_PAGE_SIZE */ ulint size) /* in: block size, up to UNIV_PAGE_SIZE */
{ {
ut_ad(mutex_own(&buf_pool->mutex)); ut_ad(buf_pool_mutex_own());
buf_buddy_free_low(buf, buf_buddy_get_slot(size)); buf_buddy_free_low(buf, buf_buddy_get_slot(size));
} }
......
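The buf_buddy_alloc() comments above spell out a locking contract: hold buf_pool_mutex, do not hold buf_pool_zip_mutex or any block->mutex, and expect buf_pool_mutex to have been released and reacquired whenever *lru comes back TRUE. A hedged caller sketch under those assumptions; example_alloc_zip_frame() and zip_size are illustrative names only, not part of this commit.

#include "buf0buf.h"
#include "buf0buddy.h"

static
void*
example_alloc_zip_frame(
/*====================*/
				/* out: allocated block of zip_size bytes */
	ulint	zip_size)	/* in: compressed page size, up to
				UNIV_PAGE_SIZE */
{
	ibool	lru	= FALSE;
	void*	frame;

	/* Preconditions from the comment above. */
	ut_ad(buf_pool_mutex_own());
	ut_ad(!mutex_own(&buf_pool_zip_mutex));

	frame = buf_buddy_alloc(zip_size, &lru);

	if (lru) {
		/* buf_pool_mutex was dropped and retaken inside
		buf_buddy_alloc(): pointers cached across this call
		may no longer be valid. */
	}

	return(frame);
}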
...@@ -947,16 +947,16 @@ for compressed and uncompressed frames */ ...@@ -947,16 +947,16 @@ for compressed and uncompressed frames */
struct buf_page_struct{ struct buf_page_struct{
/* None of the following bit-fields must be modified without /* None of the following bit-fields must be modified without
holding buf_page_get_mutex() [block->mutex or buf_pool->zip_mutex], holding buf_page_get_mutex() [block->mutex or buf_pool_zip_mutex],
since they can be stored in the same machine word. Some of them are since they can be stored in the same machine word. Some of them are
additionally protected by buf_pool->mutex. */ additionally protected by buf_pool_mutex. */
unsigned space:32; /* tablespace id */ unsigned space:32; /* tablespace id */
unsigned offset:32; /* page number */ unsigned offset:32; /* page number */
unsigned state:3; /* state of the control block unsigned state:3; /* state of the control block
(@see enum buf_page_state); also (@see enum buf_page_state); also
protected by buf_pool->mutex. protected by buf_pool_mutex.
State transitions from State transitions from
BUF_BLOCK_READY_FOR_USE to BUF_BLOCK_READY_FOR_USE to
BUF_BLOCK_MEMORY need not be BUF_BLOCK_MEMORY need not be
...@@ -972,13 +972,13 @@ struct buf_page_struct{ ...@@ -972,13 +972,13 @@ struct buf_page_struct{
without holding any mutex or latch */ without holding any mutex or latch */
unsigned io_fix:2; /* type of pending I/O operation unsigned io_fix:2; /* type of pending I/O operation
(@see enum buf_io_fix); also (@see enum buf_io_fix); also
protected by buf_pool->mutex */ protected by buf_pool_mutex */
unsigned buf_fix_count:24;/* count of how manyfold this block unsigned buf_fix_count:24;/* count of how manyfold this block
is currently bufferfixed */ is currently bufferfixed */
page_zip_des_t zip; /* compressed page; zip.data page_zip_des_t zip; /* compressed page; zip.data
(but not the data it points to) is (but not the data it points to) is
also protected by buf_pool->mutex */ also protected by buf_pool_mutex */
buf_page_t* hash; /* node used in chaining to buf_page_t* hash; /* node used in chaining to
buf_pool->page_hash or buf_pool->page_hash or
buf_pool->zip_hash */ buf_pool->zip_hash */
...@@ -987,7 +987,7 @@ struct buf_page_struct{ ...@@ -987,7 +987,7 @@ struct buf_page_struct{
ibool in_zip_hash; /* TRUE if in buf_pool->zip_hash */ ibool in_zip_hash; /* TRUE if in buf_pool->zip_hash */
#endif /* UNIV_DEBUG */ #endif /* UNIV_DEBUG */
/* 2. Page flushing fields; protected by buf_pool->mutex */ /* 2. Page flushing fields; protected by buf_pool_mutex */
UT_LIST_NODE_T(buf_page_t) list; UT_LIST_NODE_T(buf_page_t) list;
/* based on state, this is a list /* based on state, this is a list
...@@ -1001,12 +1001,12 @@ struct buf_page_struct{ ...@@ -1001,12 +1001,12 @@ struct buf_page_struct{
BUF_BLOCK_ZIP_FREE: zip_free[] */ BUF_BLOCK_ZIP_FREE: zip_free[] */
#ifdef UNIV_DEBUG #ifdef UNIV_DEBUG
ibool in_flush_list; /* TRUE if in buf_pool->flush_list; ibool in_flush_list; /* TRUE if in buf_pool->flush_list;
when buf_pool->mutex is free, the when buf_pool_mutex is free, the
following should hold: in_flush_list following should hold: in_flush_list
== (state == BUF_BLOCK_FILE_PAGE == (state == BUF_BLOCK_FILE_PAGE
|| state == BUF_BLOCK_ZIP_DIRTY) */ || state == BUF_BLOCK_ZIP_DIRTY) */
ibool in_free_list; /* TRUE if in buf_pool->free; when ibool in_free_list; /* TRUE if in buf_pool->free; when
buf_pool->mutex is free, the following buf_pool_mutex is free, the following
should hold: in_free_list should hold: in_free_list
== (state == BUF_BLOCK_NOT_USED) */ == (state == BUF_BLOCK_NOT_USED) */
#endif /* UNIV_DEBUG */ #endif /* UNIV_DEBUG */
...@@ -1021,7 +1021,7 @@ struct buf_page_struct{ ...@@ -1021,7 +1021,7 @@ struct buf_page_struct{
not yet been flushed on disk; zero if not yet been flushed on disk; zero if
all modifications are on disk */ all modifications are on disk */
/* 3. LRU replacement algorithm fields; protected by buf_pool->mutex */ /* 3. LRU replacement algorithm fields; protected by buf_pool_mutex */
UT_LIST_NODE_T(buf_page_t) LRU; UT_LIST_NODE_T(buf_page_t) LRU;
/* node of the LRU list */ /* node of the LRU list */
...@@ -1166,12 +1166,6 @@ struct buf_pool_struct{ ...@@ -1166,12 +1166,6 @@ struct buf_pool_struct{
/* 1. General fields */ /* 1. General fields */
mutex_t mutex; /* mutex protecting the buffer pool
struct and control blocks, except the
read-write lock in them */
mutex_t zip_mutex; /* mutex protecting the control blocks
of compressed-only pages (of type
buf_page_t, not buf_block_t) */
ulint n_chunks; /* number of buffer pool chunks */ ulint n_chunks; /* number of buffer pool chunks */
buf_chunk_t* chunks; /* buffer pool chunks */ buf_chunk_t* chunks; /* buffer pool chunks */
ulint curr_size; /* current pool size in pages */ ulint curr_size; /* current pool size in pages */
...@@ -1266,6 +1260,26 @@ struct buf_pool_struct{ ...@@ -1266,6 +1260,26 @@ struct buf_pool_struct{
#endif #endif
}; };
/* mutex protecting the buffer pool struct and control blocks, except the
read-write lock in them */
extern mutex_t buf_pool_mutex;
/* mutex protecting the control blocks of compressed-only pages
(of type buf_page_t, not buf_block_t) */
extern mutex_t buf_pool_zip_mutex;
/* Accessors for buf_pool_mutex. Use these instead of accessing
buf_pool_mutex directly. */
/* Test if buf_pool_mutex is owned. */
#define buf_pool_mutex_own() mutex_own(&buf_pool_mutex)
/* Acquire the buffer pool mutex. */
#define buf_pool_mutex_enter() do { \
ut_ad(!mutex_own(&buf_pool_zip_mutex)); \
mutex_enter(&buf_pool_mutex); \
} while (0)
/* Release the buffer pool mutex. */
#define buf_pool_mutex_exit() mutex_exit(&buf_pool_mutex)
/************************************************************************ /************************************************************************
Let us list the consistency conditions for different control block states. Let us list the consistency conditions for different control block states.
......
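The buf0buf.h hunk above is the heart of the change: the two mutexes leave buf_pool_struct, become file-scope objects, and are reached only through buf_pool_mutex_own()/enter()/exit(), with enter() asserting that buf_pool_zip_mutex is not already held. Below is a stand-alone model of the same wrapper idea in plain C with pthreads, so the pattern can be compiled and poked at outside InnoDB; every name in it is illustrative, and the per-thread "held" flags merely stand in for InnoDB's mutex_own().

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t	pool_mutex	= PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t	zip_mutex	= PTHREAD_MUTEX_INITIALIZER;
static __thread bool	pool_mutex_held;	/* models mutex_own() */
static __thread bool	zip_mutex_held;

#define pool_mutex_own()	(pool_mutex_held)
#define zip_mutex_own()		(zip_mutex_held)

/* Acquire the pool mutex; like buf_pool_mutex_enter(), refuse (in a
debug build) to block while the zip mutex is held, which pins down the
latching order pool mutex -> zip mutex. */
#define pool_mutex_enter() do {				\
	assert(!zip_mutex_own());			\
	pthread_mutex_lock(&pool_mutex);		\
	pool_mutex_held = true;				\
} while (0)

#define pool_mutex_exit() do {				\
	pool_mutex_held = false;			\
	pthread_mutex_unlock(&pool_mutex);		\
} while (0)

#define zip_mutex_enter() do {				\
	pthread_mutex_lock(&zip_mutex);			\
	zip_mutex_held = true;				\
} while (0)

#define zip_mutex_exit() do {				\
	zip_mutex_held = false;				\
	pthread_mutex_unlock(&zip_mutex);		\
} while (0)

int
main(void)
{
	pool_mutex_enter();	/* ok: zip mutex not held */
	zip_mutex_enter();	/* ok: taken after the pool mutex */
	zip_mutex_exit();
	pool_mutex_exit();

	/* Taking zip_mutex first and then calling pool_mutex_enter()
	would trip the assert() above, just as the new ut_ad() does. */

	return(0);
}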
...@@ -76,7 +76,7 @@ buf_pool_get_oldest_modification(void) ...@@ -76,7 +76,7 @@ buf_pool_get_oldest_modification(void)
buf_page_t* bpage; buf_page_t* bpage;
ib_uint64_t lsn; ib_uint64_t lsn;
mutex_enter(&(buf_pool->mutex)); buf_pool_mutex_enter();
bpage = UT_LIST_GET_LAST(buf_pool->flush_list); bpage = UT_LIST_GET_LAST(buf_pool->flush_list);
...@@ -87,7 +87,7 @@ buf_pool_get_oldest_modification(void) ...@@ -87,7 +87,7 @@ buf_pool_get_oldest_modification(void)
lsn = bpage->oldest_modification; lsn = bpage->oldest_modification;
} }
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
return(lsn); return(lsn);
} }
...@@ -101,7 +101,7 @@ buf_pool_clock_tic(void) ...@@ -101,7 +101,7 @@ buf_pool_clock_tic(void)
/*====================*/ /*====================*/
/* out: new clock value */ /* out: new clock value */
{ {
ut_ad(mutex_own(&(buf_pool->mutex))); ut_ad(buf_pool_mutex_own());
buf_pool->ulint_clock++; buf_pool->ulint_clock++;
...@@ -265,7 +265,7 @@ buf_page_get_mutex( ...@@ -265,7 +265,7 @@ buf_page_get_mutex(
break; break;
case BUF_BLOCK_ZIP_PAGE: case BUF_BLOCK_ZIP_PAGE:
case BUF_BLOCK_ZIP_DIRTY: case BUF_BLOCK_ZIP_DIRTY:
return(&buf_pool->zip_mutex); return(&buf_pool_zip_mutex);
default: default:
return(&((buf_block_t*) bpage)->mutex); return(&((buf_block_t*) bpage)->mutex);
} }
...@@ -366,7 +366,7 @@ buf_page_set_io_fix( ...@@ -366,7 +366,7 @@ buf_page_set_io_fix(
buf_page_t* bpage, /* in/out: control block */ buf_page_t* bpage, /* in/out: control block */
enum buf_io_fix io_fix) /* in: io_fix state */ enum buf_io_fix io_fix) /* in: io_fix state */
{ {
ut_ad(mutex_own(&buf_pool->mutex)); ut_ad(buf_pool_mutex_own());
ut_ad(mutex_own(buf_page_get_mutex(bpage))); ut_ad(mutex_own(buf_page_get_mutex(bpage)));
bpage->io_fix = io_fix; bpage->io_fix = io_fix;
...@@ -394,7 +394,7 @@ buf_page_can_relocate( ...@@ -394,7 +394,7 @@ buf_page_can_relocate(
/*==================*/ /*==================*/
const buf_page_t* bpage) /* control block being relocated */ const buf_page_t* bpage) /* control block being relocated */
{ {
ut_ad(mutex_own(&buf_pool->mutex)); ut_ad(buf_pool_mutex_own());
ut_ad(mutex_own(buf_page_get_mutex(bpage))); ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_ad(buf_page_in_file(bpage)); ut_ad(buf_page_in_file(bpage));
ut_ad(bpage->in_LRU_list); ut_ad(bpage->in_LRU_list);
...@@ -427,7 +427,7 @@ buf_page_set_old( ...@@ -427,7 +427,7 @@ buf_page_set_old(
ibool old) /* in: old */ ibool old) /* in: old */
{ {
ut_a(buf_page_in_file(bpage)); ut_a(buf_page_in_file(bpage));
ut_ad(mutex_own(&buf_pool->mutex)); ut_ad(buf_pool_mutex_own());
bpage->old = old; bpage->old = old;
} }
...@@ -634,9 +634,9 @@ buf_frame_get_page_zip( ...@@ -634,9 +634,9 @@ buf_frame_get_page_zip(
const byte* ptr) /* in: pointer to the page */ const byte* ptr) /* in: pointer to the page */
{ {
const page_zip_des_t* page_zip; const page_zip_des_t* page_zip;
mutex_enter(&buf_pool->mutex); buf_pool_mutex_enter();
page_zip = buf_block_get_page_zip(buf_block_align(ptr)); page_zip = buf_block_get_page_zip(buf_block_align(ptr));
mutex_exit(&buf_pool->mutex); buf_pool_mutex_exit();
return(page_zip); return(page_zip);
} }
#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */ #endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
...@@ -701,7 +701,7 @@ buf_block_free( ...@@ -701,7 +701,7 @@ buf_block_free(
/*===========*/ /*===========*/
buf_block_t* block) /* in, own: block to be freed */ buf_block_t* block) /* in, own: block to be freed */
{ {
mutex_enter(&(buf_pool->mutex)); buf_pool_mutex_enter();
mutex_enter(&block->mutex); mutex_enter(&block->mutex);
...@@ -711,7 +711,7 @@ buf_block_free( ...@@ -711,7 +711,7 @@ buf_block_free(
mutex_exit(&block->mutex); mutex_exit(&block->mutex);
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
} }
/************************************************************************* /*************************************************************************
...@@ -757,13 +757,13 @@ buf_page_io_query( ...@@ -757,13 +757,13 @@ buf_page_io_query(
{ {
ibool io_fixed; ibool io_fixed;
mutex_enter(&(buf_pool->mutex)); buf_pool_mutex_enter();
ut_ad(buf_page_in_file(bpage)); ut_ad(buf_page_in_file(bpage));
ut_ad(bpage->buf_fix_count > 0); ut_ad(bpage->buf_fix_count > 0);
io_fixed = buf_page_get_io_fix(bpage) != BUF_IO_NONE; io_fixed = buf_page_get_io_fix(bpage) != BUF_IO_NONE;
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
return(io_fixed); return(io_fixed);
} }
...@@ -781,7 +781,7 @@ buf_page_get_newest_modification( ...@@ -781,7 +781,7 @@ buf_page_get_newest_modification(
{ {
ib_uint64_t lsn; ib_uint64_t lsn;
mutex_enter(&(buf_pool->mutex)); buf_pool_mutex_enter();
if (buf_page_in_file(bpage)) { if (buf_page_in_file(bpage)) {
lsn = bpage->newest_modification; lsn = bpage->newest_modification;
...@@ -789,7 +789,7 @@ buf_page_get_newest_modification( ...@@ -789,7 +789,7 @@ buf_page_get_newest_modification(
lsn = 0; lsn = 0;
} }
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
return(lsn); return(lsn);
} }
...@@ -805,7 +805,7 @@ buf_block_modify_clock_inc( ...@@ -805,7 +805,7 @@ buf_block_modify_clock_inc(
buf_block_t* block) /* in: block */ buf_block_t* block) /* in: block */
{ {
#ifdef UNIV_SYNC_DEBUG #ifdef UNIV_SYNC_DEBUG
ut_ad((mutex_own(&(buf_pool->mutex)) ut_ad((buf_pool_mutex_own()
&& (block->page.buf_fix_count == 0)) && (block->page.buf_fix_count == 0))
|| rw_lock_own(&(block->lock), RW_LOCK_EXCLUSIVE)); || rw_lock_own(&(block->lock), RW_LOCK_EXCLUSIVE));
#endif /* UNIV_SYNC_DEBUG */ #endif /* UNIV_SYNC_DEBUG */
...@@ -889,7 +889,7 @@ buf_page_hash_get( ...@@ -889,7 +889,7 @@ buf_page_hash_get(
ulint fold; ulint fold;
ut_ad(buf_pool); ut_ad(buf_pool);
ut_ad(mutex_own(&(buf_pool->mutex))); ut_ad(buf_pool_mutex_own());
/* Look for the page in the hash table */ /* Look for the page in the hash table */
...@@ -936,11 +936,11 @@ buf_page_peek( ...@@ -936,11 +936,11 @@ buf_page_peek(
{ {
const buf_page_t* bpage; const buf_page_t* bpage;
mutex_enter(&buf_pool->mutex); buf_pool_mutex_enter();
bpage = buf_page_hash_get(space, offset); bpage = buf_page_hash_get(space, offset);
mutex_exit(&buf_pool->mutex); buf_pool_mutex_exit();
return(bpage != NULL); return(bpage != NULL);
} }
...@@ -961,9 +961,9 @@ buf_page_release_zip( ...@@ -961,9 +961,9 @@ buf_page_release_zip(
switch (buf_page_get_state(bpage)) { switch (buf_page_get_state(bpage)) {
case BUF_BLOCK_ZIP_PAGE: case BUF_BLOCK_ZIP_PAGE:
case BUF_BLOCK_ZIP_DIRTY: case BUF_BLOCK_ZIP_DIRTY:
mutex_enter(&buf_pool->zip_mutex); mutex_enter(&buf_pool_zip_mutex);
bpage->buf_fix_count--; bpage->buf_fix_count--;
mutex_exit(&buf_pool->zip_mutex); mutex_exit(&buf_pool_zip_mutex);
return; return;
case BUF_BLOCK_FILE_PAGE: case BUF_BLOCK_FILE_PAGE:
block = (buf_block_t*) bpage; block = (buf_block_t*) bpage;
...@@ -1003,9 +1003,9 @@ buf_page_release( ...@@ -1003,9 +1003,9 @@ buf_page_release(
ut_a(block->page.buf_fix_count > 0); ut_a(block->page.buf_fix_count > 0);
if (rw_latch == RW_X_LATCH && mtr->modifications) { if (rw_latch == RW_X_LATCH && mtr->modifications) {
mutex_enter(&buf_pool->mutex); buf_pool_mutex_enter();
buf_flush_note_modification(block, mtr); buf_flush_note_modification(block, mtr);
mutex_exit(&buf_pool->mutex); buf_pool_mutex_exit();
} }
mutex_enter(&block->mutex); mutex_enter(&block->mutex);
......
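buf_page_get_mutex() above is why the zip mutex had to become a global as well: for BUF_BLOCK_ZIP_PAGE and BUF_BLOCK_ZIP_DIRTY blocks it now hands back &buf_pool_zip_mutex instead of a member of buf_pool. A hedged sketch of the resulting double-lock pattern required by functions such as buf_page_set_io_fix(); it assumes the InnoDB headers, and example_set_io_read() is an illustrative name, not an existing function.

#include "buf0buf.h"

static
void
example_set_io_read(
/*================*/
	buf_page_t*	bpage)	/* in/out: control block */
{
	mutex_t*	block_mutex = buf_page_get_mutex(bpage);

	/* buf_page_set_io_fix() asserts both buf_pool_mutex and the
	block mutex that buf_page_get_mutex() resolves to: block->mutex
	for uncompressed blocks, buf_pool_zip_mutex for compressed-only
	ones. */
	buf_pool_mutex_enter();
	mutex_enter(block_mutex);

	ut_ad(buf_page_in_file(bpage));
	buf_page_set_io_fix(bpage, BUF_IO_READ);

	mutex_exit(block_mutex);
	buf_pool_mutex_exit();
}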
...@@ -36,7 +36,7 @@ buf_flush_note_modification( ...@@ -36,7 +36,7 @@ buf_flush_note_modification(
#ifdef UNIV_SYNC_DEBUG #ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX)); ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */ #endif /* UNIV_SYNC_DEBUG */
ut_ad(mutex_own(&(buf_pool->mutex))); ut_ad(buf_pool_mutex_own());
ut_ad(mtr->start_lsn != 0); ut_ad(mtr->start_lsn != 0);
ut_ad(mtr->modifications); ut_ad(mtr->modifications);
...@@ -76,7 +76,7 @@ buf_flush_recv_note_modification( ...@@ -76,7 +76,7 @@ buf_flush_recv_note_modification(
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX)); ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */ #endif /* UNIV_SYNC_DEBUG */
mutex_enter(&(buf_pool->mutex)); buf_pool_mutex_enter();
ut_ad(block->page.newest_modification <= end_lsn); ut_ad(block->page.newest_modification <= end_lsn);
...@@ -93,5 +93,5 @@ buf_flush_recv_note_modification( ...@@ -93,5 +93,5 @@ buf_flush_recv_note_modification(
ut_ad(block->page.oldest_modification <= start_lsn); ut_ad(block->page.oldest_modification <= start_lsn);
} }
mutex_exit(&(buf_pool->mutex)); buf_pool_mutex_exit();
} }
...@@ -83,13 +83,13 @@ buf_LRU_free_block( ...@@ -83,13 +83,13 @@ buf_LRU_free_block(
the descriptor object will be freed the descriptor object will be freed
as well. If this function returns FALSE, as well. If this function returns FALSE,
it will not temporarily release it will not temporarily release
buf_pool->mutex. */ buf_pool_mutex. */
buf_page_t* block, /* in: block to be freed */ buf_page_t* block, /* in: block to be freed */
ibool zip, /* in: TRUE if should remove also the ibool zip, /* in: TRUE if should remove also the
compressed page of an uncompressed page */ compressed page of an uncompressed page */
ibool* buf_pool_mutex_released); ibool* buf_pool_mutex_released);
/* in: pointer to a variable that will /* in: pointer to a variable that will
be assigned TRUE if buf_pool->mutex be assigned TRUE if buf_pool_mutex
was temporarily released, or NULL */ was temporarily released, or NULL */
/********************************************************************** /**********************************************************************
Look for a replaceable block from the end of the LRU list and put it to Look for a replaceable block from the end of the LRU list and put it to
......
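The buf_LRU_free_block() declaration above documents that the function may temporarily release buf_pool_mutex and reports this through buf_pool_mutex_released. A hedged caller sketch showing how that flag is meant to be used; example_try_free() is an illustrative name, and the ibool return relies on the comment above ("If this function returns FALSE ...").

#include "buf0buf.h"
#include "buf0lru.h"

static
ibool
example_try_free(
/*=============*/
				/* out: TRUE if the block was freed */
	buf_page_t*	bpage)	/* in: candidate block; caller holds
				buf_pool_mutex and the block mutex */
{
	mutex_t*	block_mutex	= buf_page_get_mutex(bpage);
	ibool		mutex_released	= FALSE;
	ibool		freed;

	ut_ad(buf_pool_mutex_own());
	ut_ad(mutex_own(block_mutex));

	freed = buf_LRU_free_block(bpage, TRUE, &mutex_released);

	if (mutex_released) {
		/* buf_pool_mutex was temporarily released inside
		buf_LRU_free_block(); any LRU pointers the caller
		cached across the call may be stale and must be
		refetched. */
	}

	return(freed);
}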
...@@ -191,10 +191,10 @@ mlog_write_initial_log_record_fast( ...@@ -191,10 +191,10 @@ mlog_write_initial_log_record_fast(
#endif #endif
#ifdef UNIV_DEBUG #ifdef UNIV_DEBUG
mutex_enter(&buf_pool->mutex); buf_pool_mutex_enter();
/* We now assume that all x-latched pages have been modified! */ /* We now assume that all x-latched pages have been modified! */
block = (buf_block_t*) buf_block_align(ptr); block = (buf_block_t*) buf_block_align(ptr);
mutex_exit(&buf_pool->mutex); buf_pool_mutex_exit();
if (!mtr_memo_contains(mtr, block, MTR_MEMO_MODIFY)) { if (!mtr_memo_contains(mtr, block, MTR_MEMO_MODIFY)) {
......
...@@ -115,9 +115,9 @@ row_upd_rec_sys_fields( ...@@ -115,9 +115,9 @@ row_upd_rec_sys_fields(
ut_ad(rec_offs_validate(rec, index, offsets)); ut_ad(rec_offs_validate(rec, index, offsets));
#ifdef UNIV_SYNC_DEBUG #ifdef UNIV_SYNC_DEBUG
if (!rw_lock_own(&btr_search_latch, RW_LOCK_EX)) { if (!rw_lock_own(&btr_search_latch, RW_LOCK_EX)) {
mutex_enter(&buf_pool->mutex); buf_pool_mutex_enter();
ut_ad(!buf_block_align(rec)->is_hashed); ut_ad(!buf_block_align(rec)->is_hashed);
mutex_exit(&buf_pool->mutex); buf_pool_mutex_exit();
} }
#endif /* UNIV_SYNC_DEBUG */ #endif /* UNIV_SYNC_DEBUG */
......
...@@ -314,9 +314,9 @@ mtr_memo_contains_page( ...@@ -314,9 +314,9 @@ mtr_memo_contains_page(
{ {
ibool ret; ibool ret;
mutex_enter(&buf_pool->mutex); buf_pool_mutex_enter();
ret = mtr_memo_contains(mtr, buf_block_align(ptr), type); ret = mtr_memo_contains(mtr, buf_block_align(ptr), type);
mutex_exit(&buf_pool->mutex); buf_pool_mutex_exit();
return(ret); return(ret);
} }
......