Commit 121a5e8d authored by Marko Mäkelä

Minor buffer pool cleanup

btr_blob_free(): Compare page_id_t directly.

buf_pool_watch_unset(): Avoid unnecessary buf_pool.mutex acquisition.

Clean up some comments as well.
parent 82c465f6
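
For readers outside the InnoDB tree, the first change can be read as replacing a field-by-field comparison with the composite page identifier's own operator==. Below is a minimal stand-alone sketch of that pattern, with a simplified stand-in for page_id_t (not the actual InnoDB definition):

#include <cstdint>

// Simplified stand-in for InnoDB's page_id_t (tablespace id, page
// number). The real class provides operator==, which is what
// btr_blob_free() now uses instead of saving space() and page_no()
// into separate local variables and comparing each of them.
class page_id_t
{
public:
  page_id_t(uint32_t space, uint32_t page_no)
    : m_space(space), m_page_no(page_no) {}
  bool operator==(const page_id_t &rhs) const
  { return m_space == rhs.m_space && m_page_no == rhs.m_page_no; }
private:
  uint32_t m_space;
  uint32_t m_page_no;
};

// Old pattern: ulint space = id.space(); ulint page_no = id.page_no();
//              ... later: id.space() == space && id.page_no() == page_no
// New pattern: const page_id_t saved = id; ... later: id == saved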
@@ -7044,44 +7044,25 @@ btr_blob_get_next_page_no(
 	return(mach_read_from_4(blob_header + BTR_BLOB_HDR_NEXT_PAGE_NO));
 }
 
-/*******************************************************************//**
-Deallocate a buffer block that was reserved for a BLOB part. */
-static
-void
-btr_blob_free(
-/*==========*/
-	buf_block_t*	block,	/*!< in: buffer block */
-	ibool		all,	/*!< in: TRUE=remove also the compressed page
-				if there is one */
-	mtr_t*		mtr)	/*!< in: mini-transaction to commit */
+/** Deallocate a buffer block that was reserved for a BLOB part.
+@param block	buffer block
+@param all	flag whether to remove a ROW_FORMAT=COMPRESSED page
+@param mtr	mini-transaction to commit */
+static void btr_blob_free(buf_block_t *block, bool all, mtr_t *mtr)
 {
-	ulint	space = block->page.id.space();
-	ulint	page_no	= block->page.id.page_no();
-
-	ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
-
-	mtr_commit(mtr);
-
-	mutex_enter(&buf_pool.mutex);
-
-	/* Only free the block if it is still allocated to
-	the same file page. */
-
-	if (buf_block_get_state(block)
-	    == BUF_BLOCK_FILE_PAGE
-	    && block->page.id.space() == space
-	    && block->page.id.page_no() == page_no) {
-
-		if (!buf_LRU_free_page(&block->page, all)
-		    && all && block->page.zip.data) {
-			/* Attempt to deallocate the uncompressed page
-			if the whole block cannot be deallocted. */
-
-			buf_LRU_free_page(&block->page, false);
-		}
-	}
-
-	mutex_exit(&buf_pool.mutex);
+  const page_id_t page_id= block->page.id;
+  ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
+  mtr->commit();
+
+  mutex_enter(&buf_pool.mutex);
+  /* Free the block if it is still allocated to the same file page. */
+  if (block->page.state == BUF_BLOCK_FILE_PAGE && block->page.id == page_id &&
+      !buf_LRU_free_page(&block->page, all) && all && block->page.zip.data)
+    /* Attempt to deallocate the redundant copy of the uncompressed page
+    if the whole ROW_FORMAT=COMPRESSED block cannot be deallocated. */
+    buf_LRU_free_page(&block->page, false);
+
+  mutex_exit(&buf_pool.mutex);
 }
 
 /** Helper class used while writing blob pages, during insert or update. */
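
The rewritten function keeps the original safety argument: mtr->commit() releases the page latch, after which the block could be evicted and reused for another page, so the block's identity must be re-verified under buf_pool.mutex before it may be freed. A minimal stand-alone sketch of this re-validation pattern (hypothetical types, not the InnoDB API):

#include <mutex>

// Hypothetical simplified types; the real code uses buf_block_t,
// buf_pool.mutex and buf_LRU_free_page().
enum class State { FILE_PAGE, NOT_USED };
struct PageId
{
  unsigned space, page_no;
  bool operator==(const PageId &o) const
  { return space == o.space && page_no == o.page_no; }
};
struct Block { PageId id; State state; };

std::mutex pool_mutex;                   // stands in for buf_pool.mutex

// The caller captured 'expected' while it still held the page latch;
// after the latch is released the block may have been evicted and
// reused, so both the state and the page id are re-checked under the
// pool mutex before freeing.
void free_if_unchanged(Block *block, const PageId expected)
{
  std::lock_guard<std::mutex> lock(pool_mutex);
  if (block->state == State::FILE_PAGE && block->id == expected)
    block->state = State::NOT_USED;      // evict (placeholder action)
}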
@@ -2516,18 +2516,12 @@ void buf_resize_shutdown()
 }
 
-/********************************************************************//**
-Relocate a buffer control block.  Relocates the block on the LRU list
-and in buf_pool.page_hash.  Does not relocate bpage->list.
-The caller must take care of relocating bpage->list. */
-static
-void
-buf_relocate(
-/*=========*/
-	buf_page_t*	bpage,	/*!< in/out: control block being relocated;
-				buf_page_get_state(bpage) must be
-				BUF_BLOCK_ZIP_DIRTY or BUF_BLOCK_ZIP_PAGE */
-	buf_page_t*	dpage)	/*!< in/out: destination control block */
+/** Relocate a ROW_FORMAT=COMPRESSED block in the LRU list and
+buf_pool.page_hash.
+The caller must relocate bpage->list.
+@param bpage	control block in BUF_BLOCK_ZIP_DIRTY or BUF_BLOCK_ZIP_PAGE
+@param dpage	destination control block */
+static void buf_relocate(buf_page_t *bpage, buf_page_t *dpage)
 {
 	buf_page_t*	b;
@@ -2745,27 +2739,20 @@ buf_pool_watch_set(
 }
 
 /** Remove the sentinel block for the watch before replacing it with a
-real block. buf_page_watch_clear() or buf_page_watch_occurred() will notice
+real block. buf_pool_watch_unset() or buf_pool_watch_occurred() will notice
 that the block has been replaced with the real block.
 @param[in,out]	watch	sentinel for watch
 @return reference count, to be added to the replacement block */
-static
-void
-buf_pool_watch_remove(buf_page_t* watch)
+static void buf_pool_watch_remove(buf_page_t *watch)
 {
-#ifdef UNIV_DEBUG
-	/* We must also own the appropriate hash_bucket mutex. */
-	rw_lock_t* hash_lock = buf_page_hash_lock_get(watch->id);
-	ut_ad(rw_lock_own(hash_lock, RW_LOCK_X));
-#endif /* UNIV_DEBUG */
-
-	ut_ad(mutex_own(&buf_pool.mutex));
-
-	HASH_DELETE(buf_page_t, hash, buf_pool.page_hash, watch->id.fold(),
-		    watch);
-	ut_d(watch->in_page_hash = FALSE);
-	watch->buf_fix_count = 0;
-	watch->state = BUF_BLOCK_POOL_WATCH;
+  ut_ad(rw_lock_own(buf_page_hash_lock_get(watch->id), RW_LOCK_X));
+  ut_ad(mutex_own(&buf_pool.mutex));
+  ut_ad(watch->in_page_hash);
+
+  ut_d(watch->in_page_hash= FALSE);
+  HASH_DELETE(buf_page_t, hash, buf_pool.page_hash, watch->id.fold(), watch);
+  watch->buf_fix_count= 0;
+  watch->state= BUF_BLOCK_POOL_WATCH;
 }
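
The rewritten buf_pool_watch_remove() folds the #ifdef UNIV_DEBUG block into plain ut_ad() calls and adds an assertion that the watch really is in the hash table. A rough sketch of the ut_ad()/ut_d() idiom, under the assumption that the macros behave as in InnoDB's ut0dbg.h (an assertion, and a statement compiled in only for debug builds):

#include <cassert>

// Simplified stand-ins for the debug-instrumentation macros.
// ut_ad() asserts only in debug builds; ut_d() executes its argument
// only in debug builds, so bookkeeping such as watch->in_page_hash
// costs nothing in release binaries.
#ifdef UNIV_DEBUG
# define ut_ad(expr) assert(expr)
# define ut_d(stmt) stmt
#else
# define ut_ad(expr) ((void) 0)
# define ut_d(stmt)
#endif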
@@ -2773,27 +2760,28 @@ buf_pool_watch_set(same_page_id) must have returned NULL before.
 @param[in]	page_id	page id */
 void buf_pool_watch_unset(const page_id_t page_id)
 {
-	buf_page_t*	bpage;
-	/* We only need to have buf_pool.mutex in case where we end
-	up calling buf_pool_watch_remove but to obey latching order
-	we acquire it here before acquiring hash_lock. This should
-	not cause too much grief as this function is only ever
-	called from the purge thread. */
-	mutex_enter(&buf_pool.mutex);
-
-	rw_lock_t*	hash_lock = buf_page_hash_lock_get(page_id);
-	rw_lock_x_lock(hash_lock);
-
-	/* The page must exist because buf_pool_watch_set()
-	increments buf_fix_count. */
-	bpage = buf_page_hash_get_low(page_id);
-
-	if (bpage->unfix() == 0 && buf_pool_watch_is_sentinel(bpage)) {
-		buf_pool_watch_remove(bpage);
-	}
-
-	mutex_exit(&buf_pool.mutex);
-	rw_lock_x_unlock(hash_lock);
+  rw_lock_t *hash_lock= buf_page_hash_lock_get(page_id);
+  rw_lock_x_lock(hash_lock);
+
+  /* The page must exist because buf_pool_watch_set() increments
+  buf_fix_count. */
+  buf_page_t *watch= buf_page_hash_get_low(page_id);
+
+  if (watch->unfix() == 0 && buf_pool_watch_is_sentinel(watch))
+  {
+    /* The following is based on buf_pool_watch_remove(). */
+    ut_d(watch->in_page_hash= FALSE);
+    HASH_DELETE(buf_page_t, hash, buf_pool.page_hash, watch->id.fold(), watch);
+    rw_lock_x_unlock(hash_lock);
+    /* Now that the watch is no longer reachable by other threads,
+    return it to the pool of inactive watches, for reuse. */
+    mutex_enter(&buf_pool.mutex);
+    watch->buf_fix_count= 0;
+    watch->state= BUF_BLOCK_POOL_WATCH;
+    mutex_exit(&buf_pool.mutex);
+  }
+  else
+    rw_lock_x_unlock(hash_lock);
 }
 
 /** Check if the page has been read in.
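
The point of the rewrite above is visible in the lock choreography: the old code pessimistically took buf_pool.mutex before hash_lock to respect the latching order, while the new code unlinks the watch from page_hash under hash_lock alone and takes buf_pool.mutex only afterwards, when no other thread can reach the object any more. A minimal sketch of that "detach under the narrow lock, recycle under the broad one" pattern, with hypothetical names in place of the InnoDB types:

#include <mutex>
#include <unordered_map>

struct Watch { int fix_count; bool active; };

std::mutex hash_mutex;                       // stands in for hash_lock
std::mutex pool_mutex;                       // stands in for buf_pool.mutex
std::unordered_map<unsigned, Watch*> page_hash;

void watch_unset(unsigned page_id)
{
  hash_mutex.lock();
  Watch *w = page_hash.at(page_id);          // must exist (fixed earlier)
  if (--w->fix_count == 0 && w->active)
  {
    page_hash.erase(page_id);                // unreachable from now on
    hash_mutex.unlock();
    // The watch is now private to this thread; only the recycling
    // step still needs the broad pool mutex.
    std::lock_guard<std::mutex> g(pool_mutex);
    w->fix_count = 0;
    w->active = false;
  }
  else
    hash_mutex.unlock();
}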
@@ -4187,17 +4175,9 @@ static void buf_page_init(const page_id_t page_id, ulint zip_size,
 		buf_pool_watch_remove(hash_page);
 	} else {
-
-		ib::error() << "Page " << page_id
-			    << " already found in the hash table: "
-			    << hash_page << ", " << block;
-
-		ut_d(buf_page_mutex_exit(block));
-		ut_d(mutex_exit(&buf_pool.mutex));
-		ut_d(buf_pool.print());
-		ut_d(buf_LRU_print());
-		ut_d(buf_LRU_validate());
-		ut_error;
+		ib::fatal() << "Page " << page_id
+			    << " already found in the hash table: "
+			    << hash_page << ", " << block;
 	}
 
 	ut_ad(!block->page.in_zip_hash);
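
The swap from ib::error() plus hand-rolled debug dumps and an explicit ut_error to a single ib::fatal() relies on the fatal logger terminating the server once the message is complete. A rough sketch of that stream-then-abort idiom (simplified; assuming the real classes behave as declared in InnoDB's ut0ut.h):

#include <cstdlib>
#include <iostream>
#include <sstream>

// Sketch of a stream-style fatal logger: operator<< accumulates the
// message, and the destructor (which runs at the end of the full
// expression) prints it and aborts, making a separate ut_error call
// unnecessary.
struct fatal_logger
{
  std::ostringstream msg;
  template<typename T> fatal_logger &operator<<(const T &value)
  { msg << value; return *this; }
  ~fatal_logger() { std::cerr << msg.str() << std::endl; std::abort(); }
};

// Usage: fatal_logger() << "Page " << page_id << " already found";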