Commit 13629c13 authored by marko

branches/zip: buf_LRU_free_block(): When preserving the compressed page

of a block, do not release buf_pool->mutex between the time the old control
block is removed from buf_pool->page_hash and the new control block is
added to it.  Prevent operations on the compressed-only block while calling
btr_search_drop_page_hash_index() or page_zip_calc_checksum().

buf_LRU_invalidate_tablespace(): Revert the change done in r1223.
buf_pool->zip_mutex will have been released by
buf_LRU_block_remove_hashed_page() when it returns BUF_BLOCK_ZIP_FREE.
parent fb8b8e6d
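The locking pattern described above can be illustrated with a short, self-contained sketch (plain C with pthreads; block_t, pool_t and the hash_*/checksum helpers are simplified stand-ins for buf_page_t, buf_pool, buf_pool->page_hash and page_zip_calc_checksum(), not InnoDB's real API): the relocated descriptor is swapped into the hash table inside a single critical section and pinned before the mutex is released, so the work done without any mutex cannot race with other threads.

/* Sketch only: pool_t, block_t and the helpers below are simplified
stand-ins, not InnoDB's real data structures or functions. */
#include <pthread.h>
#include <stddef.h>
#include <stdint.h>

typedef struct block_t {
	uint32_t	fix_count;	/* reference count; nonzero = pinned */
	int		io_fixed;	/* nonzero = I/O fix, other threads keep off */
	unsigned char*	zip_data;	/* compressed page frame */
	size_t		zip_size;
} block_t;

typedef struct pool_t {
	pthread_mutex_t	mutex;		/* plays the role of buf_pool->mutex */
	block_t*	hash[16];	/* toy stand-in for buf_pool->page_hash */
} pool_t;

/* toy page_hash helpers: slot 0 stands for the page's hash cell */
static void hash_remove(pool_t* pool, block_t* b) { if (pool->hash[0] == b) pool->hash[0] = NULL; }
static void hash_insert(pool_t* pool, block_t* b) { pool->hash[0] = b; }

/* stand-in for page_zip_calc_checksum(): any cheap checksum will do here */
static uint32_t checksum(const unsigned char* p, size_t n)
{
	uint32_t c = 0;
	while (n--) c = c * 31 + *p++;
	return c;
}

/* Relocate the compressed page from old_b to new_b.  The old control
block is removed from the hash and the new one inserted under one
critical section, and new_b is pinned before the mutex is released,
so no other thread can use it while the unlocked work runs. */
static void relocate_compressed(pool_t* pool, block_t* old_b, block_t* new_b)
{
	pthread_mutex_lock(&pool->mutex);

	hash_remove(pool, old_b);
	*new_b = *old_b;		/* copy the descriptor */
	new_b->fix_count++;		/* pin it ... */
	new_b->io_fixed = 1;		/* ... and mark it I/O-fixed */
	hash_insert(pool, new_b);	/* visible again, but off-limits */

	pthread_mutex_unlock(&pool->mutex);

	/* Expensive work runs without any mutex held (in the patch:
	dropping the adaptive hash index, stamping the checksum). */
	uint32_t c = checksum(new_b->zip_data, new_b->zip_size);
	(void) c;

	pthread_mutex_lock(&pool->mutex);
	new_b->fix_count--;		/* unpin */
	new_b->io_fixed = 0;
	pthread_mutex_unlock(&pool->mutex);
}

int main(void)
{
	unsigned char	frame[64] = {0};
	block_t		old_b = {0, 0, frame, sizeof frame};
	block_t		new_b;
	pool_t		pool = {PTHREAD_MUTEX_INITIALIZER, {&old_b}};

	relocate_compressed(&pool, &old_b, &new_b);
	return 0;
}

In the patch itself the pin is b->buf_fix_count++ plus buf_page_set_io_fix(b, BUF_IO_READ), released again only after buf_pool->mutex and block_mutex have been re-acquired.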
@@ -105,7 +105,6 @@ buf_LRU_invalidate_tablespace(
 		ut_a(buf_page_in_file(bpage));

 		mutex_enter(block_mutex);
-next_zip:
 		prev_bpage = UT_LIST_GET_PREV(LRU, bpage);

 		if (buf_page_get_space(bpage) == id) {
@@ -163,7 +162,7 @@ buf_LRU_invalidate_tablespace(
 				prev_bpage. Rescan the LRU list. */
 				bpage = UT_LIST_GET_LAST(buf_pool->LRU);
-				goto next_zip;
+				continue;
 			}
 		}

 next_page:
@@ -900,6 +899,7 @@ buf_LRU_free_block(
 	ibool		zip)	/* in: TRUE if should remove also the
				compressed page of an uncompressed page */
 {
+	buf_page_t*	b = NULL;
 	mutex_t*	block_mutex = buf_page_get_mutex(bpage);

 #ifdef UNIV_SYNC_DEBUG
@@ -909,6 +909,7 @@ buf_LRU_free_block(
 	ut_ad(buf_page_in_file(bpage));
 	ut_ad(bpage->in_LRU_list);
+	ut_ad(!bpage->in_flush_list == !bpage->oldest_modification);

 	if (!buf_page_can_relocate(bpage)) {
@@ -917,7 +918,6 @@ buf_LRU_free_block(
 	if (bpage->oldest_modification) {
 		/* Do not completely free dirty blocks. */
-		ut_ad(bpage->in_flush_list);

 		if (zip || !bpage->zip.data) {
 			return(FALSE);
@@ -929,8 +929,12 @@ buf_LRU_free_block(
 			return(FALSE);
 		}

+		b = buf_buddy_alloc(sizeof *b, FALSE);
+
+		if (!b) {
 			return(FALSE);
 		}
+	}

 #ifdef UNIV_DEBUG
 	if (buf_debug_prints) {
@@ -942,58 +946,14 @@ buf_LRU_free_block(
 	if (buf_LRU_block_remove_hashed_page(bpage, zip)
 	    != BUF_BLOCK_ZIP_FREE) {
-		mutex_exit(&(buf_pool->mutex));
-		mutex_exit(block_mutex);
-
-		/* Remove possible adaptive hash index on the page.
-		The page was declared uninitialized by
-		buf_LRU_block_remove_hashed_page(). We need to flag
-		the contents of the page valid (which it still is) in
-		order to avoid bogus Valgrind warnings.*/
-		UNIV_MEM_VALID(((buf_block_t*) bpage)->frame,
-			       UNIV_PAGE_SIZE);
-		btr_search_drop_page_hash_index((buf_block_t*) bpage);
-		UNIV_MEM_INVALID(((buf_block_t*) bpage)->frame,
-				 UNIV_PAGE_SIZE);
-
 		ut_a(bpage->buf_fix_count == 0);

-		if (bpage->zip.data && UNIV_LIKELY(srv_use_checksums)) {
-			/* Compute and stamp the compressed page
-			checksum while not holding any mutex. The
-			block is already half-freed
-			(BUF_BLOCK_REMOVE_HASH) and removed from
-			buf_pool->page_hash, thus inaccessible by any
-			other thread. */
-			mach_write_to_4(
-				bpage->zip.data + FIL_PAGE_SPACE_OR_CHKSUM,
-				page_zip_calc_checksum(
-					bpage->zip.data,
-					page_zip_get_size(&bpage->zip)));
-		}
-
-		mutex_enter(&(buf_pool->mutex));
-
-		if (bpage->zip.data) {
+		if (b) {
 			const ulint	fold = buf_page_address_fold(
 				bpage->space, bpage->offset);

-			buf_page_t*	b = buf_page_hash_get(
-				bpage->space, bpage->offset);
-
-			if (UNIV_LIKELY_NULL(b)) {
-				/* The block was reloaded to the buffer pool
-				while we were not holding buf_pool->mutex.
-				Free this block entirely; do not attempt to
-				preserve the compressed page. */
-				b = NULL;
-			} else {
-				/* Keep the compressed page.
-				Allocate a block descriptor for it. */
-				b = buf_buddy_alloc(sizeof *b, FALSE);
-			}
-
-			if (b) {
-				memcpy(b, bpage, sizeof *b);
-				b->state = b->oldest_modification
-					? BUF_BLOCK_ZIP_DIRTY
+			ut_a(!buf_page_hash_get(bpage->space, bpage->offset));
+
+			memcpy(b, bpage, sizeof *b);
+			b->state = b->oldest_modification
+				? BUF_BLOCK_ZIP_DIRTY
@@ -1004,7 +964,7 @@ buf_LRU_free_block(
 			HASH_INSERT(buf_page_t, hash,
 				    buf_pool->page_hash, fold, b);

-			buf_LRU_add_block_low(b, TRUE);
+			buf_LRU_add_block_low(b, FALSE);

 			if (b->state == BUF_BLOCK_ZIP_PAGE) {
 				buf_LRU_insert_zip_clean(b);
@@ -1015,9 +975,7 @@ buf_LRU_free_block(
 				ut_d(bpage->in_flush_list = FALSE);

 				prev = UT_LIST_GET_PREV(list, b);
-				UT_LIST_REMOVE(list,
-					       buf_pool->flush_list,
-					       b);
+				UT_LIST_REMOVE(list, buf_pool->flush_list, b);

 				if (prev) {
 					ut_ad(prev->in_flush_list);
@@ -1033,17 +991,56 @@ buf_LRU_free_block(
 				}
 			}

-			mutex_enter(block_mutex);
 			bpage->zip.data = NULL;
 			page_zip_set_size(&bpage->zip, 0);

-			goto free_hashed;
+			/* Prevent buf_page_init_for_read() from
+			decompressing the block while we release
+			buf_pool->mutex and block_mutex. */
+			b->buf_fix_count++;
+			buf_page_set_io_fix(b, BUF_IO_READ);
 		}

+		mutex_exit(&buf_pool->mutex);
+		mutex_exit(block_mutex);
+
+		/* Remove possible adaptive hash index on the page.
+		The page was declared uninitialized by
+		buf_LRU_block_remove_hashed_page(). We need to flag
+		the contents of the page valid (which it still is) in
+		order to avoid bogus Valgrind warnings.*/
+		UNIV_MEM_VALID(((buf_block_t*) bpage)->frame,
+			       UNIV_PAGE_SIZE);
+		btr_search_drop_page_hash_index((buf_block_t*) bpage);
+		UNIV_MEM_INVALID(((buf_block_t*) bpage)->frame,
+				 UNIV_PAGE_SIZE);
+
+		if (b) {
+			/* Compute and stamp the compressed page
+			checksum while not holding any mutex. The
+			block is already half-freed
+			(BUF_BLOCK_REMOVE_HASH) and removed from
+			buf_pool->page_hash, thus inaccessible by any
+			other thread. */
+			mach_write_to_4(
+				b->zip.data + FIL_PAGE_SPACE_OR_CHKSUM,
+				UNIV_LIKELY(srv_use_checksums)
+				? page_zip_calc_checksum(
+					b->zip.data,
+					page_zip_get_size(&b->zip))
+				: BUF_NO_CHECKSUM_MAGIC);
 		}

+		mutex_enter(&buf_pool->mutex);
 		mutex_enter(block_mutex);

-free_hashed:
+		if (b) {
+			b->buf_fix_count--;
+			buf_page_set_io_fix(b, BUF_IO_NONE);
+		}
+
 		buf_LRU_block_free_hashed_page((buf_block_t*) bpage);
 	} else {
 		mutex_enter(block_mutex);
......
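For reference, the mach_write_to_4() call in the new code stores the checksum big-endian into the checksum field at the start of the compressed page, or a no-checksum placeholder when checksums are disabled. A minimal stand-alone sketch of that stamping follows; CHECKSUM_FIELD_OFFSET, NO_CHECKSUM_MAGIC and calc_checksum() are illustrative stand-ins for FIL_PAGE_SPACE_OR_CHKSUM, BUF_NO_CHECKSUM_MAGIC and page_zip_calc_checksum(), not the real InnoDB definitions.

/* Sketch only: the constants and the checksum function are stand-ins. */
#include <stddef.h>
#include <stdint.h>

#define CHECKSUM_FIELD_OFFSET	0		/* checksum lives at the start of the page */
#define NO_CHECKSUM_MAGIC	0xDEADBEEFU	/* placeholder when checksums are disabled */

/* stand-in checksum over the compressed frame */
static uint32_t calc_checksum(const unsigned char* p, size_t n)
{
	uint32_t c = 0;
	while (n--) c = c * 31 + *p++;
	return c;
}

/* big-endian 4-byte store, in the spirit of mach_write_to_4() */
static void write_be32(unsigned char* dst, uint32_t v)
{
	dst[0] = (unsigned char) (v >> 24);
	dst[1] = (unsigned char) (v >> 16);
	dst[2] = (unsigned char) (v >> 8);
	dst[3] = (unsigned char) (v);
}

/* Stamp the checksum field of a compressed page.  Doing this without
holding any mutex is safe only because the caller has made the block
unreachable or pinned it, as the patch does. */
static void stamp_page_checksum(unsigned char* zip_data, size_t zip_size, int use_checksums)
{
	write_be32(zip_data + CHECKSUM_FIELD_OFFSET,
		   use_checksums
		   ? calc_checksum(zip_data, zip_size)
		   : NO_CHECKSUM_MAGIC);
}

int main(void)
{
	unsigned char page[1024] = {0};
	stamp_page_checksum(page, sizeof page, 1);
	return 0;
}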