Commit e2c305f3 authored by marko

branches/zip: Improve the shrinking of the buffer pool.

buf_LRU_block_free_non_file_page(): Deallocate block->page_zip.data
to avoid ut_a(!block->page_zip.data) in buf_chunk_free().

buf_chunk_free(): Add the assertion ut_a(!block->in_LRU_list).

buf_pool_resize(): When shrinking the buffer pool and there are
non-free blocks in the candidate chunk, free the clean blocks
and move the dirty blocks to the end of the LRU list and request a flush.
Proceed if the chunk becomes free, and retry otherwise.
parent 274f2bc6
...@@ -798,6 +798,7 @@ buf_chunk_free( ...@@ -798,6 +798,7 @@ buf_chunk_free(
ut_a(block->state == BUF_BLOCK_NOT_USED); ut_a(block->state == BUF_BLOCK_NOT_USED);
ut_a(!block->page_zip.data); ut_a(!block->page_zip.data);
ut_a(!block->in_LRU_list);
/* Remove the block from the free list. */ /* Remove the block from the free list. */
ut_a(block->in_free_list); ut_a(block->in_free_list);
UT_LIST_REMOVE(free, buf_pool->free, block); UT_LIST_REMOVE(free, buf_pool->free, block);
...@@ -904,6 +905,7 @@ buf_pool_resize(void) ...@@ -904,6 +905,7 @@ buf_pool_resize(void)
buf_chunk_t* chunks; buf_chunk_t* chunks;
buf_chunk_t* chunk; buf_chunk_t* chunk;
try_again:
mutex_enter(&buf_pool->mutex); mutex_enter(&buf_pool->mutex);
if (srv_buf_pool_old_size == srv_buf_pool_size) { if (srv_buf_pool_old_size == srv_buf_pool_size) {
...@@ -919,7 +921,9 @@ buf_pool_resize(void) ...@@ -919,7 +921,9 @@ buf_pool_resize(void)
= (srv_buf_pool_curr_size - srv_buf_pool_size) = (srv_buf_pool_curr_size - srv_buf_pool_size)
/ UNIV_PAGE_SIZE; / UNIV_PAGE_SIZE;
ulint max_size; ulint max_size;
ulint max_free_size;
buf_chunk_t* max_chunk; buf_chunk_t* max_chunk;
buf_chunk_t* max_free_chunk;
shrink_again: shrink_again:
if (buf_pool->n_chunks <= 1) { if (buf_pool->n_chunks <= 1) {
...@@ -932,25 +936,86 @@ shrink_again: ...@@ -932,25 +936,86 @@ shrink_again:
not larger than the size difference */ not larger than the size difference */
chunks = buf_pool->chunks; chunks = buf_pool->chunks;
chunk = chunks + buf_pool->n_chunks; chunk = chunks + buf_pool->n_chunks;
max_size = 0; max_size = max_free_size = 0;
max_chunk = NULL; max_chunk = max_free_chunk = NULL;
while (--chunk >= chunks) { while (--chunk >= chunks) {
if (chunk->size <= chunk_size if (chunk->size <= chunk_size
&& chunk->size > max_size && chunk->size > max_free_size) {
&& buf_chunk_all_free(chunk)) { if (chunk->size > max_size) {
max_size = chunk->size; max_size = chunk->size;
max_chunk = chunk; max_chunk = chunk;
} }
if (buf_chunk_all_free(chunk)) {
max_free_size = chunk->size;
max_free_chunk = chunk;
}
} }
}
if (!max_free_size) {
if (!max_size) { ulint dirty = 0;
ulint nonfree = 0;
buf_block_t* block;
buf_block_t* bend;
/* Cannot shrink: try again later /* Cannot shrink: try again later
(do not assign srv_buf_pool_old_size) */ (do not assign srv_buf_pool_old_size) */
if (!max_chunk) {
goto func_exit; goto func_exit;
} }
block = max_chunk->blocks;
bend = block + max_chunk->size;
/* Move the blocks of chunk to the end of the
LRU list and try to flush them. */
for (; block < bend; block++) {
if (block->state != BUF_BLOCK_FILE_PAGE) {
continue;
}
mutex_enter(&block->mutex);
if (!buf_flush_ready_for_replace(block)) {
buf_LRU_make_block_old(block);
dirty++;
} else if (!buf_LRU_free_block(block)) {
nonfree++;
}
mutex_exit(&block->mutex);
}
/* See if the chunk was in fact free. */
if (!dirty && !nonfree) {
goto is_free;
}
mutex_exit(&buf_pool->mutex);
/* Request for a flush of the chunk. */
if (buf_flush_batch(BUF_FLUSH_LRU, dirty,
ut_dulint_zero)
== ULINT_UNDEFINED) {
buf_flush_wait_batch_end(BUF_FLUSH_LRU);
}
/* Retry after flushing. */
goto try_again;
}
max_size = max_free_size;
max_chunk = max_free_chunk;
is_free:
srv_buf_pool_old_size = srv_buf_pool_size; srv_buf_pool_old_size = srv_buf_pool_size;
/* Rewrite buf_pool->chunks. Copy everything but max_chunk. */ /* Rewrite buf_pool->chunks. Copy everything but max_chunk. */
......
...@@ -868,12 +868,18 @@ buf_LRU_block_free_non_file_page( ...@@ -868,12 +868,18 @@ buf_LRU_block_free_non_file_page(
#ifdef UNIV_DEBUG #ifdef UNIV_DEBUG
/* Wipe contents of page to reveal possible stale pointers to it */ /* Wipe contents of page to reveal possible stale pointers to it */
memset(block->frame, '\0', UNIV_PAGE_SIZE); memset(block->frame, '\0', UNIV_PAGE_SIZE);
memset(block->page_zip.data, 0xff, block->page_zip.size);
#else #else
/* Wipe page_no and space_id */ /* Wipe page_no and space_id */
memset(block->frame + FIL_PAGE_OFFSET, 0xfe, 4); memset(block->frame + FIL_PAGE_OFFSET, 0xfe, 4);
memset(block->frame + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID, 0xfe, 4); memset(block->frame + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID, 0xfe, 4);
#endif #endif
if (block->page_zip.data) {
/* TODO: return zip to an aligned pool */
ut_free(block->page_zip.data);
block->page_zip.data = NULL;
block->page_zip.size = 0;
}
UT_LIST_ADD_FIRST(free, buf_pool->free, block); UT_LIST_ADD_FIRST(free, buf_pool->free, block);
block->in_free_list = TRUE; block->in_free_list = TRUE;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment