Commit 492cd8ce authored by marko

branches/zip: Minor improvements.

buf_LRU_block_remove_hashed_page(): Return the new state of the block.

Only call buf_LRU_block_free_hashed_page()
if buf_LRU_block_remove_hashed_page() did not return BUF_BLOCK_ZIP_FREE,
that is, the control block was not freed.
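
In sketch form, callers now follow this pattern (condensed from the
patch below, not a verbatim excerpt):

    if (buf_LRU_block_remove_hashed_page(bpage, TRUE)
        != BUF_BLOCK_ZIP_FREE) {
        /* The control block still exists; return it
        to the free list of control blocks. */
        buf_LRU_block_free_hashed_page(bpage);
    }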

buf_LRU_insert_zip_clean(): New function, for inserting a compressed-only
block into buf_pool->zip_clean in the LRU order.

buf_LRU_block_remove_hashed_page(), buf_LRU_free_block():
Add the flag "zip" for controlling whether the compressed page of an
uncompressed page should also be removed.  For now, assume zip==TRUE.
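
Roughly, the two modes behave as follows (a sketch of the intended
semantics, using only the names introduced by this patch):

    /* zip==TRUE: free the block completely, including its
    compressed page (if any). */
    buf_LRU_free_block(bpage, TRUE);

    /* zip==FALSE: free only the uncompressed copy; the compressed
    page stays in the buffer pool as a compressed-only block
    (BUF_BLOCK_ZIP_PAGE on buf_pool->zip_clean, or
    BUF_BLOCK_ZIP_DIRTY on buf_pool->flush_list). */
    buf_LRU_free_block(bpage, FALSE);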

buf_LRU_get_free_block(): Replace the test for UT_LIST_GET_LEN(buf_pool->free)
with a test for the return value of buf_LRU_get_free_only().  Do not
free zip.data, as it must already have been freed.

buf_flush_insert_into_flush_list(), buf_flush_insert_sorted_into_flush_list():
Remove compressed-only blocks from the buf_pool->zip_clean list.

buf_flush_remove(): Restore compressed-only blocks to
the buf_pool->zip_clean list.
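
Condensed, the bookkeeping for a compressed-only page is (sketch only;
the exact code, including the mutex handling, is in the diff below):

    /* buf_flush_insert_into_flush_list(),
    buf_flush_insert_sorted_into_flush_list():
    the clean page becomes dirty. */
    buf_page_set_state(bpage, BUF_BLOCK_ZIP_DIRTY);
    UT_LIST_REMOVE(list, buf_pool->zip_clean, bpage);
    /* ... bpage is then added to buf_pool->flush_list ... */

    /* buf_flush_remove(): the page is clean again. */
    buf_page_set_state(bpage, BUF_BLOCK_ZIP_PAGE);
    UT_LIST_REMOVE(list, buf_pool->flush_list, bpage);
    buf_LRU_insert_zip_clean(bpage);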

buf_page_init_for_read(): Uncompress compressed-only blocks when possible.
Currently, there cannot be any compressed-only blocks in the buffer pool;
such blocks would only be inserted by buf_LRU_free_block(bpage, zip=FALSE).
parent 0fb620f5
......@@ -165,7 +165,7 @@ buf_buddy_alloc_clean(
for (; j--; bpage = UT_LIST_GET_NEXT(list, bpage)) {
if (bpage->zip.ssize != dummy_zip.ssize
|| !buf_LRU_free_block(bpage)) {
|| !buf_LRU_free_block(bpage, FALSE)) {
continue;
}
......@@ -197,7 +197,8 @@ buf_buddy_alloc_clean(
void* ret;
if (!buf_LRU_free_block(bpage)) {
/* Keep the compressed pages of uncompressed blocks. */
if (!buf_LRU_free_block(bpage, FALSE)) {
continue;
}
......
......@@ -26,6 +26,7 @@ Created 11/5/1995 Heikki Tuuri
#include "buf0buf.ic"
#endif
#include "buf0buddy.h"
#include "mem0mem.h"
#include "btr0btr.h"
#include "fil0fil.h"
......@@ -1030,7 +1031,7 @@ shrink_again:
buf_LRU_make_block_old(&block->page);
dirty++;
} else if (!buf_LRU_free_block(&block->page)) {
} else if (!buf_LRU_free_block(&block->page, TRUE)) {
nonfree++;
}
......@@ -1938,6 +1939,7 @@ buf_page_init_for_read(
ulint offset) /* in: page number */
{
buf_block_t* block;
buf_page_t* bpage;
mtr_t mtr;
ut_ad(buf_pool);
......@@ -1972,27 +1974,94 @@ buf_page_init_for_read(
if (fil_tablespace_deleted_or_being_deleted_in_mem(
space, tablespace_version)) {
*err = DB_TABLESPACE_DELETED;
goto err_exit;
}
if (*err == DB_TABLESPACE_DELETED
|| NULL != buf_page_hash_get(space, offset)) {
bpage = buf_page_hash_get(space, offset);
if (UNIV_LIKELY_NULL(bpage)) {
switch (buf_page_get_state(bpage)) {
case BUF_BLOCK_ZIP_PAGE:
case BUF_BLOCK_ZIP_DIRTY:
ut_a(page_zip_get_size(&bpage->zip) == zip_size);
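/* If the compressed-only block is buffer-fixed or I/O-fixed,
leave it alone; err_exit releases the block that was
allocated for this read and returns NULL. */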
if (bpage->buf_fix_count
|| buf_page_get_io_fix(bpage)
!= BUF_IO_NONE) {
goto err_exit;
}
/* Move the compressed page from bpage to block,
and uncompress it. */
buf_buddy_free(block->page.zip.data, zip_size);
mutex_enter(&buf_pool->zip_mutex);
memcpy(&block->page, bpage, sizeof *bpage);
block->page.state = BUF_BLOCK_FILE_PAGE;
/* The page belongs to a space which has been
deleted or is being deleted, or the page is
already in buf_pool, return */
buf_relocate(bpage, &block->page);
if (buf_page_get_state(bpage) == BUF_BLOCK_ZIP_PAGE) {
UT_LIST_REMOVE(list, buf_pool->zip_clean,
bpage);
} else {
/* Relocate buf_pool->flush_list. */
buf_page_t* b;
b = UT_LIST_GET_PREV(list, bpage);
UT_LIST_REMOVE(list, buf_pool->flush_list,
bpage);
if (b) {
UT_LIST_INSERT_AFTER(
list, buf_pool->flush_list, b,
&block->page);
} else {
UT_LIST_ADD_FIRST(
list, buf_pool->flush_list,
&block->page);
}
}
bpage->zip.data = NULL;
page_zip_set_size(&bpage->zip, 0);
buf_buddy_free(bpage, sizeof *bpage);
mutex_exit(&buf_pool->zip_mutex);
break;
case BUF_BLOCK_FILE_PAGE:
break;
case BUF_BLOCK_ZIP_FREE:
case BUF_BLOCK_NOT_USED:
case BUF_BLOCK_READY_FOR_USE:
case BUF_BLOCK_MEMORY:
case BUF_BLOCK_REMOVE_HASH:
ut_error;
break;
}
err_exit:
/* The page belongs to a space which has been
deleted or is being deleted, or the page is
already in buf_pool, return */
mutex_exit(&block->mutex);
mutex_exit(&(buf_pool->mutex));
buf_block_free(block);
if (mode == BUF_READ_IBUF_PAGES_ONLY) {
mtr_commit(&mtr);
}
return(NULL);
}
ut_ad(block);
......
......@@ -55,13 +55,29 @@ buf_flush_insert_into_flush_list(
ut_ad(mutex_own(&(buf_pool->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_a(buf_page_in_file(bpage));
ut_ad((UT_LIST_GET_FIRST(buf_pool->flush_list) == NULL)
|| (UT_LIST_GET_FIRST(buf_pool->flush_list)->oldest_modification
<= bpage->oldest_modification));
UT_LIST_ADD_FIRST(list, buf_pool->flush_list, bpage);
switch (buf_page_get_state(bpage)) {
case BUF_BLOCK_ZIP_PAGE:
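/* A clean compressed-only page becomes dirty: mark it
BUF_BLOCK_ZIP_DIRTY and move it from buf_pool->zip_clean
to buf_pool->flush_list. */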
mutex_enter(&buf_pool->zip_mutex);
buf_page_set_state(bpage, BUF_BLOCK_ZIP_DIRTY);
mutex_exit(&buf_pool->zip_mutex);
UT_LIST_REMOVE(list, buf_pool->zip_clean, bpage);
/* fall through */
case BUF_BLOCK_ZIP_DIRTY:
case BUF_BLOCK_FILE_PAGE:
UT_LIST_ADD_FIRST(list, buf_pool->flush_list, bpage);
break;
case BUF_BLOCK_ZIP_FREE:
case BUF_BLOCK_NOT_USED:
case BUF_BLOCK_READY_FOR_USE:
case BUF_BLOCK_MEMORY:
case BUF_BLOCK_REMOVE_HASH:
ut_error;
return;
}
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
ut_a(buf_flush_validate_low());
......@@ -85,6 +101,25 @@ buf_flush_insert_sorted_into_flush_list(
ut_ad(mutex_own(&(buf_pool->mutex)));
#endif /* UNIV_SYNC_DEBUG */
switch (buf_page_get_state(bpage)) {
case BUF_BLOCK_ZIP_PAGE:
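/* As above: a clean compressed-only page becomes dirty and
leaves buf_pool->zip_clean; it is inserted into
buf_pool->flush_list in sorted order below. */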
mutex_enter(&buf_pool->zip_mutex);
buf_page_set_state(bpage, BUF_BLOCK_ZIP_DIRTY);
mutex_exit(&buf_pool->zip_mutex);
UT_LIST_REMOVE(list, buf_pool->zip_clean, bpage);
/* fall through */
case BUF_BLOCK_ZIP_DIRTY:
case BUF_BLOCK_FILE_PAGE:
break;
case BUF_BLOCK_ZIP_FREE:
case BUF_BLOCK_NOT_USED:
case BUF_BLOCK_READY_FOR_USE:
case BUF_BLOCK_MEMORY:
case BUF_BLOCK_REMOVE_HASH:
ut_error;
return;
}
prev_b = NULL;
b = UT_LIST_GET_FIRST(buf_pool->flush_list);
......@@ -202,7 +237,9 @@ buf_flush_remove(
mutex_enter(&buf_pool->zip_mutex);
buf_page_set_state(bpage, BUF_BLOCK_ZIP_PAGE);
mutex_exit(&buf_pool->zip_mutex);
/* fall through */
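/* The compressed-only page is clean again: take it off
buf_pool->flush_list and put it back into buf_pool->zip_clean. */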
UT_LIST_REMOVE(list, buf_pool->flush_list, bpage);
buf_LRU_insert_zip_clean(bpage);
break;
case BUF_BLOCK_FILE_PAGE:
UT_LIST_REMOVE(list, buf_pool->flush_list, bpage);
break;
......
......@@ -49,15 +49,20 @@ frames in the buffer pool, we set this to TRUE */
ibool buf_lru_switched_on_innodb_mon = FALSE;
/**********************************************************************
Takes a block out of the LRU list and page hash table and sets the block
state to BUF_BLOCK_REMOVE_HASH. */
Takes a block out of the LRU list and page hash table. */
static
void
enum buf_page_state
buf_LRU_block_remove_hashed_page(
/*=============================*/
buf_page_t* bpage); /* in: block, must contain a file page and
/* out: the new state of the block
(BUF_BLOCK_ZIP_FREE if the state was
BUF_BLOCK_ZIP_PAGE, or BUF_BLOCK_REMOVE_HASH
otherwise) */
buf_page_t* bpage, /* in: block, must contain a file page and
be in a state where it can be freed; there
may or may not be a hash index to the page */
ibool zip); /* in: TRUE if should remove also the
compressed page of an uncompressed page */
/**********************************************************************
Puts a file page that has no hash index to the free list. */
static
......@@ -139,8 +144,10 @@ scan_again:
}
/* Remove from the LRU list */
buf_LRU_block_remove_hashed_page(bpage);
buf_LRU_block_free_hashed_page(bpage);
if (buf_LRU_block_remove_hashed_page(bpage, TRUE)
!= BUF_BLOCK_ZIP_FREE) {
buf_LRU_block_free_hashed_page(bpage);
}
}
next_page:
mutex_exit(block_mutex);
......@@ -191,6 +198,40 @@ buf_LRU_get_recent_limit(void)
return(limit);
}
/************************************************************************
Insert a compressed block into buf_pool->zip_clean in the LRU order. */
void
buf_LRU_insert_zip_clean(
/*=====================*/
buf_page_t* bpage) /* in: pointer to the block in question */
{
buf_page_t* b;
#ifdef UNIV_SYNC_DEBUG
ut_a(mutex_own(&buf_pool->mutex));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_PAGE);
/* Find the first successor of bpage in the LRU list
that is in the zip_clean list. */
b = bpage;
do {
b = UT_LIST_GET_NEXT(LRU, b);
} while (b && buf_page_get_state(b) != BUF_BLOCK_ZIP_PAGE);
/* Insert bpage before b, i.e., after the predecessor of b. */
if (b) {
b = UT_LIST_GET_PREV(list, b);
}
if (b) {
UT_LIST_INSERT_AFTER(list, buf_pool->zip_clean, b, bpage);
} else {
UT_LIST_ADD_FIRST(list, buf_pool->zip_clean, bpage);
}
}
/**********************************************************************
Try to free a block. */
......@@ -198,7 +239,9 @@ ibool
buf_LRU_free_block(
/*===============*/
/* out: TRUE if freed */
buf_page_t* bpage) /* in: block to be freed */
buf_page_t* bpage, /* in: block to be freed */
ibool zip) /* in: TRUE if should remove also the
compressed page of an uncompressed page */
{
mutex_t* block_mutex = buf_page_get_mutex(bpage);
......@@ -223,12 +266,8 @@ buf_LRU_free_block(
}
#endif /* UNIV_DEBUG */
buf_LRU_block_remove_hashed_page(bpage);
switch (buf_page_get_state(bpage)) {
case BUF_BLOCK_REMOVE_HASH:
/* The state was changed from BUF_BLOCK_FILE_PAGE
in buf_LRU_block_remove_hashed_page(bpage). */
if (buf_LRU_block_remove_hashed_page(bpage, zip)
!= BUF_BLOCK_ZIP_FREE) {
mutex_exit(&(buf_pool->mutex));
mutex_exit(block_mutex);
......@@ -241,22 +280,6 @@ buf_LRU_free_block(
mutex_enter(block_mutex);
buf_LRU_block_free_hashed_page(bpage);
break;
case BUF_BLOCK_ZIP_PAGE:
ut_ad(!bpage->in_free_list);
ut_ad(!bpage->in_LRU_list);
UT_LIST_REMOVE(list, buf_pool->zip_clean, bpage);
buf_buddy_free(bpage->zip.data,
page_zip_get_size(&bpage->zip));
buf_buddy_free(bpage, sizeof(*bpage));
break;
default:
ut_error;
break;
}
return(TRUE);
......@@ -290,7 +313,7 @@ buf_LRU_search_and_free_block(
mutex_t* block_mutex = buf_page_get_mutex(bpage);
mutex_enter(block_mutex);
freed = buf_LRU_free_block(bpage);
freed = buf_LRU_free_block(bpage, n_iterations > 10);
mutex_exit(block_mutex);
if (freed) {
......@@ -496,29 +519,23 @@ loop:
}
/* If there is a block in the free list, take it */
if (UT_LIST_GET_LEN(buf_pool->free) > 0) {
block = buf_LRU_get_free_only();
ut_a(block); /* We tested that buf_pool->free is nonempty. */
block = buf_LRU_get_free_only();
if (block) {
if (buf_block_get_zip_size(block) != zip_size) {
page_zip_set_size(&block->page.zip, zip_size);
#ifdef UNIV_DEBUG
block->page.zip.m_start =
#endif /* UNIV_DEBUG */
block->page.zip.m_end =
block->page.zip.m_nonempty =
block->page.zip.n_blobs = 0;
if (block->page.zip.data) {
ut_free(block->page.zip.data);
}
block->page.zip.m_end =
block->page.zip.m_nonempty =
block->page.zip.n_blobs = 0;
if (zip_size) {
/* TODO: allocate zip from an aligned pool */
block->page.zip.data = ut_malloc(zip_size);
} else {
block->page.zip.data = NULL;
}
if (zip_size) {
page_zip_set_size(&block->page.zip, zip_size);
/* TODO: allocate zip from an aligned pool */
block->page.zip.data = ut_malloc(zip_size);
} else {
page_zip_set_size(&block->page.zip, 0);
block->page.zip.data = NULL;
}
mutex_exit(&(buf_pool->mutex));
......@@ -951,15 +968,20 @@ buf_LRU_block_free_non_file_page(
}
/**********************************************************************
Takes a block out of the LRU list and page hash table and sets the block
state to BUF_BLOCK_REMOVE_HASH. */
Takes a block out of the LRU list and page hash table. */
static
void
enum buf_page_state
buf_LRU_block_remove_hashed_page(
/*=============================*/
buf_page_t* bpage) /* in: block, must contain a file page and
/* out: the new state of the block
(BUF_BLOCK_ZIP_FREE if the state was
BUF_BLOCK_ZIP_PAGE, or BUF_BLOCK_REMOVE_HASH
otherwise) */
buf_page_t* bpage, /* in: block, must contain a file page and
be in a state where it can be freed; there
may or may not be a hash index to the page */
ibool zip) /* in: TRUE if should remove also the
compressed page of an uncompressed page */
{
const buf_page_t* hashed_bpage;
ut_ad(bpage);
......@@ -982,8 +1004,14 @@ buf_LRU_block_remove_hashed_page(
break;
case BUF_BLOCK_ZIP_PAGE:
break;
default:
case BUF_BLOCK_ZIP_FREE:
case BUF_BLOCK_ZIP_DIRTY:
case BUF_BLOCK_NOT_USED:
case BUF_BLOCK_READY_FOR_USE:
case BUF_BLOCK_MEMORY:
case BUF_BLOCK_REMOVE_HASH:
ut_error;
break;
}
hashed_bpage = buf_page_hash_get(bpage->space, bpage->offset);
......@@ -1018,6 +1046,8 @@ buf_LRU_block_remove_hashed_page(
bpage);
switch (buf_page_get_state(bpage)) {
case BUF_BLOCK_ZIP_PAGE:
ut_ad(!bpage->in_free_list);
ut_ad(!bpage->in_LRU_list);
ut_a(bpage->zip.data);
ut_a(buf_page_get_zip_size(bpage));
memset(bpage->zip.data + FIL_PAGE_OFFSET, 0xff, 4);
......@@ -1030,10 +1060,27 @@ buf_LRU_block_remove_hashed_page(
memset(((buf_block_t*) bpage)->frame
+ FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID, 0xff, 4);
buf_page_set_state(bpage, BUF_BLOCK_REMOVE_HASH);
if (bpage->zip.data) {
/* Free the compressed page. */
ut_free(bpage->zip.data);
bpage->zip.data = NULL;
page_zip_set_size(&bpage->zip, 0);
}
return(BUF_BLOCK_REMOVE_HASH);
case BUF_BLOCK_ZIP_FREE:
case BUF_BLOCK_ZIP_DIRTY:
case BUF_BLOCK_NOT_USED:
case BUF_BLOCK_READY_FOR_USE:
case BUF_BLOCK_MEMORY:
case BUF_BLOCK_REMOVE_HASH:
break;
default:
ut_error;
}
ut_error;
return(BUF_BLOCK_ZIP_FREE);
}
/**********************************************************************
......
......@@ -65,6 +65,13 @@ ulint
buf_LRU_get_recent_limit(void);
/*==========================*/
/* out: the limit; zero if could not determine it */
/************************************************************************
Insert a compressed block into buf_pool->zip_clean in the LRU order. */
void
buf_LRU_insert_zip_clean(
/*=====================*/
buf_page_t* bpage); /* in: pointer to the block in question */
/**********************************************************************
Try to free a block. */
......@@ -72,7 +79,9 @@ ibool
buf_LRU_free_block(
/*===============*/
/* out: TRUE if freed */
buf_page_t* block); /* in: block to be freed */
buf_page_t* block, /* in: block to be freed */
ibool zip); /* in: TRUE if should remove also the
compressed page of an uncompressed page */
/**********************************************************************
Look for a replaceable block from the end of the LRU list and put it to
the free list if found. */
......