Commit f43adb80 authored by Marko Mäkelä

Merge mysql-5.1 to mysql-5.5.

parents 3400e0be b0fc27dc
/*****************************************************************************
Copyright (c) 2006, 2010, Innobase Oy. All Rights Reserved.
Copyright (c) 2006, 2011, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -352,7 +352,9 @@ buf_buddy_relocate_block(
buf_page_t* bpage, /*!< in: block to relocate */
buf_page_t* dpage) /*!< in: free block to relocate to */
{
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
buf_page_t* b;
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
ut_ad(buf_pool_mutex_own(buf_pool));
@@ -382,7 +384,7 @@ buf_buddy_relocate_block(
buf_relocate(bpage, dpage);
ut_d(bpage->state = BUF_BLOCK_ZIP_FREE);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/* relocate buf_pool->zip_clean */
b = UT_LIST_GET_PREV(list, dpage);
UT_LIST_REMOVE(list, buf_pool->zip_clean, dpage);
@@ -392,6 +394,7 @@ buf_buddy_relocate_block(
} else {
UT_LIST_ADD_FIRST(list, buf_pool->zip_clean, dpage);
}
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
UNIV_MEM_INVALID(bpage, sizeof *bpage);
...
/*****************************************************************************
Copyright (c) 1995, 2010, Innobase Oy. All Rights Reserved.
Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
Portions of this file contain modifications contributed and copyrighted by
@@ -1099,70 +1099,6 @@ buf_chunk_not_freed(
return(NULL);
}
/*********************************************************************//**
Checks that all blocks in the buffer chunk are in BUF_BLOCK_NOT_USED state.
@return TRUE if all freed */
static
ibool
buf_chunk_all_free(
/*===============*/
const buf_chunk_t* chunk) /*!< in: chunk being checked */
{
const buf_block_t* block;
ulint i;
block = chunk->blocks;
for (i = chunk->size; i--; block++) {
if (buf_block_get_state(block) != BUF_BLOCK_NOT_USED) {
return(FALSE);
}
}
return(TRUE);
}
/********************************************************************//**
Frees a chunk of buffer frames. */
static
void
buf_chunk_free(
/*===========*/
buf_pool_t* buf_pool, /*!< in: buffer pool instance */
buf_chunk_t* chunk) /*!< out: chunk of buffers */
{
buf_block_t* block;
const buf_block_t* block_end;
ut_ad(buf_pool_mutex_own(buf_pool));
block_end = chunk->blocks + chunk->size;
for (block = chunk->blocks; block < block_end; block++) {
ut_a(buf_block_get_state(block) == BUF_BLOCK_NOT_USED);
ut_a(!block->page.zip.data);
ut_ad(!block->page.in_LRU_list);
ut_ad(!block->in_unzip_LRU_list);
ut_ad(!block->page.in_flush_list);
/* Remove the block from the free list. */
ut_ad(block->page.in_free_list);
UT_LIST_REMOVE(list, buf_pool->free, (&block->page));
/* Free the latches. */
mutex_free(&block->mutex);
rw_lock_free(&block->lock);
#ifdef UNIV_SYNC_DEBUG
rw_lock_free(&block->debug_latch);
#endif /* UNIV_SYNC_DEBUG */
UNIV_MEM_UNDESC(block);
}
os_mem_free_large(chunk->mem, chunk->mem_size);
}
/********************************************************************//**
Set buffer pool size variables after resizing it */
static
@@ -1272,8 +1208,6 @@ buf_pool_free_instance(
chunk = chunks + buf_pool->n_chunks;
while (--chunk >= chunks) {
/* Bypass the checks of buf_chunk_free(), since they
would fail at shutdown. */
os_mem_free_large(chunk->mem, chunk->mem_size);
}
@@ -1533,281 +1467,6 @@ buf_relocate(
}
/********************************************************************//**
Shrinks a buffer pool instance. */
static
void
buf_pool_shrink_instance(
/*=====================*/
buf_pool_t* buf_pool, /*!< in: buffer pool instance */
ulint chunk_size) /*!< in: number of pages to remove */
{
buf_chunk_t* chunks;
buf_chunk_t* chunk;
ulint max_size;
ulint max_free_size;
buf_chunk_t* max_chunk;
buf_chunk_t* max_free_chunk;
ut_ad(!buf_pool_mutex_own(buf_pool));
try_again:
btr_search_disable(); /* Empty the adaptive hash index again */
buf_pool_mutex_enter(buf_pool);
shrink_again:
if (buf_pool->n_chunks <= 1) {
/* Cannot shrink if there is only one chunk */
goto func_done;
}
/* Search for the largest free chunk
not larger than the size difference */
chunks = buf_pool->chunks;
chunk = chunks + buf_pool->n_chunks;
max_size = max_free_size = 0;
max_chunk = max_free_chunk = NULL;
while (--chunk >= chunks) {
if (chunk->size <= chunk_size
&& chunk->size > max_free_size) {
if (chunk->size > max_size) {
max_size = chunk->size;
max_chunk = chunk;
}
if (buf_chunk_all_free(chunk)) {
max_free_size = chunk->size;
max_free_chunk = chunk;
}
}
}
if (!max_free_size) {
ulint dirty = 0;
ulint nonfree = 0;
buf_block_t* block;
buf_block_t* bend;
/* Cannot shrink: try again later
(do not assign srv_buf_pool_old_size) */
if (!max_chunk) {
goto func_exit;
}
block = max_chunk->blocks;
bend = block + max_chunk->size;
/* Move the blocks of chunk to the end of the
LRU list and try to flush them. */
for (; block < bend; block++) {
switch (buf_block_get_state(block)) {
case BUF_BLOCK_NOT_USED:
continue;
case BUF_BLOCK_FILE_PAGE:
break;
default:
nonfree++;
continue;
}
mutex_enter(&block->mutex);
/* The following calls will temporarily
release block->mutex and buf_pool->mutex.
Therefore, we have to always retry,
even if !dirty && !nonfree. */
if (!buf_flush_ready_for_replace(&block->page)) {
buf_LRU_make_block_old(&block->page);
dirty++;
} else if (buf_LRU_free_block(&block->page, TRUE)
!= BUF_LRU_FREED) {
nonfree++;
}
mutex_exit(&block->mutex);
}
buf_pool_mutex_exit(buf_pool);
/* Request for a flush of the chunk if it helps.
Do not flush if there are non-free blocks, since
flushing will not make the chunk freeable. */
if (nonfree) {
/* Avoid busy-waiting. */
os_thread_sleep(100000);
} else if (dirty
&& buf_flush_LRU(buf_pool, dirty)
== ULINT_UNDEFINED) {
buf_flush_wait_batch_end(buf_pool, BUF_FLUSH_LRU);
}
goto try_again;
}
max_size = max_free_size;
max_chunk = max_free_chunk;
buf_pool->old_pool_size = buf_pool->curr_pool_size;
/* Rewrite buf_pool->chunks. Copy everything but max_chunk. */
chunks = mem_alloc((buf_pool->n_chunks - 1) * sizeof *chunks);
memcpy(chunks, buf_pool->chunks,
(max_chunk - buf_pool->chunks) * sizeof *chunks);
memcpy(chunks + (max_chunk - buf_pool->chunks),
max_chunk + 1,
buf_pool->chunks + buf_pool->n_chunks
- (max_chunk + 1));
ut_a(buf_pool->curr_size > max_chunk->size);
buf_pool->curr_size -= max_chunk->size;
buf_pool->curr_pool_size = buf_pool->curr_size * UNIV_PAGE_SIZE;
chunk_size -= max_chunk->size;
buf_chunk_free(buf_pool, max_chunk);
mem_free(buf_pool->chunks);
buf_pool->chunks = chunks;
buf_pool->n_chunks--;
/* Allow a slack of one megabyte. */
if (chunk_size > 1048576 / UNIV_PAGE_SIZE) {
goto shrink_again;
}
goto func_exit;
func_done:
buf_pool->old_pool_size = buf_pool->curr_pool_size;
func_exit:
buf_pool_mutex_exit(buf_pool);
btr_search_enable();
}
/********************************************************************//**
Shrinks the buffer pool. */
static
void
buf_pool_shrink(
/*============*/
ulint chunk_size) /*!< in: number of pages to remove */
{
ulint i;
for (i = 0; i < srv_buf_pool_instances; i++) {
buf_pool_t* buf_pool;
ulint instance_chunk_size;
instance_chunk_size = chunk_size / srv_buf_pool_instances;
buf_pool = buf_pool_from_array(i);
buf_pool_shrink_instance(buf_pool, instance_chunk_size);
}
buf_pool_set_sizes();
}
/********************************************************************//**
Rebuild buf_pool->page_hash for a buffer pool instance. */
static
void
buf_pool_page_hash_rebuild_instance(
/*================================*/
buf_pool_t* buf_pool) /*!< in: buffer pool instance */
{
ulint i;
buf_page_t* b;
buf_chunk_t* chunk;
ulint n_chunks;
hash_table_t* zip_hash;
hash_table_t* page_hash;
buf_pool_mutex_enter(buf_pool);
/* Free, create, and populate the hash table. */
hash_table_free(buf_pool->page_hash);
buf_pool->page_hash = page_hash = hash_create(2 * buf_pool->curr_size);
zip_hash = hash_create(2 * buf_pool->curr_size);
HASH_MIGRATE(buf_pool->zip_hash, zip_hash, buf_page_t, hash,
BUF_POOL_ZIP_FOLD_BPAGE);
hash_table_free(buf_pool->zip_hash);
buf_pool->zip_hash = zip_hash;
/* Insert the uncompressed file pages to buf_pool->page_hash. */
chunk = buf_pool->chunks;
n_chunks = buf_pool->n_chunks;
for (i = 0; i < n_chunks; i++, chunk++) {
ulint j;
buf_block_t* block = chunk->blocks;
for (j = 0; j < chunk->size; j++, block++) {
if (buf_block_get_state(block)
== BUF_BLOCK_FILE_PAGE) {
ut_ad(!block->page.in_zip_hash);
ut_ad(block->page.in_page_hash);
HASH_INSERT(buf_page_t, hash, page_hash,
buf_page_address_fold(
block->page.space,
block->page.offset),
&block->page);
}
}
}
/* Insert the compressed-only pages to buf_pool->page_hash.
All such blocks are either in buf_pool->zip_clean or
in buf_pool->flush_list. */
for (b = UT_LIST_GET_FIRST(buf_pool->zip_clean); b;
b = UT_LIST_GET_NEXT(list, b)) {
ut_a(buf_page_get_state(b) == BUF_BLOCK_ZIP_PAGE);
ut_ad(!b->in_flush_list);
ut_ad(b->in_LRU_list);
ut_ad(b->in_page_hash);
ut_ad(!b->in_zip_hash);
HASH_INSERT(buf_page_t, hash, page_hash,
buf_page_address_fold(b->space, b->offset), b);
}
buf_flush_list_mutex_enter(buf_pool);
for (b = UT_LIST_GET_FIRST(buf_pool->flush_list); b;
b = UT_LIST_GET_NEXT(list, b)) {
ut_ad(b->in_flush_list);
ut_ad(b->in_LRU_list);
ut_ad(b->in_page_hash);
ut_ad(!b->in_zip_hash);
switch (buf_page_get_state(b)) {
case BUF_BLOCK_ZIP_DIRTY:
HASH_INSERT(buf_page_t, hash, page_hash,
buf_page_address_fold(b->space,
b->offset), b);
break;
case BUF_BLOCK_FILE_PAGE:
/* uncompressed page */
break;
case BUF_BLOCK_ZIP_FREE:
case BUF_BLOCK_ZIP_PAGE:
case BUF_BLOCK_NOT_USED:
case BUF_BLOCK_READY_FOR_USE:
case BUF_BLOCK_MEMORY:
case BUF_BLOCK_REMOVE_HASH:
ut_error;
break;
}
}
buf_flush_list_mutex_exit(buf_pool);
buf_pool_mutex_exit(buf_pool);
}
/********************************************************************
Determine if a block is a sentinel for a buffer pool watch.
@return TRUE if a sentinel for a buffer pool watch, FALSE if not */
UNIV_INTERN
@@ -1913,123 +1572,6 @@ buf_pool_watch_set(
return(NULL);
}
/********************************************************************//**
Rebuild buf_pool->page_hash. */
static
void
buf_pool_page_hash_rebuild(void)
/*============================*/
{
ulint i;
for (i = 0; i < srv_buf_pool_instances; i++) {
buf_pool_page_hash_rebuild_instance(buf_pool_from_array(i));
}
}
/********************************************************************//**
Increase the buffer pool size of one buffer pool instance. */
static
void
buf_pool_increase_instance(
/*=======================*/
buf_pool_t* buf_pool, /*!< in: buffer pool instance */
ulint change_size) /*!< in: new size of the pool */
{
buf_chunk_t* chunks;
buf_chunk_t* chunk;
buf_pool_mutex_enter(buf_pool);
chunks = mem_alloc((buf_pool->n_chunks + 1) * sizeof *chunks);
memcpy(chunks, buf_pool->chunks, buf_pool->n_chunks * sizeof *chunks);
chunk = &chunks[buf_pool->n_chunks];
if (!buf_chunk_init(buf_pool, chunk, change_size)) {
mem_free(chunks);
} else {
buf_pool->old_pool_size = buf_pool->curr_pool_size;
buf_pool->curr_size += chunk->size;
buf_pool->curr_pool_size = buf_pool->curr_size * UNIV_PAGE_SIZE;
mem_free(buf_pool->chunks);
buf_pool->chunks = chunks;
buf_pool->n_chunks++;
}
buf_pool_mutex_exit(buf_pool);
}
/********************************************************************//**
Increase the buffer pool size. */
static
void
buf_pool_increase(
/*==============*/
ulint change_size)
{
ulint i;
for (i = 0; i < srv_buf_pool_instances; i++) {
buf_pool_increase_instance(
buf_pool_from_array(i),
change_size / srv_buf_pool_instances);
}
buf_pool_set_sizes();
}
/********************************************************************//**
Resizes the buffer pool. */
UNIV_INTERN
void
buf_pool_resize(void)
/*=================*/
{
ulint change_size;
ulint min_change_size = 1048576 * srv_buf_pool_instances;
buf_pool_mutex_enter_all();
if (srv_buf_pool_old_size == srv_buf_pool_size) {
buf_pool_mutex_exit_all();
return;
} else if (srv_buf_pool_curr_size + min_change_size
> srv_buf_pool_size) {
change_size = (srv_buf_pool_curr_size - srv_buf_pool_size)
/ UNIV_PAGE_SIZE;
buf_pool_mutex_exit_all();
/* Disable adaptive hash indexes and empty the index
in order to free up memory in the buffer pool chunks. */
buf_pool_shrink(change_size);
} else if (srv_buf_pool_curr_size + min_change_size
< srv_buf_pool_size) {
/* Enlarge the buffer pool by at least one megabyte */
change_size = srv_buf_pool_size - srv_buf_pool_curr_size;
buf_pool_mutex_exit_all();
buf_pool_increase(change_size);
} else {
srv_buf_pool_size = srv_buf_pool_old_size;
buf_pool_mutex_exit_all();
return;
}
buf_pool_page_hash_rebuild();
}
/****************************************************************//**
Remove the sentinel block for the watch before replacing it with a real block.
buf_page_watch_clear() or buf_page_watch_occurred() will notice that
@@ -2951,8 +2493,10 @@ wait_until_unfixed:
if (buf_page_get_state(&block->page)
== BUF_BLOCK_ZIP_PAGE) {
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
UT_LIST_REMOVE(list, buf_pool->zip_clean,
&block->page);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
ut_ad(!block->page.in_flush_list);
} else {
/* Relocate buf_pool->flush_list. */
@@ -3764,7 +3308,9 @@ err_exit:
/* The block must be put to the LRU list, to the old blocks */
buf_LRU_add_block(bpage, TRUE/* to old blocks */);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
buf_LRU_insert_zip_clean(bpage);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
buf_page_set_io_fix(bpage, BUF_IO_READ);
...
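The buf0buf.c hunks above compile every buf_pool->zip_clean access only when UNIV_DEBUG or UNIV_BUF_DEBUG is defined. A minimal stand-alone C sketch of that conditional-compilation pattern follows; the pool_t and page_t types and the pool_add_clean() helper are simplified stand-ins invented for illustration, not the real InnoDB definitions.

#include <stdio.h>

typedef struct page_struct page_t;
struct page_struct {
	int	id;		/* page identifier (stand-in) */
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
	page_t*	next;		/* link for the debug-only clean list */
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
};

typedef struct pool_struct {
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
	page_t*	zip_clean;	/* head of the debug-only clean list */
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
	int	n_pages;	/* always-present counter */
} pool_t;

/* Register an unmodified compressed page with the pool.  The clean
list is maintained only when it is compiled in. */
static void
pool_add_clean(pool_t* pool, page_t* page)
{
	pool->n_pages++;
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
	page->next = pool->zip_clean;
	pool->zip_clean = page;
#else
	(void) page;	/* unused in non-debug builds */
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
}

int
main(void)
{
	pool_t	pool = {0};
	page_t	page = {42};

	pool_add_clean(&pool, &page);
	printf("pages tracked: %d\n", pool.n_pages);
	return(0);
}

Built without -DUNIV_DEBUG (or -DUNIV_BUF_DEBUG), the list member and every statement that touches it drop out of the build, which mirrors the effect of the change: non-debug builds no longer maintain bookkeeping that only debug checks consume.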
/*****************************************************************************
Copyright (c) 1995, 2010, Innobase Oy. All Rights Reserved.
Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -524,7 +524,9 @@ buf_flush_remove(
case BUF_BLOCK_ZIP_DIRTY:
buf_page_set_state(bpage, BUF_BLOCK_ZIP_PAGE);
UT_LIST_REMOVE(list, buf_pool->flush_list, bpage);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
buf_LRU_insert_zip_clean(bpage);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
break;
case BUF_BLOCK_FILE_PAGE:
UT_LIST_REMOVE(list, buf_pool->flush_list, bpage);
...
/*****************************************************************************
Copyright (c) 1995, 2010, Innobase Oy. All Rights Reserved.
Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -525,6 +525,7 @@ buf_LRU_invalidate_tablespace(
}
}
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/********************************************************************//**
Insert a compressed block into buf_pool->zip_clean in the LRU order. */
UNIV_INTERN
@@ -557,6 +558,7 @@ buf_LRU_insert_zip_clean(
UT_LIST_ADD_FIRST(list, buf_pool->zip_clean, bpage);
}
}
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
/******************************************************************//**
Try to free an uncompressed page of a compressed block from the unzip
@@ -1598,7 +1600,9 @@ alloc:
}
if (b->state == BUF_BLOCK_ZIP_PAGE) {
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
buf_LRU_insert_zip_clean(b);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
} else {
/* Relocate on buf_pool->flush_list. */
buf_flush_relocate_on_flush_list(bpage, b);
@@ -1884,7 +1888,9 @@ buf_LRU_block_remove_hashed_page(
ut_a(bpage->zip.data);
ut_a(buf_page_get_zip_size(bpage));
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
UT_LIST_REMOVE(list, buf_pool->zip_clean, bpage);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
mutex_exit(&buf_pool->zip_mutex);
buf_pool_mutex_exit_forbid(buf_pool);
...
@@ -246,12 +246,6 @@ buf_relocate(
BUF_BLOCK_ZIP_DIRTY or BUF_BLOCK_ZIP_PAGE */
buf_page_t* dpage) /*!< in/out: destination control block */
__attribute__((nonnull));
/********************************************************************//**
Resizes the buffer pool. */
UNIV_INTERN
void
buf_pool_resize(void);
/*=================*/
/*********************************************************************//**
Gets the current size of buffer buf_pool in bytes.
@return size in bytes */
@@ -1221,7 +1215,7 @@ ulint
buf_get_free_list_len(void);
/*=======================*/
/********************************************************************
/********************************************************************//**
Determine if a block is a sentinel for a buffer pool watch.
@return TRUE if a sentinel for a buffer pool watch, FALSE if not */
UNIV_INTERN
@@ -1757,8 +1751,10 @@ struct buf_pool_struct{
frames and buf_page_t descriptors of blocks that exist
in the buffer pool only in compressed form. */
/* @{ */
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
UT_LIST_BASE_NODE_T(buf_page_t) zip_clean;
/*!< unmodified compressed pages */
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
UT_LIST_BASE_NODE_T(buf_page_t) zip_free[BUF_BUDDY_SIZES];
/*!< buddy free lists */
...
/*****************************************************************************
Copyright (c) 1995, 2009, Innobase Oy. All Rights Reserved.
Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -85,6 +85,7 @@ void
buf_LRU_invalidate_tablespace(
/*==========================*/
ulint id); /*!< in: space id */
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/********************************************************************//**
Insert a compressed block into buf_pool->zip_clean in the LRU order. */
UNIV_INTERN
@@ -92,6 +93,7 @@ void
buf_LRU_insert_zip_clean(
/*=====================*/
buf_page_t* bpage); /*!< in: pointer to the block in question */
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
/******************************************************************//**
Try to free a block. If bpage is a descriptor of a compressed-only
...
/*****************************************************************************
Copyright (c) 2005, 2009, Innobase Oy. All Rights Reserved.
Copyright (c) 2005, 2011, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -3912,17 +3912,9 @@ page_zip_write_trx_id_and_roll_ptr(
UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip));
}
#ifdef UNIV_ZIP_DEBUG
/** Set this variable in a debugger to disable page_zip_clear_rec().
The only observable effect should be the compression ratio due to
deleted records not being zeroed out. In rare cases, there can be
page_zip_validate() failures on the node_ptr, trx_id and roll_ptr
columns if the space is reallocated for a smaller record. */
UNIV_INTERN ibool page_zip_clear_rec_disable;
#endif /* UNIV_ZIP_DEBUG */
/**********************************************************************//**
Clear an area on the uncompressed and compressed page, if possible. */
Clear an area on the uncompressed and compressed page.
Do not clear the data payload, as that would grow the modification log. */
static
void
page_zip_clear_rec(
@@ -3934,6 +3926,9 @@ page_zip_clear_rec(
{
ulint heap_no;
page_t* page = page_align(rec);
byte* storage;
byte* field;
ulint len;
/* page_zip_validate() would fail here if a record
containing externally stored columns is being deleted. */
ut_ad(rec_offs_validate(rec, index, offsets));
@@ -3949,60 +3944,46 @@ page_zip_clear_rec(
UNIV_MEM_ASSERT_RW(rec - rec_offs_extra_size(offsets),
rec_offs_extra_size(offsets));
if (
#ifdef UNIV_ZIP_DEBUG
!page_zip_clear_rec_disable &&
#endif /* UNIV_ZIP_DEBUG */
page_zip->m_end
+ 1 + ((heap_no - 1) >= 64)/* size of the log entry */
+ page_zip_get_trailer_len(page_zip,
dict_index_is_clust(index), NULL)
< page_zip_get_size(page_zip)) {
byte* data;
/* Clear only the data bytes, because the allocator and
the decompressor depend on the extra bytes. */
memset(rec, 0, rec_offs_data_size(offsets));
if (!page_is_leaf(page)) {
/* Clear node_ptr on the compressed page. */
byte* storage = page_zip->data
/* Clear node_ptr. On the compressed page,
there is an array of node_ptr immediately before the
dense page directory, at the very end of the page. */
storage = page_zip->data
+ page_zip_get_size(page_zip)
- (page_dir_get_n_heap(page)
- PAGE_HEAP_NO_USER_LOW)
* PAGE_ZIP_DIR_SLOT_SIZE;
ut_ad(dict_index_get_n_unique_in_tree(index) ==
rec_offs_n_fields(offsets) - 1);
field = rec_get_nth_field(rec, offsets,
rec_offs_n_fields(offsets) - 1,
&len);
ut_ad(len == REC_NODE_PTR_SIZE);
ut_ad(!rec_offs_any_extern(offsets));
memset(field, 0, REC_NODE_PTR_SIZE);
memset(storage - (heap_no - 1) * REC_NODE_PTR_SIZE,
0, REC_NODE_PTR_SIZE);
} else if (dict_index_is_clust(index)) {
/* Clear trx_id and roll_ptr on the compressed page. */
byte* storage = page_zip->data
/* Clear trx_id and roll_ptr. On the compressed page,
there is an array of these fields immediately before the
dense page directory, at the very end of the page. */
const ulint trx_id_pos
= dict_col_get_clust_pos(
dict_table_get_sys_col(
index->table, DATA_TRX_ID), index);
storage = page_zip->data
+ page_zip_get_size(page_zip)
- (page_dir_get_n_heap(page)
- PAGE_HEAP_NO_USER_LOW)
* PAGE_ZIP_DIR_SLOT_SIZE;
field = rec_get_nth_field(rec, offsets, trx_id_pos, &len);
ut_ad(len == DATA_TRX_ID_LEN);
memset(field, 0, DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN);
memset(storage - (heap_no - 1)
* (DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN),
0, DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN);
}
/* Log that the data was zeroed out. */
data = page_zip->data + page_zip->m_end;
ut_ad(!*data);
if (UNIV_UNLIKELY(heap_no - 1 >= 64)) {
*data++ = (byte) (0x80 | (heap_no - 1) >> 7);
ut_ad(!*data);
}
*data++ = (byte) ((heap_no - 1) << 1 | 1);
ut_ad(!*data);
ut_ad((ulint) (data - page_zip->data)
< page_zip_get_size(page_zip));
page_zip->m_end = data - page_zip->data;
page_zip->m_nonempty = TRUE;
} else if (page_is_leaf(page) && dict_index_is_clust(index)) {
/* Do not clear the record, because there is not enough space
to log the operation. */
if (rec_offs_any_extern(offsets)) {
ulint i;
@@ -4011,15 +3992,18 @@ page_zip_clear_rec(
/* Clear all BLOB pointers in order to make
page_zip_validate() pass. */
if (rec_offs_nth_extern(offsets, i)) {
ulint len;
byte* field = rec_get_nth_field(
field = rec_get_nth_field(
rec, offsets, i, &len);
ut_ad(len
== BTR_EXTERN_FIELD_REF_SIZE);
memset(field + len
- BTR_EXTERN_FIELD_REF_SIZE,
0, BTR_EXTERN_FIELD_REF_SIZE);
}
}
}
} else {
ut_ad(!rec_offs_any_extern(offsets));
}
#ifdef UNIV_ZIP_DEBUG
...
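The rewritten page_zip_clear_rec() comments describe where the cleared columns live: on a compressed page, the node-pointer array (non-leaf pages) and the DB_TRX_ID/DB_ROLL_PTR array (clustered-index leaf pages) sit immediately before the dense page directory at the end of page_zip->data, indexed by heap number. A rough stand-alone sketch of that offset arithmetic follows; the numeric constants mirror common InnoDB values but are assumptions made for this sketch, not taken from the real headers.

#include <stdio.h>

/* Assumed sizes, for illustration only. */
enum {
	PAGE_ZIP_SIZE		= 8192,	/* compressed page size */
	PAGE_ZIP_DIR_SLOT_SIZE	= 2,	/* dense directory slot size */
	PAGE_HEAP_NO_USER_LOW	= 2,	/* heap numbers of infimum, supremum */
	REC_NODE_PTR_SIZE	= 4,	/* child page number */
	DATA_TRX_ID_LEN		= 6,	/* DB_TRX_ID */
	DATA_ROLL_PTR_LEN	= 7	/* DB_ROLL_PTR */
};

/* Offset, from the start of the compressed page, of the dense page
directory, which occupies the last
(n_heap - PAGE_HEAP_NO_USER_LOW) * PAGE_ZIP_DIR_SLOT_SIZE bytes. */
static unsigned long
dir_start(unsigned long n_heap)
{
	return(PAGE_ZIP_SIZE
	       - (n_heap - PAGE_HEAP_NO_USER_LOW) * PAGE_ZIP_DIR_SLOT_SIZE);
}

/* Start of the node-pointer slot for the record with heap number
heap_no, in the array just below the dense directory. */
static unsigned long
node_ptr_offset(unsigned long n_heap, unsigned long heap_no)
{
	return(dir_start(n_heap) - (heap_no - 1) * REC_NODE_PTR_SIZE);
}

/* Start of the DB_TRX_ID, DB_ROLL_PTR slot for the record with heap
number heap_no on a clustered-index leaf page. */
static unsigned long
trx_rbp_offset(unsigned long n_heap, unsigned long heap_no)
{
	return(dir_start(n_heap)
	       - (heap_no - 1) * (DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN));
}

int
main(void)
{
	unsigned long	n_heap = 10;	/* infimum, supremum + 8 user records */

	printf("dense directory starts at offset %lu\n", dir_start(n_heap));
	printf("node_ptr slot of heap_no 3 starts at %lu\n",
	       node_ptr_offset(n_heap, 3));
	printf("trx_id/roll_ptr slot of heap_no 3 starts at %lu\n",
	       trx_rbp_offset(n_heap, 3));
	return(0);
}

Both arrays grow downward from dir_start(), one fixed-size slot per user record, so the slot for heap number h starts (h - 1) slots below dir_start(); that is the address the memset() calls in the hunk above compute before zeroing the stored column.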
/*****************************************************************************
Copyright (c) 1994, 2010, Innobase Oy. All Rights Reserved.
Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -408,7 +408,7 @@ rec_init_offsets(
do {
ulint len;
if (UNIV_UNLIKELY(i == n_node_ptr_field)) {
len = offs += 4;
len = offs += REC_NODE_PTR_SIZE;
goto resolved;
}
@@ -640,7 +640,7 @@ rec_get_offsets_reverse(
do {
ulint len;
if (UNIV_UNLIKELY(i == n_node_ptr_field)) {
len = offs += 4;
len = offs += REC_NODE_PTR_SIZE;
goto resolved;
}
@@ -1131,9 +1131,9 @@ rec_convert_dtuple_to_rec_comp(
if (UNIV_UNLIKELY(i == n_node_ptr_field)) {
ut_ad(dtype_get_prtype(type) & DATA_NOT_NULL);
ut_ad(len == 4);
ut_ad(len == REC_NODE_PTR_SIZE);
memcpy(end, dfield_get_data(field), len);
end += 4;
end += REC_NODE_PTR_SIZE;
break;
}
...
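The rem0rec.c hunks replace the literal 4 with REC_NODE_PTR_SIZE where node-pointer fields are parsed and built. A small hedged sketch of the same idea follows: name the size, and let a compile-time check document the assumption the literal used to hide. The constant value (4 bytes for a child page number) and the big-endian read are assumptions for illustration, not quoted from the InnoDB sources.

#include <stdio.h>

#define REC_NODE_PTR_SIZE	4	/* assumed width of a child page number */

/* Compile-time check: the array size turns negative, and the build
fails, if the named constant stops matching the assumed width. */
typedef char rec_node_ptr_size_check[(REC_NODE_PTR_SIZE == 4) ? 1 : -1];

/* Read a node-pointer field as a big-endian child page number
(the byte order is an assumption made for this sketch). */
static unsigned long
node_ptr_read(const unsigned char* field)
{
	unsigned long	page_no = 0;
	int		i;

	for (i = 0; i < REC_NODE_PTR_SIZE; i++) {
		page_no = (page_no << 8) | field[i];
	}

	return(page_no);
}

int
main(void)
{
	unsigned char	field[REC_NODE_PTR_SIZE] = {0, 0, 0x12, 0x34};

	printf("child page number: %lu\n", node_ptr_read(field));
	return(0);
}

If the assumed width ever changed, the check would stop the build instead of letting the loop silently read the wrong number of bytes, which is the kind of drift a bare literal 4 cannot guard against.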