Commit 1e45a0cb authored by Ingo Struewing

Bug#17332 - changing key_buffer_size on a running server
            can crash under load

Merge from 5.0, after backport from 5.1/5.4 to 5.0.
This makes the fixes for
Bug 44068 (RESTORE can disable the MyISAM Key Cache)
Bug 40944 (Backup: crash after myisampack)
available to 5.1.
parents 7462aff8 d1a6c778
@@ -13,7 +13,8 @@
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-/*
+/**
+  @file
   These functions handle keyblock cacheing for ISAM and MyISAM tables.
   One cache can handle many files.
@@ -36,7 +37,9 @@
   blocks_unused is the sum of never used blocks in the pool and of currently
   free blocks. blocks_used is the number of blocks fetched from the pool and
   as such gives the maximum number of in-use blocks at any time.
+*/
+/*
   Key Cache Locking
   =================
@@ -760,6 +763,13 @@ void end_key_cache(KEY_CACHE *keycache, my_bool cleanup)
                         (ulong) keycache->global_cache_r_requests,
                         (ulong) keycache->global_cache_read));
+  /*
+    Reset these values to be able to detect a disabled key cache.
+    See Bug#44068 (RESTORE can disable the MyISAM Key Cache).
+  */
+  keycache->blocks_used= 0;
+  keycache->blocks_unused= 0;
   if (cleanup)
   {
     pthread_mutex_destroy(&keycache->cache_lock);
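The two added assignments are what make a disabled cache detectable afterwards: once end_key_cache() has run, both counters are zero, whereas an enabled but idle cache still reports its pool size in blocks_unused. A minimal standalone sketch of that distinction, using toy types rather than the real KEY_CACHE structure (the check function is hypothetical, not code from mf_keycache.c):

#include <stdio.h>

struct toy_key_cache
{
  unsigned long blocks_used;     /* max number of blocks ever taken from the pool */
  unsigned long blocks_unused;   /* never-used plus currently free blocks */
};

/* Hypothetical reader-side check modelled on the comment above. */
static int toy_cache_is_disabled(const struct toy_key_cache *kc)
{
  return kc->blocks_used == 0 && kc->blocks_unused == 0;
}

int main(void)
{
  struct toy_key_cache idle=  { 0, 1024 };  /* enabled, nothing cached yet */
  struct toy_key_cache ended= { 0, 0 };     /* end_key_cache() has run */
  printf("idle disabled: %d  ended disabled: %d\n",
         toy_cache_is_disabled(&idle), toy_cache_is_disabled(&ended));
  return 0;
}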
@@ -1343,7 +1353,11 @@ static void unreg_request(KEY_CACHE *keycache,
   DBUG_ASSERT(block->prev_changed && *block->prev_changed == block);
   DBUG_ASSERT(!block->next_used);
   DBUG_ASSERT(!block->prev_used);
-  if (! --block->requests)
+  /*
+    Unregister the request, but do not link erroneous blocks into the
+    LRU ring.
+  */
+  if (!--block->requests && !(block->status & BLOCK_ERROR))
   {
     my_bool hot;
     if (block->hits_left)
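The extra condition changes when a block re-enters the eviction ring: only the release of the last request on a block that is not flagged BLOCK_ERROR links it back for reuse; erroneous blocks are freed by their callers instead (see the key_cache_read/insert/write hunks below). A small standalone model of the new condition, with toy fields standing in for the keycache internals:

#include <stdio.h>

enum { TOY_BLOCK_ERROR= 1 };

struct toy_block
{
  unsigned requests;   /* outstanding users of this block */
  unsigned status;     /* may contain TOY_BLOCK_ERROR */
  int      in_lru;     /* 1 once the block is linked into the eviction ring */
};

/* Modelled on the changed unreg_request(): release one request, maybe re-link. */
static void toy_unreg_request(struct toy_block *b)
{
  if (!--b->requests && !(b->status & TOY_BLOCK_ERROR))
    b->in_lru= 1;      /* eligible for eviction again */
}

int main(void)
{
  struct toy_block ok=  { 1, 0, 0 };
  struct toy_block bad= { 1, TOY_BLOCK_ERROR, 0 };
  toy_unreg_request(&ok);
  toy_unreg_request(&bad);
  printf("ok in LRU: %d  bad in LRU: %d\n", ok.in_lru, bad.in_lru);
  return 0;
}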
@@ -1425,8 +1439,7 @@ static void wait_for_readers(KEY_CACHE *keycache,
 #ifdef THREAD
   struct st_my_thread_var *thread= my_thread_var;
   DBUG_ASSERT(block->status & (BLOCK_READ | BLOCK_IN_USE));
-  DBUG_ASSERT(!(block->status & (BLOCK_ERROR | BLOCK_IN_FLUSH |
-                                 BLOCK_CHANGED)));
+  DBUG_ASSERT(!(block->status & (BLOCK_IN_FLUSH | BLOCK_CHANGED)));
   DBUG_ASSERT(block->hash_link);
   DBUG_ASSERT(block->hash_link->block == block);
   /* Linked in file_blocks or changed_blocks hash. */
@@ -2210,9 +2223,9 @@ static BLOCK_LINK *find_key_block(KEY_CACHE *keycache,
           thread might change the block->hash_link value
         */
         error= my_pwrite(block->hash_link->file,
-                         block->buffer+block->offset,
+                         block->buffer + block->offset,
                          block->length - block->offset,
-                         block->hash_link->diskpos+ block->offset,
+                         block->hash_link->diskpos + block->offset,
                          MYF(MY_NABP | MY_WAIT_IF_FULL));
         keycache_pthread_mutex_lock(&keycache->cache_lock);
@@ -2536,7 +2549,6 @@ uchar *key_cache_read(KEY_CACHE *keycache,
   reg1 BLOCK_LINK *block;
   uint read_length;
   uint offset;
-  uint status;
   int page_st;
   /*
@@ -2571,9 +2583,11 @@ uchar *key_cache_read(KEY_CACHE *keycache,
     do
     {
       /* Cache could be disabled in a later iteration. */
       if (!keycache->can_be_used)
+      {
+        KEYCACHE_DBUG_PRINT("key_cache_read", ("keycache cannot be used"));
         goto no_key_cache;
+      }
       /* Start reading at the beginning of the cache block. */
       filepos-= offset;
       /* Do not read beyond the end of the cache block. */
@@ -2634,7 +2648,7 @@ uchar *key_cache_read(KEY_CACHE *keycache,
       }
       /* block status may have added BLOCK_ERROR in the above 'if'. */
-      if (!((status= block->status) & BLOCK_ERROR))
+      if (!(block->status & BLOCK_ERROR))
       {
 #ifndef THREAD
         if (! return_buffer)
@@ -2660,14 +2674,22 @@ uchar *key_cache_read(KEY_CACHE *keycache,
       remove_reader(block);
+      /* Error injection for coverage testing. */
+      DBUG_EXECUTE_IF("key_cache_read_block_error",
+                      block->status|= BLOCK_ERROR;);
+      /* Do not link erroneous blocks into the LRU ring, but free them. */
+      if (!(block->status & BLOCK_ERROR))
+      {
       /*
         Link the block into the LRU ring if it's the last submitted
         request for the block. This enables eviction for the block.
       */
       unreg_request(keycache, block, 1);
+      }
-      if (status & BLOCK_ERROR)
+      else
       {
+        free_block(keycache, block);
         error= 1;
         break;
       }
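DBUG_EXECUTE_IF() runs its action only in debug builds and only while the named keyword has been switched on (for example via the server's debug variable or DBUG_PUSH()), which is how the new free_block() branch can be exercised in tests. A toy imitation of that mechanism, not MySQL's dbug library, just to show the control flow the injection point creates:

#include <stdio.h>
#include <string.h>

static const char *toy_active_keyword= "";       /* set by a test driver */

/* Toy stand-in for DBUG_EXECUTE_IF(): run the action only when enabled. */
#define TOY_EXECUTE_IF(keyword, action) \
  do { if (!strcmp(toy_active_keyword, (keyword))) { action } } while (0)

enum { TOY_BLOCK_ERROR= 1 };

int main(void)
{
  unsigned status= 0;

  /* A test would enable the keyword; a normal run leaves it empty. */
  toy_active_keyword= "key_cache_read_block_error";
  TOY_EXECUTE_IF("key_cache_read_block_error", status|= TOY_BLOCK_ERROR;);

  /* Same decision as in the hunk above: bad blocks are freed, good ones re-linked. */
  puts(status & TOY_BLOCK_ERROR ? "free_block() path" : "unreg_request() path");
  return 0;
}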
@@ -2685,6 +2707,7 @@ uchar *key_cache_read(KEY_CACHE *keycache,
     } while ((length-= read_length));
     goto end;
   }
+  KEYCACHE_DBUG_PRINT("key_cache_read", ("keycache not initialized"));
 no_key_cache:
   /* Key cache is not used */
@@ -2705,6 +2728,7 @@ uchar *key_cache_read(KEY_CACHE *keycache,
     dec_counter_for_resize_op(keycache);
     keycache_pthread_mutex_unlock(&keycache->cache_lock);
   }
+  DBUG_PRINT("exit", ("error: %d", error ));
   DBUG_RETURN(error ? (uchar*) 0 : start);
 }
@@ -2913,19 +2937,27 @@ int key_cache_insert(KEY_CACHE *keycache,
           DBUG_ASSERT(block->status & (BLOCK_READ | BLOCK_IN_USE));
         } /* end of if (!(block->status & BLOCK_ERROR)) */
       remove_reader(block);
+      /* Error injection for coverage testing. */
+      DBUG_EXECUTE_IF("key_cache_insert_block_error",
+                      block->status|= BLOCK_ERROR; errno=EIO;);
+      /* Do not link erroneous blocks into the LRU ring, but free them. */
+      if (!(block->status & BLOCK_ERROR))
+      {
       /*
         Link the block into the LRU ring if it's the last submitted
         request for the block. This enables eviction for the block.
       */
       unreg_request(keycache, block, 1);
+      }
-      error= (block->status & BLOCK_ERROR);
-      if (error)
+      else
+      {
+        free_block(keycache, block);
+        error= 1;
         break;
+      }
       buff+= read_length;
       filepos+= read_length+offset;
@@ -3206,14 +3238,24 @@ int key_cache_write(KEY_CACHE *keycache,
       */
       remove_reader(block);
+      /* Error injection for coverage testing. */
+      DBUG_EXECUTE_IF("key_cache_write_block_error",
+                      block->status|= BLOCK_ERROR;);
+      /* Do not link erroneous blocks into the LRU ring, but free them. */
+      if (!(block->status & BLOCK_ERROR))
+      {
       /*
         Link the block into the LRU ring if it's the last submitted
         request for the block. This enables eviction for the block.
       */
       unreg_request(keycache, block, 1);
+      }
-      if (block->status & BLOCK_ERROR)
+      else
       {
+        /* Pretend a "clean" block to avoid complications. */
+        block->status&= ~(BLOCK_CHANGED);
+        free_block(keycache, block);
         error= 1;
         break;
       }
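Clearing BLOCK_CHANGED before the free matters because, as a later hunk notes, free_block() must not be called with BLOCK_CHANGED set; after a failed write the block's contents cannot be trusted anyway, and the failure is still reported through error. A standalone sketch of that "pretend clean, then free" step, with toy flags in place of the real status bits:

#include <assert.h>

enum { TOY_CHANGED= 1, TOY_ERROR= 2 };

struct toy_block { unsigned status; };

/* Modelled on the rule that the free routine must not see a dirty block. */
static void toy_free_block(struct toy_block *b)
{
  assert(!(b->status & TOY_CHANGED));
  b->status= 0;                       /* block returns to the unused pool */
}

int main(void)
{
  struct toy_block b= { TOY_CHANGED | TOY_ERROR };  /* dirty block, write failed */
  b.status&= ~TOY_CHANGED;   /* pretend a "clean" block, as in the hunk above */
  toy_free_block(&b);
  return 0;
}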
@@ -3288,8 +3330,9 @@ static void free_block(KEY_CACHE *keycache, BLOCK_LINK *block)
 {
   KEYCACHE_THREAD_TRACE("free block");
   KEYCACHE_DBUG_PRINT("free_block",
-                      ("block %u to be freed, hash_link %p",
-                       BLOCK_NUMBER(block), block->hash_link));
+                      ("block %u to be freed, hash_link %p status: %u",
+                       BLOCK_NUMBER(block), block->hash_link,
+                       block->status));
   /*
     Assert that the block is not free already. And that it is in a clean
     state. Note that the block might just be assigned to a hash_link and
@@ -3371,10 +3414,14 @@ static void free_block(KEY_CACHE *keycache, BLOCK_LINK *block)
   if (block->status & BLOCK_IN_EVICTION)
     return;
+  /* Error blocks are not put into the LRU ring. */
+  if (!(block->status & BLOCK_ERROR))
+  {
   /* Here the block must be in the LRU ring. Unlink it again. */
   DBUG_ASSERT(block->next_used && block->prev_used &&
               *block->prev_used == block);
   unlink_block(keycache, block);
+  }
   if (block->temperature == BLOCK_WARM)
     keycache->warm_blocks--;
   block->temperature= BLOCK_COLD;
@@ -3463,8 +3510,7 @@ static int flush_cached_blocks(KEY_CACHE *keycache,
             (BLOCK_READ | BLOCK_IN_FLUSH | BLOCK_CHANGED | BLOCK_IN_USE));
     block->status|= BLOCK_IN_FLUSHWRITE;
     keycache_pthread_mutex_unlock(&keycache->cache_lock);
-    error= my_pwrite(file,
-                     block->buffer+block->offset,
+    error= my_pwrite(file, block->buffer+block->offset,
                      block->length - block->offset,
                      block->hash_link->diskpos+ block->offset,
                      MYF(MY_NABP | MY_WAIT_IF_FULL));
@@ -3491,7 +3537,6 @@ static int flush_cached_blocks(KEY_CACHE *keycache,
         right queue anyway.
       */
       link_to_file_list(keycache, block, file, 1);
-
     }
     block->status&= ~BLOCK_IN_FLUSH;
     /*
@@ -3527,7 +3572,7 @@ static int flush_cached_blocks(KEY_CACHE *keycache,
 /*
-  flush all key blocks for a file to disk, but don't do any mutex locks.
+  Flush all key blocks for a file to disk, but don't do any mutex locks.
   SYNOPSIS
     flush_key_blocks_int()
@@ -3692,7 +3737,6 @@ static int flush_key_blocks_int(KEY_CACHE *keycache,
       {
         /* It's a temporary file */
         DBUG_ASSERT(!(block->status & BLOCK_REASSIGNED));
-
         /*
           free_block() must not be called with BLOCK_CHANGED. Note
           that we must not change the BLOCK_CHANGED flag outside of
@@ -4403,8 +4447,8 @@ static void keycache_debug_print(const char * fmt,...)
   va_start(args,fmt);
   if (keycache_debug_log)
   {
-    VOID(vfprintf(keycache_debug_log, fmt, args));
-    VOID(fputc('\n',keycache_debug_log));
+    (void) vfprintf(keycache_debug_log, fmt, args);
+    (void) fputc('\n',keycache_debug_log);
   }
   va_end(args);
 }