Commit 19420d46 authored by marko

branches/zip: Merge revisions 968:1009 from trunk.

parent ac05c89b
......@@ -201,6 +201,9 @@ that the whole area may be needed in the near future, and issue
the read requests for the whole area.
*/
/* Value in microseconds */
static const int WAIT_FOR_READ = 20000;
buf_pool_t* buf_pool = NULL; /* The buffer pool of the database */
#ifdef UNIV_DEBUG
......@@ -613,6 +616,8 @@ buf_block_init(
#endif /* UNIV_DEBUG */
page_zip_des_init(&block->page_zip);
mutex_create(&block->mutex, SYNC_BUF_BLOCK);
rw_lock_create(&block->lock, SYNC_LEVEL_VARYING);
ut_ad(rw_lock_validate(&(block->lock)));
......@@ -758,13 +763,22 @@ buf_block_make_young(
/*=================*/
buf_block_t* block) /* in: block to make younger */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(!mutex_own(&(buf_pool->mutex)));
#endif /* UNIV_SYNC_DEBUG */
/* Note that we read freed_page_clock without holding any mutex:
this is allowed since the result is used only in heuristics */
if (buf_pool->freed_page_clock >= block->freed_page_clock
+ 1 + (buf_pool->curr_size / 1024)) {
+ 1 + (buf_pool->curr_size / 4)) {
mutex_enter(&buf_pool->mutex);
/* There has been freeing activity in the LRU list:
best to move to the head of the LRU list */
buf_LRU_make_block_young(block);
mutex_exit(&buf_pool->mutex);
}
}
......@@ -981,9 +995,8 @@ buf_page_get_gen(
#endif
buf_pool->n_page_gets++;
loop:
mutex_enter_fast(&(buf_pool->mutex));
block = NULL;
mutex_enter_fast(&(buf_pool->mutex));
if (guess) {
block = guess;
......@@ -1021,6 +1034,8 @@ loop:
goto loop;
}
mutex_enter(&block->mutex);
ut_a(block->state == BUF_BLOCK_FILE_PAGE);
must_read = FALSE;
......@@ -1030,9 +1045,9 @@ loop:
must_read = TRUE;
if (mode == BUF_GET_IF_IN_POOL) {
/* The page is only being read to buffer */
mutex_exit(&(buf_pool->mutex));
mutex_exit(&buf_pool->mutex);
mutex_exit(&block->mutex);
return(NULL);
}
......@@ -1043,7 +1058,7 @@ loop:
#else
buf_block_buf_fix_inc(block);
#endif
buf_block_make_young(block);
mutex_exit(&buf_pool->mutex);
/* Check if this is the first access to the page */
......@@ -1051,10 +1066,13 @@ loop:
block->accessed = TRUE;
mutex_exit(&block->mutex);
buf_block_make_young(block);
#ifdef UNIV_DEBUG_FILE_ACCESSES
ut_a(block->file_page_was_freed == FALSE);
#endif
mutex_exit(&(buf_pool->mutex));
#ifdef UNIV_DEBUG
buf_dbg_counter++;
......@@ -1079,13 +1097,14 @@ loop:
}
if (!success) {
mutex_enter(&(buf_pool->mutex));
mutex_enter(&block->mutex);
block->buf_fix_count--;
mutex_exit(&block->mutex);
#ifdef UNIV_SYNC_DEBUG
rw_lock_s_unlock(&(block->debug_latch));
#endif
mutex_exit(&(buf_pool->mutex));
return(NULL);
}
......@@ -1096,18 +1115,16 @@ loop:
completes */
for (;;) {
mutex_enter(&(buf_pool->mutex));
mutex_enter(&block->mutex);
if (block->io_fix == BUF_IO_READ) {
mutex_exit(&(buf_pool->mutex));
mutex_exit(&block->mutex);
/* Sleep 20 milliseconds */
os_thread_sleep(20000);
os_thread_sleep(WAIT_FOR_READ);
} else {
mutex_exit(&(buf_pool->mutex));
mutex_exit(&block->mutex);
break;
}
......@@ -1165,11 +1182,11 @@ buf_page_optimistic_get_func(
ut_ad(mtr && block);
ut_ad((rw_latch == RW_S_LATCH) || (rw_latch == RW_X_LATCH));
mutex_enter(&(buf_pool->mutex));
mutex_enter(&block->mutex);
if (UNIV_UNLIKELY(block->state != BUF_BLOCK_FILE_PAGE)) {
exit_func:
mutex_exit(&(buf_pool->mutex));
mutex_exit(&block->mutex);
return(FALSE);
}
......@@ -1179,15 +1196,14 @@ exit_func:
#else
buf_block_buf_fix_inc(block);
#endif
buf_block_make_young(block);
/* Check if this is the first access to the page */
accessed = block->accessed;
block->accessed = TRUE;
mutex_exit(&(buf_pool->mutex));
mutex_exit(&block->mutex);
buf_block_make_young(block);
/* Check if this is the first access to the page */
ut_ad(!ibuf_inside()
|| ibuf_page(block->space, buf_block_get_zip_size(block),
......@@ -1204,13 +1220,16 @@ exit_func:
}
if (UNIV_UNLIKELY(!success)) {
mutex_enter(&(buf_pool->mutex));
mutex_enter(&block->mutex);
block->buf_fix_count--;
mutex_exit(&block->mutex);
#ifdef UNIV_SYNC_DEBUG
rw_lock_s_unlock(&(block->debug_latch));
#endif
goto exit_func;
return(FALSE);
}
if (UNIV_UNLIKELY(!UT_DULINT_EQ(modify_clock, block->modify_clock))) {
......@@ -1223,13 +1242,16 @@ exit_func:
rw_lock_x_unlock(&(block->lock));
}
mutex_enter(&(buf_pool->mutex));
mutex_enter(&block->mutex);
block->buf_fix_count--;
mutex_exit(&block->mutex);
#ifdef UNIV_SYNC_DEBUG
rw_lock_s_unlock(&(block->debug_latch));
#endif
goto exit_func;
return(FALSE);
}
mtr_memo_push(mtr, block, fix_type);
......@@ -1286,7 +1308,7 @@ buf_page_get_known_nowait(
ut_ad(mtr);
ut_ad((rw_latch == RW_S_LATCH) || (rw_latch == RW_X_LATCH));
mutex_enter(&(buf_pool->mutex));
mutex_enter(&block->mutex);
if (block->state == BUF_BLOCK_REMOVE_HASH) {
/* Another thread is just freeing the block from the LRU list
......@@ -1296,7 +1318,7 @@ buf_page_get_known_nowait(
we have already removed it from the page address hash table
of the buffer pool. */
mutex_exit(&(buf_pool->mutex));
mutex_exit(&block->mutex);
return(FALSE);
}
......@@ -1308,12 +1330,12 @@ buf_page_get_known_nowait(
#else
buf_block_buf_fix_inc(block);
#endif
mutex_exit(&block->mutex);
if (mode == BUF_MAKE_YOUNG) {
buf_block_make_young(block);
}
mutex_exit(&(buf_pool->mutex));
ut_ad(!ibuf_inside() || (mode == BUF_KEEP_OLD));
if (rw_latch == RW_S_LATCH) {
......@@ -1327,13 +1349,15 @@ buf_page_get_known_nowait(
}
if (!success) {
mutex_enter(&(buf_pool->mutex));
mutex_enter(&block->mutex);
block->buf_fix_count--;
mutex_exit(&block->mutex);
#ifdef UNIV_SYNC_DEBUG
rw_lock_s_unlock(&(block->debug_latch));
#endif
mutex_exit(&(buf_pool->mutex));
return(FALSE);
}
......@@ -1384,7 +1408,6 @@ buf_page_init_for_backup_restore(
block->offset = offset;
block->lock_hash_val = 0;
block->lock_mutex = NULL;
block->freed_page_clock = 0;
......@@ -1424,6 +1447,7 @@ buf_page_init(
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
ut_ad(mutex_own(&(block->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_a(block->state != BUF_BLOCK_FILE_PAGE);
......@@ -1438,7 +1462,6 @@ buf_page_init(
block->index = NULL;
block->lock_hash_val = lock_rec_hash(space, offset);
block->lock_mutex = NULL;
/* Insert into the hash table of file pages */
......@@ -1533,6 +1556,7 @@ buf_page_init_for_read(
ut_a(block);
mutex_enter(&(buf_pool->mutex));
mutex_enter(&block->mutex);
if (fil_tablespace_deleted_or_being_deleted_in_mem(
space, tablespace_version)) {
......@@ -1546,7 +1570,9 @@ buf_page_init_for_read(
deleted or is being deleted, or the page is
already in buf_pool, return */
mutex_exit(&block->mutex);
mutex_exit(&(buf_pool->mutex));
buf_block_free(block);
if (mode == BUF_READ_IBUF_PAGES_ONLY) {
......@@ -1566,6 +1592,7 @@ buf_page_init_for_read(
buf_LRU_add_block(block, TRUE); /* TRUE == to old blocks */
block->io_fix = BUF_IO_READ;
buf_pool->n_pend_reads++;
/* We set a pass-type x-lock on the frame because then the same
......@@ -1577,6 +1604,7 @@ buf_page_init_for_read(
rw_lock_x_lock_gen(&(block->lock), BUF_IO_READ);
mutex_exit(&block->mutex);
mutex_exit(&(buf_pool->mutex));
if (mode == BUF_READ_IBUF_PAGES_ONLY) {
......@@ -1641,6 +1669,8 @@ buf_page_create(
block = free_block;
mutex_enter(&block->mutex);
buf_page_init(space, offset, block);
/* The block must be put to the LRU list */
......@@ -1651,13 +1681,15 @@ buf_page_create(
#else
buf_block_buf_fix_inc(block);
#endif
buf_pool->n_pages_created++;
mutex_exit(&(buf_pool->mutex));
mtr_memo_push(mtr, block, MTR_MEMO_BUF_FIX);
block->accessed = TRUE;
buf_pool->n_pages_created++;
mutex_exit(&(buf_pool->mutex));
mutex_exit(&block->mutex);
/* Delete possible entries for the page from the insert buffer:
such can exist if the page belonged to an index which was dropped */
......@@ -1709,6 +1741,12 @@ buf_page_io_complete(
ut_a(block->state == BUF_BLOCK_FILE_PAGE);
/* We do not need to protect block->io_fix here with block->mutex when
reading it, because this is the only function where we can change the value
from BUF_IO_READ or BUF_IO_WRITE to some other value, and our code
ensures that this is the only thread that handles the i/o for this
block. */
io_type = block->io_fix;
if (io_type == BUF_IO_READ) {
......@@ -1848,11 +1886,12 @@ corrupt:
}
}
mutex_enter(&(buf_pool->mutex));
mutex_enter(&block->mutex);
#ifdef UNIV_IBUF_DEBUG
ut_a(ibuf_count_get(block->space, block->offset) == 0);
#endif
mutex_enter(&(buf_pool->mutex));
/* Because this thread which does the unlocking is not the same that
did the locking, we use a pass value != 0 in unlock, which simply
removes the newest lock debug record, without checking the thread
......@@ -1895,6 +1934,7 @@ corrupt:
#endif /* UNIV_DEBUG */
}
mutex_exit(&block->mutex);
mutex_exit(&(buf_pool->mutex));
#ifdef UNIV_DEBUG
......@@ -1957,6 +1997,8 @@ buf_validate(void)
block = buf_pool_get_nth_block(buf_pool, i);
mutex_enter(&block->mutex);
if (block->state == BUF_BLOCK_FILE_PAGE) {
ut_a(buf_page_hash_get(block->space,
......@@ -2001,6 +2043,8 @@ buf_validate(void)
} else if (block->state == BUF_BLOCK_NOT_USED) {
n_free++;
}
mutex_exit(&block->mutex);
}
if (n_lru + n_free > buf_pool->curr_size) {
......@@ -2150,9 +2194,14 @@ buf_get_latched_pages_number(void)
block = buf_pool_get_nth_block(buf_pool, i);
if (((block->buf_fix_count != 0) || (block->io_fix != 0))
&& block->magic_n == BUF_BLOCK_MAGIC_N) {
fixed_pages_number++;
if (block->magic_n == BUF_BLOCK_MAGIC_N) {
mutex_enter(&block->mutex);
if (block->buf_fix_count != 0 || block->io_fix != 0) {
fixed_pages_number++;
}
mutex_exit(&block->mutex);
}
}
......@@ -2302,6 +2351,8 @@ buf_all_freed(void)
block = buf_pool_get_nth_block(buf_pool, i);
mutex_enter(&block->mutex);
if (block->state == BUF_BLOCK_FILE_PAGE) {
if (!buf_flush_ready_for_replace(block)) {
......@@ -2313,6 +2364,8 @@ buf_all_freed(void)
ut_error;
}
}
mutex_exit(&block->mutex);
}
mutex_exit(&(buf_pool->mutex));
......
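The buf0buf.c hunks above follow one recurring pattern: buffer-fix the block under block->mutex, drop the buffer pool mutex early, then poll io_fix with WAIT_FOR_READ sleeps until a pending read finishes. A minimal stand-alone sketch of that pattern, using pthreads and hypothetical pool_t/block_t stand-ins rather than the real InnoDB types:

/* Build with: cc -pthread sketch_wait_for_read.c */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define WAIT_FOR_READ 20000             /* microseconds, as in the patch */

enum { IO_NONE = 0, IO_READ = 1 };

typedef struct {
        pthread_mutex_t mutex;          /* pool mutex: protects LRU/hash */
} pool_t;

typedef struct {
        pthread_mutex_t mutex;          /* block mutex: io_fix, buf_fix_count */
        int             io_fix;
        int             buf_fix_count;
} block_t;

static pool_t  pool  = { PTHREAD_MUTEX_INITIALIZER };
static block_t block = { PTHREAD_MUTEX_INITIALIZER, IO_READ, 0 };

/* I/O thread: completes the pending read and clears io_fix. Only this
thread ever changes io_fix away from IO_READ. */
static void* io_thread(void* arg)
{
        (void) arg;
        usleep(100000);                 /* pretend the read takes 100 ms */
        pthread_mutex_lock(&block.mutex);
        block.io_fix = IO_NONE;
        pthread_mutex_unlock(&block.mutex);
        return NULL;
}

int main(void)
{
        pthread_t t;
        pthread_create(&t, NULL, io_thread, NULL);

        /* Reader: buffer-fix under block->mutex, release the pool mutex
        early, then wait for the read to complete. */
        pthread_mutex_lock(&pool.mutex);        /* look the block up */
        pthread_mutex_lock(&block.mutex);
        block.buf_fix_count++;
        pthread_mutex_unlock(&block.mutex);
        pthread_mutex_unlock(&pool.mutex);      /* pool mutex released early */

        for (;;) {
                int pending;
                pthread_mutex_lock(&block.mutex);
                pending = (block.io_fix == IO_READ);
                pthread_mutex_unlock(&block.mutex);
                if (!pending) {
                        break;
                }
                usleep(WAIT_FOR_READ);
        }

        printf("read completed, buf_fix_count=%d\n", block.buf_fix_count);
        pthread_join(t, NULL);
        return 0;
}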
......@@ -114,6 +114,7 @@ buf_flush_ready_for_replace(
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
ut_ad(mutex_own(&block->mutex));
#endif /* UNIV_SYNC_DEBUG */
if (block->state != BUF_BLOCK_FILE_PAGE) {
ut_print_timestamp(stderr);
......@@ -149,6 +150,7 @@ buf_flush_ready_for_flush(
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
ut_ad(mutex_own(&(block->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_a(block->state == BUF_BLOCK_FILE_PAGE);
......@@ -635,8 +637,15 @@ buf_flush_try_page(
ut_a(!block || block->state == BUF_BLOCK_FILE_PAGE);
if (!block) {
mutex_exit(&(buf_pool->mutex));
return(0);
}
mutex_enter(&block->mutex);
if (flush_type == BUF_FLUSH_LIST
&& block && buf_flush_ready_for_flush(block, flush_type)) {
&& buf_flush_ready_for_flush(block, flush_type)) {
block->io_fix = BUF_IO_WRITE;
......@@ -661,6 +670,7 @@ buf_flush_try_page(
locked = TRUE;
}
mutex_exit(&block->mutex);
mutex_exit(&(buf_pool->mutex));
if (!locked) {
......@@ -681,7 +691,7 @@ buf_flush_try_page(
return(1);
} else if (flush_type == BUF_FLUSH_LRU && block
} else if (flush_type == BUF_FLUSH_LRU
&& buf_flush_ready_for_flush(block, flush_type)) {
/* VERY IMPORTANT:
......@@ -709,13 +719,14 @@ buf_flush_try_page(
buf_pool mutex: this ensures that the latch is acquired
immediately. */
mutex_exit(&block->mutex);
mutex_exit(&(buf_pool->mutex));
buf_flush_write_block_low(block);
return(1);
} else if (flush_type == BUF_FLUSH_SINGLE_PAGE && block
} else if (flush_type == BUF_FLUSH_SINGLE_PAGE
&& buf_flush_ready_for_flush(block, flush_type)) {
block->io_fix = BUF_IO_WRITE;
......@@ -729,6 +740,7 @@ buf_flush_try_page(
buf_pool->n_flush[flush_type]++;
mutex_exit(&block->mutex);
mutex_exit(&(buf_pool->mutex));
rw_lock_s_lock_gen(&(block->lock), BUF_IO_WRITE);
......@@ -746,11 +758,12 @@ buf_flush_try_page(
buf_flush_write_block_low(block);
return(1);
} else {
mutex_exit(&(buf_pool->mutex));
return(0);
}
mutex_exit(&block->mutex);
mutex_exit(&(buf_pool->mutex));
return(0);
}
/***************************************************************
......@@ -795,34 +808,48 @@ buf_flush_try_neighbors(
block = buf_page_hash_get(space, i);
ut_a(!block || block->state == BUF_BLOCK_FILE_PAGE);
if (block && flush_type == BUF_FLUSH_LRU && i != offset
&& !block->old) {
if (!block) {
continue;
} else if (flush_type == BUF_FLUSH_LRU && i != offset
&& !block->old) {
/* We avoid flushing 'non-old' blocks in an LRU flush,
because the flushed blocks are soon freed */
continue;
}
} else {
if (block && buf_flush_ready_for_flush(block, flush_type)
&& (i == offset || block->buf_fix_count == 0)) {
/* We only try to flush those neighbors != offset
where the buf fix count is zero, as we then know that
we probably can latch the page without a semaphore
wait. Semaphore waits are expensive because we must
flush the doublewrite buffer before we start
waiting. */
mutex_enter(&block->mutex);
mutex_exit(&(buf_pool->mutex));
if (buf_flush_ready_for_flush(block, flush_type)
&& (i == offset || block->buf_fix_count == 0)) {
/* We only try to flush those
neighbors != offset where the buf fix count is
zero, as we then know that we probably can
latch the page without a semaphore wait.
Semaphore waits are expensive because we must
flush the doublewrite buffer before we start
waiting. */
/* Note: as we release the buf_pool mutex above, in
buf_flush_try_page we cannot be sure the page is still
in a flushable state: therefore we check it again
inside that function. */
mutex_exit(&block->mutex);
count += buf_flush_try_page(space, i, flush_type);
mutex_exit(&(buf_pool->mutex));
mutex_enter(&(buf_pool->mutex));
/* Note: as we release the buf_pool mutex
above, in buf_flush_try_page we cannot be sure
the page is still in a flushable state:
therefore we check it again inside that
function. */
count += buf_flush_try_page(space, i,
flush_type);
mutex_enter(&(buf_pool->mutex));
} else {
mutex_exit(&block->mutex);
}
}
}
......@@ -918,12 +945,15 @@ buf_flush_batch(
while ((block != NULL) && !found) {
ut_a(block->state == BUF_BLOCK_FILE_PAGE);
mutex_enter(&block->mutex);
if (buf_flush_ready_for_flush(block, flush_type)) {
found = TRUE;
space = block->space;
offset = block->offset;
mutex_exit(&block->mutex);
mutex_exit(&(buf_pool->mutex));
old_page_count = page_count;
......@@ -940,10 +970,14 @@ buf_flush_batch(
} else if (flush_type == BUF_FLUSH_LRU) {
mutex_exit(&block->mutex);
block = UT_LIST_GET_PREV(LRU, block);
} else {
ut_ad(flush_type == BUF_FLUSH_LIST);
mutex_exit(&block->mutex);
block = UT_LIST_GET_PREV(flush_list, block);
}
}
......@@ -1026,10 +1060,14 @@ buf_flush_LRU_recommendation(void)
+ BUF_FLUSH_EXTRA_MARGIN)
&& (distance < BUF_LRU_FREE_SEARCH_LEN)) {
mutex_enter(&block->mutex);
if (buf_flush_ready_for_replace(block)) {
n_replaceable++;
}
mutex_exit(&block->mutex);
distance++;
block = UT_LIST_GET_PREV(LRU, block);
......
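The neighbor-flush comments above explain why readiness is checked under block->mutex, the mutexes are released, and the state is re-checked inside buf_flush_try_page(). A hedged sketch of that check / release / re-check pattern with pthreads and made-up names (ready_for_flush, try_flush, try_neighbor), not the actual InnoDB functions:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
        pthread_mutex_t mutex;
        bool            dirty;
        int             buf_fix_count;
} block_t;

static pthread_mutex_t pool_mutex = PTHREAD_MUTEX_INITIALIZER;

static bool ready_for_flush(block_t* b)         /* caller holds b->mutex */
{
        return b->dirty && b->buf_fix_count == 0;
}

/* Analogue of buf_flush_try_page(): re-checks under the locks before
doing the write, because the caller released them. */
static int try_flush(block_t* b)
{
        int flushed = 0;
        pthread_mutex_lock(&pool_mutex);
        pthread_mutex_lock(&b->mutex);
        if (ready_for_flush(b)) {
                b->dirty = false;       /* stand-in for the actual write */
                flushed = 1;
        }
        pthread_mutex_unlock(&b->mutex);
        pthread_mutex_unlock(&pool_mutex);
        return flushed;
}

/* Analogue of buf_flush_try_neighbors(): a quick check under the block
mutex decides whether the slow path is worth attempting at all. */
static int try_neighbor(block_t* b)
{
        bool worth_trying;

        pthread_mutex_lock(&pool_mutex);
        pthread_mutex_lock(&b->mutex);
        worth_trying = ready_for_flush(b);
        pthread_mutex_unlock(&b->mutex);
        pthread_mutex_unlock(&pool_mutex);

        if (!worth_trying) {
                return 0;
        }
        /* Both mutexes were released above, so try_flush() must check
        again: the block may have been fixed or flushed in the gap. */
        return try_flush(b);
}

int main(void)
{
        block_t b = { PTHREAD_MUTEX_INITIALIZER, true, 0 };
        printf("flushed %d page(s)\n", try_neighbor(&b));
        return 0;
}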
......@@ -86,6 +86,11 @@ scan_again:
block = UT_LIST_GET_LAST(buf_pool->LRU);
while (block != NULL) {
buf_block_t* prev_block;
mutex_enter(&block->mutex);
prev_block = UT_LIST_GET_PREV(LRU, block);
ut_a(block->state == BUF_BLOCK_FILE_PAGE);
if (block->space == id
......@@ -112,6 +117,8 @@ scan_again:
if (block->is_hashed) {
page_no = block->offset;
mutex_exit(&block->mutex);
mutex_exit(&(buf_pool->mutex));
/* Note that the following call will acquire
......@@ -138,7 +145,8 @@ scan_again:
buf_LRU_block_free_hashed_page(block);
}
next_page:
block = UT_LIST_GET_PREV(LRU, block);
mutex_exit(&block->mutex);
block = prev_block;
}
mutex_exit(&(buf_pool->mutex));
......@@ -211,6 +219,9 @@ buf_LRU_search_and_free_block(
while (block != NULL) {
ut_a(block->in_LRU_list);
mutex_enter(&block->mutex);
if (buf_flush_ready_for_replace(block)) {
#ifdef UNIV_DEBUG
......@@ -226,20 +237,27 @@ buf_LRU_search_and_free_block(
buf_LRU_block_remove_hashed_page(block);
mutex_exit(&(buf_pool->mutex));
mutex_exit(&block->mutex);
/* Remove possible adaptive hash index on the page */
btr_search_drop_page_hash_index(block);
mutex_enter(&(buf_pool->mutex));
ut_a(block->buf_fix_count == 0);
mutex_enter(&(buf_pool->mutex));
mutex_enter(&block->mutex);
buf_LRU_block_free_hashed_page(block);
freed = TRUE;
mutex_exit(&block->mutex);
break;
}
mutex_exit(&block->mutex);
block = UT_LIST_GET_PREV(LRU, block);
distance++;
......@@ -428,9 +446,13 @@ loop:
}
}
mutex_enter(&block->mutex);
block->state = BUF_BLOCK_READY_FOR_USE;
UNIV_MEM_VALID(block->frame, UNIV_PAGE_SIZE);
mutex_exit(&block->mutex);
mutex_exit(&(buf_pool->mutex));
if (started_monitor) {
......@@ -816,6 +838,7 @@ buf_LRU_block_free_non_file_page(
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
ut_ad(mutex_own(&block->mutex));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(block);
......@@ -852,6 +875,7 @@ buf_LRU_block_remove_hashed_page(
const buf_block_t* hashed_block;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
ut_ad(mutex_own(&block->mutex));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(block);
......@@ -911,6 +935,7 @@ buf_LRU_block_free_hashed_page(
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
ut_ad(mutex_own(&block->mutex));
#endif /* UNIV_SYNC_DEBUG */
ut_a(block->state == BUF_BLOCK_REMOVE_HASH);
......
......@@ -4582,29 +4582,47 @@ fil_flush_file_spaces(
{
fil_system_t* system = fil_system;
fil_space_t* space;
ulint* space_ids;
ulint n_space_ids;
ulint i;
mutex_enter(&(system->mutex));
space = UT_LIST_GET_FIRST(system->unflushed_spaces);
n_space_ids = UT_LIST_GET_LEN(system->unflushed_spaces);
if (n_space_ids == 0) {
while (space) {
if (space->purpose == purpose && !space->is_being_deleted) {
mutex_exit(&system->mutex);
return;
}
space->n_pending_flushes++; /* prevent dropping of
the space while we are
flushing */
mutex_exit(&(system->mutex));
/* Assemble a list of space ids to flush. Previously, we
traversed system->unflushed_spaces and called UT_LIST_GET_NEXT()
on a space that was just removed from the list by fil_flush().
Thus, the space could be dropped and the memory overwritten. */
space_ids = mem_alloc(n_space_ids * sizeof *space_ids);
fil_flush(space->id);
n_space_ids = 0;
mutex_enter(&(system->mutex));
for (space = UT_LIST_GET_FIRST(system->unflushed_spaces);
space;
space = UT_LIST_GET_NEXT(unflushed_spaces, space)) {
space->n_pending_flushes--;
if (space->purpose == purpose && !space->is_being_deleted) {
space_ids[n_space_ids++] = space->id;
}
space = UT_LIST_GET_NEXT(unflushed_spaces, space);
}
mutex_exit(&(system->mutex));
mutex_exit(&system->mutex);
/* Flush the spaces. It will not hurt to call fil_flush() on
a non-existing space id. */
for (i = 0; i < n_space_ids; i++) {
fil_flush(space_ids[i]);
}
mem_free(space_ids);
}
/**********************************************************************
......
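The comment in fil_flush_file_spaces() explains why the space ids are copied into an array under the mutex and flushed afterwards: nodes of unflushed_spaces can be freed while the mutex is released. A small self-contained sketch of that snapshot-then-flush pattern, with hypothetical space_t and flush_space() stand-ins for the real fil_space_t and fil_flush():

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct space_struct {
        unsigned long           id;
        int                     being_deleted;
        struct space_struct*    next;
} space_t;

static pthread_mutex_t  mutex = PTHREAD_MUTEX_INITIALIZER;
static space_t*         unflushed;      /* list protected by mutex */

static void flush_space(unsigned long id)
{
        /* stand-in for fil_flush(); harmless if the id no longer exists */
        printf("flush space %lu\n", id);
}

static void flush_all(void)
{
        unsigned long*  ids;
        unsigned long   n = 0, i;
        space_t*        s;

        /* Copy the ids while holding the mutex; do NOT keep pointers into
        the list, because a concurrent flush/drop may free the nodes. */
        pthread_mutex_lock(&mutex);
        for (s = unflushed; s; s = s->next) {
                n++;
        }
        ids = malloc(n * sizeof *ids);
        if (ids == NULL) {
                pthread_mutex_unlock(&mutex);
                return;
        }
        n = 0;
        for (s = unflushed; s; s = s->next) {
                if (!s->being_deleted) {
                        ids[n++] = s->id;
                }
        }
        pthread_mutex_unlock(&mutex);

        /* Flush outside the mutex; a stale id is harmless. */
        for (i = 0; i < n; i++) {
                flush_space(ids[i]);
        }
        free(ids);
}

int main(void)
{
        space_t a = { 1, 0, NULL };
        space_t b = { 2, 0, &a };
        unflushed = &b;
        flush_all();
        return 0;
}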
......@@ -4433,7 +4433,7 @@ ha_innobase::rnd_pos(
}
if (error) {
DBUG_PRINT("error", ("Got error: %ld", error));
DBUG_PRINT("error", ("Got error: %d", error));
DBUG_RETURN(error);
}
......@@ -4443,7 +4443,7 @@ ha_innobase::rnd_pos(
error = index_read(buf, pos, ref_length, HA_READ_KEY_EXACT);
if (error) {
DBUG_PRINT("error", ("Got error: %ld", error));
DBUG_PRINT("error", ("Got error: %d", error));
}
change_active_index(keynr);
......@@ -5482,7 +5482,7 @@ ha_innobase::read_time(
Returns statistics information of the table to the MySQL interpreter,
in various fields of the handle object. */
void
int
ha_innobase::info(
/*==============*/
uint flag) /* in: what information MySQL requests */
......@@ -5505,7 +5505,7 @@ ha_innobase::info(
if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) {
DBUG_VOID_RETURN;
DBUG_RETURN(HA_ERR_CRASHED);
}
/* We do not know if MySQL can call this function before calling
......@@ -5697,7 +5697,7 @@ ha_innobase::info(
prebuilt->trx->op_info = (char*)"";
DBUG_VOID_RETURN;
DBUG_RETURN(0);
}
/**************************************************************************
......
......@@ -143,7 +143,7 @@ class ha_innobase: public handler
int rnd_pos(byte * buf, byte *pos);
void position(const byte *record);
void info(uint);
int info(uint);
int analyze(THD* thd,HA_CHECK_OPT* check_opt);
int optimize(THD* thd,HA_CHECK_OPT* check_opt);
int discard_or_import_tablespace(my_bool discard);
......
......@@ -663,7 +663,10 @@ struct buf_block_struct{
ulint magic_n; /* magic number to check */
ulint state; /* state of the control block:
BUF_BLOCK_NOT_USED, ... */
BUF_BLOCK_NOT_USED, ...; changing
this is only allowed when a thread
has BOTH the buffer pool mutex AND
block->mutex locked */
byte* frame; /* pointer to buffer frame which
is of size UNIV_PAGE_SIZE, and
aligned to an address divisible by
......@@ -672,8 +675,12 @@ struct buf_block_struct{
ulint offset; /* page number within the space */
ulint lock_hash_val; /* hashed value of the page address
in the record lock hash table */
mutex_t* lock_mutex; /* mutex protecting the chain in the
record lock hash table */
mutex_t mutex; /* mutex protecting this block:
state (also protected by the buffer
pool mutex), io_fix, buf_fix_count,
and accessed; we introduce this new
mutex in InnoDB-5.1 to relieve
contention on the buffer pool mutex */
rw_lock_t lock; /* read-write lock of the buffer
frame */
buf_block_t* hash; /* node used in chaining to the page
......@@ -725,20 +732,27 @@ struct buf_block_struct{
in heuristic algorithms, because of
the possibility of a wrap-around! */
ulint freed_page_clock;/* the value of freed_page_clock
buffer pool when this block was
last time put to the head of the
LRU list */
of the buffer pool when this block was
the last time put to the head of the
LRU list; a thread is allowed to
read this for heuristic purposes
without holding any mutex or latch */
ibool old; /* TRUE if the block is in the old
blocks in the LRU list */
ibool accessed; /* TRUE if the page has been accessed
while in the buffer pool: read-ahead
may read in pages which have not been
accessed yet */
accessed yet; this is protected by
block->mutex; a thread is allowed to
read this for heuristic purposes
without holding any mutex or latch */
ulint buf_fix_count; /* count of how manyfold this block
is currently bufferfixed */
is currently bufferfixed; this is
protected by block->mutex */
ulint io_fix; /* if a read is pending to the frame,
io_fix is BUF_IO_READ, in the case
of a write BUF_IO_WRITE, otherwise 0 */
of a write BUF_IO_WRITE, otherwise 0;
this is protected by block->mutex */
/* 4. Optimistic search field */
dulint modify_clock; /* this clock is incremented every
......@@ -872,7 +886,9 @@ struct buf_pool_struct{
number of buffer blocks removed from
the end of the LRU list; NOTE that
this counter may wrap around at 4
billion! */
billion! A thread is allowed to
read this for heuristic purposes
without holding any mutex or latch */
ulint LRU_flush_ended;/* when an LRU flush ends for a page,
this is incremented by one; this is
set to zero when a buffer block is
......
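The buf0buf.h comments above spell out the new locking rules: block->state may only change while both the buffer pool mutex and block->mutex are held; buf_fix_count, io_fix and accessed need only block->mutex; freed_page_clock may be read with no latch for heuristics. A sketch of those rules with pthread mutexes and a simplified block_t, not the real buf_block_t:

#include <pthread.h>

enum block_state { BLOCK_NOT_USED, BLOCK_FILE_PAGE };

typedef struct {
        pthread_mutex_t  mutex;         /* the per-block mutex */
        enum block_state state;
        unsigned long    buf_fix_count;
        unsigned long    freed_page_clock;
        int              accessed;
} block_t;

static pthread_mutex_t pool_mutex = PTHREAD_MUTEX_INITIALIZER;

/* State changes: hold both mutexes, pool mutex first. */
static void set_state(block_t* b, enum block_state s)
{
        pthread_mutex_lock(&pool_mutex);
        pthread_mutex_lock(&b->mutex);
        b->state = s;
        pthread_mutex_unlock(&b->mutex);
        pthread_mutex_unlock(&pool_mutex);
}

/* Buffer fixing: block mutex alone is enough. */
static void buf_fix(block_t* b)
{
        pthread_mutex_lock(&b->mutex);
        b->buf_fix_count++;
        b->accessed = 1;
        pthread_mutex_unlock(&b->mutex);
}

/* Heuristic read: no mutex at all, as permitted by the comments above;
the value may be slightly stale, which is acceptable. */
static unsigned long freed_clock_hint(const block_t* b)
{
        return b->freed_page_clock;
}

int main(void)
{
        block_t b = { PTHREAD_MUTEX_INITIALIZER, BLOCK_NOT_USED, 0, 0, 0 };
        set_state(&b, BLOCK_FILE_PAGE);
        buf_fix(&b);
        (void) freed_clock_hint(&b);
        return 0;
}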
......@@ -285,12 +285,16 @@ buf_block_free(
/*===========*/
buf_block_t* block) /* in, own: block to be freed */
{
ut_a(block->state != BUF_BLOCK_FILE_PAGE);
mutex_enter(&(buf_pool->mutex));
mutex_enter(&block->mutex);
ut_a(block->state != BUF_BLOCK_FILE_PAGE);
buf_LRU_block_free_non_file_page(block);
mutex_exit(&block->mutex);
mutex_exit(&(buf_pool->mutex));
}
......@@ -427,7 +431,7 @@ buf_block_buf_fix_inc_debug(
ret = rw_lock_s_lock_func_nowait(&(block->debug_latch), file, line);
ut_a(ret);
ut_a(mutex_own(&block->mutex));
block->buf_fix_count++;
}
#else /* UNIV_SYNC_DEBUG */
......@@ -531,22 +535,23 @@ buf_page_release(
{
ut_ad(block);
mutex_enter_fast(&(buf_pool->mutex));
ut_a(block->state == BUF_BLOCK_FILE_PAGE);
ut_a(block->buf_fix_count > 0);
if (rw_latch == RW_X_LATCH && mtr->modifications) {
mutex_enter(&buf_pool->mutex);
buf_flush_note_modification(block, mtr);
mutex_exit(&buf_pool->mutex);
}
mutex_enter(&block->mutex);
#ifdef UNIV_SYNC_DEBUG
rw_lock_s_unlock(&(block->debug_latch));
#endif
block->buf_fix_count--;
mutex_exit(&(buf_pool->mutex));
mutex_exit(&block->mutex);
if (rw_latch == RW_S_LATCH) {
rw_lock_s_unlock(&(block->lock));
......
......@@ -80,6 +80,7 @@ memory is read outside the allocated blocks. */
#define UNIV_DEBUG_VALGRIND
#define UNIV_DEBUG_PRINT
#define UNIV_DEBUG
#define UNIV_LIST_DEBUG
#define UNIV_MEM_DEBUG
#define UNIV_IBUF_DEBUG
#define UNIV_SYNC_DEBUG
......
......@@ -123,27 +123,36 @@ name, NODE1 and NODE2 are pointers to nodes. */
}\
}\
/* Invalidate the pointers in a list node. */
#ifdef UNIV_LIST_DEBUG
# define UT_LIST_REMOVE_CLEAR(NAME, N) \
((N)->NAME.prev = (N)->NAME.next = (void*) -1)
#else
# define UT_LIST_REMOVE_CLEAR(NAME, N) while (0)
#endif
/***********************************************************************
Removes a node from a two-way linked list. BASE has to be the base node
(not a pointer to it). N has to be the pointer to the node to be removed
from the list. NAME is the list name. */
#define UT_LIST_REMOVE(NAME, BASE, N)\
{\
ut_ad(N);\
ut_a((BASE).count > 0);\
((BASE).count)--;\
if (((N)->NAME).next != NULL) {\
((((N)->NAME).next)->NAME).prev = ((N)->NAME).prev;\
} else {\
(BASE).end = ((N)->NAME).prev;\
}\
if (((N)->NAME).prev != NULL) {\
((((N)->NAME).prev)->NAME).next = ((N)->NAME).next;\
} else {\
(BASE).start = ((N)->NAME).next;\
}\
}\
#define UT_LIST_REMOVE(NAME, BASE, N) \
do { \
ut_ad(N); \
ut_a((BASE).count > 0); \
((BASE).count)--; \
if (((N)->NAME).next != NULL) { \
((((N)->NAME).next)->NAME).prev = ((N)->NAME).prev; \
} else { \
(BASE).end = ((N)->NAME).prev; \
} \
if (((N)->NAME).prev != NULL) { \
((((N)->NAME).prev)->NAME).next = ((N)->NAME).next; \
} else { \
(BASE).start = ((N)->NAME).next; \
} \
UT_LIST_REMOVE_CLEAR(NAME, N); \
} while (0)
/************************************************************************
Gets the next node in a two-way list. NAME is the name of the list
......
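The rewritten UT_LIST_REMOVE above is wrapped in do { ... } while (0) and now poisons the removed node's pointers via UT_LIST_REMOVE_CLEAR. A compact, self-contained imitation of that style (LIST_REMOVE, item_t and list_base_t are made-up names, not the ut0lst.h macros), showing why the do/while wrapper is needed in an unbraced if/else and why callers cache a neighbouring pointer before removing, just as the buf0lru.c hunks cache prev_block:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

typedef struct item_struct item_t;

struct item_struct {
        int     value;
        struct {                        /* embedded list node */
                item_t* prev;
                item_t* next;
        } list;
};

typedef struct {
        unsigned long   count;
        item_t*         start;
        item_t*         end;
} list_base_t;

/* do { ... } while (0) makes the macro one statement, so it is safe as
the body of an if/else without braces. */
#define LIST_REMOVE(NAME, BASE, N)                                      \
do {                                                                    \
        assert((N) != NULL);                                            \
        assert((BASE).count > 0);                                       \
        (BASE).count--;                                                 \
        if ((N)->NAME.next != NULL) {                                   \
                (N)->NAME.next->NAME.prev = (N)->NAME.prev;             \
        } else {                                                        \
                (BASE).end = (N)->NAME.prev;                            \
        }                                                               \
        if ((N)->NAME.prev != NULL) {                                   \
                (N)->NAME.prev->NAME.next = (N)->NAME.next;             \
        } else {                                                        \
                (BASE).start = (N)->NAME.next;                          \
        }                                                               \
        (N)->NAME.prev = (N)->NAME.next = (void*) -1;  /* poison */     \
} while (0)

int main(void)
{
        item_t          a = { 1, { NULL, NULL } };
        item_t          b = { 2, { NULL, NULL } };
        item_t*         it;
        list_base_t     base = { 2, &a, &b };

        a.list.next = &b;
        b.list.prev = &a;

        /* Cache the next pointer first: once a node is removed its
        pointers are poisoned and must not be followed. */
        it = base.start;
        while (it != NULL) {
                item_t* next = it->list.next;
                if (it->value % 2 == 0)
                        LIST_REMOVE(list, base, it);    /* safe unbraced */
                else
                        printf("kept %d\n", it->value);
                it = next;
        }
        assert(base.count == 1);
        return 0;
}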
......@@ -4548,10 +4548,6 @@ loop:
trx->read_view->up_limit_id));
}
fprintf(file,
"Trx has approximately %lu row locks\n",
(ulong) lock_number_of_rows_locked(trx));
if (trx->que_state == TRX_QUE_LOCK_WAIT) {
fprintf(file,
"------- TRX HAS BEEN WAITING %lu SEC"
......
SET SESSION STORAGE_ENGINE = InnoDB;
drop table if exists t1,t2,t1m,t1i,t2m,t2i,t4;
create table t1(f1 varchar(800) binary not null, key(f1))
character set utf8 collate utf8_general_ci;
Warnings:
Warning 1071 Specified key was too long; max key length is 765 bytes
insert into t1 values('aaa');
drop table t1;
create table t1 (
c_id int(11) not null default '0',
org_id int(11) default null,
......@@ -111,6 +105,14 @@ SELECT `id1` FROM `t1` WHERE `id1` NOT IN (SELECT `id1` FROM `t2` WHERE `id2` =
id1
2
DROP TABLE t1, t2;
create table t1 (c1 int) engine=innodb;
handler t1 open;
handler t1 read first;
c1
Before and after comparison
0
drop table t1;
End of 4.1 tests
create table t1m (a int) engine = MEMORY;
create table t1i (a int);
create table t2m (a int) engine = MEMORY;
......@@ -248,6 +250,22 @@ b
c
d
drop table t1,t4;
DROP TABLE IF EXISTS t2, t1;
CREATE TABLE t1 (i INT NOT NULL PRIMARY KEY) ENGINE= InnoDB;
CREATE TABLE t2 (
i INT NOT NULL,
FOREIGN KEY (i) REFERENCES t1 (i) ON DELETE NO ACTION
) ENGINE= InnoDB;
INSERT INTO t1 VALUES (1);
INSERT INTO t2 VALUES (1);
DELETE IGNORE FROM t1 WHERE i = 1;
Warnings:
Error 1451 Cannot delete or update a parent row: a foreign key constraint fails (`test`.`t2`, CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`i`) REFERENCES `t1` (`i`) ON DELETE NO ACTION)
SELECT * FROM t1, t2;
i i
1 1
DROP TABLE t2, t1;
End of 4.1 tests.
create table t1 (
a varchar(30), b varchar(30), primary key(a), key(b)
);
......@@ -369,6 +387,23 @@ Warnings:
Warning 1071 Specified key was too long; max key length is 765 bytes
insert into t1 values('aaa');
drop table t1;
CREATE TABLE t1 (a INT PRIMARY KEY, b INT, c FLOAT, KEY b(b)) ENGINE = INNODB;
INSERT INTO t1 VALUES ( 1 , 1 , 1);
INSERT INTO t1 SELECT a + 1 , MOD(a + 1 , 20), 1 FROM t1;
INSERT INTO t1 SELECT a + 2 , MOD(a + 2 , 20), 1 FROM t1;
INSERT INTO t1 SELECT a + 4 , MOD(a + 4 , 20), 1 FROM t1;
INSERT INTO t1 SELECT a + 8 , MOD(a + 8 , 20), 1 FROM t1;
INSERT INTO t1 SELECT a + 16, MOD(a + 16, 20), 1 FROM t1;
INSERT INTO t1 SELECT a + 32, MOD(a + 32, 20), 1 FROM t1;
INSERT INTO t1 SELECT a + 64, MOD(a + 64, 20), 1 FROM t1;
EXPLAIN SELECT b, SUM(c) FROM t1 GROUP BY b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL b 5 NULL 128
EXPLAIN SELECT SQL_BIG_RESULT b, SUM(c) FROM t1 GROUP BY b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 128 Using filesort
DROP TABLE t1;
End of 5.0 tests
CREATE TABLE `t2` (
`k` int(11) NOT NULL auto_increment,
`a` int(11) default NULL,
......@@ -437,3 +472,4 @@ k a c
11 15 1
12 20 1
drop table t2;
End of 5.1 tests
......@@ -68,4 +68,5 @@ MYSQL_PLUGIN_ACTIONS(innobase, [
storage/innobase/handler/Makefile
storage/innobase/usr/Makefile)
])
MYSQL_PLUGIN_DEPENDS_ON_MYSQL_INTERNALS(innobase, [handler/ha_innodb.cc])
......@@ -1761,11 +1761,10 @@ trx_print(
|| mem_heap_get_size(trx->lock_heap) > 400) {
newline = TRUE;
fprintf(f, "%lu lock struct(s), heap size %lu",
fprintf(f, "%lu lock struct(s), heap size %lu,"
" %lu row lock(s)",
(ulong) UT_LIST_GET_LEN(trx->trx_locks),
(ulong) mem_heap_get_size(trx->lock_heap));
fprintf(f, "%lu row lock(s)",
(ulong) mem_heap_get_size(trx->lock_heap),
(ulong) lock_number_of_rows_locked(trx));
}
......