Commit bac65733 authored by marko

branches/zip: Always call buf_block_align() while holding buf_pool->mutex.

This was forgotten from r977.
parent 73a33e71
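
The fix applies the same pattern at every buf_block_align() call site: acquire buf_pool->mutex, translate the frame pointer to its buf_block_t, then release the mutex. Below is a minimal sketch of that pattern; the wrapper name rec_get_block() and the include list are illustrative only and not part of this commit, while buf_block_align(), mutex_enter(), mutex_exit() and buf_pool->mutex are the InnoDB primitives used in the hunks that follow.

#include "buf0buf.h"	/* buf_pool, buf_block_t, buf_block_align() */
#include "sync0sync.h"	/* mutex_enter(), mutex_exit() */
#include "rem0types.h"	/* rec_t */

/* Hypothetical helper, for illustration only: resolve a record
pointer to its buffer block while holding buf_pool->mutex, as this
commit requires for every buf_block_align() call. */
static buf_block_t*
rec_get_block(const rec_t* rec)
{
	buf_block_t*	block;

	mutex_enter(&buf_pool->mutex);
	block = buf_block_align((byte*) rec);
	mutex_exit(&buf_pool->mutex);

	return(block);
}
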
@@ -756,7 +756,9 @@ btr_search_guess_on_hash(
 goto failure_unlock;
 }
+mutex_enter(&buf_pool->mutex);
 block = buf_block_align(rec);
+mutex_exit(&buf_pool->mutex);
 page = page_align(rec);
 if (UNIV_LIKELY(!has_search_latch)) {
@@ -1608,6 +1610,7 @@ btr_search_validate(void)
 *offsets_ = (sizeof offsets_) / sizeof *offsets_;
 rw_lock_x_lock(&btr_search_latch);
+mutex_enter(&buf_pool->mutex);
 cell_count = hash_get_n_cells(btr_search_sys->hash_index);
@@ -1615,9 +1618,11 @@ btr_search_validate(void)
 /* We release btr_search_latch every once in a while to
 give other queries a chance to run. */
 if ((i != 0) && ((i % chunk_size) == 0)) {
+mutex_exit(&buf_pool->mutex);
 rw_lock_x_unlock(&btr_search_latch);
 os_thread_yield();
 rw_lock_x_lock(&btr_search_latch);
+mutex_enter(&buf_pool->mutex);
 }
 node = hash_get_nth_cell(btr_search_sys->hash_index, i)->node;
@@ -1688,9 +1693,11 @@ btr_search_validate(void)
 /* We release btr_search_latch every once in a while to
 give other queries a chance to run. */
 if (i != 0) {
+mutex_exit(&buf_pool->mutex);
 rw_lock_x_unlock(&btr_search_latch);
 os_thread_yield();
 rw_lock_x_lock(&btr_search_latch);
+mutex_enter(&buf_pool->mutex);
 }
 if (!ha_validate(btr_search_sys->hash_index, i, end_index)) {
@@ -1698,6 +1705,7 @@ btr_search_validate(void)
 }
 }
+mutex_exit(&buf_pool->mutex);
 rw_lock_x_unlock(&btr_search_latch);
 if (UNIV_LIKELY_NULL(heap)) {
 mem_heap_free(heap);
@@ -99,10 +99,12 @@ ha_insert_for_fold(
 if (prev_node->fold == fold) {
 #ifdef UNIV_DEBUG
 if (table->adaptive) {
+mutex_enter(&buf_pool->mutex);
 prev_block = buf_block_align(prev_node->data);
 ut_a(prev_block->n_pointers > 0);
 prev_block->n_pointers--;
 buf_block_align(data)->n_pointers++;
+mutex_exit(&buf_pool->mutex);
 }
 #endif /* UNIV_DEBUG */
 prev_node->data = data;
@@ -130,7 +132,9 @@ ha_insert_for_fold(
 #ifdef UNIV_DEBUG
 if (table->adaptive) {
+mutex_enter(&buf_pool->mutex);
 buf_block_align(data)->n_pointers++;
+mutex_exit(&buf_pool->mutex);
 }
 #endif /* UNIV_DEBUG */
 node->fold = fold;
@@ -167,8 +171,12 @@ ha_delete_hash_node(
 {
 #ifdef UNIV_DEBUG
 if (table->adaptive) {
-ut_a(buf_block_align(del_node->data)->n_pointers > 0);
-buf_block_align(del_node->data)->n_pointers--;
+buf_block_t* block;
+mutex_enter(&buf_pool->mutex);
+block = buf_block_align(del_node->data);
+mutex_exit(&buf_pool->mutex);
+ut_a(block->n_pointers > 0);
+block->n_pointers--;
 }
 #endif /* UNIV_DEBUG */
 HASH_DELETE_AND_COMPACT(ha_node_t, next, table, del_node);
@@ -220,9 +228,11 @@ ha_search_and_update_if_found(
 if (node) {
 #ifdef UNIV_DEBUG
 if (table->adaptive) {
+mutex_enter(&buf_pool->mutex);
 ut_a(buf_block_align(node->data)->n_pointers > 0);
 buf_block_align(node->data)->n_pointers--;
 buf_block_align(new_data)->n_pointers++;
+mutex_exit(&buf_pool->mutex);
 }
 #endif /* UNIV_DEBUG */
 node->data = new_data;
@@ -221,7 +221,11 @@ buf_frame_get_page_zip(
 /* out: compressed page descriptor, or NULL */
 byte* ptr) /* in: pointer to the page */
 {
-return(buf_block_get_page_zip(buf_block_align(ptr)));
+page_zip_des_t* page_zip;
+mutex_enter(&buf_pool->mutex);
+page_zip = buf_block_get_page_zip(buf_block_align(ptr));
+mutex_exit(&buf_pool->mutex);
+return(page_zip);
 }
 #endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
@@ -237,7 +241,6 @@ buf_ptr_get_fsp_addr(
 fil_addr_t* addr) /* out: page offset and byte offset */
 {
 const page_t* page = ut_align_down((void*) ptr, UNIV_PAGE_SIZE);
-ut_ad(buf_block_align((byte*) ptr));
 *space = mach_read_from_4(page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID);
 addr->page = mach_read_from_4(page + FIL_PAGE_OFFSET);
@@ -189,8 +189,10 @@ mlog_write_initial_log_record_fast(
 #endif
 #ifdef UNIV_DEBUG
+mutex_enter(&buf_pool->mutex);
 /* We now assume that all x-latched pages have been modified! */
 block = buf_block_align(ptr);
+mutex_exit(&buf_pool->mutex);
 if (!mtr_memo_contains(mtr, block, MTR_MEMO_MODIFY)) {
@@ -116,8 +116,11 @@ row_upd_rec_sys_fields(
 ut_ad(dict_index_is_clust(index));
 ut_ad(rec_offs_validate(rec, index, offsets));
 #ifdef UNIV_SYNC_DEBUG
-ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX)
-|| !buf_block_align(rec)->is_hashed);
+if (!rw_lock_own(&btr_search_latch, RW_LOCK_EX)) {
+mutex_enter(&buf_pool->mutex);
+ut_ad(!buf_block_align(rec)->is_hashed);
+mutex_exit(&buf_pool->mutex);
+}
 #endif /* UNIV_SYNC_DEBUG */
 if (UNIV_LIKELY_NULL(page_zip)) {
@@ -322,7 +322,12 @@ mtr_memo_contains_page(
 const byte* ptr, /* in: pointer to buffer frame */
 ulint type) /* in: type of object */
 {
-return(mtr_memo_contains(mtr, buf_block_align((byte*) ptr), type));
+ibool ret;
+mutex_enter(&buf_pool->mutex);
+ret = mtr_memo_contains(mtr, buf_block_align((byte*) ptr), type);
+mutex_exit(&buf_pool->mutex);
+return(ret);
 }
 /*************************************************************