Commit 96cccd15 authored by marko

Merge patch to MySQL/InnoDB 5.0: Fix Bug #14747.

Note that buf_block_t::index should be protected by btr_search_latch
or an s-latch or x-latch on the index page.

btr_search_drop_page_hash_index(): Read block->index while holding
btr_search_latch and use the cached value in the loop.  Remove some
redundant assertions.
parent 7163d59f
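
For context, a minimal reader-side sketch of the pattern this patch
introduces (not part of the diff below; use_index() is a hypothetical
placeholder, and the snippet assumes the InnoDB rw-lock API from
sync0rw.h):

	dict_index_t*	index;

	/* Read the field once while btr_search_latch is held. */
	rw_lock_s_lock(&btr_search_latch);
	index = block->index;
	rw_lock_s_unlock(&btr_search_latch);

	/* block->index must not be read past this point: the index page
	may only be s-latched, so another thread could be modifying the
	field concurrently.  Use only the cached copy. */
	use_index(index);
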
@@ -904,6 +904,7 @@ btr_search_drop_page_hash_index(
 	ulint*		folds;
 	ulint		i;
 	mem_heap_t*	heap;
+	dict_index_t*	index;
 	ulint*		offsets;
 #ifdef UNIV_SYNC_DEBUG
@@ -932,11 +933,16 @@ btr_search_drop_page_hash_index(
 	n_fields = block->curr_n_fields;
 	n_bytes = block->curr_n_bytes;
+	index = block->index;
-	ut_a(n_fields + n_bytes > 0);
+	/* NOTE: The fields of block must not be accessed after
+	releasing btr_search_latch, as the index page might only
+	be s-latched! */
 	rw_lock_s_unlock(&btr_search_latch);
+	ut_a(n_fields + n_bytes > 0);
 	n_recs = page_get_n_recs(page);
 	/* Calculate and cache fold values into an array for fast deletion
@@ -949,14 +955,6 @@ btr_search_drop_page_hash_index(
 	rec = page_get_infimum_rec(page);
 	rec = page_rec_get_next(rec);
-	if (!page_rec_is_supremum(rec)) {
-		ut_a(n_fields <= rec_get_n_fields(rec, block->index));
-		if (n_bytes > 0) {
-			ut_a(n_fields < rec_get_n_fields(rec, block->index));
-		}
-	}
 	tree_id = btr_page_get_index_id(page);
 	prev_fold = 0;
@@ -964,18 +962,12 @@ btr_search_drop_page_hash_index(
 	heap = NULL;
 	offsets = NULL;
-	if (block->index == NULL) {
-		mem_analyze_corruption((byte*)block);
-		ut_a(block->index != NULL);
-	}
 	while (!page_rec_is_supremum(rec)) {
 		/* FIXME: in a mixed tree, not all records may have enough
 		ordering fields: */
-		offsets = rec_get_offsets(rec, block->index,
-				offsets, n_fields + (n_bytes > 0), &heap);
+		offsets = rec_get_offsets(rec, index, offsets,
+				n_fields + (n_bytes > 0), &heap);
 		ut_a(rec_offs_n_fields(offsets) == n_fields + (n_bytes > 0));
 		fold = rec_fold(rec, offsets, n_fields, n_bytes, tree_id);
 		if (fold == prev_fold && prev_fold != 0) {
@@ -745,8 +745,6 @@ struct buf_block_struct{
 				buffer pool which are index pages,
 				but this flag is not set because
 				we do not keep track of all pages */
-	dict_index_t*	index;	/* index for which the adaptive
-				hash index has been created */
 	/* 2. Page flushing fields */
 	UT_LIST_NODE_T(buf_block_t) flush_list;
@@ -833,7 +831,7 @@ struct buf_block_struct{
 				records with the same prefix should be
 				indexed in the hash index */
-	/* The following 4 fields are protected by btr_search_latch: */
+	/* The following 6 fields are protected by btr_search_latch: */
 	ibool		is_hashed;	/* TRUE if hash index has already been
 				built on this page; note that it does
@@ -850,6 +848,12 @@ struct buf_block_struct{
 	ulint		curr_side;	/* BTR_SEARCH_LEFT_SIDE or
 				BTR_SEARCH_RIGHT_SIDE in hash
 				indexing */
+	dict_index_t*	index;	/* Index for which the adaptive
+				hash index has been created.
+				This field may only be modified
+				while holding an s-latch or x-latch
+				on block->lock and an x-latch on
+				btr_search_latch. */
 	/* 6. Debug fields */
 #ifdef UNIV_SYNC_DEBUG
 	rw_lock_t	debug_latch;	/* in the debug version, each thread
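
The new comment on buf_block_t::index spells out the write-side rule.
As a hedged illustration only (the function name is invented, not part
of the patch; it assumes the caller already holds an s-latch or
x-latch on block->lock):

	static void
	buf_block_set_index_sketch(
	/*=======================*/
		buf_block_t*	block,	/* in: block; caller holds a latch
					on block->lock */
		dict_index_t*	index)	/* in: index to associate with the
					adaptive hash entries */
	{
		/* x-latching btr_search_latch excludes readers that hold
		only btr_search_latch, such as the code path patched in
		btr_search_drop_page_hash_index() above. */
		rw_lock_x_lock(&btr_search_latch);
		block->index = index;
		rw_lock_x_unlock(&btr_search_latch);
	}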