Commit 8f6de3bb authored by marko

branches/zip: Add page_zip_validate() checks.

page_cur_delete_rec(): Do not call page_zip_validate() at the beginning,
because btr_set_min_rec_mark() in btr_cur_pessimistic_delete() causes a
temporary mismatch between the page and its compressed copy.
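
As a minimal sketch of that ordering (a toy model only; toy_page_t,
toy_validate() and everything else below are made-up stand-ins, not
InnoDB code), modifying just the uncompressed copy makes a
validate-style comparison fail until the compressed copy is brought
back in sync:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* A "page" and its compressed shadow copy, reduced to raw bytes. */
typedef struct { unsigned char bytes[16]; } toy_page_t;

/* Like page_zip_validate(): succeed only if the two copies agree. */
static bool toy_validate(const toy_page_t* shadow, const toy_page_t* page)
{
	return memcmp(shadow->bytes, page->bytes, sizeof page->bytes) == 0;
}

int main(void)
{
	toy_page_t page = {{0}};
	toy_page_t shadow = page;

	/* Like btr_set_min_rec_mark(): only the uncompressed page is
	touched, so the two copies temporarily disagree. */
	page.bytes[0] |= 0x01;
	printf("after mark only: valid=%d\n", toy_validate(&shadow, &page));

	/* Like page_cur_delete_rec() with its page_zip argument: the
	change reaches the compressed copy too, so consistency is
	restored before the mini-transaction commits. */
	shadow = page;
	printf("after resync:    valid=%d\n", toy_validate(&shadow, &page));
	return 0;
}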

Document temporary mismatches caused by btr_set_min_rec_mark() calls
and explain why they will not cause any problems.
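
For reference, every hunk below adds the same debug-only assertion
(only the page pointers differ). The recurring pattern, quoted from
the diff, is:

#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
	ut_a(!page_zip || page_zip_validate(page_zip, page));
#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */

The check is compiled only under UNIV_DEBUG or UNIV_ZIP_DEBUG, and it
trivially passes for pages without a compressed counterpart
(page_zip == NULL).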
parent 5837f751
@@ -2509,6 +2509,11 @@ btr_discard_page(
 		ut_ad(page_rec_is_user_rec(node_ptr));
+		/* This will make page_zip_validate() fail on merge_page
+		until btr_level_list_remove() completes. This is harmless,
+		because everything will take place within a single
+		mini-transaction and because writing to the redo log
+		is an atomic operation (performed by mtr_commit()). */
 		btr_set_min_rec_mark(node_ptr, mtr);
 	}
@@ -2516,6 +2521,14 @@ btr_discard_page(
 	/* Remove the page from the level list */
 	btr_level_list_remove(tree, page, mtr);
+#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
+	{
+		page_zip_des_t*	merge_page_zip = buf_block_get_page_zip(
+			buf_block_align(merge_page));
+		ut_a(!merge_page_zip
+		     || page_zip_validate(merge_page_zip, merge_page));
+	}
+#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
 	if (left_page_no != FIL_NULL) {
 		lock_update_discard(page_get_supremum_rec(merge_page), page);
@@ -1766,6 +1766,9 @@ btr_cur_optimistic_update(
 	new_rec_size = rec_get_converted_size(index, new_entry);
 	page_zip = buf_block_get_page_zip(buf_block_align(page));
+#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
+	ut_a(!page_zip || page_zip_validate(page_zip, page));
+#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
 	if (UNIV_LIKELY_NULL(page_zip)
 	    && !page_zip_alloc(page_zip, page, index,
@@ -1968,6 +1971,9 @@ btr_cur_pessimistic_update(
 					MTR_MEMO_X_LOCK));
 	ut_ad(mtr_memo_contains(mtr, buf_block_align(page),
 				MTR_MEMO_PAGE_X_FIX));
+#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
+	ut_a(!page_zip || page_zip_validate(page_zip, page));
+#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
 	optim_err = btr_cur_optimistic_update(flags, cursor, update,
 					      cmpl_info, thr, mtr);
@@ -2085,6 +2091,9 @@ btr_cur_pessimistic_update(
 	btr_search_update_hash_on_delete(cursor);
+#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
+	ut_a(!page_zip || page_zip_validate(page_zip, page));
+#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
 	page_cur_delete_rec(page_cursor, index, offsets, page_zip, mtr);
 	page_cur_move_to_prev(page_cursor);
@@ -2604,17 +2613,24 @@ btr_cur_optimistic_delete(
 	if (no_compress_needed) {
+		page_zip_des_t*	page_zip;
 		lock_update_delete(rec);
 		btr_search_update_hash_on_delete(cursor);
 		max_ins_size = page_get_max_insert_size_after_reorganize(page,
 									 1);
+		page_zip = buf_block_get_page_zip(buf_block_align(
+					btr_cur_get_rec(cursor)));
+#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
+		ut_a(!page_zip || page_zip_validate(page_zip, page));
+#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
 		page_cur_delete_rec(btr_cur_get_page_cur(cursor),
-				    cursor->index, offsets,
-				    buf_block_get_page_zip(buf_block_align(
-						btr_cur_get_rec(cursor))),
-				    mtr);
+				    cursor->index, offsets, page_zip, mtr);
+#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
+		ut_a(!page_zip || page_zip_validate(page_zip, page));
+#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
 		ibuf_update_free_bits_low(cursor->index, page, max_ins_size,
 					  mtr);
@@ -2695,6 +2711,9 @@ btr_cur_pessimistic_delete(
 	heap = mem_heap_create(1024);
 	rec = btr_cur_get_rec(cursor);
 	page_zip = buf_block_get_page_zip(buf_block_align(page));
+#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
+	ut_a(!page_zip || page_zip_validate(page_zip, page));
+#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
 	offsets = rec_get_offsets(rec, cursor->index,
 				  NULL, ULINT_UNDEFINED, &heap);
@@ -2707,6 +2726,9 @@ btr_cur_pessimistic_delete(
 	    || !rec_get_1byte_offs_flag(rec))) {
 		btr_rec_free_externally_stored_fields(cursor->index, rec,
 			offsets, page_zip, in_rollback, mtr);
+#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
+		ut_a(!page_zip || page_zip_validate(page_zip, page));
+#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
 	}
 	if (UNIV_UNLIKELY(page_get_n_recs(page) < 2)
@@ -2739,6 +2761,11 @@ btr_cur_pessimistic_delete(
 		non-leaf level, we must mark the new leftmost node
 		pointer as the predefined minimum record */
+		/* This will make page_zip_validate() fail until
+		page_cur_delete_rec() completes. This is harmless,
+		because everything will take place within a single
+		mini-transaction and because writing to the redo log
+		is an atomic operation (performed by mtr_commit()). */
 		btr_set_min_rec_mark(next_rec, mtr);
 	} else {
 		/* Otherwise, if we delete the leftmost node pointer
@@ -2762,6 +2789,9 @@ btr_cur_pessimistic_delete(
 	page_cur_delete_rec(btr_cur_get_page_cur(cursor), cursor->index,
 			    offsets, page_zip, mtr);
+#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
+	ut_a(!page_zip || page_zip_validate(page_zip, page));
+#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
 	ut_ad(btr_check_node_ptr(tree, page, mtr));
@@ -1514,9 +1514,6 @@ page_cur_delete_rec(
 	current_rec = cursor->rec;
 	ut_ad(rec_offs_validate(current_rec, index, offsets));
 	ut_ad(!!page_is_comp(page) == dict_table_is_comp(index->table));
-#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
-	ut_a(!page_zip || page_zip_validate(page_zip, page));
-#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
 	/* The record must not be the supremum or infimum record. */
 	ut_ad(page_rec_is_user_rec(current_rec));
@@ -831,7 +831,7 @@ page_delete_rec_list_end(
 	rec_t*		last_rec;
 	rec_t*		prev_rec;
 	ulint		n_owned;
-	page_t*		page;
+	page_t*		page	= ut_align_down(rec, UNIV_PAGE_SIZE);
 	mem_heap_t*	heap		= NULL;
 	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
 	ulint*		offsets		= offsets_;
@@ -839,6 +839,9 @@ page_delete_rec_list_end(
 	ut_ad(size == ULINT_UNDEFINED || size < UNIV_PAGE_SIZE);
 	ut_ad(!page_zip || page_rec_is_comp(rec));
+#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
+	ut_a(!page_zip || page_zip_validate(page_zip, page));
+#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
 	if (page_rec_is_infimum(rec)) {
 		rec = page_rec_get_next(rec);
@@ -852,7 +855,6 @@ page_delete_rec_list_end(
 	/* Reset the last insert info in the page header and increment
 	the modify clock for the frame */
-	page = ut_align_down(rec, UNIV_PAGE_SIZE);
 	page_header_set_ptr(page, page_zip, PAGE_LAST_INSERT, NULL);
 	/* The page gets invalid for optimistic searches: increment the
@@ -1004,6 +1006,10 @@ page_delete_rec_list_start(
 	ut_ad((ibool) !!page_rec_is_comp(rec)
 	      == dict_table_is_comp(index->table));
+#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
+	ut_a(!page_zip || page_zip_validate(page_zip,
+			ut_align_down(rec, UNIV_PAGE_SIZE)));
+#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
 	if (page_rec_is_infimum(rec)) {