Commit 438cb8a6 authored by marko

branches/zip: Remove all buf_block_align() calls from lock0lock.c.

Replace all page_t* parameters with buf_block_t*, and replace many
rec_t* parameters with heap_no.  This also eliminates many
rec_get_heap_no() calls, which became more expensive with the
introduction of ROW_FORMAT=COMPACT in MySQL/InnoDB 5.0.3.

page_rec_get_heap_no(), lock_get_min_heap_no(): New functions.
parent 483a5708
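
The pattern repeated in every hunk below is the same: call sites that used to pass a raw page frame (page_t*) or only a record pointer (rec_t*) now pass the enclosing buffer block (buf_block_t*), and record locks are addressed by heap number where the record pointer itself is not needed. The sketch that follows only illustrates why (block, heap_no) is a convenient lock key; the toy_* names and the embedded bitmap are illustrative assumptions, not InnoDB declarations (in InnoDB the lock bitmaps live in the lock system, keyed by space id and page number, and the caller avoids buf_block_align()/rec_get_heap_no() by passing the block and heap_no it already knows).

#include <stdio.h>

typedef unsigned long ulint;

/* Toy stand-in for buf_block_t: page identity plus a lock bitmap
indexed by heap number.  Sketch only; not an InnoDB structure. */
typedef struct {
	ulint		space;		/* tablespace id */
	ulint		page_no;	/* page number within the space */
	unsigned char	lock_bits[32];	/* one bit per heap_no (toy size) */
} toy_block_t;

/* New-style call shape: the caller already holds the block, so no
mapping from a record pointer back to its block is needed, and the
heap number is passed in instead of being recomputed inside the
lock system. */
static void
toy_lock_rec(toy_block_t* block, ulint heap_no)
{
	block->lock_bits[heap_no / 8]
		|= (unsigned char) (1 << (heap_no % 8));
}

static int
toy_lock_rec_is_set(const toy_block_t* block, ulint heap_no)
{
	return((block->lock_bits[heap_no / 8] >> (heap_no % 8)) & 1);
}

int
main(void)
{
	toy_block_t	block = {23, 42, {0}};

	toy_lock_rec(&block, 5);	/* lock heap_no 5 on page (23, 42) */

	printf("heap_no 5 locked: %d\n", toy_lock_rec_is_set(&block, 5));
	printf("heap_no 6 locked: %d\n", toy_lock_rec_is_set(&block, 6));

	return(0);
}
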
......@@ -929,7 +929,7 @@ btr_page_reorganize_low(
if (UNIV_LIKELY(!recovery)) {
/* Update the record lock bitmaps */
lock_move_reorganize_page(page, temp_page);
lock_move_reorganize_page(block, temp_block);
}
data_size2 = page_get_data_size(page);
......@@ -1122,7 +1122,7 @@ btr_root_raise_and_insert(
information of the record to be inserted on the infimum of the
root page: we cannot discard the lock structs on the root page */
lock_update_root_raise(new_page, root);
lock_update_root_raise(new_block, root_block);
/* Create a memory heap where the node pointer is stored */
heap = mem_heap_create(100);
......@@ -1851,7 +1851,7 @@ func_start:
left_block = new_block;
right_block = block;
lock_update_split_left(page, new_page);
lock_update_split_left(right_block, left_block);
} else {
/* fputs("Split right\n", stderr); */
......@@ -1879,7 +1879,7 @@ func_start:
left_block = block;
right_block = new_block;
lock_update_split_right(new_page, page);
lock_update_split_right(right_block, left_block);
}
#ifdef UNIV_ZIP_DEBUG
......@@ -2197,7 +2197,7 @@ btr_lift_page_up(
page, index, mtr);
}
lock_update_copy_and_discard(father_page, page);
lock_update_copy_and_discard(father_block, block);
/* Free the file page */
btr_page_free(index, block, mtr);
......@@ -2367,7 +2367,7 @@ err_exit:
btr_level_list_remove(page, mtr);
btr_node_ptr_delete(index, block, mtr);
lock_update_merge_left(merge_page, orig_pred, page);
lock_update_merge_left(merge_block, orig_pred, block);
} else {
rec_t* orig_succ;
#ifdef UNIV_BTR_DEBUG
......@@ -2426,7 +2426,7 @@ err_exit:
offsets, right_page_no, mtr);
btr_node_ptr_delete(index, merge_block, mtr);
lock_update_merge_right(orig_succ, page);
lock_update_merge_right(merge_block, orig_succ, block);
}
mem_heap_free(heap);
......@@ -2473,7 +2473,7 @@ btr_discard_only_page_on_level(
page_level = btr_page_get_level(page, mtr);
lock_update_discard(page_get_supremum_rec(father_page), page);
lock_update_discard(father_block, PAGE_HEAP_NO_SUPREMUM, block);
btr_page_set_level(father_page, father_page_zip, page_level, mtr);
......@@ -2511,6 +2511,7 @@ btr_discard_page(
ulint space;
ulint left_page_no;
ulint right_page_no;
buf_block_t* merge_block;
page_t* merge_page;
buf_block_t* block;
page_t* page;
......@@ -2531,14 +2532,16 @@ btr_discard_page(
right_page_no = btr_page_get_next(buf_block_get_frame(block), mtr);
if (left_page_no != FIL_NULL) {
merge_page = btr_page_get(space, left_page_no, RW_X_LATCH,
mtr);
merge_block = btr_block_get(space, left_page_no, RW_X_LATCH,
mtr);
merge_page = buf_block_get_frame(merge_block);
#ifdef UNIV_BTR_DEBUG
ut_a(btr_page_get_next(merge_page, mtr) == block->offset);
#endif /* UNIV_BTR_DEBUG */
} else if (right_page_no != FIL_NULL) {
merge_page = btr_page_get(space, right_page_no, RW_X_LATCH,
mtr);
merge_block = btr_block_get(space, right_page_no, RW_X_LATCH,
mtr);
merge_page = buf_block_get_frame(merge_block);
#ifdef UNIV_BTR_DEBUG
ut_a(btr_page_get_prev(merge_page, mtr) == block->offset);
#endif /* UNIV_BTR_DEBUG */
......@@ -2575,18 +2578,19 @@ btr_discard_page(
#ifdef UNIV_ZIP_DEBUG
{
page_zip_des_t* merge_page_zip
= buf_frame_get_page_zip(merge_page);
= buf_block_get_page_zip(merge_block);
ut_a(!merge_page_zip
|| page_zip_validate(merge_page_zip, merge_page));
}
#endif /* UNIV_ZIP_DEBUG */
if (left_page_no != FIL_NULL) {
lock_update_discard(page_get_supremum_rec(merge_page), page);
lock_update_discard(merge_block, PAGE_HEAP_NO_SUPREMUM,
block);
} else {
lock_update_discard(page_rec_get_next(
page_get_infimum_rec(merge_page)),
page);
lock_update_discard(merge_block,
lock_get_min_heap_no(merge_block),
block);
}
/* Free the file page */
......
......@@ -1189,7 +1189,7 @@ fail:
if (!(flags & BTR_NO_LOCKING_FLAG) && inherit) {
lock_update_insert(*rec);
lock_update_insert(block, *rec);
}
#if 0
......@@ -1389,7 +1389,7 @@ btr_cur_pessimistic_insert(
#endif
if (!(flags & BTR_NO_LOCKING_FLAG)) {
lock_update_insert(*rec);
lock_update_insert(btr_cur_get_block(cursor), *rec);
}
err = DB_SUCCESS;
......@@ -1434,7 +1434,7 @@ btr_cur_upd_lock_and_undo(
/* We do undo logging only when we update a clustered index
record */
return(lock_sec_rec_modify_check_and_lock(
flags, rec, btr_cur_get_block(cursor),
flags, btr_cur_get_block(cursor), rec,
index, thr));
}
......@@ -1449,7 +1449,7 @@ btr_cur_upd_lock_and_undo(
*offsets_ = (sizeof offsets_) / sizeof *offsets_;
err = lock_clust_rec_modify_check_and_lock(
flags, rec, index,
flags, btr_cur_get_block(cursor), rec, index,
rec_get_offsets(rec, index, offsets_,
ULINT_UNDEFINED, &heap), thr);
if (UNIV_LIKELY_NULL(heap)) {
......@@ -1872,7 +1872,7 @@ btr_cur_optimistic_update(
explicit locks on rec, before deleting rec (see the comment in
.._pessimistic_update). */
lock_rec_store_on_page_infimum(page, rec);
lock_rec_store_on_page_infimum(block, rec);
btr_search_update_hash_on_delete(cursor);
......@@ -1905,7 +1905,7 @@ btr_cur_optimistic_update(
/* Restore the old explicit lock state on the record */
lock_rec_restore_from_page_infimum(rec, page);
lock_rec_restore_from_page_infimum(block, rec, block);
page_cur_move_to_next(page_cursor);
......@@ -1924,16 +1924,16 @@ static
void
btr_cur_pess_upd_restore_supremum(
/*==============================*/
rec_t* rec, /* in: updated record */
mtr_t* mtr) /* in: mtr */
buf_block_t* block, /* in: buffer block of rec */
const rec_t* rec, /* in: updated record */
mtr_t* mtr) /* in: mtr */
{
page_t* page;
buf_block_t* prev_block;
page_t* prev_page;
ulint space;
ulint prev_page_no;
page = page_align(rec);
page = buf_block_get_frame(block);
if (page_rec_get_next(page_get_infimum_rec(page)) != rec) {
/* Updated record is not the first user record on its page */
......@@ -1941,22 +1941,22 @@ btr_cur_pess_upd_restore_supremum(
return;
}
space = page_get_space_id(page);
space = buf_block_get_space(block);
prev_page_no = btr_page_get_prev(page, mtr);
ut_ad(prev_page_no != FIL_NULL);
prev_block = buf_page_get_with_no_latch(space, prev_page_no, mtr);
prev_page = buf_block_get_frame(prev_block);
#ifdef UNIV_BTR_DEBUG
ut_a(btr_page_get_next(prev_page, mtr)
ut_a(btr_page_get_next(prev_block->frame, mtr)
== page_get_page_no(page));
#endif /* UNIV_BTR_DEBUG */
/* We must already have an x-latch to prev_page! */
/* We must already have an x-latch on prev_block! */
ut_ad(mtr_memo_contains(mtr, prev_block, MTR_MEMO_PAGE_X_FIX));
lock_rec_reset_and_inherit_gap_locks(page_get_supremum_rec(prev_page),
rec);
lock_rec_reset_and_inherit_gap_locks(prev_block, block,
PAGE_HEAP_NO_SUPREMUM,
page_rec_get_heap_no(rec));
}
/*****************************************************************
......@@ -2117,8 +2117,6 @@ btr_cur_pessimistic_update(
big_rec_vec, &heap);
}
page_cursor = btr_cur_get_page_cur(cursor);
/* Store state of explicit locks on rec on the page infimum record,
before deleting rec. The page infimum acts as a dummy carrier of the
locks, taking care also of lock releases, before we can move the locks
......@@ -2128,13 +2126,15 @@ btr_cur_pessimistic_update(
delete the lock structs set on the root page even if the root
page carries just node pointers. */
lock_rec_store_on_page_infimum(page_align(rec), rec);
lock_rec_store_on_page_infimum(block, rec);
btr_search_update_hash_on_delete(cursor);
#ifdef UNIV_ZIP_DEBUG
ut_a(!page_zip || page_zip_validate(page_zip, page));
#endif /* UNIV_ZIP_DEBUG */
page_cursor = btr_cur_get_page_cur(cursor);
page_cur_delete_rec(page_cursor, index, offsets, mtr);
page_cur_move_to_prev(page_cursor);
......@@ -2144,7 +2144,8 @@ btr_cur_pessimistic_update(
ut_a(rec || optim_err != DB_UNDERFLOW);
if (rec) {
lock_rec_restore_from_page_infimum(rec, page);
lock_rec_restore_from_page_infimum(btr_cur_get_block(cursor),
rec, block);
offsets = rec_get_offsets(rec, index, offsets,
ULINT_UNDEFINED, &heap);
......@@ -2189,7 +2190,8 @@ btr_cur_pessimistic_update(
rec, index, offsets, mtr);
}
lock_rec_restore_from_page_infimum(rec, page);
lock_rec_restore_from_page_infimum(btr_cur_get_block(cursor),
rec, block);
/* If necessary, restore also the correct lock state for a new,
preceding supremum record created in a page split. While the old
......@@ -2197,7 +2199,8 @@ btr_cur_pessimistic_update(
from a wrong record. */
if (!was_first) {
btr_cur_pess_upd_restore_supremum(rec, mtr);
btr_cur_pess_upd_restore_supremum(btr_cur_get_block(cursor),
rec, mtr);
}
return_after_reservations:
......@@ -2388,6 +2391,7 @@ btr_cur_del_mark_set_clust_rec(
ut_ad(!rec_get_deleted_flag(rec, rec_offs_comp(offsets)));
err = lock_clust_rec_modify_check_and_lock(flags,
btr_cur_get_block(cursor),
rec, index, offsets, thr);
if (err != DB_SUCCESS) {
......@@ -2539,9 +2543,9 @@ btr_cur_del_mark_set_sec_rec(
}
#endif /* UNIV_DEBUG */
err = lock_sec_rec_modify_check_and_lock(flags, rec,
err = lock_sec_rec_modify_check_and_lock(flags,
btr_cur_get_block(cursor),
cursor->index, thr);
rec, cursor->index, thr);
if (err != DB_SUCCESS) {
return(err);
......@@ -2659,7 +2663,7 @@ btr_cur_optimistic_delete(
page_zip_des_t* page_zip= buf_block_get_page_zip(block);
#endif /* UNIV_ZIP_DEBUG */
lock_update_delete(rec);
lock_update_delete(block, rec);
btr_search_update_hash_on_delete(cursor);
......@@ -2790,7 +2794,7 @@ btr_cur_pessimistic_delete(
goto return_after_reservations;
}
lock_update_delete(rec);
lock_update_delete(block, rec);
level = btr_page_get_level(page, mtr);
if (level > 0
......
This diff is collapsed.
......@@ -59,7 +59,7 @@ lock_clust_rec_some_has_impl(
/*=========================*/
/* out: transaction which has the x-lock, or
NULL */
rec_t* rec, /* in: user record */
const rec_t* rec, /* in: user record */
dict_index_t* index, /* in: clustered index */
const ulint* offsets)/* in: rec_get_offsets(rec, index) */
{
......@@ -81,3 +81,29 @@ lock_clust_rec_some_has_impl(
return(NULL);
}
/*************************************************************************
Gets the heap_no of the smallest user record on a page. */
UNIV_INLINE
ulint
lock_get_min_heap_no(
/*=================*/
/* out: heap_no of smallest
user record, or
PAGE_HEAP_NO_SUPREMUM */
const buf_block_t* block) /* in: buffer block */
{
const page_t* page = block->frame;
if (page_is_comp(page)) {
return(rec_get_heap_no_new(
page
+ rec_get_next_offs(page + PAGE_NEW_INFIMUM,
TRUE)));
} else {
return(rec_get_heap_no_old(
page
+ rec_get_next_offs(page + PAGE_OLD_INFIMUM,
FALSE)));
}
}
......@@ -444,6 +444,14 @@ page_rec_is_comp(
/*=============*/
/* out: nonzero if in compact format */
const rec_t* rec); /* in: record */
/*******************************************************************
Returns the heap number of a record. */
UNIV_INLINE
ulint
page_rec_get_heap_no(
/*=================*/
/* out: heap number */
const rec_t* rec); /* in: the physical record */
/****************************************************************
Determine whether the page is a B-tree leaf. */
UNIV_INLINE
......
......@@ -221,6 +221,22 @@ page_rec_is_comp(
return(page_is_comp(page_align((rec_t*) rec)));
}
/*******************************************************************
Returns the heap number of a record. */
UNIV_INLINE
ulint
page_rec_get_heap_no(
/*=================*/
/* out: heap number */
const rec_t* rec) /* in: the physical record */
{
if (page_rec_is_comp(rec)) {
return(rec_get_heap_no_new(rec));
} else {
return(rec_get_heap_no_old(rec));
}
}
/****************************************************************
Determine whether the page is a B-tree leaf. */
UNIV_INLINE
......
This diff is collapsed.
......@@ -645,7 +645,7 @@ page_copy_rec_list_end(
/* Update the lock table, MAX_TRX_ID, and possible hash index */
lock_move_rec_list_end(new_page, page, rec);
lock_move_rec_list_end(new_block, block, rec);
page_update_max_trx_id(new_block, new_page_zip,
page_get_max_trx_id(page));
......@@ -753,7 +753,7 @@ page_copy_rec_list_start(
page_update_max_trx_id(new_block, new_page_zip,
page_get_max_trx_id(page_align(rec)));
lock_move_rec_list_start(new_page, page_align(rec), rec, ret);
lock_move_rec_list_start(new_block, block, rec, ret);
btr_search_move_or_delete_hash_entries(new_block, block, index);
......
......@@ -3558,7 +3558,7 @@ page_zip_reorganize(
return(FALSE);
}
lock_move_reorganize_page(page, temp_page);
lock_move_reorganize_page(block, temp_block);
btr_search_drop_page_hash_index(block);
buf_block_free(temp_block);
......
......@@ -969,7 +969,8 @@ row_ins_foreign_check_on_constraint(
gap if the search criterion was not unique */
err = lock_clust_rec_read_check_and_lock_alt(
0, clust_rec, clust_index, LOCK_X, LOCK_REC_NOT_GAP,
0, btr_pcur_get_block(pcur),
clust_rec, clust_index, LOCK_X, LOCK_REC_NOT_GAP,
thr);
}
......@@ -1128,13 +1129,14 @@ static
ulint
row_ins_set_shared_rec_lock(
/*========================*/
/* out: DB_SUCCESS or error code */
ulint type, /* in: LOCK_ORDINARY, LOCK_GAP, or
LOCK_REC_NOT_GAP type lock */
rec_t* rec, /* in: record */
dict_index_t* index, /* in: index */
const ulint* offsets,/* in: rec_get_offsets(rec, index) */
que_thr_t* thr) /* in: query thread */
/* out: DB_SUCCESS or error code */
ulint type, /* in: LOCK_ORDINARY, LOCK_GAP, or
LOCK_REC_NOT_GAP type lock */
const buf_block_t* block, /* in: buffer block of rec */
const rec_t* rec, /* in: record */
dict_index_t* index, /* in: index */
const ulint* offsets,/* in: rec_get_offsets(rec, index) */
que_thr_t* thr) /* in: query thread */
{
ulint err;
......@@ -1142,10 +1144,10 @@ row_ins_set_shared_rec_lock(
if (dict_index_is_clust(index)) {
err = lock_clust_rec_read_check_and_lock(
0, rec, index, offsets, LOCK_S, type, thr);
0, block, rec, index, offsets, LOCK_S, type, thr);
} else {
err = lock_sec_rec_read_check_and_lock(
0, rec, index, offsets, LOCK_S, type, thr);
0, block, rec, index, offsets, LOCK_S, type, thr);
}
return(err);
......@@ -1159,13 +1161,14 @@ static
ulint
row_ins_set_exclusive_rec_lock(
/*===========================*/
/* out: DB_SUCCESS or error code */
ulint type, /* in: LOCK_ORDINARY, LOCK_GAP, or
LOCK_REC_NOT_GAP type lock */
rec_t* rec, /* in: record */
dict_index_t* index, /* in: index */
const ulint* offsets,/* in: rec_get_offsets(rec, index) */
que_thr_t* thr) /* in: query thread */
/* out: DB_SUCCESS or error code */
ulint type, /* in: LOCK_ORDINARY, LOCK_GAP, or
LOCK_REC_NOT_GAP type lock */
const buf_block_t* block, /* in: buffer block of rec */
const rec_t* rec, /* in: record */
dict_index_t* index, /* in: index */
const ulint* offsets,/* in: rec_get_offsets(rec, index) */
que_thr_t* thr) /* in: query thread */
{
ulint err;
......@@ -1173,10 +1176,10 @@ row_ins_set_exclusive_rec_lock(
if (dict_index_is_clust(index)) {
err = lock_clust_rec_read_check_and_lock(
0, rec, index, offsets, LOCK_X, type, thr);
0, block, rec, index, offsets, LOCK_X, type, thr);
} else {
err = lock_sec_rec_read_check_and_lock(
0, rec, index, offsets, LOCK_X, type, thr);
0, block, rec, index, offsets, LOCK_X, type, thr);
}
return(err);
......@@ -1209,6 +1212,7 @@ row_ins_check_foreign_constraint(
dict_table_t* check_table;
dict_index_t* check_index;
ulint n_fields_cmp;
buf_block_t* block;
rec_t* rec;
btr_pcur_t pcur;
ibool moved;
......@@ -1338,6 +1342,7 @@ run_again:
btr_pcur_open(check_index, entry, PAGE_CUR_GE,
BTR_SEARCH_LEAF, &pcur, &mtr);
block = btr_pcur_get_block(&pcur);
/* Scan index records and check if there is a matching record */
......@@ -1354,8 +1359,9 @@ run_again:
if (page_rec_is_supremum(rec)) {
err = row_ins_set_shared_rec_lock(
LOCK_ORDINARY, rec, check_index, offsets, thr);
err = row_ins_set_shared_rec_lock(LOCK_ORDINARY, block,
rec, check_index,
offsets, thr);
if (err != DB_SUCCESS) {
break;
......@@ -1370,8 +1376,8 @@ run_again:
if (rec_get_deleted_flag(rec,
rec_offs_comp(offsets))) {
err = row_ins_set_shared_rec_lock(
LOCK_ORDINARY, rec, check_index,
offsets, thr);
LOCK_ORDINARY, block,
rec, check_index, offsets, thr);
if (err != DB_SUCCESS) {
break;
......@@ -1382,8 +1388,8 @@ run_again:
into gaps */
err = row_ins_set_shared_rec_lock(
LOCK_REC_NOT_GAP, rec, check_index,
offsets, thr);
LOCK_REC_NOT_GAP, block,
rec, check_index, offsets, thr);
if (err != DB_SUCCESS) {
......@@ -1434,7 +1440,8 @@ run_again:
if (cmp < 0) {
err = row_ins_set_shared_rec_lock(
LOCK_GAP, rec, check_index, offsets, thr);
LOCK_GAP, block,
rec, check_index, offsets, thr);
if (err != DB_SUCCESS) {
break;
......@@ -1589,10 +1596,10 @@ ibool
row_ins_dupl_error_with_rec(
/*========================*/
/* out: TRUE if error */
rec_t* rec, /* in: user record; NOTE that we assume
const rec_t* rec, /* in: user record; NOTE that we assume
that the caller already has a record lock on
the record! */
dtuple_t* entry, /* in: entry to insert */
const dtuple_t* entry, /* in: entry to insert */
dict_index_t* index, /* in: index */
const ulint* offsets)/* in: rec_get_offsets(rec, index) */
{
......@@ -1653,7 +1660,8 @@ row_ins_scan_sec_index_for_duplicate(
ulint i;
int cmp;
ulint n_fields_cmp;
rec_t* rec;
buf_block_t* block;
const rec_t* rec;
btr_pcur_t pcur;
ulint err = DB_SUCCESS;
ibool moved;
......@@ -1687,6 +1695,8 @@ row_ins_scan_sec_index_for_duplicate(
btr_pcur_open(index, entry, PAGE_CUR_GE, BTR_SEARCH_LEAF, &pcur, &mtr);
block = btr_pcur_get_block(&pcur);
/* Scan index records and check if there is a duplicate */
for (;;) {
......@@ -1708,11 +1718,13 @@ row_ins_scan_sec_index_for_duplicate(
INSERT ON DUPLICATE KEY UPDATE). */
err = row_ins_set_exclusive_rec_lock(
LOCK_ORDINARY, rec, index, offsets, thr);
LOCK_ORDINARY, block,
rec, index, offsets, thr);
} else {
err = row_ins_set_shared_rec_lock(
LOCK_ORDINARY, rec, index, offsets, thr);
LOCK_ORDINARY, block,
rec, index, offsets, thr);
}
if (err != DB_SUCCESS) {
......@@ -1837,12 +1849,14 @@ row_ins_duplicate_error_in_clust(
INSERT ON DUPLICATE KEY UPDATE). */
err = row_ins_set_exclusive_rec_lock(
LOCK_REC_NOT_GAP, rec,
cursor->index, offsets, thr);
LOCK_REC_NOT_GAP,
btr_cur_get_block(cursor),
rec, cursor->index, offsets, thr);
} else {
err = row_ins_set_shared_rec_lock(
LOCK_REC_NOT_GAP, rec,
LOCK_REC_NOT_GAP,
btr_cur_get_block(cursor), rec,
cursor->index, offsets, thr);
}
......@@ -1875,13 +1889,15 @@ row_ins_duplicate_error_in_clust(
INSERT ON DUPLICATE KEY UPDATE). */
err = row_ins_set_exclusive_rec_lock(
LOCK_REC_NOT_GAP, rec,
cursor->index, offsets, thr);
LOCK_REC_NOT_GAP,
btr_cur_get_block(cursor),
rec, cursor->index, offsets, thr);
} else {
err = row_ins_set_shared_rec_lock(
LOCK_REC_NOT_GAP, rec,
cursor->index, offsets, thr);
LOCK_REC_NOT_GAP,
btr_cur_get_block(cursor),
rec, cursor->index, offsets, thr);
}
if (err != DB_SUCCESS) {
......
......@@ -1488,7 +1488,8 @@ row_unlock_for_mysql(
rec = btr_pcur_get_rec(pcur);
lock_rec_unlock(trx, rec, prebuilt->select_lock_type);
lock_rec_unlock(trx, btr_pcur_get_block(pcur),
rec, prebuilt->select_lock_type);
mtr_commit(&mtr);
......@@ -1518,7 +1519,8 @@ row_unlock_for_mysql(
rec = btr_pcur_get_rec(clust_pcur);
lock_rec_unlock(trx, rec, prebuilt->select_lock_type);
lock_rec_unlock(trx, btr_pcur_get_block(clust_pcur),
rec, prebuilt->select_lock_type);
mtr_commit(&mtr);
}
......
......@@ -812,7 +812,8 @@ row_sel_get_clust_rec(
}
err = lock_clust_rec_read_check_and_lock(
0, clust_rec, index, offsets,
0, btr_pcur_get_block(&plan->clust_pcur),
clust_rec, index, offsets,
node->row_lock_mode, lock_type, thr);
if (err != DB_SUCCESS) {
......@@ -885,14 +886,15 @@ UNIV_INLINE
ulint
sel_set_rec_lock(
/*=============*/
/* out: DB_SUCCESS or error code */
rec_t* rec, /* in: record */
dict_index_t* index, /* in: index */
const ulint* offsets,/* in: rec_get_offsets(rec, index) */
ulint mode, /* in: lock mode */
ulint type, /* in: LOCK_ORDINARY, LOCK_GAP, or
LOC_REC_NOT_GAP */
que_thr_t* thr) /* in: query thread */
/* out: DB_SUCCESS or error code */
const buf_block_t* block, /* in: buffer block of rec */
const rec_t* rec, /* in: record */
dict_index_t* index, /* in: index */
const ulint* offsets,/* in: rec_get_offsets(rec, index) */
ulint mode, /* in: lock mode */
ulint type, /* in: LOCK_ORDINARY, LOCK_GAP, or
LOC_REC_NOT_GAP */
que_thr_t* thr) /* in: query thread */
{
trx_t* trx;
ulint err;
......@@ -908,10 +910,10 @@ sel_set_rec_lock(
if (dict_index_is_clust(index)) {
err = lock_clust_rec_read_check_and_lock(
0, rec, index, offsets, mode, type, thr);
0, block, rec, index, offsets, mode, type, thr);
} else {
err = lock_sec_rec_read_check_and_lock(
0, rec, index, offsets, mode, type, thr);
0, block, rec, index, offsets, mode, type, thr);
}
return(err);
......@@ -1165,7 +1167,7 @@ row_sel_try_search_shortcut(
ret = SEL_RETRY;
goto func_exit;
}
} else if (!lock_sec_rec_cons_read_sees(rec, index, node->read_view)) {
} else if (!lock_sec_rec_cons_read_sees(rec, node->read_view)) {
ret = SEL_RETRY;
goto func_exit;
......@@ -1429,7 +1431,8 @@ rec_loop:
lock_type = LOCK_ORDINARY;
}
err = sel_set_rec_lock(next_rec, index, offsets,
err = sel_set_rec_lock(btr_pcur_get_block(&plan->pcur),
next_rec, index, offsets,
node->row_lock_mode,
lock_type, thr);
......@@ -1485,7 +1488,8 @@ skip_lock:
lock_type = LOCK_ORDINARY;
}
err = sel_set_rec_lock(rec, index, offsets,
err = sel_set_rec_lock(btr_pcur_get_block(&plan->pcur),
rec, index, offsets,
node->row_lock_mode, lock_type, thr);
if (err != DB_SUCCESS) {
......@@ -1581,7 +1585,7 @@ skip_lock:
rec = old_vers;
}
} else if (!lock_sec_rec_cons_read_sees(rec, index,
} else if (!lock_sec_rec_cons_read_sees(rec,
node->read_view)) {
cons_read_requires_clust_rec = TRUE;
}
......@@ -2899,7 +2903,8 @@ row_sel_get_clust_rec_for_mysql(
we set a LOCK_REC_NOT_GAP type lock */
err = lock_clust_rec_read_check_and_lock(
0, clust_rec, clust_index, *offsets,
0, btr_pcur_get_block(prebuilt->clust_pcur),
clust_rec, clust_index, *offsets,
prebuilt->select_lock_type, LOCK_REC_NOT_GAP, thr);
if (err != DB_SUCCESS) {
......@@ -3749,7 +3754,8 @@ rec_loop:
offsets = rec_get_offsets(rec, index, offsets,
ULINT_UNDEFINED, &heap);
err = sel_set_rec_lock(rec, index, offsets,
err = sel_set_rec_lock(btr_pcur_get_block(pcur),
rec, index, offsets,
prebuilt->select_lock_type,
LOCK_ORDINARY, thr);
......@@ -3883,6 +3889,7 @@ wrong_offs:
using a READ COMMITTED isolation level. */
err = sel_set_rec_lock(
btr_pcur_get_block(pcur),
rec, index, offsets,
prebuilt->select_lock_type, LOCK_GAP,
thr);
......@@ -3918,6 +3925,7 @@ wrong_offs:
using a READ COMMITTED isolation level. */
err = sel_set_rec_lock(
btr_pcur_get_block(pcur),
rec, index, offsets,
prebuilt->select_lock_type, LOCK_GAP,
thr);
......@@ -3986,7 +3994,8 @@ no_gap_lock:
lock_type = LOCK_REC_NOT_GAP;
}
err = sel_set_rec_lock(rec, index, offsets,
err = sel_set_rec_lock(btr_pcur_get_block(pcur),
rec, index, offsets,
prebuilt->select_lock_type,
lock_type, thr);
......@@ -4093,8 +4102,7 @@ no_gap_lock:
rec = old_vers;
}
} else if (!lock_sec_rec_cons_read_sees(rec, index,
trx->read_view)) {
} else if (!lock_sec_rec_cons_read_sees(rec, trx->read_view)) {
/* We are looking into a non-clustered index,
and to get the right version of the record we
have to look also into the clustered index: this
......
......@@ -1764,7 +1764,8 @@ row_upd_clust_step(
if (!node->has_clust_rec_x_lock) {
err = lock_clust_rec_modify_check_and_lock(
0, rec, index, offsets, thr);
0, btr_pcur_get_block(pcur),
rec, index, offsets, thr);
if (err != DB_SUCCESS) {
mtr_commit(mtr);
goto exit_func;
......