Commit a369b009 authored by marko

branches/zip: btr_cur_pessimistic_update(): Add parameter heap.

Previously, when big_rec was returned, its fields would point to
freed memory: the memory heap was allocated locally and freed before
returning, the data tuple was allocated from that heap, and big_rec
pointed to some fields of the data tuple.

row_ins_clust_index_entry_by_modify(): Add parameter heap,
for the same reason.
parent acb39de8
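
A minimal caller-side sketch of the new contract, modeled on the
row_upd_clust_rec() and row_undo_mod_clust_low() call sites in the diff
below (the flags and update vector are placeholders from those call
sites, not a fixed API): the caller owns the heap, the callee creates it
lazily, and the heap is freed only after big_rec has been consumed.

	mem_heap_t*	heap = NULL;	/* created lazily by the callee */
	big_rec_t*	big_rec = NULL;

	err = btr_cur_pessimistic_update(BTR_NO_LOCKING_FLAG, btr_cur,
					 &heap, &big_rec, node->update,
					 node->cmpl_info, thr, mtr);

	if (err == DB_SUCCESS && big_rec) {
		/* big_rec points into the index entry allocated from
		*heap; the heap is still alive here, so the fields can
		be stored externally, e.g. with
		btr_store_big_rec_extern_fields(). */
	}

	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);	/* big_rec no longer referenced */
	}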
@@ -995,7 +995,7 @@ btr_cur_add_ext(
 /*============*/
 	const ulint*	ext,	/* in: numbers of externally stored fields
				so far */
-	ulint*		n_ext,	/* in: number of externally stored fields
+	ulint*		n_ext,	/* in/out: number of externally stored fields
				so far */
 	const big_rec_t*big_rec,/* in: additional externally stored fields */
 	mem_heap_t**	heap)	/* out: memory heap */
@@ -2070,6 +2070,7 @@ btr_cur_pessimistic_update(
 	ulint		flags,	/* in: undo logging, locking, and rollback
				flags */
 	btr_cur_t*	cursor,	/* in: cursor on the record to update */
+	mem_heap_t**	heap,	/* in/out: pointer to memory heap, or NULL */
 	big_rec_t**	big_rec,/* out: big rec vector whose fields have to
				be stored externally by the caller, or NULL */
 	upd_t*		update,	/* in: update vector; this is allowed also
@@ -2089,7 +2090,6 @@ btr_cur_pessimistic_update(
 	rec_t*		rec;
 	page_cur_t*	page_cursor;
 	dtuple_t*	new_entry;
-	mem_heap_t*	heap;
 	ulint		err;
 	ulint		optim_err;
 	dulint		roll_ptr;
@@ -2157,15 +2157,17 @@ btr_cur_pessimistic_update(
 		}
 	}

-	heap = mem_heap_create(1024);
-	offsets = rec_get_offsets(rec, index, NULL, ULINT_UNDEFINED, &heap);
+	if (!*heap) {
+		*heap = mem_heap_create(1024);
+	}
+
+	offsets = rec_get_offsets(rec, index, NULL, ULINT_UNDEFINED, heap);

 	trx = thr_get_trx(thr);

-	new_entry = row_rec_to_index_entry(ROW_COPY_DATA, index, rec, heap);
+	new_entry = row_rec_to_index_entry(ROW_COPY_DATA, index, rec, *heap);
 	row_upd_index_replace_new_col_vals_index_pos(new_entry, index, update,
-						     FALSE, heap);
+						     FALSE, *heap);
 	if (!(flags & BTR_KEEP_SYS_FLAG)) {
 		row_upd_index_entry_sys_field(new_entry, index, DATA_ROLL_PTR,
					      roll_ptr);
@@ -2181,7 +2183,7 @@ btr_cur_pessimistic_update(
 		updated the primary key to another value, and then
 		update it back again. */

-		ut_a(big_rec_vec == NULL);
+		ut_ad(big_rec_vec == NULL);

 		btr_rec_free_updated_extern_fields(index, rec, page_zip,
						   offsets, update, mtr);
@@ -2190,11 +2192,10 @@ btr_cur_pessimistic_update(
 	/* We have to set appropriate extern storage bits in the new
 	record to be inserted: we have to remember which fields were such */

-	ext_vect = mem_heap_alloc(heap, sizeof(ulint) * 2
+	ext_vect = mem_heap_alloc(*heap, sizeof(ulint) * 2
				  * dict_index_get_n_fields(index));
 	ut_ad(!page_is_comp(page) || !rec_get_node_ptr_flag(rec));
-	offsets = rec_get_offsets(rec, index, offsets,
-				  ULINT_UNDEFINED, &heap);
+	offsets = rec_get_offsets(rec, index, offsets, ULINT_UNDEFINED, heap);
 	n_ext_vect = btr_push_update_extern_fields(ext_vect, offsets, update);

 	if (page_zip_rec_needs_ext(rec_get_converted_size(index, new_entry,
@@ -2211,7 +2212,7 @@ btr_cur_pessimistic_update(
 		}

 		ext_vect = (ulint*) btr_cur_add_ext(ext_vect, &n_ext_vect,
-						    big_rec_vec, &heap);
+						    big_rec_vec, heap);
 	}

 	/* Store state of explicit locks on rec on the page infimum record,
@@ -2245,7 +2246,7 @@ btr_cur_pessimistic_update(
						   rec, block);

 		offsets = rec_get_offsets(rec, index, offsets,
-					  ULINT_UNDEFINED, &heap);
+					  ULINT_UNDEFINED, heap);

 		if (!rec_get_deleted_flag(rec, rec_offs_comp(offsets))) {
 			/* The new inserted record owns its possible externally
@@ -2282,7 +2283,7 @@ btr_cur_pessimistic_update(
			stored fields */

 			offsets = rec_get_offsets(rec, index, offsets,
-						  ULINT_UNDEFINED, &heap);
+						  ULINT_UNDEFINED, heap);
 			btr_cur_unmark_extern_fields(page_zip,
						     rec, index, offsets, mtr);
 		}
@@ -2304,7 +2305,6 @@ btr_cur_pessimistic_update(
 #ifdef UNIV_ZIP_DEBUG
 	ut_a(!page_zip || page_zip_validate(page_zip, page));
 #endif /* UNIV_ZIP_DEBUG */
-	mem_heap_free(heap);

 	if (n_extents > 0) {
 		fil_space_release_free_extents(index->space, n_reserved);
@@ -262,6 +262,7 @@ btr_cur_pessimistic_update(
 	ulint		flags,	/* in: undo logging, locking, and rollback
				flags */
 	btr_cur_t*	cursor,	/* in: cursor on the record to update */
+	mem_heap_t**	heap,	/* in/out: pointer to memory heap, or NULL */
 	big_rec_t**	big_rec,/* out: big rec vector whose fields have to
				be stored externally by the caller, or NULL */
 	upd_t*		update,	/* in: update vector; this is allowed also
@@ -275,8 +275,9 @@ row_ins_sec_index_entry_by_modify(
 		}

 		err = btr_cur_pessimistic_update(BTR_KEEP_SYS_FLAG, cursor,
-						 &dummy_big_rec, update,
+						 &heap, &dummy_big_rec, update,
						 0, thr, mtr);
+		ut_ad(!dummy_big_rec);
 	}
 func_exit:
 	mem_heap_free(heap);
@@ -297,6 +298,7 @@ row_ins_clust_index_entry_by_modify(
				depending on whether mtr holds just a leaf
				latch or also a tree latch */
 	btr_cur_t*	cursor,	/* in: B-tree cursor */
+	mem_heap_t**	heap,	/* in/out: pointer to memory heap, or NULL */
 	big_rec_t**	big_rec,/* out: possible big rec vector of fields
				which have to be stored externally by the
				caller */
@@ -307,7 +309,6 @@ row_ins_clust_index_entry_by_modify(
 	que_thr_t*	thr,	/* in: query thread */
 	mtr_t*		mtr)	/* in: mtr */
 {
-	mem_heap_t*	heap;
 	rec_t*		rec;
 	upd_t*		update;
 	ulint		err;
@@ -321,7 +322,9 @@ row_ins_clust_index_entry_by_modify(
 	ut_ad(rec_get_deleted_flag(rec,
				   dict_table_is_comp(cursor->index->table)));

-	heap = mem_heap_create(1024);
+	if (!*heap) {
+		*heap = mem_heap_create(1024);
+	}

 	/* Build an update vector containing all the fields to be modified;
 	NOTE that this vector may NOT contain system columns trx_id or
@@ -329,7 +332,7 @@ row_ins_clust_index_entry_by_modify(
 	update = row_upd_build_difference_binary(cursor->index, entry, ext_vec,
						 n_ext_vec, rec,
-						 thr_get_trx(thr), heap);
+						 thr_get_trx(thr), *heap);
 	if (mode == BTR_MODIFY_LEAF) {
 		/* Try optimistic updating of the record, keeping changes
 		within the page */
@@ -346,15 +349,13 @@ row_ins_clust_index_entry_by_modify(
 	ut_a(mode == BTR_MODIFY_TREE);
 	if (buf_LRU_buf_pool_running_out()) {

-		err = DB_LOCK_TABLE_FULL;
-
-		goto func_exit;
+		return(DB_LOCK_TABLE_FULL);
 	}

-	err = btr_cur_pessimistic_update(0, cursor, big_rec, update,
+	err = btr_cur_pessimistic_update(0, cursor,
+					 heap, big_rec, update,
					 0, thr, mtr);
 	}
-func_exit:
-	mem_heap_free(heap);

 	return(err);
 }
@@ -2009,9 +2010,6 @@ row_ins_index_entry_low(
 	big_rec_t*	big_rec		= NULL;
 	mtr_t		mtr;
 	mem_heap_t*	heap		= NULL;
-	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
-	ulint*		offsets		= offsets_;
-	*offsets_ = (sizeof offsets_) / sizeof *offsets_;

 	log_free_check();
@@ -2107,7 +2105,7 @@ row_ins_index_entry_low(
 		if (dict_index_is_clust(index)) {
 			err = row_ins_clust_index_entry_by_modify(
-				mode, &cursor, &big_rec, entry,
+				mode, &cursor, &heap, &big_rec, entry,
				ext_vec, n_ext_vec, thr, &mtr);
 		} else {
 			ut_ad(!n_ext_vec);
@@ -2137,13 +2135,14 @@ row_ins_index_entry_low(
 	mtr_commit(&mtr);

 	if (UNIV_LIKELY_NULL(big_rec)) {
-		rec_t*	rec;
+		rec_t*	rec;
+		ulint*	offsets;
 		mtr_start(&mtr);

 		btr_cur_search_to_nth_level(index, 0, entry, PAGE_CUR_LE,
					    BTR_MODIFY_TREE, &cursor, 0, &mtr);
 		rec = btr_cur_get_rec(&cursor);
-		offsets = rec_get_offsets(rec, index, offsets,
+		offsets = rec_get_offsets(rec, index, NULL,
					  ULINT_UNDEFINED, &heap);

 		err = btr_store_big_rec_extern_fields(
@@ -85,7 +85,6 @@ row_undo_mod_clust_low(
 	mtr_t*	mtr,	/* in: mtr */
 	ulint	mode)	/* in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE */
 {
-	big_rec_t*	dummy_big_rec;
 	btr_pcur_t*	pcur;
 	btr_cur_t*	btr_cur;
 	ulint		err;
@@ -106,14 +105,22 @@ row_undo_mod_clust_low(
						btr_cur, node->update,
						node->cmpl_info, thr, mtr);
 	} else {
+		mem_heap_t*	heap		= NULL;
+		big_rec_t*	dummy_big_rec;
+
 		ut_ad(mode == BTR_MODIFY_TREE);
 		err = btr_cur_pessimistic_update(
			BTR_NO_LOCKING_FLAG
			| BTR_NO_UNDO_LOG_FLAG
			| BTR_KEEP_SYS_FLAG,
-			btr_cur, &dummy_big_rec, node->update,
+			btr_cur, &heap, &dummy_big_rec, node->update,
			node->cmpl_info, thr, mtr);
+		ut_a(!dummy_big_rec);
+
+		if (UNIV_LIKELY_NULL(heap)) {
+			mem_heap_free(heap);
+		}
 	}

 	return(err);
@@ -467,8 +474,9 @@ row_undo_mod_del_unmark_sec_and_undo_update(
 		ut_a(mode == BTR_MODIFY_TREE);
 		err = btr_cur_pessimistic_update(
			BTR_KEEP_SYS_FLAG | BTR_NO_LOCKING_FLAG,
-			btr_cur, &dummy_big_rec,
+			btr_cur, &heap, &dummy_big_rec,
			update, 0, thr, &mtr);
+		ut_a(!dummy_big_rec);
 	}

 	mem_heap_free(heap);
@@ -1537,6 +1537,7 @@ row_upd_clust_rec(
 	que_thr_t*	thr,	/* in: query thread */
 	mtr_t*		mtr)	/* in: mtr; gets committed here */
 {
+	mem_heap_t*	heap	= NULL;
 	big_rec_t*	big_rec	= NULL;
 	btr_pcur_t*	pcur;
 	btr_cur_t*	btr_cur;
@@ -1593,12 +1594,11 @@ row_upd_clust_rec(
				    dict_table_is_comp(index->table)));

 	err = btr_cur_pessimistic_update(BTR_NO_LOCKING_FLAG, btr_cur,
-					 &big_rec, node->update,
+					 &heap, &big_rec, node->update,
					 node->cmpl_info, thr, mtr);
 	mtr_commit(mtr);

 	if (err == DB_SUCCESS && big_rec) {
-		mem_heap_t*	heap	= NULL;
 		ulint		offsets_[REC_OFFS_NORMAL_SIZE];
 		rec_t*		rec;
 		*offsets_ = (sizeof offsets_) / sizeof *offsets_;
@@ -1612,12 +1612,13 @@ row_upd_clust_rec(
			rec_get_offsets(rec, index, offsets_,
					ULINT_UNDEFINED, &heap),
			big_rec, mtr);
-		if (UNIV_LIKELY_NULL(heap)) {
-			mem_heap_free(heap);
-		}
 		mtr_commit(mtr);
 	}

+	if (UNIV_LIKELY_NULL(heap)) {
+		mem_heap_free(heap);
+	}
+
 	if (big_rec) {
 		dtuple_big_rec_free(big_rec);
 	}