Commit 10e01b56 authored by Marko Mäkelä

Fix USE_AFTER_FREE (CWE-416)

A static analysis tool suggested that in the function
row_merge_read_clustered_index(), ut_free(nonnull) could
be invoked twice for nonnull!=NULL. While a manual review
of the code disproved this, it should not hurt to clean up
the code so that the static analysis tool will not complain.
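
As an illustration of the idiom applied below, here is a minimal
standalone sketch (hypothetical function and sizes, not InnoDB code):
a pointer with two reachable free sites is exactly what such tools
flag, and resetting the pointer to NULL after the first free while
guarding the last one makes the control flow obviously safe:

    /* Hypothetical sketch, not code from this commit. */
    #include <cstdlib>

    void scan_sketch(bool early_exit)
    {
        void* nonnull = malloc(16);

        if (early_exit) {
            free(nonnull);
            nonnull = NULL; /* later free sites become no-ops */
        }

        /* ... more work on either path ... */

        if (nonnull != NULL) {
            free(nonnull); /* guarded: provably not a double free */
        }
    }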

index_tuple_info_t::insert(), row_mtuple_cmp(): Remove the
parameter mtr_committed, which duplicated !mtr->is_active().
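
As a hedged sketch of why the flag was redundant (hypothetical class
name; the real mtr_t already exposes commit() and is_active(), as the
diff below shows), an out-parameter that merely mirrors object state
can be dropped in favour of querying the object:

    /* Hypothetical stand-in for mtr_t; illustration only. */
    class mtr_sketch {
    public:
        void start()           { m_active = true; }
        void commit()          { m_active = false; }
        bool is_active() const { return m_active; }
    private:
        bool m_active = false;
    };

    /* Before: insert(..., mtr_t* scan_mtr, bool* mtr_committed)
       forced every caller to keep *mtr_committed in sync with the mtr.
       After: insert(..., mtr_t* scan_mtr), and callers simply test
       scan_mtr->is_active(). */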

row_merge_read_clustered_index(): Initialize row_heap = NULL.
Remove a duplicated call mem_heap_empty(row_heap) that was
inadvertently added in commit cb1e76e4.

Replace a "goto func_exit" with "break", to get consistent error
handling for both failures to create or write a temporary file.
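
A self-contained sketch of the resulting control flow (hypothetical
helper callbacks and error codes, not the real scan loop): with break,
every failure leaves the loop the same way and falls through to the
shared cleanup:

    /* Hypothetical sketch of the loop-exit convention. */
    enum err_sketch { ERR_OK, ERR_CREATE, ERR_WRITE };

    err_sketch merge_sketch(bool (*create_tmp)(), bool (*write_tmp)())
    {
        err_sketch err = ERR_OK;

        for (;;) {
            if (!create_tmp()) {
                err = ERR_CREATE;
                break;  /* was: goto func_exit; */
            }
            if (!write_tmp()) {
                err = ERR_WRITE;
                break;
            }
            break;  /* the real loop keeps scanning here */
        }

        /* shared cleanup runs for every exit from the loop */
        return err;
    }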

end_of_index: Assign row_heap=NULL and nonnull=NULL to prevent
double freeing.

func_exit: Check for row_heap!=NULL before invoking mem_heap_free().

Closes #959
parent 32eeed21
@@ -115,14 +115,12 @@ class index_tuple_info_t {
 	@param[in,out]	row_heap	memory heap
 	@param[in]	pcur		cluster index scanning cursor
 	@param[in,out]	scan_mtr	mini-transaction for pcur
-	@param[out]	mtr_committed	whether scan_mtr got committed
 	@return DB_SUCCESS if successful, else error number */
-	dberr_t insert(
+	inline dberr_t insert(
 		trx_id_t	trx_id,
 		mem_heap_t*	row_heap,
 		btr_pcur_t*	pcur,
-		mtr_t*		scan_mtr,
-		bool*		mtr_committed)
+		mtr_t*		scan_mtr)
 	{
 		big_rec_t*	big_rec;
 		rec_t*		rec;
@@ -150,11 +148,10 @@ class index_tuple_info_t {
 			ut_ad(dtuple);
 
 			if (log_sys->check_flush_or_checkpoint) {
-				if (!(*mtr_committed)) {
+				if (scan_mtr->is_active()) {
 					btr_pcur_move_to_prev_on_page(pcur);
 					btr_pcur_store_position(pcur, scan_mtr);
-					mtr_commit(scan_mtr);
-					*mtr_committed = true;
+					scan_mtr->commit();
 				}
 
 				log_free_check();
@@ -1589,7 +1586,6 @@ row_mtuple_cmp(
 @param[in,out]	sp_heap		heap for tuples
 @param[in,out]	pcur		cluster index cursor
 @param[in,out]	mtr		mini transaction
-@param[in,out]	mtr_committed	whether scan_mtr got committed
 @return DB_SUCCESS or error number */
 static
 dberr_t
@@ -1600,8 +1596,7 @@ row_merge_spatial_rows(
 	mem_heap_t*	row_heap,
 	mem_heap_t*	sp_heap,
 	btr_pcur_t*	pcur,
-	mtr_t*		mtr,
-	bool*		mtr_committed)
+	mtr_t*		mtr)
 {
 	dberr_t		err = DB_SUCCESS;
 
@@ -1612,9 +1607,7 @@ row_merge_spatial_rows(
 	ut_ad(sp_heap != NULL);
 
 	for (ulint j = 0; j < num_spatial; j++) {
-		err = sp_tuples[j]->insert(
-			trx_id, row_heap,
-			pcur, mtr, mtr_committed);
+		err = sp_tuples[j]->insert(trx_id, row_heap, pcur, mtr);
 
 		if (err != DB_SUCCESS) {
 			return(err);
@@ -1716,7 +1709,7 @@ row_merge_read_clustered_index(
 	struct TABLE*		eval_table)
 {
 	dict_index_t*		clust_index;	/* Clustered index */
-	mem_heap_t*		row_heap;	/* Heap memory to create
+	mem_heap_t*		row_heap = NULL;/* Heap memory to create
 						clustered index tuples */
 	row_merge_buf_t**	merge_buf;	/* Temporary list for records*/
 	mem_heap_t*		v_heap = NULL;	/* Heap memory to process large
@@ -1903,22 +1896,19 @@ row_merge_read_clustered_index(
 
 	/* Scan the clustered index. */
 	for (;;) {
-		const rec_t*	rec;
-		ulint*		offsets;
-		const dtuple_t*	row;
-		row_ext_t*	ext;
-		page_cur_t*	cur = btr_pcur_get_page_cur(&pcur);
-
-		mem_heap_empty(row_heap);
-
 		/* Do not continue if table pages are still encrypted */
-		if (!old_table->is_readable() ||
-		    !new_table->is_readable()) {
+		if (!old_table->is_readable() || !new_table->is_readable()) {
 			err = DB_DECRYPTION_FAILED;
 			trx->error_key_num = 0;
 			goto func_exit;
 		}
 
+		const rec_t*	rec;
+		ulint*		offsets;
+		const dtuple_t*	row;
+		row_ext_t*	ext;
+		page_cur_t*	cur = btr_pcur_get_page_cur(&pcur);
+
 		mem_heap_empty(row_heap);
 
 		page_cur_move_to_next(cur);
@@ -1953,18 +1943,15 @@ row_merge_read_clustered_index(
 
 				dbug_run_purge = true;);
 
 			/* Insert the cached spatial index rows. */
-			bool	mtr_committed = false;
-
 			err = row_merge_spatial_rows(
 				trx->id, sp_tuples, num_spatial,
-				row_heap, sp_heap, &pcur,
-				&mtr, &mtr_committed);
+				row_heap, sp_heap, &pcur, &mtr);
 
 			if (err != DB_SUCCESS) {
 				goto func_exit;
 			}
 
-			if (mtr_committed) {
+			if (!mtr.is_active()) {
 				goto scan_next;
 			}
@@ -2019,7 +2006,9 @@ row_merge_read_clustered_index(
 			row = NULL;
 			mtr_commit(&mtr);
 			mem_heap_free(row_heap);
+			row_heap = NULL;
 			ut_free(nonnull);
+			nonnull = NULL;
 			goto write_buffers;
 		}
 	} else {
@@ -2347,8 +2336,6 @@ row_merge_read_clustered_index(
 				/* Temporary File is not used.
 				so insert sorted block to the index */
 				if (row != NULL) {
-					bool	mtr_committed = false;
-
 					/* We have to do insert the
 					cached spatial index rows, since
 					after the mtr_commit, the cluster
@@ -2359,8 +2346,7 @@ row_merge_read_clustered_index(
 						trx->id, sp_tuples,
 						num_spatial,
 						row_heap, sp_heap,
-						&pcur, &mtr,
-						&mtr_committed);
+						&pcur, &mtr);
 
 					if (err != DB_SUCCESS) {
 						goto func_exit;
@@ -2375,13 +2361,13 @@ row_merge_read_clustered_index(
 					current row will be invalid, and
 					we must reread it on the next
 					loop iteration. */
-					if (!mtr_committed) {
+					if (mtr.is_active()) {
 						btr_pcur_move_to_prev_on_page(
 							&pcur);
 						btr_pcur_store_position(
 							&pcur, &mtr);
-						mtr_commit(&mtr);
+						mtr.commit();
 					}
 				}
@@ -2536,7 +2522,7 @@ row_merge_read_clustered_index(
 					buf->n_tuples, path) < 0) {
 					err = DB_OUT_OF_MEMORY;
 					trx->error_key_num = i;
-					goto func_exit;
+					break;
 				}
 
 				/* Ensure that duplicates in the
@@ -2614,12 +2600,12 @@ row_merge_read_clustered_index(
 	}
 
 func_exit:
-	mtr_commit(&mtr);
+	/* row_merge_spatial_rows may have committed
+	the mtr before an error occurs. */
+	if (mtr.is_active()) {
+		mtr_commit(&mtr);
+	}
 
-	mem_heap_free(row_heap);
+	if (row_heap) {
+		mem_heap_free(row_heap);
+	}
 	ut_free(nonnull);
 
 all_done: