Commit 07cba6e4 authored by unknown

Allow UNIV_SYNC_DEBUG to be disabled while UNIV_DEBUG is enabled

parent 34bb1abf
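The patch applies one mechanical pattern throughout the tree: assertions and calls that exist only in sync-debug builds — buf_page_dbg_add_level(), mutex_own(), rw_lock_own() and the rw_lock_debug_* machinery — are wrapped in #ifdef UNIV_SYNC_DEBUG ... #endif, and compound ut_ad() checks are split into one assertion per check. A minimal before/after sketch (illustrative only, not part of the patch; it uses the InnoDB debug helpers ut_ad(), mutex_own() and rw_lock_own() exactly as they appear in the hunks below):

/* Before: a build with UNIV_DEBUG but without UNIV_SYNC_DEBUG breaks,
   because mutex_own() and rw_lock_own() are compiled only when
   UNIV_SYNC_DEBUG is defined. */
ut_ad(mutex_own(&(dict_sys->mutex)));
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED)
      && !rw_lock_own(&btr_search_latch, RW_LOCK_EX));

/* After: the sync-order checks compile only under UNIV_SYNC_DEBUG, and
   the compound assertion is split so a failure reports the exact check. */
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */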
......@@ -299,7 +299,9 @@ btr_page_alloc_for_ibuf(
new_page = buf_page_get(dict_tree_get_space(tree), node_addr.page,
RW_X_LATCH, mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(new_page, SYNC_TREE_NODE_NEW);
#endif /* UNIV_SYNC_DEBUG */
flst_remove(root + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
new_page + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE,
......@@ -357,7 +359,9 @@ btr_page_alloc(
new_page = buf_page_get(dict_tree_get_space(tree), new_page_no,
RW_X_LATCH, mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(new_page, SYNC_TREE_NODE_NEW);
#endif /* UNIV_SYNC_DEBUG */
return(new_page);
}
......@@ -663,8 +667,9 @@ btr_create(
ibuf_hdr_frame = fseg_create(space, 0,
IBUF_HEADER + IBUF_TREE_SEG_HEADER, mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(ibuf_hdr_frame, SYNC_TREE_NODE_NEW);
#endif /* UNIV_SYNC_DEBUG */
ut_ad(buf_frame_get_page_no(ibuf_hdr_frame)
== IBUF_HEADER_PAGE_NO);
/* Allocate then the next page to the segment: it will be the
......@@ -689,7 +694,9 @@ btr_create(
page_no = buf_frame_get_page_no(frame);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(frame, SYNC_TREE_NODE_NEW);
#endif /* UNIV_SYNC_DEBUG */
if (type & DICT_IBUF) {
/* It is an insert buffer tree: initialize the free list */
......@@ -704,7 +711,9 @@ btr_create(
mtr);
/* The fseg create acquires a second latch on the page,
therefore we must declare it: */
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(frame, SYNC_TREE_NODE_NEW);
#endif /* UNIV_SYNC_DEBUG */
}
/* Create a new index page on the allocated segment page */
......@@ -1517,7 +1526,9 @@ btr_page_split_and_insert(
ut_ad(mtr_memo_contains(mtr, dict_tree_get_lock(tree),
MTR_MEMO_X_LOCK));
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(dict_tree_get_lock(tree), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
page = btr_cur_get_page(cursor);
......
......@@ -3146,8 +3146,10 @@ btr_store_big_rec_extern_fields(
prev_page_no,
RW_X_LATCH, &mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(prev_page,
SYNC_EXTERN_STORAGE);
#endif /* UNIV_SYNC_DEBUG */
mlog_write_ulint(prev_page + FIL_PAGE_DATA
+ BTR_BLOB_HDR_NEXT_PAGE_NO,
......@@ -3182,9 +3184,9 @@ btr_store_big_rec_extern_fields(
rec_page = buf_page_get(space_id,
buf_frame_get_page_no(data),
RW_X_LATCH, &mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(rec_page, SYNC_NO_ORDER_CHECK);
#endif /* UNIV_SYNC_DEBUG */
mlog_write_ulint(data + local_len + BTR_EXTERN_LEN, 0,
MLOG_4BYTES, &mtr);
mlog_write_ulint(data + local_len + BTR_EXTERN_LEN + 4,
......@@ -3276,9 +3278,9 @@ btr_free_externally_stored_field(
rec_page = buf_page_get(buf_frame_get_space_id(data),
buf_frame_get_page_no(data), RW_X_LATCH, &mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(rec_page, SYNC_NO_ORDER_CHECK);
#endif /* UNIV_SYNC_DEBUG */
space_id = mach_read_from_4(data + local_len
+ BTR_EXTERN_SPACE_ID);
......@@ -3321,9 +3323,9 @@ btr_free_externally_stored_field(
}
page = buf_page_get(space_id, page_no, RW_X_LATCH, &mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(page, SYNC_EXTERN_STORAGE);
#endif /* UNIV_SYNC_DEBUG */
next_page_no = mach_read_from_4(page + FIL_PAGE_DATA
+ BTR_BLOB_HDR_NEXT_PAGE_NO);
......@@ -3501,9 +3503,9 @@ btr_copy_externally_stored_field(
mtr_start(&mtr);
page = buf_page_get(space_id, page_no, RW_S_LATCH, &mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(page, SYNC_EXTERN_STORAGE);
#endif /* UNIV_SYNC_DEBUG */
blob_header = page + offset;
part_len = btr_blob_get_part_len(blob_header);
......
......@@ -227,9 +227,9 @@ btr_pcur_restore_position(
if (buf_page_optimistic_get(latch_mode, page,
cursor->modify_clock, mtr)) {
cursor->pos_state = BTR_PCUR_IS_POSITIONED;
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(page, SYNC_TREE_NODE);
#endif /* UNIV_SYNC_DEBUG */
if (cursor->rel_pos == BTR_PCUR_ON) {
cursor->latch_mode = latch_mode;
......
......@@ -93,8 +93,10 @@ btr_search_check_free_space_in_heap(void)
hash_table_t* table;
mem_heap_t* heap;
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED)
&& !rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#ifdef UNIV_SYNC_DEBUG
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
table = btr_search_sys->hash_index;
......@@ -194,8 +196,10 @@ btr_search_info_update_hash(
ulint n_unique;
int cmp;
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED)
&& !rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#ifdef UNIV_SYNC_DEBUG
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
index = cursor->index;
......@@ -317,10 +321,12 @@ btr_search_update_block_hash_info(
buf_block_t* block, /* in: buffer block */
btr_cur_t* cursor) /* in: cursor */
{
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED)
&& !rw_lock_own(&btr_search_latch, RW_LOCK_EX));
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
|| rw_lock_own(&(block->lock), RW_LOCK_EX));
#ifdef UNIV_SYNC_DEBUG
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
ut_ad(rw_lock_own(&((buf_block_t*) block)->lock, RW_LOCK_SHARED)
|| rw_lock_own(&((buf_block_t*) block)->lock, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(cursor);
info->last_hash_succ = FALSE;
......@@ -398,9 +404,11 @@ btr_search_update_hash_ref(
dulint tree_id;
ut_ad(cursor->flag == BTR_CUR_HASH_FAIL);
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
|| rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
if (block->is_hashed
&& (info->n_hash_potential > 0)
&& (block->curr_n_fields == info->n_fields)
......@@ -419,7 +427,9 @@ btr_search_update_hash_ref(
fold = rec_fold(rec, block->curr_n_fields,
block->curr_n_bytes, tree_id);
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
ha_insert_for_fold(btr_search_sys->hash_index, fold, rec);
}
......@@ -439,8 +449,10 @@ btr_search_info_update_slow(
ulint* params;
ulint* params2;
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED)
&& !rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#ifdef UNIV_SYNC_DEBUG
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
block = buf_block_align(btr_cur_get_rec(cursor));
......@@ -762,7 +774,9 @@ btr_search_guess_on_hash(
can_only_compare_to_cursor_rec = FALSE;
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(page, SYNC_TREE_NODE_FROM_HASH);
#endif /* UNIV_SYNC_DEBUG */
}
block = buf_block_align(page);
......@@ -910,10 +924,12 @@ btr_search_drop_page_hash_index(
ulint n_recs;
ulint* folds;
ulint i;
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED)
&& !rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#ifdef UNIV_SYNC_DEBUG
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
rw_lock_s_lock(&btr_search_latch);
block = buf_block_align(page);
......@@ -927,9 +943,11 @@ btr_search_drop_page_hash_index(
table = btr_search_sys->hash_index;
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
|| rw_lock_own(&(block->lock), RW_LOCK_EX)
|| (block->buf_fix_count == 0));
#endif /* UNIV_SYNC_DEBUG */
n_fields = block->curr_n_fields;
n_bytes = block->curr_n_bytes;
......@@ -1029,8 +1047,10 @@ btr_search_drop_page_hash_when_freed(
page = buf_page_get(space, page_no, RW_S_LATCH, &mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(page, SYNC_TREE_NODE_FROM_HASH);
#endif /* UNIV_SYNC_DEBUG */
btr_search_drop_page_hash_index(page);
mtr_commit(&mtr);
......@@ -1070,9 +1090,11 @@ btr_search_build_page_hash_index(
block = buf_block_align(page);
table = btr_search_sys->hash_index;
#ifdef UNIV_SYNC_DEBUG
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
|| rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
rw_lock_s_lock(&btr_search_latch);
......@@ -1235,8 +1257,10 @@ btr_search_move_or_delete_hash_entries(
block = buf_block_align(page);
new_block = buf_block_align(new_page);
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX)
&& rw_lock_own(&(new_block->lock), RW_LOCK_EX));
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
ut_ad(rw_lock_own(&(new_block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
rw_lock_s_lock(&btr_search_latch);
......@@ -1296,7 +1320,9 @@ btr_search_update_hash_on_delete(
block = buf_block_align(rec);
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
if (!block->is_hashed) {
......@@ -1337,7 +1363,9 @@ btr_search_update_hash_node_on_insert(
block = buf_block_align(rec);
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
if (!block->is_hashed) {
......@@ -1398,7 +1426,9 @@ btr_search_update_hash_on_insert(
block = buf_block_align(rec);
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
if (!block->is_hashed) {
......
......@@ -446,9 +446,10 @@ buf_block_init(
rw_lock_create(&(block->read_lock));
rw_lock_set_level(&(block->read_lock), SYNC_NO_ORDER_CHECK);
#ifdef UNIV_SYNC_DEBUG
rw_lock_create(&(block->debug_latch));
rw_lock_set_level(&(block->debug_latch), SYNC_NO_ORDER_CHECK);
#endif /* UNIV_SYNC_DEBUG */
}
/************************************************************************
......@@ -1088,9 +1089,9 @@ buf_page_optimistic_get_func(
}
if (!UT_DULINT_EQ(modify_clock, block->modify_clock)) {
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(block->frame, SYNC_NO_ORDER_CHECK);
#endif /* UNIV_SYNC_DEBUG */
if (rw_latch == RW_S_LATCH) {
rw_lock_s_unlock(&(block->lock));
} else {
......@@ -1285,7 +1286,9 @@ buf_page_init(
in units of a page */
buf_block_t* block) /* in: block to init */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(block->state == BUF_BLOCK_READY_FOR_USE);
/* Set the state of the block */
......
......@@ -47,7 +47,9 @@ buf_flush_insert_into_flush_list(
/*=============================*/
buf_block_t* block) /* in: block which is modified */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad((UT_LIST_GET_FIRST(buf_pool->flush_list) == NULL)
|| (ut_dulint_cmp(
......@@ -73,7 +75,9 @@ buf_flush_insert_sorted_into_flush_list(
buf_block_t* prev_b;
buf_block_t* b;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
#endif /* UNIV_SYNC_DEBUG */
prev_b = NULL;
b = UT_LIST_GET_FIRST(buf_pool->flush_list);
......@@ -105,7 +109,9 @@ buf_flush_ready_for_replace(
buf_block_t* block) /* in: buffer control block, must be in state
BUF_BLOCK_FILE_PAGE and in the LRU list*/
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_a(block->state == BUF_BLOCK_FILE_PAGE);
if ((ut_dulint_cmp(block->oldest_modification, ut_dulint_zero) > 0)
......@@ -129,7 +135,9 @@ buf_flush_ready_for_flush(
BUF_BLOCK_FILE_PAGE */
ulint flush_type)/* in: BUF_FLUSH_LRU or BUF_FLUSH_LIST */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(block->state == BUF_BLOCK_FILE_PAGE);
if ((ut_dulint_cmp(block->oldest_modification, ut_dulint_zero) > 0)
......@@ -161,8 +169,9 @@ buf_flush_write_complete(
buf_block_t* block) /* in: pointer to the block in question */
{
ut_ad(block);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
#endif /* UNIV_SYNC_DEBUG */
block->oldest_modification = ut_dulint_zero;
UT_LIST_REMOVE(flush_list, buf_pool->flush_list, block);
......
......@@ -370,7 +370,9 @@ buf_LRU_old_adjust_len(void)
ulint new_len;
ut_ad(buf_pool->LRU_old);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(3 * (BUF_LRU_OLD_MIN_LEN / 8) > BUF_LRU_OLD_TOLERANCE + 5);
for (;;) {
......@@ -440,7 +442,9 @@ buf_LRU_remove_block(
{
ut_ad(buf_pool);
ut_ad(block);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
#endif /* UNIV_SYNC_DEBUG */
/* If the LRU_old pointer is defined and points to just this block,
move it backward one step */
......@@ -493,7 +497,9 @@ buf_LRU_add_block_to_end_low(
ut_ad(buf_pool);
ut_ad(block);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
#endif /* UNIV_SYNC_DEBUG */
block->old = TRUE;
......@@ -545,7 +551,9 @@ buf_LRU_add_block_low(
ut_ad(buf_pool);
ut_ad(block);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
#endif /* UNIV_SYNC_DEBUG */
block->old = old;
cl = buf_pool_clock_tic();
......@@ -632,7 +640,9 @@ buf_LRU_block_free_non_file_page(
/*=============================*/
buf_block_t* block) /* in: block, must not contain a file page */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(block);
ut_ad((block->state == BUF_BLOCK_MEMORY)
......@@ -658,7 +668,9 @@ buf_LRU_block_remove_hashed_page(
be in a state where it can be freed; there
may or may not be a hash index to the page */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(block);
ut_ad(block->state == BUF_BLOCK_FILE_PAGE);
......@@ -672,7 +684,7 @@ buf_LRU_block_remove_hashed_page(
buf_pool->freed_page_clock += 1;
buf_frame_modify_clock_inc(block->frame);
HASH_DELETE(buf_block_t, hash, buf_pool->page_hash,
buf_page_address_fold(block->space, block->offset),
block);
......@@ -689,7 +701,9 @@ buf_LRU_block_free_hashed_page(
buf_block_t* block) /* in: block, must contain a file page and
be in a state where it can be freed */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(block->state == BUF_BLOCK_REMOVE_HASH);
block->state = BUF_BLOCK_MEMORY;
......
......@@ -39,8 +39,9 @@ dict_hdr_get(
header = DICT_HDR + buf_page_get(DICT_HDR_SPACE, DICT_HDR_PAGE_NO,
RW_X_LATCH, mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(header, SYNC_DICT_HEADER);
#endif /* UNIV_SYNC_DEBUG */
return(header);
}
......@@ -94,7 +95,9 @@ dict_hdr_flush_row_id(void)
dulint id;
mtr_t mtr;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
id = dict_sys->row_id;
......
......@@ -270,9 +270,10 @@ dict_build_table_def_step(
dict_table_t* cluster_table;
dtuple_t* row;
UT_NOT_USED(thr);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
table = node->table;
table->id = dict_hdr_get_new_id(DICT_HDR_TABLE_ID);
......@@ -341,7 +342,9 @@ dict_create_sys_indexes_tuple(
byte* ptr;
UT_NOT_USED(trx);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(index && heap);
sys_indexes = dict_sys->sys_indexes;
......@@ -534,8 +537,9 @@ dict_build_index_def_step(
dict_index_t* index;
dtuple_t* row;
UT_NOT_USED(thr);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
index = node->index;
......@@ -607,8 +611,10 @@ dict_create_index_tree_step(
dtuple_t* search_tuple;
btr_pcur_t pcur;
mtr_t mtr;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
UT_NOT_USED(thr);
index = node->index;
......@@ -670,7 +676,9 @@ dict_drop_index_tree(
byte* ptr;
ulint len;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ptr = rec_get_nth_field(rec, DICT_SYS_INDEXES_PAGE_NO_FIELD, &len);
......@@ -791,8 +799,10 @@ dict_create_table_step(
trx_t* trx;
ut_ad(thr);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
trx = thr_get_trx(thr);
node = thr->run_node;
......@@ -899,7 +909,9 @@ dict_create_index_step(
trx_t* trx;
ut_ad(thr);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
trx = thr_get_trx(thr);
......@@ -1160,7 +1172,9 @@ dict_create_add_foreigns_to_dictionary(
ulint i;
char buf[10000];
ut_ad(mutex_own(&(dict_sys->mutex)));
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
if (NULL == dict_table_get_low((char *) "SYS_FOREIGN")) {
fprintf(stderr,
......@@ -1221,7 +1235,7 @@ dict_create_add_foreigns_to_dictionary(
foreign->referenced_col_names[i]);
}
ut_a(len < (sizeof buf) - 19)
ut_a(len < (sizeof buf) - 19);
len += sprintf(buf + len,"COMMIT WORK;\nEND;\n");
graph = pars_sql(buf);
......
......@@ -615,7 +615,9 @@ dict_table_get_on_id(
if we are doing a rollback to handle an error in TABLE
CREATE, for example, we already have the mutex! */
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
return(dict_table_get_on_id_low(table_id, trx));
}
......@@ -761,7 +763,9 @@ dict_table_add_to_cache(
ulint i;
ut_ad(table);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(table->n_def == table->n_cols - DATA_N_SYS_COLS);
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
ut_ad(table->cached == FALSE);
......@@ -896,8 +900,10 @@ dict_table_rename_in_cache(
ulint i;
ut_ad(table);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
old_size = mem_heap_get_size(table->heap);
fold = ut_fold_string(new_name);
......@@ -1095,7 +1101,9 @@ dict_table_remove_from_cache(
ulint i;
ut_ad(table);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
/* printf("Removing table %s from dictionary cache\n", table->name); */
......@@ -1166,7 +1174,9 @@ dict_table_LRU_trim(void)
ut_a(0);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
table = UT_LIST_GET_LAST(dict_sys->table_LRU);
......@@ -1195,7 +1205,9 @@ dict_col_add_to_cache(
ulint fold;
ut_ad(table && col);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
fold = ut_fold_ulint_pair(ut_fold_string(table->name),
......@@ -1226,7 +1238,9 @@ dict_col_remove_from_cache(
ulint fold;
ut_ad(table && col);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
fold = ut_fold_ulint_pair(ut_fold_string(table->name),
......@@ -1249,7 +1263,9 @@ dict_col_reposition_in_cache(
ulint fold;
ut_ad(table && col);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
fold = ut_fold_ulint_pair(ut_fold_string(table->name),
......@@ -1283,7 +1299,9 @@ dict_index_add_to_cache(
ulint i;
ut_ad(index);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(index->n_def == index->n_fields);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
......@@ -1409,7 +1427,9 @@ dict_index_remove_from_cache(
ut_ad(table && index);
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(UT_LIST_GET_LEN((index->tree)->tree_indexes) == 1);
dict_tree_free(index->tree);
......@@ -1453,7 +1473,9 @@ dict_index_find_cols(
ut_ad(table && index);
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
for (i = 0; i < index->n_fields; i++) {
field = dict_index_get_nth_field(index, i);
......@@ -1594,7 +1616,9 @@ dict_index_build_internal_clust(
ut_ad(table && index);
ut_ad(index->type & DICT_CLUSTERED);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
/* Create a new index object with certainly enough fields */
......@@ -1763,7 +1787,9 @@ dict_index_build_internal_non_clust(
ut_ad(table && index);
ut_ad(0 == (index->type & DICT_CLUSTERED));
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
/* The clustered index should be the first in the list of indexes */
......@@ -1882,7 +1908,9 @@ dict_foreign_remove_from_cache(
/*===========================*/
dict_foreign_t* foreign) /* in, own: foreign constraint */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_a(foreign);
if (foreign->referenced_table) {
......@@ -1911,7 +1939,9 @@ dict_foreign_find(
{
dict_foreign_t* foreign;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
foreign = UT_LIST_GET_FIRST(table->foreign_list);
......@@ -2020,7 +2050,9 @@ dict_foreign_add_to_cache(
ibool added_to_referenced_list = FALSE;
char* buf = dict_foreign_err_buf;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
for_table = dict_table_check_if_in_cache_low(
foreign->foreign_table_name);
......@@ -2691,7 +2723,9 @@ dict_create_foreign_constraints_low(
ulint column_name_lens[500];
char referenced_table_name[2500];
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
table = dict_table_get_low(name);
......@@ -3271,7 +3305,9 @@ dict_foreign_parse_drop_constraints(
str = dict_strip_comments(*(trx->mysql_query_str));
ptr = str;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
loop:
ptr = dict_scan_to(ptr, (char *) "DROP");
......@@ -3411,7 +3447,9 @@ dict_procedure_reserve_parsed_copy(
que_t* graph;
proc_node_t* proc_node;
#ifdef UNIV_SYNC_DEBUG
ut_ad(!mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
mutex_enter(&(dict_sys->mutex));
......@@ -3459,7 +3497,9 @@ dict_procedure_release_parsed_copy(
{
proc_node_t* proc_node;
#ifdef UNIV_SYNC_DEBUG
ut_ad(!mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
mutex_enter(&(dict_sys->mutex));
......@@ -3936,7 +3976,9 @@ dict_foreign_print_low(
{
ulint i;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
printf(" FOREIGN KEY CONSTRAINT %s: %s (", foreign->id,
foreign->foreign_table_name);
......@@ -4001,7 +4043,9 @@ dict_table_print_low(
dict_foreign_t* foreign;
ulint i;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
dict_update_statistics_low(table, TRUE);
......@@ -4054,7 +4098,9 @@ dict_col_print_low(
{
dtype_t* type;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
type = dict_col_get_type(col);
printf("%s: ", col->name);
......@@ -4074,7 +4120,9 @@ dict_index_print_low(
ib_longlong n_vals;
ulint i;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
tree = index->tree;
......@@ -4120,7 +4168,9 @@ dict_field_print_low(
/*=================*/
dict_field_t* field) /* in: field */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
printf(" %s", field->name);
......
......@@ -42,7 +42,9 @@ dict_get_first_table_name_in_db(
char* table_name;
mtr_t mtr;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
heap = mem_heap_create(1000);
......@@ -212,7 +214,9 @@ dict_load_columns(
ulint i;
mtr_t mtr;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
mtr_start(&mtr);
......@@ -310,7 +314,9 @@ dict_load_fields(
ulint i;
mtr_t mtr;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
UT_NOT_USED(table);
......@@ -422,7 +428,9 @@ dict_load_indexes(
dulint id;
mtr_t mtr;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
if ((ut_dulint_get_high(table->id) == 0)
&& (ut_dulint_get_low(table->id) < DICT_HDR_FIRST_ID)) {
......@@ -591,7 +599,9 @@ dict_load_table(
ulint err;
mtr_t mtr;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
heap = mem_heap_create(1000);
......@@ -744,7 +754,9 @@ dict_load_table_on_id(
char* name;
mtr_t mtr;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
/* NOTE that the operation of this function is protected by
the dictionary mutex, and therefore no deadlocks can occur
......@@ -829,7 +841,9 @@ dict_load_sys_table(
{
mem_heap_t* heap;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
heap = mem_heap_create(1000);
......@@ -860,7 +874,9 @@ dict_load_foreign_cols(
ulint i;
mtr_t mtr;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
foreign->foreign_col_names = mem_heap_alloc(foreign->heap,
foreign->n_fields * sizeof(void*));
......@@ -941,7 +957,9 @@ dict_load_foreign(
ulint err;
mtr_t mtr;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
heap2 = mem_heap_create(1000);
......@@ -1073,7 +1091,9 @@ dict_load_foreigns(
ulint err;
mtr_t mtr;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
sys_foreign = dict_table_get_low((char *) "SYS_FOREIGN");
......
......@@ -332,7 +332,9 @@ fil_node_close(
ibool ret;
ut_ad(node && system);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(system->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_a(node->open);
ut_a(node->n_pending == 0);
......@@ -356,7 +358,9 @@ fil_node_free(
fil_space_t* space) /* in: space where the file node is chained */
{
ut_ad(node && system && space);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(system->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_a(node->magic_n == FIL_NODE_MAGIC_N);
if (node->open) {
......@@ -875,7 +879,9 @@ fil_node_prepare_for_io(
fil_node_t* last_node;
ut_ad(node && system && space);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(system->mutex)));
#endif /* UNIV_SYNC_DEBUG */
if (node->open == FALSE) {
/* File is closed */
......@@ -952,7 +958,9 @@ fil_node_complete_io(
{
ut_ad(node);
ut_ad(system);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(system->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_a(node->n_pending > 0);
node->n_pending--;
......
......@@ -301,9 +301,9 @@ fsp_get_space_header(
ut_ad(mtr);
header = FSP_HEADER_OFFSET + buf_page_get(id, 0, RW_X_LATCH, mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(header, SYNC_FSP_PAGE);
#endif /* UNIV_SYNC_DEBUG */
return(header);
}
......@@ -658,7 +658,9 @@ xdes_get_descriptor_with_space_hdr(
} else {
descr_page = buf_page_get(space, descr_page_no, RW_X_LATCH,
mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(descr_page, SYNC_FSP_PAGE);
#endif /* UNIV_SYNC_DEBUG */
}
return(descr_page + XDES_ARR_OFFSET
......@@ -688,8 +690,9 @@ xdes_get_descriptor(
sp_header = FSP_HEADER_OFFSET
+ buf_page_get(space, 0, RW_X_LATCH, mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(sp_header, SYNC_FSP_PAGE);
#endif /* UNIV_SYNC_DEBUG */
return(xdes_get_descriptor_with_space_hdr(sp_header, space, offset,
mtr));
}
......@@ -840,10 +843,13 @@ fsp_header_init(
mtr_x_lock(fil_space_get_latch(space), mtr);
page = buf_page_create(space, 0, mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(page, SYNC_FSP_PAGE);
#endif /* UNIV_SYNC_DEBUG */
buf_page_get(space, 0, RW_X_LATCH, mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(page, SYNC_FSP_PAGE);
#endif /* UNIV_SYNC_DEBUG */
/* The prior contents of the file page should be ignored */
......@@ -1083,11 +1089,15 @@ fsp_fill_free_list(
if (i > 0) {
descr_page = buf_page_create(space, i, mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(descr_page,
SYNC_FSP_PAGE);
#endif /* UNIV_SYNC_DEBUG */
buf_page_get(space, i, RW_X_LATCH, mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(descr_page,
SYNC_FSP_PAGE);
#endif /* UNIV_SYNC_DEBUG */
fsp_init_file_page(descr_page, mtr);
}
......@@ -1100,12 +1110,14 @@ fsp_fill_free_list(
ibuf_page = buf_page_create(space,
i + FSP_IBUF_BITMAP_OFFSET, &ibuf_mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(ibuf_page, SYNC_IBUF_BITMAP);
#endif /* UNIV_SYNC_DEBUG */
buf_page_get(space, i + FSP_IBUF_BITMAP_OFFSET,
RW_X_LATCH, &ibuf_mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(ibuf_page, SYNC_FSP_PAGE);
#endif /* UNIV_SYNC_DEBUG */
fsp_init_file_page(ibuf_page, &ibuf_mtr);
ibuf_bitmap_page_init(ibuf_page, &ibuf_mtr);
......@@ -1297,8 +1309,9 @@ fsp_alloc_free_page(
buf_page_create(space, page_no, mtr);
page = buf_page_get(space, page_no, RW_X_LATCH, mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(page, SYNC_FSP_PAGE);
#endif /* UNIV_SYNC_DEBUG */
/* Prior contents of the page should be ignored */
fsp_init_file_page(page, mtr);
......@@ -1532,8 +1545,9 @@ fsp_alloc_seg_inode_page(
buf_block_align(page)->check_index_page_at_flush = FALSE;
fil_page_set_type(page, FIL_PAGE_INODE);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(page, SYNC_FSP_PAGE);
#endif /* UNIV_SYNC_DEBUG */
for (i = 0; i < FSP_SEG_INODES_PER_PAGE; i++) {
......@@ -1580,7 +1594,9 @@ fsp_alloc_seg_inode(
page = buf_page_get(buf_frame_get_space_id(space_header), page_no,
RW_X_LATCH, mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(page, SYNC_FSP_PAGE);
#endif /* UNIV_SYNC_DEBUG */
n = fsp_seg_inode_page_find_free(page, 0, mtr);
......@@ -1831,9 +1847,11 @@ fseg_create_general(
mtr);
}
#ifdef UNIV_SYNC_DEBUG
ut_ad(!mutex_own(&kernel_mutex)
|| mtr_memo_contains(mtr, fil_space_get_latch(space),
MTR_MEMO_X_LOCK));
#endif /* UNIV_SYNC_DEBUG */
latch = fil_space_get_latch(space);
mtr_x_lock(latch, mtr);
......@@ -1985,9 +2003,11 @@ fseg_n_reserved_pages(
space = buf_frame_get_space_id(header);
#ifdef UNIV_SYNC_DEBUG
ut_ad(!mutex_own(&kernel_mutex)
|| mtr_memo_contains(mtr, fil_space_get_latch(space),
MTR_MEMO_X_LOCK));
#endif /* UNIV_SYNC_DEBUG */
mtr_x_lock(fil_space_get_latch(space), mtr);
inode = fseg_inode_get(header, mtr);
......@@ -2292,7 +2312,9 @@ fseg_alloc_free_page_low(
ut_a(page == buf_page_get(space, ret_page, RW_X_LATCH, mtr));
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(page, SYNC_FSP_PAGE);
#endif /* UNIV_SYNC_DEBUG */
/* The prior contents of the page should be ignored */
fsp_init_file_page(page, mtr);
......@@ -2345,9 +2367,11 @@ fseg_alloc_free_page_general(
space = buf_frame_get_space_id(seg_header);
#ifdef UNIV_SYNC_DEBUG
ut_ad(!mutex_own(&kernel_mutex)
|| mtr_memo_contains(mtr, fil_space_get_latch(space),
MTR_MEMO_X_LOCK));
#endif /* UNIV_SYNC_DEBUG */
latch = fil_space_get_latch(space);
mtr_x_lock(latch, mtr);
......@@ -2442,9 +2466,11 @@ fsp_reserve_free_extents(
ulint n_pages_added;
ut_ad(mtr);
#ifdef UNIV_SYNC_DEBUG
ut_ad(!mutex_own(&kernel_mutex)
|| mtr_memo_contains(mtr, fil_space_get_latch(space),
MTR_MEMO_X_LOCK));
#endif /* UNIV_SYNC_DEBUG */
latch = fil_space_get_latch(space);
mtr_x_lock(latch, mtr);
......@@ -2534,8 +2560,9 @@ fsp_get_available_space_in_free_extents(
rw_lock_t* latch;
mtr_t mtr;
#ifdef UNIV_SYNC_DEBUG
ut_ad(!mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
mtr_start(&mtr);
latch = fil_space_get_latch(space);
......@@ -2820,9 +2847,11 @@ fseg_free_page(
{
fseg_inode_t* seg_inode;
#ifdef UNIV_SYNC_DEBUG
ut_ad(!mutex_own(&kernel_mutex)
|| mtr_memo_contains(mtr, fil_space_get_latch(space),
MTR_MEMO_X_LOCK));
#endif /* UNIV_SYNC_DEBUG */
mtr_x_lock(fil_space_get_latch(space), mtr);
seg_inode = fseg_inode_get(seg_header, mtr);
......@@ -2929,9 +2958,11 @@ fseg_free_step(
space = buf_frame_get_space_id(header);
#ifdef UNIV_SYNC_DEBUG
ut_ad(!mutex_own(&kernel_mutex)
|| mtr_memo_contains(mtr, fil_space_get_latch(space),
MTR_MEMO_X_LOCK));
#endif /* UNIV_SYNC_DEBUG */
mtr_x_lock(fil_space_get_latch(space), mtr);
descr = xdes_get_descriptor(space, buf_frame_get_page_no(header), mtr);
......@@ -3002,9 +3033,11 @@ fseg_free_step_not_header(
space = buf_frame_get_space_id(header);
#ifdef UNIV_SYNC_DEBUG
ut_ad(!mutex_own(&kernel_mutex)
|| mtr_memo_contains(mtr, fil_space_get_latch(space),
MTR_MEMO_X_LOCK));
#endif /* UNIV_SYNC_DEBUG */
mtr_x_lock(fil_space_get_latch(space), mtr);
inode = fseg_inode_get(header, mtr);
......
......@@ -82,8 +82,9 @@ ha_insert_for_fold(
ulint hash;
ut_ad(table && data);
#ifdef UNIV_SYNC_DEBUG
ut_ad(!table->mutexes || mutex_own(hash_get_mutex(table, fold)));
#endif /* UNIV_SYNC_DEBUG */
hash = hash_calc_hash(fold, table);
cell = hash_get_nth_cell(table, hash);
......@@ -163,8 +164,9 @@ ha_delete(
{
ha_node_t* node;
#ifdef UNIV_SYNC_DEBUG
ut_ad(!table->mutexes || mutex_own(hash_get_mutex(table, fold)));
#endif /* UNIV_SYNC_DEBUG */
node = ha_search_with_data(table, fold, data);
ut_a(node);
......@@ -185,8 +187,9 @@ ha_remove_all_nodes_to_page(
{
ha_node_t* node;
#ifdef UNIV_SYNC_DEBUG
ut_ad(!table->mutexes || mutex_own(hash_get_mutex(table, fold)));
#endif /* UNIV_SYNC_DEBUG */
node = ha_chain_get_first(table, fold);
while (node) {
......
......@@ -49,7 +49,7 @@ The solution is the following: We put into each tablespace an insert buffer
of its own. Let all the tree and page latches connected with the insert buffer
be later in the latching order than the fsp latch and fsp page latches.
Insert buffer pages must be such that the insert buffer is never invoked
when these pages area accessed as this would result in a recursion violating
when these pages are accessed as this would result in a recursion violating
the latching order. We let a special i/o-handler thread take care of i/o to
the insert buffer pages and the ibuf bitmap pages, as well as the fsp bitmap
pages and the first inode page, which contains the inode of the ibuf tree: let
......@@ -239,7 +239,9 @@ ibuf_header_page_get(
page = buf_page_get(space, FSP_IBUF_HEADER_PAGE_NO, RW_X_LATCH, mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(page, SYNC_IBUF_HEADER);
#endif /* UNIV_SYNC_DEBUG */
return(page);
}
......@@ -263,7 +265,9 @@ ibuf_tree_root_get(
page = buf_page_get(space, FSP_IBUF_TREE_ROOT_PAGE_NO, RW_X_LATCH,
mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(page, SYNC_TREE_NODE);
#endif /* UNIV_SYNC_DEBUG */
return(page);
}
......@@ -375,7 +379,9 @@ ibuf_data_sizes_update(
{
ulint old_size;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&ibuf_mutex));
#endif /* UNIV_SYNC_DEBUG */
old_size = data->size;
......@@ -455,7 +461,9 @@ ibuf_data_init_for_space(
root = buf_page_get(space, FSP_IBUF_TREE_ROOT_PAGE_NO, RW_X_LATCH,
&mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(root, SYNC_TREE_NODE);
#endif /* UNIV_SYNC_DEBUG */
data->size = 0;
data->n_inserts = 0;
......@@ -679,7 +687,9 @@ ibuf_bitmap_get_map_page(
page = buf_page_get(space, ibuf_bitmap_page_no_calc(page_no),
RW_X_LATCH, mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(page, SYNC_IBUF_BITMAP);
#endif /* UNIV_SYNC_DEBUG */
return(page);
}
......@@ -1198,7 +1208,9 @@ ibuf_data_enough_free_for_insert(
/* out: TRUE if enough free pages in list */
ibuf_data_t* data) /* in: ibuf data for the space */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&ibuf_mutex));
#endif /* UNIV_SYNC_DEBUG */
/* We want a big margin of free pages, because a B-tree can sometimes
grow in size also if records are deleted from it, as the node pointers
......@@ -1224,7 +1236,9 @@ ibuf_data_too_much_free(
/* out: TRUE if enough free pages in list */
ibuf_data_t* data) /* in: ibuf data for the space */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&ibuf_mutex));
#endif /* UNIV_SYNC_DEBUG */
if (data->free_list_len >= 3 + data->size / 2 + 3 * data->height) {
......@@ -1282,7 +1296,9 @@ ibuf_add_free_page(
page = buf_page_get(space, page_no, RW_X_LATCH, &mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(page, SYNC_TREE_NODE_NEW);
#endif /* UNIV_SYNC_DEBUG */
ibuf_enter();
......@@ -1402,7 +1418,9 @@ ibuf_remove_free_page(
page = buf_page_get(space, page_no, RW_X_LATCH, &mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(page, SYNC_TREE_NODE);
#endif /* UNIV_SYNC_DEBUG */
/* Remove the page from the free list and update the ibuf size data */
......@@ -1443,8 +1461,9 @@ ibuf_free_excess_pages(
{
ibuf_data_t* ibuf_data;
ulint i;
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(fil_space_get_latch(space), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(rw_lock_get_x_lock_count(fil_space_get_latch(space)) == 1);
ut_ad(!ibuf_inside());
......@@ -1909,7 +1928,9 @@ ibuf_get_volume_buffered(
prev_page = buf_page_get(space, prev_page_no, RW_X_LATCH, mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(prev_page, SYNC_TREE_NODE);
#endif /* UNIV_SYNC_DEBUG */
rec = page_get_supremum_rec(prev_page);
rec = page_rec_get_prev(rec);
......@@ -1968,7 +1989,9 @@ ibuf_get_volume_buffered(
next_page = buf_page_get(space, next_page_no, RW_X_LATCH, mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(next_page, SYNC_TREE_NODE);
#endif /* UNIV_SYNC_DEBUG */
rec = page_get_infimum_rec(next_page);
rec = page_rec_get_next(rec);
......@@ -2592,8 +2615,9 @@ ibuf_merge_or_delete_for_page(
IB__FILE__, __LINE__,
&mtr);
ut_a(success);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(page, SYNC_TREE_NODE);
#endif /* UNIV_SYNC_DEBUG */
}
/* Position pcur in the insert buffer at the first entry for this
......@@ -2744,7 +2768,9 @@ ibuf_validate_low(void)
ibuf_data_t* data;
ulint sum_sizes;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&ibuf_mutex));
#endif /* UNIV_SYNC_DEBUG */
sum_sizes = 0;
......
......@@ -44,8 +44,10 @@ btr_search_info_update(
{
btr_search_t* info;
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED)
&& !rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#ifdef UNIV_SYNC_DEBUG
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
info = btr_search_get_info(index);
......
......@@ -525,11 +525,11 @@ buf_pool_invalidate(void);
--------------------------- LOWER LEVEL ROUTINES -------------------------
=========================================================================*/
#ifdef UNIV_SYNC_DEBUG
/*************************************************************************
Adds latch level info for the rw-lock protecting the buffer frame. This
should be called in the debug version after a successful latching of a
page if we know the latching order level of the acquired latch. If
UNIV_SYNC_DEBUG is not defined, compiles to an empty function. */
page if we know the latching order level of the acquired latch. */
UNIV_INLINE
void
buf_page_dbg_add_level(
......@@ -537,6 +537,7 @@ buf_page_dbg_add_level(
buf_frame_t* frame, /* in: buffer page where we have acquired
a latch */
ulint level); /* in: latching order level */
#endif /* UNIV_SYNC_DEBUG */
/*************************************************************************
Gets a pointer to the memory frame of a block. */
UNIV_INLINE
......@@ -778,11 +779,12 @@ struct buf_block_struct{
BTR_SEARCH_RIGHT_SIDE in hash
indexing */
/* 6. Debug fields */
#ifdef UNIV_SYNC_DEBUG
rw_lock_t debug_latch; /* in the debug version, each thread
which bufferfixes the block acquires
an s-latch here; so we can use the
debug utilities in sync0rw */
#endif
ibool file_page_was_freed;
/* this is set to TRUE when fsp
frees a page in buffer pool */
......@@ -822,7 +824,7 @@ struct buf_pool_struct{
ulint n_pages_created;/* number of pages created in the pool
with no read */
ulint n_page_gets; /* number of page gets performed;
also successful seraches through
also successful searches through
also successful searches through
the adaptive hash index are
counted as page gets; this field
is NOT protected by the buffer
......
......@@ -129,7 +129,9 @@ buf_pool_clock_tic(void)
/*====================*/
/* out: new clock value */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
#endif /* UNIV_SYNC_DEBUG */
buf_pool->ulint_clock++;
......@@ -471,8 +473,10 @@ buf_frame_modify_clock_inc(
block = buf_block_align_low(frame);
#ifdef UNIV_SYNC_DEBUG
ut_ad((mutex_own(&(buf_pool->mutex)) && (block->buf_fix_count == 0))
|| rw_lock_own(&(block->lock), RW_LOCK_EXCLUSIVE));
#endif /* UNIV_SYNC_DEBUG */
UT_DULINT_INC(block->modify_clock);
......@@ -495,8 +499,10 @@ buf_frame_get_modify_clock(
block = buf_block_align(frame);
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
|| rw_lock_own(&(block->lock), RW_LOCK_EXCLUSIVE));
#endif /* UNIV_SYNC_DEBUG */
return(block->modify_clock);
}
......@@ -546,7 +552,9 @@ buf_page_hash_get(
ulint fold;
ut_ad(buf_pool);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(buf_pool->mutex)));
#endif /* UNIV_SYNC_DEBUG */
/* Look for the page in the hash table */
......@@ -644,6 +652,7 @@ buf_page_release(
}
}
#ifdef UNIV_SYNC_DEBUG
/*************************************************************************
Adds latch level info for the rw-lock protecting the buffer frame. This
should be called in the debug version after a successful latching of a
......@@ -658,7 +667,6 @@ buf_page_dbg_add_level(
ulint level __attribute__((unused))) /* in: latching order
level */
{
#ifdef UNIV_SYNC_DEBUG
sync_thread_add_level(&(buf_block_align(frame)->lock), level);
#endif
}
#endif /* UNIV_SYNC_DEBUG */
......@@ -40,8 +40,10 @@ buf_flush_note_modification(
ut_ad(block);
ut_ad(block->state == BUF_BLOCK_FILE_PAGE);
ut_ad(block->buf_fix_count > 0);
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
ut_ad(mutex_own(&(buf_pool->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(ut_dulint_cmp(mtr->start_lsn, ut_dulint_zero) != 0);
ut_ad(mtr->modifications);
......@@ -76,7 +78,9 @@ buf_flush_recv_note_modification(
ut_ad(block);
ut_ad(block->state == BUF_BLOCK_FILE_PAGE);
ut_ad(block->buf_fix_count > 0);
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
mutex_enter(&(buf_pool->mutex));
......
......@@ -543,8 +543,10 @@ dict_table_check_if_in_cache_low(
ulint table_fold;
ut_ad(table_name);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
/* Look for the table name in the hash table */
table_fold = ut_fold_string(table_name);
......@@ -566,8 +568,10 @@ dict_table_get_low(
dict_table_t* table;
ut_ad(table_name);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
table = dict_table_check_if_in_cache_low(table_name);
if (table == NULL) {
......@@ -621,7 +625,9 @@ dict_table_get_on_id_low(
dict_table_t* table;
ulint fold;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
UT_NOT_USED(trx);
/* Look for the table name in the hash table */
......
......@@ -30,7 +30,9 @@ fut_get_ptr(
ptr = buf_page_get(space, addr.page, rw_latch, mtr) + addr.boffset;
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(ptr, SYNC_NO_ORDER_CHECK);
#endif /* UNIV_SYNC_DEBUG */
return(ptr);
}
......@@ -84,7 +84,9 @@ ha_search(
{
ha_node_t* node;
#ifdef UNIV_SYNC_DEBUG
ut_ad(!table->mutexes || mutex_own(hash_get_mutex(table, fold)));
#endif /* UNIV_SYNC_DEBUG */
node = ha_chain_get_first(table, fold);
......@@ -114,7 +116,9 @@ ha_search_and_get_data(
{
ha_node_t* node;
#ifdef UNIV_SYNC_DEBUG
ut_ad(!table->mutexes || mutex_own(hash_get_mutex(table, fold)));
#endif /* UNIV_SYNC_DEBUG */
node = ha_chain_get_first(table, fold);
......@@ -146,7 +150,9 @@ ha_next(
fold = node->fold;
#ifdef UNIV_SYNC_DEBUG
ut_ad(!table->mutexes || mutex_own(hash_get_mutex(table, fold)));
#endif /* UNIV_SYNC_DEBUG */
node = ha_chain_get_next(table, node);
......@@ -176,7 +182,9 @@ ha_search_with_data(
{
ha_node_t* node;
#ifdef UNIV_SYNC_DEBUG
ut_ad(!table->mutexes || mutex_own(hash_get_mutex(table, fold)));
#endif /* UNIV_SYNC_DEBUG */
node = ha_chain_get_first(table, fold);
......@@ -206,7 +214,9 @@ ha_search_and_update_if_found(
{
ha_node_t* node;
#ifdef UNIV_SYNC_DEBUG
ut_ad(!table->mutexes || mutex_own(hash_get_mutex(table, fold)));
#endif /* UNIV_SYNC_DEBUG */
node = ha_search_with_data(table, fold, data);
......@@ -229,7 +239,9 @@ ha_search_and_delete_if_found(
{
ha_node_t* node;
#ifdef UNIV_SYNC_DEBUG
ut_ad(!table->mutexes || mutex_own(hash_get_mutex(table, fold)));
#endif /* UNIV_SYNC_DEBUG */
node = ha_search_with_data(table, fold, data);
......
......@@ -53,15 +53,24 @@ hash_calc_hash(
/* out: hashed value */
ulint fold, /* in: folded value */
hash_table_t* table); /* in: hash table */
/************************************************************************
Assert that the mutex for the table in a hash operation is owned. */
#ifdef UNIV_SYNC_DEBUG
# define HASH_ASSERT_OWNED(TABLE, FOLD) \
ut_ad(!(TABLE)->mutexes || mutex_own(hash_get_mutex(TABLE, FOLD)));
#else
# define HASH_ASSERT_OWNED(TABLE, FOLD)
#endif
/***********************************************************************
Inserts a struct to a hash table. */
#define HASH_INSERT(TYPE, NAME, TABLE, FOLD, DATA)\
{\
do {\
hash_cell_t* cell3333;\
TYPE* struct3333;\
\
ut_ad(!(TABLE)->mutexes || mutex_own(hash_get_mutex(TABLE, FOLD)));\
HASH_ASSERT_OWNED(TABLE, FOLD)\
\
(DATA)->NAME = NULL;\
\
......@@ -79,17 +88,17 @@ Inserts a struct to a hash table. */
\
struct3333->NAME = DATA;\
}\
}
} while (0)
/***********************************************************************
Deletes a struct from a hash table. */
#define HASH_DELETE(TYPE, NAME, TABLE, FOLD, DATA)\
{\
do {\
hash_cell_t* cell3333;\
TYPE* struct3333;\
\
ut_ad(!(TABLE)->mutexes || mutex_own(hash_get_mutex(TABLE, FOLD)));\
HASH_ASSERT_OWNED(TABLE, FOLD)\
\
cell3333 = hash_get_nth_cell(TABLE, hash_calc_hash(FOLD, TABLE));\
\
......@@ -100,13 +109,13 @@ Deletes a struct from a hash table. */
\
while (struct3333->NAME != DATA) {\
\
ut_ad(struct3333)\
ut_ad(struct3333);\
struct3333 = struct3333->NAME;\
}\
\
struct3333->NAME = DATA->NAME;\
}\
}
} while (0)
/***********************************************************************
Gets the first struct in a hash chain, NULL if none. */
......@@ -124,7 +133,7 @@ Looks for a struct in a hash table. */
#define HASH_SEARCH(NAME, TABLE, FOLD, DATA, TEST)\
{\
\
ut_ad(!(TABLE)->mutexes || mutex_own(hash_get_mutex(TABLE, FOLD)));\
HASH_ASSERT_OWNED(TABLE, FOLD)\
\
(DATA) = HASH_GET_FIRST(TABLE, hash_calc_hash(FOLD, TABLE));\
\
......@@ -160,7 +169,7 @@ the heap. The fold value must be stored in the struct NODE in a field named
'fold'. */
#define HASH_DELETE_AND_COMPACT(TYPE, NAME, TABLE, NODE)\
{\
do {\
TYPE* node111;\
TYPE* top_node111;\
hash_cell_t* cell111;\
......@@ -211,33 +220,7 @@ the heap. The fold value must be stored in the struct NODE in a field named
/* Free the space occupied by the top node */\
\
mem_heap_free_top(hash_get_heap(TABLE, fold111), sizeof(TYPE));\
}
/***********************************************************************
Calculates the number of stored structs in a hash table. */
#define HASH_GET_N_NODES(TYPE, NAME, TABLE, N)\
{\
hash_cell_t* cell3333;\
TYPE* struct3333;\
ulint i3333;\
\
(N) = 0;\
\
for (i3333 = 0; i3333 < hash_get_n_cells(TABLE); i3333++) {\
\
cell3333 = hash_get_nth_cell(TABLE, i3333);\
\
struct3333 = cell3333->node;\
\
while (struct3333) {\
\
(N) = (N) + 1;\
\
struct = HASH_GET_NEXT(NAME, struct3333);\
}\
}\
}
} while (0)
/****************************************************************
Gets the mutex index for a fold value in a hash table. */
......
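Besides the UNIV_SYNC_DEBUG guards, the hash-macro hunks above add a HASH_ASSERT_OWNED() helper and convert the multi-statement macros from bare { ... } blocks to do { ... } while (0). A minimal generic sketch of why the do-while(0) form matters (not InnoDB code; the struct and names are invented for illustration):

struct pair { int a; int b; };

#define RESET_BAD(p)	{ (p)->a = 0; (p)->b = 0; }
#define RESET(p)	do { (p)->a = 0; (p)->b = 0; } while (0)

void
clear_if_needed(struct pair* p, int cond)
{
	/* RESET expands to a single statement, so it composes with the
	trailing ';' and the else branch. With RESET_BAD the ';' after
	the expanded '}' would end the if-statement and the 'else'
	below would not compile. */
	if (cond)
		RESET(p);
	else
		p->a = -1;
}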
......@@ -64,7 +64,9 @@ lock_clust_rec_some_has_impl(
{
dulint trx_id;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(index->type & DICT_CLUSTERED);
ut_ad(page_rec_is_user_rec(rec));
......
......@@ -255,7 +255,9 @@ log_block_init(
{
ulint no;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(log_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
no = log_block_convert_lsn_to_no(lsn);
......@@ -277,7 +279,9 @@ log_block_init_in_old_format(
{
ulint no;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(log_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
no = log_block_convert_lsn_to_no(lsn);
......@@ -407,7 +411,9 @@ log_get_online_backup_lsn_low(void)
/* out: online_backup_lsn, the caller must
own the log_sys mutex */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(log_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(log_sys->online_backup_state);
return(log_sys->online_backup_lsn);
......@@ -422,7 +428,9 @@ log_get_online_backup_state_low(void)
/* out: online backup state, the caller must
own the log_sys mutex */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(log_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
return(log_sys->online_backup_state);
}
......@@ -107,8 +107,10 @@ row_upd_rec_sys_fields(
dulint roll_ptr)/* in: roll ptr of the undo log record */
{
ut_ad(index->type & DICT_CLUSTERED);
#ifdef UNIV_SYNC_DEBUG
ut_ad(!buf_block_align(rec)->is_hashed
|| rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
row_set_rec_trx_id(rec, index, trx->id);
row_set_rec_roll_ptr(rec, index, roll_ptr);
......
......@@ -25,13 +25,16 @@ smaller than 30 and the order of the numerical values like below! */
#define RW_NO_LATCH 3
typedef struct rw_lock_struct rw_lock_t;
#ifdef UNIV_SYNC_DEBUG
typedef struct rw_lock_debug_struct rw_lock_debug_t;
#endif /* UNIV_SYNC_DEBUG */
typedef UT_LIST_BASE_NODE_T(rw_lock_t) rw_lock_list_t;
extern rw_lock_list_t rw_lock_list;
extern mutex_t rw_lock_list_mutex;
#ifdef UNIV_SYNC_DEBUG
/* The global mutex which protects debug info lists of all rw-locks.
To modify the debug info list of an rw-lock, this mutex has to be
......@@ -42,6 +45,7 @@ extern os_event_t rw_lock_debug_event; /* If deadlock detection does
may wait for this event */
extern ibool rw_lock_debug_waiters; /* This is set to TRUE, if
there may be waiters for the event */
#endif /* UNIV_SYNC_DEBUG */
extern ulint rw_s_system_call_count;
extern ulint rw_s_spin_wait_count;
......@@ -327,6 +331,7 @@ ulint
rw_lock_get_reader_count(
/*=====================*/
rw_lock_t* lock);
#ifdef UNIV_SYNC_DEBUG
/**********************************************************************
Checks if the thread has locked the rw-lock in the specified mode, with
the pass value == 0. */
......@@ -337,6 +342,7 @@ rw_lock_own(
rw_lock_t* lock, /* in: rw-lock */
ulint lock_type); /* in: lock type: RW_LOCK_SHARED,
RW_LOCK_EX */
#endif /* UNIV_SYNC_DEBUG */
/**********************************************************************
Checks if somebody has locked the rw-lock in the specified mode. */
......@@ -346,6 +352,7 @@ rw_lock_is_locked(
rw_lock_t* lock, /* in: rw-lock */
ulint lock_type); /* in: lock type: RW_LOCK_SHARED,
RW_LOCK_EX */
#ifdef UNIV_SYNC_DEBUG
/*******************************************************************
Prints debug info of an rw-lock. */
......@@ -392,6 +399,7 @@ void
rw_lock_debug_print(
/*================*/
rw_lock_debug_t* info); /* in: debug struct */
#endif /* UNIV_SYNC_DEBUG */
/* NOTE! The structure appears here only for the compiler to know its size.
Do not use its fields directly! The structure used in the spin lock
......@@ -434,10 +442,12 @@ struct rw_lock_struct {
UT_LIST_NODE_T(rw_lock_t) list;
/* All allocated rw locks are put into a
list */
#ifdef UNIV_SYNC_DEBUG
UT_LIST_BASE_NODE_T(rw_lock_debug_t) debug_list;
/* In the debug version: pointer to the debug
info list of the lock */
ulint level; /* Debug version: level in the global latching
#endif /* UNIV_SYNC_DEBUG */
ulint level; /* Level in the global latching
order; default SYNC_LEVEL_NONE */
char* cfile_name; /* File name where lock created */
ulint cline; /* Line where created */
......@@ -450,6 +460,7 @@ struct rw_lock_struct {
#define RW_LOCK_MAGIC_N 22643
#ifdef UNIV_SYNC_DEBUG
/* The structure for storing debug info of an rw-lock */
struct rw_lock_debug_struct {
......@@ -464,6 +475,7 @@ struct rw_lock_debug_struct {
/* Debug structs are linked in a two-way
list */
};
#endif /* UNIV_SYNC_DEBUG */
#ifndef UNIV_NONINL
#include "sync0rw.ic"
......
......@@ -20,6 +20,7 @@ rw_lock_s_lock_spin(
be passed to another thread to unlock */
char* file_name,/* in: file name where lock requested */
ulint line); /* in: line where requested */
#ifdef UNIV_SYNC_DEBUG
/**********************************************************************
Inserts the debug information for an rw-lock. */
......@@ -40,7 +41,7 @@ rw_lock_remove_debug_info(
rw_lock_t* lock, /* in: rw-lock */
ulint pass, /* in: pass value */
ulint lock_type); /* in: lock type */
#endif /* UNIV_SYNC_DEBUG */
/************************************************************************
Accessor functions for rw lock. */
......@@ -132,8 +133,9 @@ rw_lock_s_lock_low(
char* file_name, /* in: file name where lock requested */
ulint line) /* in: line where requested */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(rw_lock_get_mutex(lock)));
#endif /* UNIV_SYNC_DEBUG */
/* Check if the writer field is free */
if (lock->writer == RW_LOCK_NOT_LOCKED) {
......@@ -144,7 +146,6 @@ rw_lock_s_lock_low(
rw_lock_add_debug_info(lock, pass, RW_LOCK_SHARED, file_name,
line);
#endif
lock->last_s_file_name = file_name;
lock->last_s_line = line;
......@@ -236,7 +237,9 @@ rw_lock_s_lock_func(
the threads which have s-locked a latch. This would use some CPU
time. */
#ifdef UNIV_SYNC_DEBUG
ut_ad(!rw_lock_own(lock, RW_LOCK_SHARED)); /* see NOTE above */
#endif /* UNIV_SYNC_DEBUG */
mutex_enter(rw_lock_get_mutex(lock));
......
......@@ -185,6 +185,7 @@ sync_thread_levels_empty_gen(
allowed to be owned by the thread,
also purge_is_running mutex is
allowed */
#ifdef UNIV_SYNC_DEBUG
/**********************************************************************
Checks that the current thread owns the mutex. Works only
in the debug version. */
......@@ -217,6 +218,7 @@ Prints debug info of currently reserved mutexes. */
void
mutex_list_print_info(void);
/*========================*/
#endif /* UNIV_SYNC_DEBUG */
/**********************************************************************
NOT to be used outside this module except in debugging! Gets the value
of the lock word. */
......@@ -225,6 +227,7 @@ ulint
mutex_get_lock_word(
/*================*/
mutex_t* mutex); /* in: mutex */
#ifdef UNIV_SYNC_DEBUG
/**********************************************************************
NOT to be used outside this module except in debugging! Gets the waiters
field in a mutex. */
......@@ -234,6 +237,7 @@ mutex_get_waiters(
/*==============*/
/* out: value to set */
mutex_t* mutex); /* in: mutex */
#endif /* UNIV_SYNC_DEBUG */
/*
LATCHING ORDER WITHIN THE DATABASE
......@@ -442,13 +446,13 @@ struct mutex_struct {
Otherwise, this is 0. */
UT_LIST_NODE_T(mutex_t) list; /* All allocated mutexes are put into
a list. Pointers to the next and prev. */
#ifdef UNIV_SYNC_DEBUG
const char* file_name; /* File where the mutex was locked */
ulint line; /* Line where the mutex was locked */
os_thread_id_t thread_id; /* Debug version: The thread id of the
thread which locked the mutex. */
char* file_name; /* Debug version: File name where the mutex
was locked */
ulint line; /* Debug version: Line where the mutex was
locked */
ulint level; /* Debug version: level in the global latching
#endif /* UNIV_SYNC_DEBUG */
ulint level; /* Level in the global latching
order; default SYNC_LEVEL_NONE */
char* cfile_name; /* File name where mutex created */
ulint cline; /* Line where created */
......
......@@ -25,6 +25,7 @@ mutex_spin_wait(
mutex_t* mutex, /* in: pointer to mutex */
char* file_name,/* in: file name where mutex requested */
ulint line); /* in: line where requested */
#ifdef UNIV_SYNC_DEBUG
/**********************************************************************
Sets the debug information for a reserved mutex. */
......@@ -34,6 +35,7 @@ mutex_set_debug_info(
mutex_t* mutex, /* in: mutex */
char* file_name, /* in: file where requested */
ulint line); /* in: line where requested */
#endif /* UNIV_SYNC_DEBUG */
/**********************************************************************
Releases the threads waiting in the primary wait array for this mutex. */
......@@ -200,9 +202,9 @@ mutex_exit(
/*=======*/
mutex_t* mutex) /* in: pointer to mutex */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(mutex));
#ifdef UNIV_SYNC_DEBUG
mutex->thread_id = ULINT_UNDEFINED;
sync_thread_reset_level(mutex);
......@@ -249,14 +251,9 @@ mutex_enter_func(
the atomic test_and_set; we could peek, and possibly save time. */
if (!mutex_test_and_set(mutex)) {
#ifdef UNIV_SYNC_DEBUG
mutex_set_debug_info(mutex, file_name, line);
#endif
mutex->file_name = file_name;
mutex->line = line;
return; /* Succeeded! */
}
......
......@@ -24,7 +24,9 @@ trx_rsegf_get(
header = TRX_RSEG + buf_page_get(space, page_no, RW_X_LATCH, mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(header, SYNC_RSEG_HEADER);
#endif /* UNIV_SYNC_DEBUG */
return(header);
}
......@@ -45,7 +47,9 @@ trx_rsegf_get_new(
header = TRX_RSEG + buf_page_get(space, page_no, RW_X_LATCH, mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(header, SYNC_RSEG_HEADER_NEW);
#endif /* UNIV_SYNC_DEBUG */
return(header);
}
......
......@@ -60,7 +60,9 @@ trx_sys_get_nth_rseg(
trx_sys_t* sys, /* in: trx system */
ulint n) /* in: index of slot */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(kernel_mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(n < TRX_SYS_N_RSEGS);
return(sys->rseg_array[n]);
......@@ -98,7 +100,9 @@ trx_sysf_get(
header = TRX_SYS + buf_page_get(TRX_SYS_SPACE, TRX_SYS_PAGE_NO,
RW_X_LATCH, mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(header, SYNC_TRX_SYS_HEADER);
#endif /* UNIV_SYNC_DEBUG */
return(header);
}
......@@ -115,7 +119,9 @@ trx_sysf_rseg_get_space(
ulint i, /* in: slot index == rseg id */
mtr_t* mtr) /* in: mtr */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(kernel_mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(sys_header);
ut_ad(i < TRX_SYS_N_RSEGS);
......@@ -138,7 +144,9 @@ trx_sysf_rseg_get_page_no(
mtr_t* mtr) /* in: mtr */
{
ut_ad(sys_header);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(kernel_mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(i < TRX_SYS_N_RSEGS);
return(mtr_read_ulint(sys_header + TRX_SYS_RSEGS
......@@ -158,7 +166,9 @@ trx_sysf_rseg_set_space(
ulint space, /* in: space id */
mtr_t* mtr) /* in: mtr */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(kernel_mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(sys_header);
ut_ad(i < TRX_SYS_N_RSEGS);
......@@ -182,7 +192,9 @@ trx_sysf_rseg_set_page_no(
slot is reset to unused */
mtr_t* mtr) /* in: mtr */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(kernel_mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(sys_header);
ut_ad(i < TRX_SYS_N_RSEGS);
......@@ -236,7 +248,9 @@ trx_get_on_id(
{
trx_t* trx;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(kernel_mutex)));
#endif /* UNIV_SYNC_DEBUG */
trx = UT_LIST_GET_FIRST(trx_sys->trx_list);
......@@ -266,7 +280,9 @@ trx_list_get_min_trx_id(void)
{
trx_t* trx;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(kernel_mutex)));
#endif /* UNIV_SYNC_DEBUG */
trx = UT_LIST_GET_LAST(trx_sys->trx_list);
......@@ -289,7 +305,9 @@ trx_is_active(
{
trx_t* trx;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(kernel_mutex)));
#endif /* UNIV_SYNC_DEBUG */
if (ut_dulint_cmp(trx_id, trx_list_get_min_trx_id()) < 0) {
......@@ -325,7 +343,9 @@ trx_sys_get_new_trx_id(void)
{
dulint id;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
/* VERY important: after the database is started, max_trx_id value is
divisible by TRX_SYS_TRX_ID_WRITE_MARGIN, and the following if
......@@ -355,7 +375,9 @@ trx_sys_get_new_trx_no(void)
/*========================*/
/* out: new, allocated trx number */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
return(trx_sys_get_new_trx_id());
}
......@@ -126,7 +126,9 @@ trx_undo_page_get(
page = buf_page_get(space, page_no, RW_X_LATCH, mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(page, SYNC_TRX_UNDO_PAGE);
#endif /* UNIV_SYNC_DEBUG */
return(page);
}
......@@ -146,7 +148,9 @@ trx_undo_page_get_s_latched(
page = buf_page_get(space, page_no, RW_S_LATCH, mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(page, SYNC_TRX_UNDO_PAGE);
#endif /* UNIV_SYNC_DEBUG */
return(page);
}
......
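The pattern repeated in the hunks above and below is that ut_ad() is available whenever UNIV_DEBUG is defined, but mutex_own() and rw_lock_own() exist only under UNIV_SYNC_DEBUG, so the ownership assertions need their own guard. A minimal sketch of that dependency; ut_ad is mapped onto assert() here, and the mutex type and helpers are placeholders:

#include <assert.h>

typedef struct { int owner_recorded; } mutex_sketch_t;

#ifdef UNIV_DEBUG
#define ut_ad(EXPR)	assert(EXPR)
#else
#define ut_ad(EXPR)	((void) 0)
#endif /* UNIV_DEBUG */

#ifdef UNIV_SYNC_DEBUG
/* Only a sync-debug build tracks ownership, so only it can answer this. */
static int
mutex_own_sketch(const mutex_sketch_t* mutex)
{
	return(mutex->owner_recorded);
}
#endif /* UNIV_SYNC_DEBUG */

static void
do_kernel_work(mutex_sketch_t* kernel_mutex)
{
#ifdef UNIV_SYNC_DEBUG
	/* Without this guard a UNIV_DEBUG-only build would not compile,
	because mutex_own_sketch() is not built in. */
	ut_ad(mutex_own_sketch(kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
	(void) kernel_mutex;
	/* ... actual work under the kernel mutex ... */
}

int
main(void)
{
	mutex_sketch_t	m = {1};

	do_kernel_work(&m);
	return(0);
}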
This diff is collapsed.

......@@ -140,7 +140,9 @@ log_buf_pool_get_oldest_modification(void)
{
dulint lsn;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(log_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
lsn = buf_pool_get_oldest_modification();
......@@ -239,7 +241,9 @@ log_write_low(
ulint data_len;
byte* log_block;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(log->mutex)));
#endif /* UNIV_SYNC_DEBUG */
part_loop:
/* Calculate a part length */
......@@ -306,7 +310,9 @@ log_close(void)
log_t* log = log_sys;
ulint checkpoint_age;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(log->mutex)));
#endif /* UNIV_SYNC_DEBUG */
lsn = log->lsn;
......@@ -421,7 +427,9 @@ log_group_get_capacity(
/* out: capacity in bytes */
log_group_t* group) /* in: log group */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(log_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
return((group->file_size - LOG_FILE_HDR_SIZE) * group->n_files);
}
......@@ -437,7 +445,9 @@ log_group_calc_size_offset(
ulint offset, /* in: real offset within the log group */
log_group_t* group) /* in: log group */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(log_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
return(offset - LOG_FILE_HDR_SIZE * (1 + offset / group->file_size));
}
......@@ -453,7 +463,9 @@ log_group_calc_real_offset(
ulint offset, /* in: size offset within the log group */
log_group_t* group) /* in: log group */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(log_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
return(offset + LOG_FILE_HDR_SIZE
* (1 + offset / (group->file_size - LOG_FILE_HDR_SIZE)));
......@@ -475,7 +487,9 @@ log_group_calc_lsn_offset(
ib_longlong group_size;
ib_longlong offset;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(log_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
/* If total log file size is > 2 GB we can easily get overflows
with 32-bit integers. Use 64-bit integers instead. */
......@@ -589,7 +603,9 @@ log_calc_max_ages(void)
ulint archive_margin;
ulint smallest_archive_margin;
#ifdef UNIV_SYNC_DEBUG
ut_ad(!mutex_own(&(log_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
mutex_enter(&(log_sys->mutex));
......@@ -876,7 +892,9 @@ log_flush_do_unlocks(
ulint code) /* in: any ORed combination of LOG_UNLOCK_FLUSH_LOCK
and LOG_UNLOCK_NONE_FLUSHED_LOCK */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(log_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
/* NOTE that we must own the log mutex when doing the setting of the
events: this is because transactions will wait for these events to
......@@ -908,7 +926,9 @@ log_group_check_flush_completion(
/* out: LOG_UNLOCK_NONE_FLUSHED_LOCK or 0 */
log_group_t* group) /* in: log group */
{
ut_ad(mutex_own(&(log_sys->mutex)));
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(log_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
if (!log_sys->one_flushed && group->n_pending_writes == 0) {
......@@ -941,7 +961,9 @@ log_sys_check_flush_completion(void)
ulint move_start;
ulint move_end;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(log_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
if (log_sys->n_pending_writes == 0) {
......@@ -1048,7 +1070,9 @@ log_group_file_header_flush(
UT_NOT_USED(type);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(log_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_a(nth_file < group->n_files);
......@@ -1116,7 +1140,9 @@ log_group_write_buf(
ulint next_offset;
ulint i;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(log_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_a(len % OS_FILE_LOG_BLOCK_SIZE == 0);
ut_a(ut_dulint_get_low(start_lsn) % OS_FILE_LOG_BLOCK_SIZE == 0);
......@@ -1513,7 +1539,9 @@ void
log_complete_checkpoint(void)
/*=========================*/
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(log_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(log_sys->n_pending_checkpoint_writes == 0);
log_sys->next_checkpoint_no
......@@ -1603,7 +1631,9 @@ log_group_checkpoint(
byte* buf;
ulint i;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(log_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_a(LOG_CHECKPOINT_SIZE <= OS_FILE_LOG_BLOCK_SIZE);
buf = group->checkpoint_buf;
......@@ -1757,7 +1787,9 @@ log_group_read_checkpoint_info(
log_group_t* group, /* in: log group */
ulint field) /* in: LOG_CHECKPOINT_1 or LOG_CHECKPOINT_2 */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(log_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
log_sys->n_log_ios++;
......@@ -1775,7 +1807,9 @@ log_groups_write_checkpoint_info(void)
{
log_group_t* group;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(log_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
group = UT_LIST_GET_FIRST(log_sys->log_groups);
......@@ -2040,7 +2074,9 @@ log_group_read_log_seg(
ulint source_offset;
ibool sync;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(log_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
sync = FALSE;
......@@ -2110,7 +2146,9 @@ log_group_archive_file_header_write(
byte* buf;
ulint dest_offset;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(log_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_a(nth_file < group->n_files);
......@@ -2147,7 +2185,9 @@ log_group_archive_completed_header_write(
byte* buf;
ulint dest_offset;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(log_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_a(nth_file < group->n_files);
buf = *(group->archive_file_header_bufs + nth_file);
......@@ -2186,7 +2226,9 @@ log_group_archive(
ulint n_files;
ulint open_mode;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(log_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
start_lsn = log_sys->archived_lsn;
......@@ -2314,7 +2356,9 @@ log_archive_groups(void)
{
log_group_t* group;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(log_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
group = UT_LIST_GET_FIRST(log_sys->log_groups);
......@@ -2337,7 +2381,9 @@ log_archive_write_complete_groups(void)
dulint end_lsn;
ulint i;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(log_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
group = UT_LIST_GET_FIRST(log_sys->log_groups);
......@@ -2399,7 +2445,9 @@ void
log_archive_check_completion_low(void)
/*==================================*/
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(log_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
if (log_sys->n_pending_archive_ios == 0
&& log_sys->archiving_phase == LOG_ARCHIVE_READ) {
......@@ -2630,7 +2678,9 @@ log_archive_close_groups(
log_group_t* group;
ulint trunc_len;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(log_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
group = UT_LIST_GET_FIRST(log_sys->log_groups);
......@@ -3124,7 +3174,9 @@ log_check_log_recs(
byte* buf1;
byte* scan_buf;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(log_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
if (len == 0) {
......
......@@ -152,7 +152,9 @@ void
recv_sys_empty_hash(void)
/*=====================*/
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(recv_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
if (recv_sys->n_addrs != 0) {
fprintf(stderr,
"InnoDB: Error: %lu pages with log records were left unprocessed!\n"
......@@ -1044,7 +1046,9 @@ recv_recover_page(
&mtr);
ut_a(success);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(page, SYNC_NO_ORDER_CHECK);
#endif /* UNIV_SYNC_DEBUG */
}
/* Read the newest modification lsn from the page */
......@@ -1245,14 +1249,13 @@ recv_apply_hashed_log_recs(
goto loop;
}
#ifdef UNIV_SYNC_DEBUG
ut_ad(!allow_ibuf == mutex_own(&log_sys->mutex));
#endif /* UNIV_SYNC_DEBUG */
if (!allow_ibuf) {
ut_ad(mutex_own(&(log_sys->mutex)));
recv_no_ibuf_operations = TRUE;
} else {
ut_ad(!mutex_own(&(log_sys->mutex)));
}
recv_sys->apply_log_recs = TRUE;
recv_sys->apply_batch_on = TRUE;
......@@ -1282,8 +1285,10 @@ recv_apply_hashed_log_recs(
page = buf_page_get(space, page_no,
RW_X_LATCH, &mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(page,
SYNC_NO_ORDER_CHECK);
#endif /* UNIV_SYNC_DEBUG */
recv_recover_page(FALSE, FALSE, page,
space, page_no);
mtr_commit(&mtr);
......@@ -1505,7 +1510,9 @@ recv_update_replicate(
replica = buf_page_get(space + RECV_REPLICA_SPACE_ADD, page_no,
RW_X_LATCH, &mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(replica, SYNC_NO_ORDER_CHECK);
#endif /* UNIV_SYNC_DEBUG */
ptr = recv_parse_or_apply_log_rec_body(type, body, end_ptr, replica,
&mtr);
......@@ -1574,7 +1581,9 @@ recv_compare_replicate(
replica = buf_page_get(space + RECV_REPLICA_SPACE_ADD, page_no,
RW_X_LATCH, &mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(replica, SYNC_NO_ORDER_CHECK);
#endif /* UNIV_SYNC_DEBUG */
recv_check_identical(page + FIL_PAGE_DATA,
replica + FIL_PAGE_DATA,
......@@ -1615,7 +1624,9 @@ recv_compare_spaces(
IB__FILE__, __LINE__,
&mtr);
if (frame) {
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(frame, SYNC_NO_ORDER_CHECK);
#endif /* UNIV_SYNC_DEBUG */
ut_memcpy(page, frame, UNIV_PAGE_SIZE);
} else {
/* Read it from file */
......@@ -1628,7 +1639,9 @@ recv_compare_spaces(
IB__FILE__, __LINE__,
&mtr);
if (frame) {
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(frame, SYNC_NO_ORDER_CHECK);
#endif /* UNIV_SYNC_DEBUG */
ut_memcpy(replica, frame, UNIV_PAGE_SIZE);
} else {
/* Read it from file */
......@@ -1880,7 +1893,9 @@ recv_parse_log_recs(
byte* body;
ulint n_recs;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(log_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(!ut_dulint_is_zero(recv_sys->parse_start_lsn));
loop:
ptr = recv_sys->buf + recv_sys->recovered_offset;
......@@ -2775,8 +2790,9 @@ recv_reset_logs(
{
log_group_t* group;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(log_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
log_sys->lsn = ut_dulint_align_up(lsn, OS_FILE_LOG_BLOCK_SIZE);
group = UT_LIST_GET_FIRST(log_sys->log_groups);
......
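The recv_apply_hashed_log_recs() hunk above folds the two branch-specific assertions into a single equality between two boolean facts. A small sketch of that consolidated check, with placeholder names; it relies on both operands being normalized to 0 or 1:

#include <assert.h>

typedef int	ibool;
#define TRUE	1
#define FALSE	0

static void
check_caller_contract(ibool allow_ibuf, ibool caller_owns_log_mutex)
{
	/* "!allow_ibuf == owns" works only because ! and the ownership
	predicate both yield 0 or 1 before the comparison. */
	assert(!allow_ibuf == caller_owns_log_mutex);
}

int
main(void)
{
	check_caller_contract(TRUE, FALSE);	/* ibuf allowed: mutex free */
	check_caller_contract(FALSE, TRUE);	/* no ibuf: mutex held */
	return(0);
}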
......@@ -259,7 +259,9 @@ mem_pool_fill_free_list(
mem_area_t* area2;
ibool ret;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(pool->mutex)));
#endif /* UNIV_SYNC_DEBUG */
if (i >= 63) {
/* We come here when we have run out of space in the
......
......@@ -54,9 +54,10 @@ read_view_oldest_copy_or_open_new(
ulint insert_done = 0;
ulint n;
ulint i;
ut_ad(mutex_own(&kernel_mutex));
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
old_view = UT_LIST_GET_LAST(trx_sys->view_list);
if (old_view == NULL) {
......@@ -132,9 +133,9 @@ read_view_open_now(
read_view_t* view;
trx_t* trx;
ulint n;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
view = read_view_create_low(UT_LIST_GET_LEN(trx_sys->trx_list), heap);
view->creator = cr_trx;
......@@ -195,8 +196,9 @@ read_view_close(
/*============*/
read_view_t* view) /* in: read view */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
UT_LIST_REMOVE(view_list, trx_sys->view_list, view);
}
......
......@@ -1032,8 +1032,10 @@ row_ins_check_foreign_constraint(
mtr_t mtr;
run_again:
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */
err = DB_SUCCESS;
if (thr_get_trx(thr)->check_foreigns == FALSE) {
......
......@@ -1267,9 +1267,11 @@ row_create_table_for_mysql(
ulint err;
ut_ad(trx->mysql_thread_id == os_thread_get_curr_id());
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH);
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH);
if (srv_created_new_raw) {
fprintf(stderr,
......@@ -1471,8 +1473,10 @@ row_create_index_for_mysql(
ulint err;
ulint i, j;
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(trx->mysql_thread_id == os_thread_get_curr_id());
trx->op_info = (char *) "creating index";
......@@ -1576,8 +1580,10 @@ row_table_add_foreign_constraints(
ulint keywordlen;
ulint err;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
ut_a(sql_string);
trx->op_info = (char *) "adding foreign keys";
......@@ -1748,7 +1754,9 @@ row_get_background_drop_list_len_low(void)
/*======================================*/
/* out: how many tables in list */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
if (!row_mysql_drop_list_inited) {
......@@ -1968,8 +1976,10 @@ row_drop_table_for_mysql(
locked_dictionary = TRUE;
}
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
graph = pars_sql(buf);
......
......@@ -447,8 +447,10 @@ row_purge_upd_exist_or_extern(
data_field = buf_page_get(0, page_no, RW_X_LATCH, &mtr)
+ offset + internal_offset;
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(buf_frame_align(data_field),
SYNC_TRX_UNDO_PAGE);
#endif /* UNIV_SYNC_DEBUG */
data_field_len = ufield->new_val.len;
......
......@@ -930,7 +930,9 @@ row_sel_try_search_shortcut(
ut_ad(node->read_view);
ut_ad(plan->unique_search);
ut_ad(!plan->must_get_clust);
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */
row_sel_open_pcur(node, plan, TRUE, mtr);
......
......@@ -60,8 +60,10 @@ row_vers_impl_x_locked_off_kernel(
ulint err;
mtr_t mtr;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */
mutex_exit(&kernel_mutex);
......@@ -254,7 +256,9 @@ row_vers_must_preserve_del_marked(
mtr_t* mtr) /* in: mtr holding the latch on the clustered index
record; it will also hold the latch on purge_view */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */
mtr_s_lock(&(purge_sys->latch), mtr);
......@@ -302,7 +306,9 @@ row_vers_old_has_index_entry(
ut_ad(mtr_memo_contains(mtr, buf_block_align(rec), MTR_MEMO_PAGE_X_FIX)
|| mtr_memo_contains(mtr, buf_block_align(rec),
MTR_MEMO_PAGE_S_FIX));
#ifdef UNIV_SYNC_DEBUG
ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */
mtr_s_lock(&(purge_sys->latch), mtr);
clust_index = dict_table_get_first_index(index->table);
......@@ -411,7 +417,9 @@ row_vers_build_for_consistent_read(
ut_ad(mtr_memo_contains(mtr, buf_block_align(rec), MTR_MEMO_PAGE_X_FIX)
|| mtr_memo_contains(mtr, buf_block_align(rec),
MTR_MEMO_PAGE_S_FIX));
#ifdef UNIV_SYNC_DEBUG
ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(!read_view_sees_trx_id(view, row_get_rec_trx_id(rec, index)));
rw_lock_s_lock(&(purge_sys->latch));
......
......@@ -83,7 +83,9 @@ srv_que_task_enqueue_low(
{
ut_ad(thr);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
UT_LIST_ADD_LAST(queue, srv_sys->tasks, thr);
......
......@@ -606,7 +606,9 @@ srv_suspend_thread(void)
ulint slot_no;
ulint type;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
slot_no = thr_local_get_slot_no(os_thread_get_curr_id());
......@@ -656,7 +658,9 @@ srv_release_threads(
ut_ad(type >= SRV_WORKER);
ut_ad(type <= SRV_MASTER);
ut_ad(n > 0);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
for (i = 0; i < OS_THREAD_MAX_N; i++) {
......@@ -1148,7 +1152,9 @@ srv_table_reserve_slot_for_mysql(void)
srv_slot_t* slot;
ulint i;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
i = 0;
slot = srv_mysql_table + i;
......@@ -1213,7 +1219,9 @@ srv_suspend_mysql_thread(
ibool had_dict_lock = FALSE;
ibool was_declared_inside_innodb = FALSE;
#ifdef UNIV_SYNC_DEBUG
ut_ad(!mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
trx = thr_get_trx(thr);
......@@ -1332,7 +1340,9 @@ srv_release_mysql_thread_if_suspended(
srv_slot_t* slot;
ulint i;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
for (i = 0; i < OS_THREAD_MAX_N; i++) {
......
......@@ -100,6 +100,7 @@ struct sync_array_struct {
since creation of the array */
};
#ifdef UNIV_SYNC_DEBUG
/**********************************************************************
This function is called only in the debug version. Detects a deadlock
of one or more threads because of waits of semaphores. */
......@@ -113,6 +114,7 @@ sync_array_detect_deadlock(
sync_cell_t* start, /* in: cell where recursive search started */
sync_cell_t* cell, /* in: cell to search */
ulint depth); /* in: recursion depth */
#endif /* UNIV_SYNC_DEBUG */
/*********************************************************************
Gets the nth cell in array. */
......@@ -464,12 +466,17 @@ sync_array_cell_print(
mutex = cell->old_wait_mutex;
buf += sprintf(buf,
"Mutex at %lx created file %s line %lu, lock var %lu\n",
(ulint)mutex, mutex->cfile_name, mutex->cline,
mutex->lock_word);
buf += sprintf(buf,
"Last time reserved in file %s line %lu, waiters flag %lu\n",
mutex->file_name, mutex->line, mutex->waiters);
"Mutex at %p created file %s line %lu, lock var %lu\n"
#ifdef UNIV_SYNC_DEBUG
"Last time reserved in file %s line %lu, "
#endif /* UNIV_SYNC_DEBUG */
"waiters flag %lu\n",
mutex, mutex->cfile_name, mutex->cline,
mutex->lock_word,
#ifdef UNIV_SYNC_DEBUG
mutex->file_name, mutex->line,
#endif /* UNIV_SYNC_DEBUG */
mutex->waiters);
} else if (type == RW_LOCK_EX || type == RW_LOCK_SHARED) {
......@@ -518,6 +525,7 @@ sync_array_cell_print(
}
}
#ifdef UNIV_SYNC_DEBUG
/**********************************************************************
Looks for a cell with the given thread id. */
static
......@@ -689,7 +697,6 @@ sync_array_detect_deadlock(
sync_array_cell_print(buf, cell);
printf("rw-lock %lx %s ", (ulint) lock, buf);
rw_lock_debug_print(debug);
return(TRUE);
}
}
......@@ -739,6 +746,7 @@ sync_array_detect_deadlock(
return(TRUE); /* Execution never reaches this line: for compiler
fooling only */
}
#endif /* UNIV_SYNC_DEBUG */
/**********************************************************************
Determines if we can wake up the thread waiting for a semaphore. */
......
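The sync_array_cell_print() hunk above builds one printf call whose format string and argument list both gain extra pieces only under UNIV_SYNC_DEBUG, using adjacent string literals. A stand-alone sketch of that technique with simplified field names:

#include <stdio.h>

typedef unsigned long	ulint;

typedef struct {
	ulint		lock_word;
	const char*	cfile_name;
	ulint		cline;
#ifdef UNIV_SYNC_DEBUG
	const char*	file_name;
	ulint		line;
#endif /* UNIV_SYNC_DEBUG */
	ulint		waiters;
} mutex_sketch_t;

static void
mutex_print_sketch(mutex_sketch_t* mutex)
{
	printf("Mutex at %p created file %s line %lu, lock var %lu\n"
#ifdef UNIV_SYNC_DEBUG
	       "Last time reserved in file %s line %lu, "
#endif /* UNIV_SYNC_DEBUG */
	       "waiters flag %lu\n",
	       (void*) mutex, mutex->cfile_name, mutex->cline,
	       mutex->lock_word,
#ifdef UNIV_SYNC_DEBUG
	       mutex->file_name, mutex->line,
#endif /* UNIV_SYNC_DEBUG */
	       mutex->waiters);
}

int
main(void)
{
	mutex_sketch_t	m = {0, __FILE__, (ulint) __LINE__,
#ifdef UNIV_SYNC_DEBUG
			     "not yet reserved", 0,
#endif /* UNIV_SYNC_DEBUG */
			     0};

	mutex_print_sketch(&m);
	return(0);
}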
......@@ -31,6 +31,7 @@ ulint rw_x_exit_count = 0;
rw_lock_list_t rw_lock_list;
mutex_t rw_lock_list_mutex;
#ifdef UNIV_SYNC_DEBUG
/* The global mutex which protects debug info lists of all rw-locks.
To modify the debug info list of an rw-lock, this mutex has to be
acquired in addition to the mutex protecting the lock. */
......@@ -76,6 +77,7 @@ rw_lock_debug_free(
{
mem_free(info);
}
#endif /* UNIV_SYNC_DEBUG */
/**********************************************************************
Creates, or rather, initializes an rw-lock object in a specified memory
......@@ -107,10 +109,12 @@ rw_lock_create_func(
lock->writer_is_wait_ex = FALSE;
#ifdef UNIV_SYNC_DEBUG
UT_LIST_INIT(lock->debug_list);
lock->magic_n = RW_LOCK_MAGIC_N;
lock->level = SYNC_LEVEL_NONE;
#endif /* UNIV_SYNC_DEBUG */
lock->magic_n = RW_LOCK_MAGIC_N;
lock->cfile_name = cfile_name;
lock->cline = cline;
......@@ -307,8 +311,9 @@ rw_lock_x_lock_low(
char* file_name,/* in: file name where lock requested */
ulint line) /* in: line where requested */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(rw_lock_get_mutex(lock)));
#endif /* UNIV_SYNC_DEBUG */
if (rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED) {
if (rw_lock_get_reader_count(lock) == 0) {
......@@ -516,6 +521,7 @@ rw_lock_x_lock_func(
goto lock_loop;
}
#ifdef UNIV_SYNC_DEBUG
/**********************************************************************
Acquires the debug mutex. We cannot use the mutex defined in sync0sync,
because the debug mutex is also acquired in sync0arr while holding the OS
......@@ -641,6 +647,7 @@ rw_lock_remove_debug_info(
ut_error;
}
#endif /* UNIV_SYNC_DEBUG */
/**********************************************************************
Sets the rw-lock latching level field. */
......@@ -654,6 +661,7 @@ rw_lock_set_level(
lock->level = level;
}
#ifdef UNIV_SYNC_DEBUG
/**********************************************************************
Checks if the thread has locked the rw-lock in the specified mode, with
the pass value == 0. */
......@@ -671,9 +679,6 @@ rw_lock_own(
ut_ad(lock);
ut_ad(rw_lock_validate(lock));
#ifndef UNIV_SYNC_DEBUG
ut_error;
#endif
mutex_enter(&(lock->mutex));
info = UT_LIST_GET_FIRST(lock->debug_list);
......@@ -696,6 +701,7 @@ rw_lock_own(
return(FALSE);
}
#endif /* UNIV_SYNC_DEBUG */
/**********************************************************************
Checks if somebody has locked the rw-lock in the specified mode. */
......@@ -732,6 +738,7 @@ rw_lock_is_locked(
return(ret);
}
#ifdef UNIV_SYNC_DEBUG
/*******************************************************************
Prints debug info of currently locked rw-locks. */
......@@ -739,8 +746,6 @@ void
rw_lock_list_print_info(void)
/*=========================*/
{
#ifndef UNIV_SYNC_DEBUG
#else
rw_lock_t* lock;
ulint count = 0;
rw_lock_debug_t* info;
......@@ -784,7 +789,6 @@ rw_lock_list_print_info(void)
printf("Total number of rw-locks %ld\n", count);
mutex_exit(&rw_lock_list_mutex);
#endif
}
/*******************************************************************
......@@ -795,11 +799,6 @@ rw_lock_print(
/*==========*/
rw_lock_t* lock __attribute__((unused))) /* in: rw-lock */
{
#ifndef UNIV_SYNC_DEBUG
printf(
"Sorry, cannot give rw-lock info in non-debug version!\n");
#else
ulint count = 0;
rw_lock_debug_t* info;
printf("-------------\n");
......@@ -822,7 +821,6 @@ rw_lock_print(
info = UT_LIST_GET_NEXT(list, info);
}
}
#endif
}
/*************************************************************************
......@@ -862,12 +860,6 @@ ulint
rw_lock_n_locked(void)
/*==================*/
{
#ifndef UNIV_SYNC_DEBUG
printf(
"Sorry, cannot give rw-lock info in non-debug version!\n");
ut_error;
return(0);
#else
rw_lock_t* lock;
ulint count = 0;
......@@ -890,5 +882,5 @@ rw_lock_n_locked(void)
mutex_exit(&rw_lock_list_mutex);
return(count);
#endif
}
#endif /* UNIV_SYNC_DEBUG */
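In the sync0rw.c hunks above, the introspection functions are now compiled only under UNIV_SYNC_DEBUG instead of keeping runtime "Sorry, cannot give rw-lock info in non-debug version!" stubs. A minimal sketch of what that leaves callers to do; the function name and messages are placeholders:

#include <stdio.h>

#ifdef UNIV_SYNC_DEBUG
/* Exists only in sync-debug builds; callers must guard their calls. */
static unsigned long
rw_lock_n_locked_sketch(void)
{
	return(0);	/* the real version walks the rw-lock list */
}
#endif /* UNIV_SYNC_DEBUG */

int
main(void)
{
#ifdef UNIV_SYNC_DEBUG
	printf("locked rw-locks: %lu\n", rw_lock_n_locked_sketch());
#else
	/* The old code reached a runtime stub here; after the change the
	call site is simply not compiled. */
	puts("rw-lock introspection not built in");
#endif /* UNIV_SYNC_DEBUG */
	return(0);
}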
......@@ -188,8 +188,10 @@ mutex_create_func(
#endif
mutex_set_waiters(mutex, 0);
mutex->magic_n = MUTEX_MAGIC_N;
#ifdef UNIV_SYNC_DEBUG
mutex->line = 0;
mutex->file_name = (char *) "not yet reserved";
#endif /* UNIV_SYNC_DEBUG */
mutex->level = SYNC_LEVEL_NONE;
mutex->cfile_name = cfile_name;
mutex->cline = cline;
......@@ -253,9 +255,11 @@ mutex_enter_nowait(
/*===============*/
/* out: 0 if succeed, 1 if not */
mutex_t* mutex, /* in: pointer to mutex */
char* file_name, /* in: file name where mutex
char* file_name __attribute__((unused)),
/* in: file name where mutex
requested */
ulint line) /* in: line where requested */
ulint line __attribute__((unused)))
/* in: line where requested */
{
ut_ad(mutex_validate(mutex));
......@@ -264,9 +268,6 @@ mutex_enter_nowait(
#ifdef UNIV_SYNC_DEBUG
mutex_set_debug_info(mutex, file_name, line);
#endif
mutex->file_name = file_name;
mutex->line = line;
return(0); /* Succeeded! */
}
......@@ -366,9 +367,6 @@ mutex_spin_wait(
mutex_set_debug_info(mutex, file_name, line);
#endif
mutex->file_name = file_name;
mutex->line = line;
return;
}
......@@ -413,9 +411,6 @@ mutex_spin_wait(
mutex_set_debug_info(mutex, file_name, line);
#endif
mutex->file_name = file_name;
mutex->line = line;
if (srv_print_latch_waits) {
printf(
"Thread %lu spin wait succeeds at 2: mutex at %lx\n",
......@@ -465,6 +460,7 @@ mutex_signal_object(
sync_array_signal_object(sync_primary_wait_array, mutex);
}
#ifdef UNIV_SYNC_DEBUG
/**********************************************************************
Sets the debug information for a reserved mutex. */
......@@ -502,7 +498,8 @@ mutex_get_debug_info(
*file_name = mutex->file_name;
*line = mutex->line;
*thread_id = mutex->thread_id;
}
}
#endif /* UNIV_SYNC_DEBUG */
/**********************************************************************
Sets the mutex latching level field. */
......@@ -516,6 +513,7 @@ mutex_set_level(
mutex->level = level;
}
#ifdef UNIV_SYNC_DEBUG
/**********************************************************************
Checks that the current thread owns the mutex. Works only in the debug
version. */
......@@ -548,8 +546,6 @@ void
mutex_list_print_info(void)
/*=======================*/
{
#ifndef UNIV_SYNC_DEBUG
#else
mutex_t* mutex;
char* file_name;
ulint line;
......@@ -582,7 +578,6 @@ mutex_list_print_info(void)
printf("Total number of mutexes %ld\n", count);
mutex_exit(&mutex_list_mutex);
#endif
}
/**********************************************************************
......@@ -592,12 +587,6 @@ ulint
mutex_n_reserved(void)
/*==================*/
{
#ifndef UNIV_SYNC_DEBUG
printf("Sorry, cannot give mutex info in non-debug version!\n");
ut_error;
return(0);
#else
mutex_t* mutex;
ulint count = 0;
......@@ -620,7 +609,6 @@ mutex_n_reserved(void)
return(count - 1); /* Subtract one, because this function itself
was holding one mutex (mutex_list_mutex) */
#endif
}
/**********************************************************************
......@@ -631,19 +619,9 @@ ibool
sync_all_freed(void)
/*================*/
{
#ifdef UNIV_SYNC_DEBUG
if (mutex_n_reserved() + rw_lock_n_locked() == 0) {
return(TRUE);
} else {
return(FALSE);
}
#else
ut_error;
return(FALSE);
#endif
return(mutex_n_reserved() + rw_lock_n_locked() == 0);
}
#endif /* UNIV_SYNC_DEBUG */
/**********************************************************************
Gets the value in the nth slot in the thread level arrays. */
......@@ -740,9 +718,6 @@ sync_thread_levels_g(
thread */
ulint limit) /* in: level limit */
{
char* file_name;
ulint line;
os_thread_id_t thread_id;
sync_level_t* slot;
rw_lock_t* lock;
mutex_t* mutex;
......@@ -767,18 +742,28 @@ sync_thread_levels_g(
mutex->cline);
if (mutex_get_lock_word(mutex) != 0) {
#ifdef UNIV_SYNC_DEBUG
char* file_name;
ulint line;
os_thread_id_t thread_id;
mutex_get_debug_info(mutex,
&file_name, &line, &thread_id);
printf("InnoDB: Locked mutex: addr %lx thread %ld file %s line %ld\n",
(ulint)mutex, os_thread_pf(thread_id),
file_name, line);
fprintf(stderr,
"InnoDB: Locked mutex: addr %p thread %ld file %s line %ld\n",
mutex, os_thread_pf(thread_id), file_name, line);
#else /* UNIV_SYNC_DEBUG */
fprintf(stderr,
"InnoDB: Locked mutex: addr %p\n", mutex);
#endif /* UNIV_SYNC_DEBUG */
} else {
printf("Not locked\n");
fputs("Not locked\n", stderr);
}
} else {
#ifdef UNIV_SYNC_DEBUG
rw_lock_print(lock);
#endif /* UNIV_SYNC_DEBUG */
}
return(FALSE);
......@@ -918,7 +903,9 @@ sync_thread_add_level(
if ((latch == (void*)&sync_thread_mutex)
|| (latch == (void*)&mutex_list_mutex)
#ifdef UNIV_SYNC_DEBUG
|| (latch == (void*)&rw_lock_debug_mutex)
#endif /* UNIV_SYNC_DEBUG */
|| (latch == (void*)&rw_lock_list_mutex)) {
return;
......@@ -1098,7 +1085,9 @@ sync_thread_reset_level(
if ((latch == (void*)&sync_thread_mutex)
|| (latch == (void*)&mutex_list_mutex)
#ifdef UNIV_SYNC_DEBUG
|| (latch == (void*)&rw_lock_debug_mutex)
#endif /* UNIV_SYNC_DEBUG */
|| (latch == (void*)&rw_lock_list_mutex)) {
return(FALSE);
......@@ -1184,11 +1173,13 @@ sync_init(void)
mutex_create(&rw_lock_list_mutex);
mutex_set_level(&rw_lock_list_mutex, SYNC_NO_ORDER_CHECK);
#ifdef UNIV_SYNC_DEBUG
mutex_create(&rw_lock_debug_mutex);
mutex_set_level(&rw_lock_debug_mutex, SYNC_NO_ORDER_CHECK);
rw_lock_debug_event = os_event_create(NULL);
rw_lock_debug_waiters = FALSE;
#endif /* UNIV_SYNC_DEBUG */
}
/**********************************************************************
......@@ -1250,9 +1241,11 @@ sync_print(
char* buf, /* in/out: buffer where to print */
char* buf_end) /* in: buffer end */
{
#ifdef UNIV_SYNC_DEBUG
mutex_list_print_info();
rw_lock_list_print_info();
#endif /* UNIV_SYNC_DEBUG */
sync_array_print_info(buf, buf_end, sync_primary_wait_array);
......
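The sync_all_freed() hunk above replaces the guarded if/else with a direct return of the comparison, now that the whole function is compiled only under UNIV_SYNC_DEBUG. A tiny sketch of the same shape, with placeholder counters standing in for mutex_n_reserved() and rw_lock_n_locked():

#include <stdio.h>

typedef int	ibool;

static unsigned long mutex_n_reserved_sketch(void)	{ return(0); }
static unsigned long rw_lock_n_locked_sketch(void)	{ return(0); }

static ibool
sync_all_freed_sketch(void)
{
	/* Equivalent to the old if/else that returned TRUE or FALSE. */
	return(mutex_n_reserved_sketch() + rw_lock_n_locked_sketch() == 0);
}

int
main(void)
{
	printf("all freed: %d\n", sync_all_freed_sketch());
	return(0);
}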
......@@ -65,7 +65,9 @@ thr_local_get(
try_again:
ut_ad(thr_local_hash);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&thr_local_mutex));
#endif /* UNIV_SYNC_DEBUG */
/* Look for the local struct in the hash table */
......
......@@ -45,7 +45,9 @@ trx_purge_update_undo_must_exist(
the undo log still exists in the system */
dulint trx_id) /* in: transaction id */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */
if (!read_view_sees_trx_id(purge_sys->view, trx_id)) {
......@@ -195,7 +197,9 @@ void
trx_purge_sys_create(void)
/*======================*/
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
purge_sys = mem_alloc(sizeof(trx_purge_t));
......@@ -258,7 +262,9 @@ trx_purge_add_update_undo_to_history(
ut_ad(undo);
rseg = undo->rseg;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(rseg->mutex)));
#endif /* UNIV_SYNC_DEBUG */
rseg_header = trx_rsegf_get(rseg->space, rseg->page_no, mtr);
......@@ -334,7 +340,9 @@ trx_purge_free_segment(
/* printf("Freeing an update undo log segment\n"); */
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(purge_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
loop:
mtr_start(&mtr);
mutex_enter(&(rseg->mutex));
......@@ -430,7 +438,9 @@ trx_purge_truncate_rseg_history(
ulint n_removed_logs = 0;
mtr_t mtr;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(purge_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
mtr_start(&mtr);
mutex_enter(&(rseg->mutex));
......@@ -516,7 +526,9 @@ trx_purge_truncate_history(void)
dulint limit_trx_no;
dulint limit_undo_no;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(purge_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
trx_purge_arr_get_biggest(purge_sys->arr, &limit_trx_no,
&limit_undo_no);
......@@ -556,7 +568,9 @@ trx_purge_truncate_if_arr_empty(void)
/*=================================*/
/* out: TRUE if array empty */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(purge_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
if (purge_sys->arr->n_used == 0) {
......@@ -585,7 +599,9 @@ trx_purge_rseg_get_next_history_log(
ibool del_marks;
mtr_t mtr;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(purge_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
mutex_enter(&(rseg->mutex));
......@@ -665,7 +681,9 @@ trx_purge_choose_next_log(void)
ulint offset = 0; /* remove warning (??? bug ???) */
mtr_t mtr;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(purge_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(purge_sys->next_stored == FALSE);
rseg = UT_LIST_GET_FIRST(trx_sys->rseg_list);
......@@ -766,7 +784,9 @@ trx_purge_get_next_rec(
ulint cmpl_info;
mtr_t mtr;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(purge_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(purge_sys->next_stored);
space = purge_sys->rseg->space;
......
......@@ -1069,7 +1069,9 @@ trx_undo_report_row_operation(
IB__FILE__, __LINE__,
&mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(undo_page, SYNC_TRX_UNDO_PAGE);
#endif /* UNIV_SYNC_DEBUG */
if (op_type == TRX_UNDO_INSERT_OP) {
offset = trx_undo_page_report_insert(undo_page, trx,
......@@ -1196,7 +1198,9 @@ trx_undo_get_undo_rec(
trx_undo_rec_t** undo_rec, /* out, own: copy of the record */
mem_heap_t* heap) /* in: memory heap where copied */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */
if (!trx_purge_update_undo_must_exist(trx_id)) {
......@@ -1256,7 +1260,9 @@ trx_undo_prev_version_build(
ulint i;
char err_buf[1000];
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(mtr_memo_contains(index_mtr, buf_block_align(index_rec),
MTR_MEMO_PAGE_S_FIX) ||
mtr_memo_contains(index_mtr, buf_block_align(index_rec),
......
......@@ -676,8 +676,10 @@ trx_roll_try_truncate(
dulint limit;
dulint biggest;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(trx->undo_mutex)));
ut_ad(mutex_own(&((trx->rseg)->mutex)));
#endif /* UNIV_SYNC_DEBUG */
trx->pages_undone = 0;
......@@ -720,7 +722,9 @@ trx_roll_pop_top_rec(
trx_undo_rec_t* prev_rec;
page_t* prev_rec_page;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(trx->undo_mutex)));
#endif /* UNIV_SYNC_DEBUG */
undo_page = trx_undo_page_get_s_latched(undo->space,
undo->top_page_no, mtr);
......@@ -944,7 +948,9 @@ trx_rollback(
que_thr_t* thr;
/* que_thr_t* thr2; */
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
ut_ad((trx->undo_no_arr == NULL) || ((trx->undo_no_arr)->n_used == 0));
/* Initialize the rollback field in the transaction */
......@@ -1013,7 +1019,9 @@ trx_roll_graph_build(
que_thr_t* thr;
/* que_thr_t* thr2; */
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
heap = mem_heap_create(512);
fork = que_fork_create(NULL, NULL, QUE_FORK_ROLLBACK, heap);
......@@ -1040,7 +1048,9 @@ trx_finish_error_processing(
trx_sig_t* sig;
trx_sig_t* next_sig;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
sig = UT_LIST_GET_FIRST(trx->signals);
......@@ -1073,7 +1083,9 @@ trx_finish_partial_rollback_off_kernel(
{
trx_sig_t* sig;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
sig = UT_LIST_GET_FIRST(trx->signals);
......@@ -1104,7 +1116,9 @@ trx_finish_rollback_off_kernel(
trx_sig_t* sig;
trx_sig_t* next_sig;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
ut_a(trx->undo_no_arr == NULL || trx->undo_no_arr->n_used == 0);
......
......@@ -60,7 +60,9 @@ trx_rseg_header_create(
page_t* page;
ut_ad(mtr);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(mtr_memo_contains(mtr, fil_space_get_latch(space),
MTR_MEMO_X_LOCK));
sys_header = trx_sysf_get(mtr);
......@@ -81,7 +83,9 @@ trx_rseg_header_create(
return(FIL_NULL);
}
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(page, SYNC_RSEG_HEADER_NEW);
#endif /* UNIV_SYNC_DEBUG */
page_no = buf_frame_get_page_no(page);
......@@ -132,7 +136,9 @@ trx_rseg_mem_create(
fil_addr_t node_addr;
ulint sum_of_undo_sizes;
ut_ad(mutex_own(&kernel_mutex));
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
rseg = mem_alloc(sizeof(trx_rseg_t));
......
......@@ -56,7 +56,9 @@ trx_sys_mark_downgraded_from_4_1_1(void)
mtr_start(&mtr);
page = buf_page_get(TRX_SYS_SPACE, TRX_SYS_PAGE_NO, RW_X_LATCH, &mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(page, SYNC_NO_ORDER_CHECK);
#endif /* UNIV_SYNC_DEBUG */
doublewrite = page + TRX_SYS_DOUBLEWRITE;
......@@ -169,7 +171,9 @@ trx_sys_create_doublewrite_buf(void)
mtr_start(&mtr);
page = buf_page_get(TRX_SYS_SPACE, TRX_SYS_PAGE_NO, RW_X_LATCH, &mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(page, SYNC_NO_ORDER_CHECK);
#endif /* UNIV_SYNC_DEBUG */
doublewrite = page + TRX_SYS_DOUBLEWRITE;
......@@ -228,7 +232,9 @@ trx_sys_create_doublewrite_buf(void)
/* fseg_create acquires a second latch on the page,
therefore we must declare it: */
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(page2, SYNC_NO_ORDER_CHECK);
#endif /* UNIV_SYNC_DEBUG */
if (page2 == NULL) {
fprintf(stderr,
......@@ -271,7 +277,9 @@ trx_sys_create_doublewrite_buf(void)
new_page = buf_page_get(TRX_SYS_SPACE, page_no,
RW_X_LATCH, &mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(new_page, SYNC_NO_ORDER_CHECK);
#endif /* UNIV_SYNC_DEBUG */
/* Make a dummy change to the page to ensure it will
be written to disk in a flush */
......@@ -490,7 +498,9 @@ trx_in_trx_list(
{
trx_t* trx;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(kernel_mutex)));
#endif /* UNIV_SYNC_DEBUG */
trx = UT_LIST_GET_FIRST(trx_sys->trx_list);
......@@ -517,7 +527,9 @@ trx_sys_flush_max_trx_id(void)
trx_sysf_t* sys_header;
mtr_t mtr;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
mtr_start(&mtr);
......@@ -716,7 +728,9 @@ trx_sysf_rseg_find_free(
ulint page_no;
ulint i;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(kernel_mutex)));
#endif /* UNIV_SYNC_DEBUG */
sys_header = trx_sysf_get(mtr);
......@@ -762,7 +776,9 @@ trx_sysf_create(
mtr);
ut_a(buf_frame_get_page_no(page) == TRX_SYS_PAGE_NO);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(page, SYNC_TRX_SYS_HEADER);
#endif /* UNIV_SYNC_DEBUG */
sys_header = trx_sysf_get(mtr);
......
......@@ -73,7 +73,9 @@ trx_create(
{
trx_t* trx;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
trx = mem_alloc(sizeof(trx_t));
......@@ -239,7 +241,9 @@ trx_free(
{
char err_buf[1000];
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
if (trx->declared_to_be_inside_innodb) {
ut_print_timestamp(stderr);
......@@ -345,7 +349,9 @@ trx_list_insert_ordered(
{
trx_t* trx2;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
trx2 = UT_LIST_GET_FIRST(trx_sys->trx_list);
......@@ -507,7 +513,9 @@ trx_assign_rseg(void)
{
trx_rseg_t* rseg = trx_sys->latest_rseg;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
loop:
/* Get next rseg in a round-robin fashion */
......@@ -544,7 +552,9 @@ trx_start_low(
{
trx_rseg_t* rseg;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(trx->rseg == NULL);
if (trx->type == TRX_PURGE) {
......@@ -619,7 +629,9 @@ trx_commit_off_kernel(
ibool must_flush_log = FALSE;
mtr_t mtr;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
rseg = trx->rseg;
......@@ -713,7 +725,9 @@ trx_commit_off_kernel(
}
ut_ad(trx->conc_state == TRX_ACTIVE);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
/* The following assignment makes the transaction committed in memory
and makes its changes to data visible to other transactions.
......@@ -893,7 +907,9 @@ trx_handle_commit_sig_off_kernel(
trx_sig_t* sig;
trx_sig_t* next_sig;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
trx->que_state = TRX_QUE_COMMITTING;
......@@ -933,7 +949,9 @@ trx_end_lock_wait(
{
que_thr_t* thr;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(trx->que_state == TRX_QUE_LOCK_WAIT);
thr = UT_LIST_GET_FIRST(trx->wait_thrs);
......@@ -960,7 +978,9 @@ trx_lock_wait_to_suspended(
{
que_thr_t* thr;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(trx->que_state == TRX_QUE_LOCK_WAIT);
thr = UT_LIST_GET_FIRST(trx->wait_thrs);
......@@ -988,7 +1008,9 @@ trx_sig_reply_wait_to_suspended(
trx_sig_t* sig;
que_thr_t* thr;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
sig = UT_LIST_GET_FIRST(trx->reply_signals);
......@@ -1021,7 +1043,9 @@ trx_sig_is_compatible(
{
trx_sig_t* sig;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
if (UT_LIST_GET_LEN(trx->signals) == 0) {
......@@ -1109,7 +1133,9 @@ trx_sig_send(
trx_t* receiver_trx;
ut_ad(trx);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
if (!trx_sig_is_compatible(trx, type, sender)) {
/* The signal is not compatible with the other signals in
......@@ -1188,7 +1214,9 @@ trx_end_signal_handling(
/*====================*/
trx_t* trx) /* in: trx */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(trx->handling_signals == TRUE);
trx->handling_signals = FALSE;
......@@ -1222,7 +1250,9 @@ trx_sig_start_handle(
we can process immediately */
ut_ad(trx);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
if (trx->handling_signals && (UT_LIST_GET_LEN(trx->signals) == 0)) {
......@@ -1323,7 +1353,9 @@ trx_sig_reply(
trx_t* receiver_trx;
ut_ad(sig);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
if (sig->receiver != NULL) {
ut_ad((sig->receiver)->state == QUE_THR_SIG_REPLY_WAIT);
......@@ -1351,7 +1383,9 @@ trx_sig_remove(
trx_sig_t* sig) /* in, own: signal */
{
ut_ad(trx && sig);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(sig->receiver == NULL);
......
......@@ -390,7 +390,9 @@ trx_undo_seg_create(
ibool success;
ut_ad(mtr && id && rseg_hdr);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(rseg->mutex)));
#endif /* UNIV_SYNC_DEBUG */
/*
if (type == TRX_UNDO_INSERT) {
printf("Creating insert undo log segment\n");
......@@ -430,7 +432,9 @@ trx_undo_seg_create(
return(NULL);
}
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(undo_page, SYNC_TRX_UNDO_PAGE);
#endif /* UNIV_SYNC_DEBUG */
page_hdr = undo_page + TRX_UNDO_PAGE_HDR;
seg_hdr = undo_page + TRX_UNDO_SEG_HDR;
......@@ -735,13 +739,14 @@ trx_undo_add_page(
ulint page_no;
ibool success;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(trx->undo_mutex)));
ut_ad(!mutex_own(&kernel_mutex));
ut_ad(mutex_own(&(trx->rseg->mutex)));
#endif /* UNIV_SYNC_DEBUG */
rseg = trx->rseg;
ut_ad(mutex_own(&(rseg->mutex)));
if (rseg->curr_size == rseg->max_size) {
return(FIL_NULL);
......@@ -811,8 +816,10 @@ trx_undo_free_page(
UT_NOT_USED(hdr_offset);
ut_a(hdr_page_no != page_no);
#ifdef UNIV_SYNC_DEBUG
ut_ad(!mutex_own(&kernel_mutex));
ut_ad(mutex_own(&(rseg->mutex)));
#endif /* UNIV_SYNC_DEBUG */
undo_page = trx_undo_page_get(space, page_no, mtr);
......@@ -859,7 +866,9 @@ trx_undo_free_page_in_rollback(
ulint last_page_no;
ut_ad(undo->hdr_page_no != page_no);
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(trx->undo_mutex)));
#endif /* UNIV_SYNC_DEBUG */
last_page_no = trx_undo_free_page(undo->rseg, FALSE, undo->space,
undo->hdr_page_no, undo->hdr_offset,
......@@ -913,12 +922,13 @@ trx_undo_truncate_end(
trx_rseg_t* rseg;
mtr_t mtr;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(trx->undo_mutex)));
ut_ad(mutex_own(&(trx->rseg->mutex)));
#endif /* UNIV_SYNC_DEBUG */
rseg = trx->rseg;
ut_ad(mutex_own(&(rseg->mutex)));
for (;;) {
mtr_start(&mtr);
......@@ -992,7 +1002,9 @@ trx_undo_truncate_start(
ulint page_no;
mtr_t mtr;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(rseg->mutex)));
#endif /* UNIV_SYNC_DEBUG */
if (0 == ut_dulint_cmp(limit, ut_dulint_zero)) {
......@@ -1058,8 +1070,9 @@ trx_undo_seg_free(
while (!finished) {
mtr_start(&mtr);
#ifdef UNIV_SYNC_DEBUG
ut_ad(!mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
mutex_enter(&(rseg->mutex));
seg_header = trx_undo_page_get(undo->space, undo->hdr_page_no,
......@@ -1268,7 +1281,9 @@ trx_undo_mem_create(
{
trx_undo_t* undo;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(rseg->mutex)));
#endif /* UNIV_SYNC_DEBUG */
if (id >= TRX_RSEG_N_SLOTS) {
fprintf(stderr,
......@@ -1312,7 +1327,9 @@ trx_undo_mem_init_for_reuse(
is created */
ulint offset) /* in: undo log header byte offset on page */
{
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&((undo->rseg)->mutex)));
#endif /* UNIV_SYNC_DEBUG */
if (undo->id >= TRX_RSEG_N_SLOTS) {
fprintf(stderr, "InnoDB: Error: undo->id is %lu\n", undo->id);
......@@ -1370,7 +1387,9 @@ trx_undo_create(
trx_undo_t* undo;
page_t* undo_page;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(rseg->mutex)));
#endif /* UNIV_SYNC_DEBUG */
if (rseg->curr_size == rseg->max_size) {
......@@ -1421,7 +1440,9 @@ trx_undo_reuse_cached(
page_t* undo_page;
ulint offset;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(rseg->mutex)));
#endif /* UNIV_SYNC_DEBUG */
if (type == TRX_UNDO_INSERT) {
......@@ -1517,11 +1538,15 @@ trx_undo_assign_undo(
rseg = trx->rseg;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(trx->undo_mutex)));
#endif /* UNIV_SYNC_DEBUG */
mtr_start(&mtr);
#ifdef UNIV_SYNC_DEBUG
ut_ad(!mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
mutex_enter(&(rseg->mutex));
undo = trx_undo_reuse_cached(rseg, type, trx->id, &mtr);
......@@ -1626,8 +1651,9 @@ trx_undo_update_cleanup(
undo = trx->update_undo;
rseg = trx->rseg;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(rseg->mutex)));
#endif /* UNIV_SYNC_DEBUG */
trx_purge_add_update_undo_to_history(trx, undo_page, mtr);
UT_LIST_REMOVE(undo_list, rseg->update_undo_list, undo);
......@@ -1666,8 +1692,10 @@ trx_undo_update_cleanup_by_discard(
undo = trx->update_undo;
rseg = trx->rseg;
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(rseg->mutex)));
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(undo->size == 1);
ut_ad(undo->del_marks == FALSE);
ut_ad(UT_LIST_GET_LEN(trx_sys->view_list) == 1);
......
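Taken together, the guards added in these hunks assume a build where UNIV_DEBUG may be defined without UNIV_SYNC_DEBUG (the flags normally live in univ.i). A stand-alone sketch of how the two flags combine; the #error dependency check below is only an illustration of the intended relationship, not something this diff adds:

/* Illustration only: toggle the two defines to see what is compiled in. */
/* #define UNIV_DEBUG */
/* #define UNIV_SYNC_DEBUG */

#if defined(UNIV_SYNC_DEBUG) && !defined(UNIV_DEBUG)
#error "UNIV_SYNC_DEBUG is only useful together with UNIV_DEBUG (ut_ad)"
#endif

#include <stdio.h>

int
main(void)
{
#ifdef UNIV_DEBUG
	puts("UNIV_DEBUG: ut_ad() assertions compiled in");
#endif /* UNIV_DEBUG */
#ifdef UNIV_SYNC_DEBUG
	puts("UNIV_SYNC_DEBUG: buf_page_dbg_add_level(), mutex_own(),"
	     " rw_lock_own() compiled in");
#endif /* UNIV_SYNC_DEBUG */
#if !defined(UNIV_DEBUG) && !defined(UNIV_SYNC_DEBUG)
	puts("release build: no debug checks compiled in");
#endif
	return(0);
}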